source: release/1.7/source/lisp-kernel/ppc-exceptions.c @ 15267

/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
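
/* Worked out: 0x80000000>>20 == 0x00000800 and 0x80000000>>23 == 0x00000100.
   In the PPC convention of numbering bits from the most-significant end,
   these are MSR bits 20 (FE0) and 23 (FE1), which is exactly what the
   shift amounts encode; the combined mask is 0x00000900. */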
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);

#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);


int
page_size = 4096;

int
log2_page_size = 12;


/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
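
/* A sketch of the two instruction sequences recognized above (register
   names and sizes are illustrative, not taken from any particular
   compiled function):

       subf allocptr,rN,allocptr     ; variable-sized case: rN holds the
                                     ;  byte count to allocate
       <ALLOC_TRAP_INSTRUCTION>      ; traps if allocptr crossed the
                                     ;  current segment's limit

       addi allocptr,allocptr,-size  ; fixed-size case: size fits in a
                                     ;  signed 16-bit immediate
       <ALLOC_TRAP_INSTRUCTION>

   In the subf case the displacement is read back out of the register
   named by the RA field of the previous instruction; in the addi case
   it's the negation of that instruction's own immediate field. */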


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr & STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}


Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
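
/* An informal sketch of what the loop above builds (assuming deref(p,0)
   and deref(p,1) address a cons's cdr and car cells, as the cons struct
   used in finish_allocating_cons suggests): the conses are carved out of
   one contiguous block; the first cons's cdr is lisp_nil, each later
   cons's cdr points at its predecessor, and every car holds the initial
   element passed in arg_y.  The net effect is equivalent to
   (make-list nconses :initial-element initial-element), built without
   taking the allocation trap once per cons. */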

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(dbgout, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}
OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /* nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}
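
/* An informal sketch of the trap protocol handled above (the register
   assignments are read directly from the cases; the lisp-side framing is
   an assumption): compiled lisp code loads a GC_TRAP_FUNCTION_* selector
   into imm0 and an optional argument into imm1, then executes
   GC_TRAP_INSTRUCTION.  Results come back in imm0 (byte counts and
   thresholds) or arg_z (booleans, encoded as lisp_nil or
   lisp_nil + t_offset), and the PC is advanced past the trap before
   returning. */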


void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack pointed to by "reg" just overflowed its soft limit.
     Signal an error_stack_overflow condition; overflow traps will keep
     firing until all stacks are back well under their overflow limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception. */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
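
/* A worked example of the limit arithmetic above (numbers are
   illustrative, and a 64KB allocation quantum, i.e.
   log2_allocation_quantum == 16, is assumed): with a->active ==
   0x30001000 and need == 0x2000,
     align_to_power_of_2(0x30001000,16) == 0x30010000 and
     align_to_power_of_2(0x2000,16)     == 0x10000,
   so newlimit == 0x30020000.  The segment is handed to the thread by
   putting newlimit in allocptr and oldlimit in allocbase; allocation
   then decrements allocptr until it would cross allocbase, at which
   point the allocation trap fires. */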


void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, sp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(dbgout, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(dbgout, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(dbgout, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));
#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}


/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(dbgout, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}


protection_handler
*protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* Use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux, and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}


OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}
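
/* The lisp_frame fields these predicates rely on (inferred from their
   uses here; the full definition lives elsewhere in the kernel):
     backlink -- the next frame up the control stack; for an EABI-sized
                 lisp frame, backlink == this frame + 1
     savefn   -- the saved FN register: 0 or a fulltag_misc-tagged function
     savelr   -- the saved return address, compared above against the
                 LEXPR-return code addresses
     savevsp  -- the saved value-stack pointer, 0 only while an
                 unwind-protect cleanup form is running  */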


int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call.  We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif


/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
   the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}


OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}


/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational references,
  since they're pretty easy to identify.)

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}


OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l = xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}


int
altivec_present = 1;


/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  Clozure CL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum,
                       ExceptionInformation *xp,
                       TCR *tcr,
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  /* "delta" is a byte count; xpPC is an opcode-pointer-valued lvalue,
     so scale the byte count down to a word count before adding. */
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
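
/* A worked example of the translation above (all addresses and offsets
   are illustrative): suppose the trap PC is 0x104028, fn's code vector
   is the object whose tagged pointer is 0x104000, and misc_data_offset
   (the distance from that tagged pointer to the first instruction word)
   is 8.  Then index == (0x104028 - 0x104008) >> 2 == 8, and lisp is
   called back with the pair (fn, 8): the trap sits at the eighth
   instruction word of fn's code vector.  If the PC is in neither fn's
   nor nfn's code vector, the pair is (0, raw PC) instead. */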

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

#ifdef DARWIN
  enable_fp_exceptions();
#endif

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}


/* Callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "twllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(dbgout, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}


/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}


void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}

/* Decide whether a tw/td or twi/tdi instruction is conditional (might
   not trap) or unconditional; anything that isn't a trap instruction at
   all is left for the debugger. */

int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
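
/* Worked examples of the classification above (register numbers are
   illustrative):
     twlt r3,r4   TO_EQ clear: won't trap when r3 == r4, so conditional
                  (returns 1; these are lisp's runtime checks and are
                  passed to handle_trap).
     tweq r3,r3   TO_EQ set and RA == RB: always traps, so unconditional
                  (returns 0 and is left for the debugger).
     twi TO_LT|TO_GT|TO_EQ,r3,0
                  traps on less, equal, and greater: covers every outcome
                  of the signed compare, so also unconditional. */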

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}
1843
1844void
1845exit_signal_handler(TCR *tcr, int old_valence)
1846{
1847  sigset_t mask;
1848  sigfillset(&mask);
1849 
1850  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1851  tcr->valence = old_valence;
1852  tcr->pending_exception_context = NULL;
1853}
1854
1855
1856void
1857signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1858{
1859  xframe_list xframe_link;
1860
1861  if (!use_mach_exception_handling) {
1862   
1863    tcr = (TCR *) get_interrupt_tcr(false);
1864 
1865    /* The signal handler's entered with all signals (notably the
1866       thread_suspend signal) blocked.  Don't allow any other
1867       signal to preempt us until we've set the TCR's xframe
1868       slot to include the current exception context.
1869    */
1871   
1872    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1873  }
1874
1875  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1876    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1877    pthread_kill(pthread_self(), thread_suspend_signal);
1878  }
1879
1880 
1881  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1882  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
1883    char msg[512];
1884    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, (natural)xpPC(context), (natural)xpGPRvector(context));
1885    if (lisp_Debugger(context, info, signum, false, msg)) {
1886      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1887    }
1888  }
1889
1890  unlock_exception_lock_in_handler(tcr);
1891
1892  /* This thread now looks like a thread that was suspended while
1893     executing lisp code.  If some other thread gets the exception
1894     lock and GCs, the context (this thread's suspend_context) will
1895     be updated.  (That's only of concern if it happens before we
1896     can return to the kernel/to the Mach exception handler).
1897  */
1898  if (!use_mach_exception_handling) {
1899    exit_signal_handler(tcr, old_valence);
1900    raise_pending_interrupt(tcr);
1901  }
1902}
1903
1904/*
1905  If it looks like we're in the middle of an atomic operation, make
1906  it seem as if that operation is either complete or hasn't started
1907  yet.
1908
1909  The cases handled include:
1910
1911  a) storing into a newly-allocated lisp frame on the stack.
1912  b) marking a newly-allocated TSP frame as containing "raw" data.
1913  c) consing: the GC has its own ideas about how this should be
1914     handled, but other callers would be best advised to back
1915     up or move forward, according to whether we're in the middle
1916     of allocating a cons cell or allocating a uvector.
1917  d) a STMW to the vsp
1918  e) EGC write-barrier subprims.
1919*/
1920
1921extern opcode
1922  egc_write_barrier_start,
1923  egc_write_barrier_end, 
1924  egc_store_node_conditional, 
1925  egc_store_node_conditional_test,
1926  egc_set_hash_key,
1927  egc_gvset,
1928  egc_rplaca,
1929  egc_rplacd,
1930  egc_set_hash_key_conditional,
1931  egc_set_hash_key_conditional_test;
1932
1933
1934extern opcode ffcall_return_window, ffcall_return_window_end;
1935
1936void
1937pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1938{
1939  pc program_counter = xpPC(xp);
1940  opcode instr = *program_counter;
1941  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1942  LispObj cur_allocptr = xpGPR(xp, allocptr);
1943  int allocptr_tag = fulltag_of(cur_allocptr);
1944 
1945
1946
1947  if ((program_counter < &egc_write_barrier_end) && 
1948      (program_counter >= &egc_write_barrier_start)) {
1949    LispObj *ea = 0, val = 0, root = 0;
1950    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1951    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1952
1953    if (program_counter >= &egc_set_hash_key_conditional) {
1954      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1955          ((program_counter == &egc_set_hash_key_conditional_test) &&
1956           (! (xpCCR(xp) & 0x20000000)))) {
1957        return;
1958      }
1959      need_store = false;
1960      root = xpGPR(xp,arg_x);
1961      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1962      need_memoize_root = true;
1963    } else if (program_counter >= &egc_store_node_conditional) {
1964      if ((program_counter < &egc_store_node_conditional_test) ||
1965          ((program_counter == &egc_store_node_conditional_test) &&
1966           (! (xpCCR(xp) & 0x20000000)))) {
1967        /* The conditional store either hasn't been attempted yet, or
1968           has failed.  No need to adjust the PC, or do memoization. */
1969        return;
1970      }
1971      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1972      xpGPR(xp,arg_z) = t_value;
1973      need_store = false;
1974    } else if (program_counter >= &egc_set_hash_key) {
1975      root = xpGPR(xp,arg_x);
1976      val = xpGPR(xp,arg_z);
1977      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1978      need_memoize_root = true;
1979    } else if (program_counter >= &egc_gvset) {
1980      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1981      val = xpGPR(xp,arg_z);
1982    } else if (program_counter >= &egc_rplacd) {
1983      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1984      val = xpGPR(xp,arg_z);
1985    } else {                      /* egc_rplaca */
1986      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1987      val = xpGPR(xp,arg_z);
1988    }
1989    if (need_store) {
1990      *ea = val;
1991    }
1992    if (need_check_memo) {
1993      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1994      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1995          ((LispObj)ea < val)) {
1996        atomic_set_bit(refbits, bitnumber);
1997        if (need_memoize_root) {
1998          bitnumber = area_dnode(root, lisp_global(REF_BASE));
1999          atomic_set_bit(refbits, bitnumber);
2000        }
2001      }
2002    }
2003    set_xpPC(xp, xpLR(xp));
2004    return;
2005  }
2006
2007
2008  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
2009    LispObj tsp_val = xpGPR(xp,tsp);
2010   
2011    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
2012    adjust_exception_pc(xp, 4);
2013    return;
2014  }
2015 
2016  if (frame->backlink == (frame+1)) {
2017    if (
2018#ifdef PPC64
2019        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
2020        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
2021#else
2022        (major_opcode_p(instr, major_opcode_STW)) && 
2023#endif
2024        (RA_field(instr) == sp) &&
2025        /* There are a few places in the runtime that store into
2026           a previously-allocated frame atop the stack when
2027           throwing values around.  We only care about the case
2028           where the frame was newly allocated, in which case
2029           there must have been a CREATE_LISP_FRAME_INSTRUCTION
2030           a few instructions before the current program counter.
2031           (The whole point here is that a newly allocated frame
2032           might contain random values that we don't want the
2033           GC to see; a previously allocated frame should already
2034           be completely initialized.)
2035        */
2036        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
2037         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
2038         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
2039#ifdef PPC64
2040      int disp = DS_field(instr);
2041#else     
2042      int disp = D_field(instr);
2043#endif
2044
2045
2046      if (disp < (4*node_size)) {
2047#if 0
2048        fprintf(dbgout, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
2049#endif
2050        frame->savevsp = 0;
2051        if (disp < (3*node_size)) {
2052          frame->savelr = 0;
2053          if (disp == node_size) {
2054            frame->savefn = 0;
2055          }
2056        }
2057      }
2058      return;
2059    }
2060  }
2061
2062  if (allocptr_tag != tag_fixnum) {
2063    signed_natural disp = allocptr_displacement(xp);
2064
2065    if (disp) {
2066      /* Being architecturally "at" the alloc trap doesn't tell
2067         us much (in particular, it doesn't tell us whether
2068         or not the thread has committed to taking the trap
2069         and is waiting for the exception lock (or waiting
2070         for the Mach exception thread to tell it how bad
2071         things are) or is about to execute a conditional
2072         trap.)
2073         Regardless of which case applies, we want the
2074         other thread to take (or finish taking) the
2075         trap, and we don't want it to consider its
2076         current allocptr to be valid.
2077         The difference between this case (suspend other
2078         thread for GC) and the previous case (suspend
2079         current thread for interrupt) is solely a
2080         matter of what happens after we leave this
2081         function: some non-current thread will stay
2082         suspended until the GC finishes, then take
2083         (or start processing) the alloc trap.   The
2084         current thread will go off and do PROCESS-INTERRUPT
2085         or something, and may return from the interrupt
2086         and need to finish the allocation that got interrupted.
2087      */
2088
2089      if (alloc_disp) {
2090        *alloc_disp = disp;
2091        xpGPR(xp,allocptr) += disp;
2092        /* Leave the PC at the alloc trap.  When the interrupt
2093           handler returns, it'll decrement allocptr by disp
2094           and the trap may or may not be taken.
2095        */
2096      } else {
2097        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2098        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2099        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2100      }
2101    } else {
2102#ifdef DEBUG
2103      fprintf(dbgout, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2104#endif
2105      /* If we're already past the alloc_trap, finish allocating
2106         the object. */
2107      if (allocptr_tag == fulltag_cons) {
2108        finish_allocating_cons(xp);
2109#ifdef DEBUG
2110          fprintf(dbgout, "finish allocating cons in TCR = #x%x\n",
2111                  tcr);
2112#endif
2113      } else {
2114        if (allocptr_tag == fulltag_misc) {
2115#ifdef DEBUG
2116          fprintf(dbgout, "finish allocating uvector in TCR = #x%x\n",
2117                  tcr);
2118#endif
2119          finish_allocating_uvector(xp);
2120        } else {
2121          Bug(xp, "what's being allocated here ?");
2122        }
2123      }
2124      /* Whatever we finished allocating, reset allocptr/allocbase to
2125         VOID_ALLOCPTR */
2126      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2127    }
2128    return;
2129  }
2130
2131  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2132    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2133    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2134#if 0
2135        fprintf(dbgout, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2136#endif
2137
2138    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2139      deref(frame,idx) = 0;
2140    }
2141    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2142    return;
2143  }
2144
2145#ifndef PPC64
2146  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2147      (RA_field(instr) == vsp)) {
2148    int r;
2149    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2150   
2151    for (r = RS_field(instr); r <= 31; r++) {
2152      *vspptr++ = xpGPR(xp,r);
2153    }
2154    adjust_exception_pc(xp, 4);
2155  }
2156#endif
2157}
2158
2159void
2160interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2161{
2162  TCR *tcr = get_interrupt_tcr(false);
2163  if (tcr) {
2164    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2165      tcr->interrupt_pending = 1 << fixnumshift;
2166    } else {
2167      LispObj cmain = nrs_CMAIN.vcell;
2168
2169      if ((fulltag_of(cmain) == fulltag_misc) &&
2170          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2171        /*
2172           This thread can (allegedly) take an interrupt now.
2173           It's tricky to do that if we're executing
2174           foreign code (especially Linuxthreads code, much
2175           of which isn't reentrant.)
2176           If we're unwinding the stack, we also want to defer
2177           the interrupt.
2178        */
2179        if ((tcr->valence != TCR_STATE_LISP) ||
2180            (tcr->unwinding != 0)) {
2181          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2182        } else {
2183          xframe_list xframe_link;
2184          int old_valence;
2185          signed_natural disp=0;
2186         
2187          pc_luser_xp(context, tcr, &disp);
2188          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2189          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2190#ifdef DEBUG
2191          fprintf(dbgout, "[0x%x acquired exception lock for interrupt]\n",tcr);
2192#endif
2193          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2194          if (disp) {
2195            xpGPR(context,allocptr) -= disp;
2196          }
2197          unlock_exception_lock_in_handler(tcr);
2198#ifdef DEBUG
2199          fprintf(dbgout, "[0x%x released exception lock for interrupt]\n",tcr);
2200#endif
2201          exit_signal_handler(tcr, old_valence);
2202        }
2203      }
2204    }
2205  }
2206#ifdef DARWIN
2207    DarwinSigReturn(context);
2208#endif
2209}
2210
2211
2212
2213void
2214install_signal_handler(int signo, void *handler, unsigned flags)
2215{
2216  struct sigaction sa;
2217  int err;
2218 
2219  sa.sa_sigaction = (void *)handler;
2220  sigfillset(&sa.sa_mask);
2221  sa.sa_flags = SA_SIGINFO;
2222
2223  if (flags & RESTART_SYSCALLS)
2224    sa.sa_flags |= SA_RESTART;
2225  if (flags & RESERVE_FOR_LISP) {
2226    extern sigset_t user_signals_reserved;
2227    sigaddset(&user_signals_reserved, signo);
2228  }
2229
2230  err = sigaction(signo, &sa, NULL);
2231  if (err) {
2232    perror("sigaction");
2233    exit(1);
2234  }
2235}
2236
2237void
2238install_pmcl_exception_handlers()
2239{
2240#ifdef DARWIN
2241  extern Boolean use_mach_exception_handling;
2242#endif
2243
2244  Boolean install_signal_handlers_for_exceptions =
2245#ifdef DARWIN
2246    !use_mach_exception_handling
2247#else
2248    true
2249#endif
2250    ;
2251  if (install_signal_handlers_for_exceptions) {
2252    extern int no_sigtrap;
2253    install_signal_handler(SIGILL, (void *)signal_handler, RESERVE_FOR_LISP);
2254    if (no_sigtrap != 1) {
2255      install_signal_handler(SIGTRAP, (void *)signal_handler, RESERVE_FOR_LISP);
2256    }
2257    install_signal_handler(SIGBUS,  (void *)signal_handler, RESERVE_FOR_LISP);
2258    install_signal_handler(SIGSEGV, (void *)signal_handler, RESERVE_FOR_LISP);
2259    install_signal_handler(SIGFPE, (void *)signal_handler, RESERVE_FOR_LISP);
2260  }
2261 
2262  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2263                         (void *)interrupt_handler, RESERVE_FOR_LISP);
2264  signal(SIGPIPE, SIG_IGN);
2265}
2266
2267void
2268thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2269{
2270  TCR *tcr = get_tcr(false);
2271  area *a;
2272  sigset_t mask;
2273 
2274  sigemptyset(&mask);
2275
2276  if (tcr) {
2277    tcr->valence = TCR_STATE_FOREIGN;
2278    a = tcr->vs_area;
2279    if (a) {
2280      a->active = a->high;
2281    }
2282    a = tcr->ts_area;
2283    if (a) {
2284      a->active = a->high;
2285    }
2286    a = tcr->cs_area;
2287    if (a) {
2288      a->active = a->high;
2289    }
2290  }
2291 
2292  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2293  pthread_exit(NULL);
2294}
2295
2296void
2297thread_signal_setup()
2298{
2299  thread_suspend_signal = SIG_SUSPEND_THREAD;
2300  thread_kill_signal = SIG_KILL_THREAD;
2301
2302  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
2303                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
2304  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler,
2305                         RESERVE_FOR_LISP);
2306}
2307
2308
2309
2310void
2311unprotect_all_areas()
2312{
2313  protected_area_ptr p;
2314
2315  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2316    unprotect_area(p);
2317  }
2318}
2319
2320/*
2321  A binding subprim has just done "twlle limit_regno,idx_regno" and
2322  the trap's been taken.  Extend the tcr's tlb so that the index will
2323  be in bounds and the new limit will be on a page boundary, filling
2324  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2325  the tcr fields and the registers in the xp and return true if this
2326  all works, false otherwise.
2327
2328  Note that the tlb was allocated via malloc, so realloc can do some
2329  of the hard work.
2330*/
2331Boolean
2332extend_tcr_tlb(TCR *tcr, 
2333               ExceptionInformation *xp, 
2334               unsigned limit_regno,
2335               unsigned idx_regno)
2336{
2337  unsigned
2338    index = (unsigned) (xpGPR(xp,idx_regno)),
2339    old_limit = tcr->tlb_limit,
2340    new_limit = align_to_power_of_2(index+1,12),
2341    new_bytes = new_limit-old_limit;
2342  LispObj
2343    *old_tlb = tcr->tlb_pointer,
2344    *new_tlb = realloc(old_tlb, new_limit),
2345    *work;
2346
2347  if (new_tlb == NULL) {
2348    return false;
2349  }
2350 
2351  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2352
2353  while (new_bytes) {
2354    *work++ = no_thread_local_binding_marker;
2355    new_bytes -= sizeof(LispObj);
2356  }
2357  tcr->tlb_pointer = new_tlb;
2358  tcr->tlb_limit = new_limit;
2359  xpGPR(xp, limit_regno) = new_limit;
2360  return true;
2361}
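
/* A minimal sketch of a call site (the real dispatch lives in the
   trap-handling code, not shown here): after decoding the
   "twlle limit_regno,idx_regno" that trapped, RA names the tlb limit
   register and RB the out-of-bounds index register. */
#if 0
if (extend_tcr_tlb(tcr, xp, RA_field(instr), RB_field(instr))) {
  /* The tlb has grown and the limit register in xp was updated, so
     resuming at the twlle re-tests the bound and falls through. */
}
#endif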
2362
2363
2364
2365void
2366exception_init()
2367{
2368  install_pmcl_exception_handlers();
2369}
2370
2371
2372
2373
2374
2375#ifdef DARWIN
2376
2377
2378#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2379#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2380
2381
2382
2383#define LISP_EXCEPTIONS_HANDLED_MASK \
2384 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2385
2386/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2387#define NUM_LISP_EXCEPTIONS_HANDLED 4
2388
2389typedef struct {
2390  int foreign_exception_port_count;
2391  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2392  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2393  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2394  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2395} MACH_foreign_exception_state;
2396
2397
2398
2399
2400/*
2401  Mach's exception mechanism works a little better than its signal
2402  mechanism (and, not incidentally, it gets along with GDB a lot
2403  better.)
2404
2405  Initially, we install an exception handler to handle each native
2406  thread's exceptions.  This process involves creating a distinguished
2407  thread which listens for kernel exception messages on a set of
2408  0 or more thread exception ports.  As threads are created, they're
2409  added to that port set; a thread's exception port is destroyed
2410  (and therefore removed from the port set) when the thread exits.
2411
2412  A few exceptions can be handled directly in the handler thread;
2413  others require that we resume the user thread (and that the
2414  exception thread resumes listening for exceptions.)  The user
2415  thread might eventually want to return to the original context
2416  (possibly modified somewhat.)
2417
2418  As it turns out, the simplest way to force the faulting user
2419  thread to handle its own exceptions is to do pretty much what
2420  signal() does: the exception handling thread sets up a sigcontext
2421  on the user thread's stack and forces the user thread to resume
2422  execution as if a signal handler had been called with that
2423  context as an argument.  We can use a distinguished UUO at a
2424  distinguished address to do something like sigreturn(); that'll
2425  have the effect of resuming the user thread's execution in
2426  the (pseudo-) signal context.
2427
2428  Since:
2429    a) we have miles of code in C and in Lisp that knows how to
2430    deal with Linux sigcontexts
2431    b) Linux sigcontexts contain a little more useful information
2432    (the DAR, DSISR, etc.) than their Darwin counterparts
2433    c) we have to create a sigcontext ourselves when calling out
2434    to the user thread: we aren't really generating a signal, just
2435    leveraging existing signal-handling code.
2436
2437  we create a Linux sigcontext struct.
2438
2439  Simple?  Hopefully from the outside it is ...
2440
2441  We want the process of passing a thread's own context to it to
2442  appear to be atomic: in particular, we don't want the GC to suspend
2443  a thread that's had an exception but has not yet had its user-level
2444  exception handler called, and we don't want the thread's exception
2445  context to be modified by a GC while the Mach handler thread is
2446  copying it around.  On Linux (and on Jaguar), we avoid this issue
2447  because (a) the kernel sets up the user-level signal handler and
2448  (b) the signal handler blocks signals (including the signal used
2449  by the GC to suspend threads) until tcr->xframe is set up.
2450
2451  The GC and the Mach server thread therefore contend for the lock
2452  "mach_exception_lock".  The Mach server thread holds the lock
2453  when copying exception information between the kernel and the
2454  user thread; the GC holds this lock during most of its execution
2455  (delaying exception processing until it can be done without
2456  GC interference.)
2457
2458*/
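
/* In outline, using the functions defined below:

   exception_handler_proc() -> mach_msg_server() -> openmcl_exc_server()
     -> catch_exception_raise()            [runs in the handler thread]
       -> setup_signal_frame()
         -> create_thread_context_frame()  [builds the pseudo sigcontext]

   ... the user thread resumes in pseudo_signal_handler(); when that
   returns, it executes the pseudo_sigreturn UUO, raising a new
   exception ...

     -> catch_exception_raise()
       -> do_pseudo_sigreturn()
         -> restore_mach_thread_state()    [thread resumes where it faulted]
*/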
2459
2460#ifdef PPC64
2461#define C_REDZONE_LEN           320
2462#define C_STK_ALIGN             32
2463#else
2464#define C_REDZONE_LEN           224
2465#define C_STK_ALIGN             16
2466#endif
2467#define C_PARAMSAVE_LEN         64
2468#define C_LINKAGE_LEN           48
2469
2470#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
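
/* TRUNC_DOWN(a,b,c) moves "a" down by at least "b" bytes and rounds
   the result down to a multiple of "c".  Worked example (32-bit):
   TRUNC_DOWN(0xbfff8d44, C_REDZONE_LEN, C_STK_ALIGN)
     = ((0xbfff8d44 - 224) / 16) * 16 = 0xbfff8c60,
   skipping the red zone and leaving a 16-byte-aligned address. */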
2471
2472void
2473fatal_mach_error(char *format, ...);
2474
2475#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2476
2477
2478void
2479restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2480{
2481  kern_return_t kret;
2482  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2483
2484  /* Set the thread's FP state from the pseudosigcontext */
2485  kret = thread_set_state(thread,
2486                          PPC_FLOAT_STATE,
2487                          (thread_state_t)&(mc->__fs),
2488                          PPC_FLOAT_STATE_COUNT);
2489
2490  MACH_CHECK_ERROR("setting thread FP state", kret);
2491
2492  /* The thread'll be as good as new ... */
2493#ifdef PPC64
2494  kret = thread_set_state(thread,
2495                          PPC_THREAD_STATE64,
2496                          (thread_state_t)&(mc->__ss),
2497                          PPC_THREAD_STATE64_COUNT);
2498#else
2499  kret = thread_set_state(thread, 
2500                          MACHINE_THREAD_STATE,
2501                          (thread_state_t)&(mc->__ss),
2502                          MACHINE_THREAD_STATE_COUNT);
2503#endif
2504  MACH_CHECK_ERROR("setting thread state", kret);
2505} 
2506
2507/* This code runs in the exception handling thread, in response
2508   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2509   in response to a call to pseudo_sigreturn() from the specified
2510   user thread).
2511   Find that context (the user thread's R3 points to it), then
2512   use that context to set the user thread's state.  When this
2513   function's caller returns, the Mach kernel will resume the
2514   user thread.
2515*/
2516
2517kern_return_t
2518do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2519{
2520  ExceptionInformation *xp;
2521
2522#ifdef DEBUG_MACH_EXCEPTIONS
2523  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
2524#endif
2525  xp = tcr->pending_exception_context;
2526  if (xp) {
2527    tcr->pending_exception_context = NULL;
2528    tcr->valence = TCR_STATE_LISP;
2529    restore_mach_thread_state(thread, xp);
2530    raise_pending_interrupt(tcr);
2531  } else {
2532    Bug(NULL, "no xp here!\n");
2533  }
2534#ifdef DEBUG_MACH_EXCEPTIONS
2535  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
2536#endif
2537  return KERN_SUCCESS;
2538} 
2539
2540ExceptionInformation *
2541create_thread_context_frame(mach_port_t thread, 
2542                            natural *new_stack_top)
2543{
2544#ifdef PPC64
2545  ppc_thread_state64_t ts;
2546#else
2547  ppc_thread_state_t ts;
2548#endif
2549  mach_msg_type_number_t thread_state_count;
2550  kern_return_t result;
2551  ExceptionInformation *pseudosigcontext;
2552  MCONTEXT_T mc;
2553  natural stackp, backlink;
2554
2555#ifdef PPC64
2556  thread_state_count = PPC_THREAD_STATE64_COUNT;
2557  result = thread_get_state(thread,
2558                            PPC_THREAD_STATE64,
2559                            (thread_state_t)&ts,
2560                            &thread_state_count);
2561#else
2562  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2563  result = thread_get_state(thread, 
2564                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2565                            (thread_state_t)&ts,
2566                            &thread_state_count);
2567#endif
2568 
2569  if (result != KERN_SUCCESS) {
2570    get_tcr(true);
2571    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2572  }
2573  stackp = ts.__r1;
2574  backlink = stackp;
2575  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2576  stackp -= sizeof(*pseudosigcontext);
2577  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2578
2579  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2580  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2581  memmove(&(mc->__ss),&ts,sizeof(ts));
2582
2583  thread_state_count = PPC_FLOAT_STATE_COUNT;
2584  thread_get_state(thread,
2585                   PPC_FLOAT_STATE,
2586                   (thread_state_t)&(mc->__fs),
2587                   &thread_state_count);
2588
2589
2590#ifdef PPC64
2591  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2592#else
2593  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2594#endif
2595  thread_get_state(thread,
2596#ifdef PPC64
2597                   PPC_EXCEPTION_STATE64,
2598#else
2599                   PPC_EXCEPTION_STATE,
2600#endif
2601                   (thread_state_t)&(mc->__es),
2602                   &thread_state_count);
2603
2604
2605  UC_MCONTEXT(pseudosigcontext) = mc;
2606  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2607  stackp -= C_LINKAGE_LEN;
2608  *(natural *)ptr_from_lispobj(stackp) = backlink;
2609  if (new_stack_top) {
2610    *new_stack_top = stackp;
2611  }
2612  return pseudosigcontext;
2613}
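
/* What the code above builds on the faulting thread's stack, from
   higher to lower addresses (a sketch, not a byte-exact layout):

     ts.__r1                  user thread's SP at the exception
     [C_REDZONE_LEN bytes]    red zone, skipped
     pseudosigcontext         the ExceptionInformation / ucontext
     mc                       machine context: GPRs, FPRs, DAR/DSISR
     [C_PARAMSAVE_LEN bytes]  parameter save area for the handler call
     [C_LINKAGE_LEN bytes]    linkage area; its first word is the
                              backlink to the original SP

   *new_stack_top receives the final (lowest) address. */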
2614
2615/*
2616  This code sets up the user thread so that it executes a "pseudo-signal
2617  handler" function when it resumes.  Create a linux sigcontext struct
2618  on the thread's stack and pass it as an argument to the pseudo-signal
2619  handler.
2620
2621  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2622  which will restore the thread's context.
2623
2624  If the handler invokes code that throws (or otherwise never sigreturn()'s
2625  to the context), that's fine.
2626
2627  Actually, check that: throw (and variants) may need to be careful and
2628  pop the tcr's xframe list until it's younger than any frame being
2629  entered.
2630*/
2631
2632int
2633setup_signal_frame(mach_port_t thread,
2634                   void *handler_address,
2635                   int signum,
2636                   int code,
2637                   TCR *tcr)
2638{
2639#ifdef PPC64
2640  ppc_thread_state64_t ts;
2641#else
2642  ppc_thread_state_t ts;
2643#endif
2644  ExceptionInformation *pseudosigcontext;
2645  int old_valence = tcr->valence;
2646  natural stackp;
2647
2648#ifdef DEBUG_MACH_EXCEPTIONS
2649  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2650#endif
2651  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2652  pseudosigcontext->uc_onstack = 0;
2653  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2654  tcr->pending_exception_context = pseudosigcontext;
2655  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2656 
2657
2658  /*
2659     We've now created a sigcontext on the thread's
2660     stack.  Set things up so that we call the handler (with appropriate
2661     args) when the thread's resumed.
2662  */
2663
2664  ts.__srr0 = (natural) handler_address;
2665  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2666  ts.__r1 = stackp;
2667  ts.__r3 = signum;
2668  ts.__r4 = (natural)pseudosigcontext;
2669  ts.__r5 = (natural)tcr;
2670  ts.__r6 = (natural)old_valence;
2671  ts.__lr = (natural)pseudo_sigreturn;
2672
2673
2674#ifdef PPC64
2675  ts.__r13 = xpGPR(pseudosigcontext,13);
2676  thread_set_state(thread,
2677                   PPC_THREAD_STATE64,
2678                   (thread_state_t)&ts,
2679                   PPC_THREAD_STATE64_COUNT);
2680#else
2681  thread_set_state(thread, 
2682                   MACHINE_THREAD_STATE,
2683                   (thread_state_t)&ts,
2684                   MACHINE_THREAD_STATE_COUNT);
2685#endif
2686#ifdef DEBUG_MACH_EXCEPTIONS
2687  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2688#endif
2689  return 0;
2690}
2691
2692
2693void
2694pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2695{
2696  signal_handler(signum, NULL, context, tcr, old_valence);
2697} 
2698
2699
2700int
2701thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2702{
2703#ifdef PPC64
2704  ppc_thread_state64_t ts;
2705#else
2706  ppc_thread_state_t ts;
2707#endif
2708  mach_msg_type_number_t thread_state_count;
2709
2710#ifdef PPC64
2711  thread_state_count = PPC_THREAD_STATE64_COUNT;
2712#else
2713  thread_state_count = PPC_THREAD_STATE_COUNT;
2714#endif
2715  thread_get_state(thread, 
2716#ifdef PPC64
2717                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2718#else
2719                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2720#endif
2721                   (thread_state_t)&ts,
2722                   &thread_state_count);
2723  if (enabled) {
2724    ts.__srr1 |= MSR_FE0_FE1_MASK;
2725  } else {
2726    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2727  }
2728 
2729  ts.__srr0 += 4;               /* resume at the instruction after the UUO */
2730  thread_set_state(thread, 
2731#ifdef PPC64
2732                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2733#else
2734                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2735#endif
2736                   (thread_state_t)&ts,
2737#ifdef PPC64
2738                   PPC_THREAD_STATE64_COUNT
2739#else
2740                   PPC_THREAD_STATE_COUNT
2741#endif
2742                   );
2743
2744  return 0;
2745}
2746
2747/*
2748  This function runs in the exception handling thread.  It's
2749  called (by this precise name) from the library function "exc_server()"
2750  when the thread's exception ports are set up.  (exc_server() is called
2751  via mach_msg_server(), which is a function that waits for and dispatches
2752  on exception messages from the Mach kernel.)
2753
2754  This checks to see if the exception was caused by a pseudo_sigreturn()
2755  UUO; if so, it arranges for the thread to have its state restored
2756  from the specified context.
2757
2758  Otherwise, it tries to map the exception to a signal number and
2759  arranges that the thread run a "pseudo signal handler" to handle
2760  the exception.
2761
2762  Some exceptions could and should be handled here directly.
2763*/
2764
2765kern_return_t
2766catch_exception_raise(mach_port_t exception_port,
2767                      mach_port_t thread,
2768                      mach_port_t task, 
2769                      exception_type_t exception,
2770                      exception_data_t code_vector,
2771                      mach_msg_type_number_t code_count)
2772{
2773  int signum = 0, code = *code_vector, code1;
2774  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2775  kern_return_t kret;
2776
2777#ifdef DEBUG_MACH_EXCEPTIONS
2778  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2779#endif
2780
2781  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2782    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2783  } 
2784  if ((exception == EXC_BAD_INSTRUCTION) &&
2785      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2786      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2787       (code1 == (int)enable_fp_exceptions) ||
2788       (code1 == (int)disable_fp_exceptions))) {
2789    if (code1 == (int)pseudo_sigreturn) {
2790      kret = do_pseudo_sigreturn(thread, tcr);
2791#if 0
2792      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
2793#endif
2794       
2795    } else if (code1 == (int)enable_fp_exceptions) {
2796      kret = thread_set_fp_exceptions_enabled(thread, true);
2797    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2798  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2799    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2800    kret = 17;
2801  } else {
2802    switch (exception) {
2803    case EXC_BAD_ACCESS:
2804      signum = SIGSEGV;
2805      break;
2806       
2807    case EXC_BAD_INSTRUCTION:
2808      signum = SIGILL;
2809      break;
2810     
2811    case EXC_SOFTWARE:
2812      if (code == EXC_PPC_TRAP) {
2813        signum = SIGTRAP;
2814      }
2815      break;
2816     
2817    case EXC_ARITHMETIC:
2818      signum = SIGFPE;
2819      break;
2820
2821    default:
2822      break;
2823    }
2824    if (signum) {
2825      kret = setup_signal_frame(thread,
2826                                (void *)pseudo_signal_handler,
2827                                signum,
2828                                code,
2829                                tcr);
2830#if 0
2831      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2832#endif
2833
2834    } else {
2835      kret = 17;
2836    }
2837  }
2838
2839  return kret;
2840}
2841
2842
2843
2844typedef struct {
2845  mach_msg_header_t Head;
2846  /* start of the kernel processed data */
2847  mach_msg_body_t msgh_body;
2848  mach_msg_port_descriptor_t thread;
2849  mach_msg_port_descriptor_t task;
2850  /* end of the kernel processed data */
2851  NDR_record_t NDR;
2852  exception_type_t exception;
2853  mach_msg_type_number_t codeCnt;
2854  integer_t code[2];
2855  mach_msg_trailer_t trailer;
2856} exceptionRequest;
2857
2858
2859boolean_t
2860openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2861{
2862  static NDR_record_t _NDR = {0};
2863  kern_return_t handled;
2864  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2865  exceptionRequest *req = (exceptionRequest *) in;
2866
2867  reply->NDR = _NDR;
2868
2869  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2870  out->msgh_remote_port = in->msgh_remote_port;
2871  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2872  out->msgh_local_port = MACH_PORT_NULL;
2873  out->msgh_id = in->msgh_id+100;
2874
2875  /* Could handle other exception flavors in the range 2401-2403 */
2876
2877
2878  if (in->msgh_id != 2401) {
2879    reply->RetCode = MIG_BAD_ID;
2880    return FALSE;
2881  }
2882  handled = catch_exception_raise(req->Head.msgh_local_port,
2883                                  req->thread.name,
2884                                  req->task.name,
2885                                  req->exception,
2886                                  req->code,
2887                                  req->codeCnt);
2888  reply->RetCode = handled;
2889  return TRUE;
2890}
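
/* msgh_id 2401 is the MIG-generated message id for exception_raise;
   per MIG convention the reply id is the request id + 100.  The
   "state" and "state_identity" flavors (2402/2403) would carry thread
   state in the message itself and are rejected above. */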
2891
2892/*
2893  The initial function for an exception-handling thread.
2894*/
2895
2896void *
2897exception_handler_proc(void *arg)
2898{
2899  extern boolean_t exc_server();
2900  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2901
2902  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2903  /* Should never return. */
2904  abort();
2905}
2906
2907
2908
2909mach_port_t
2910mach_exception_port_set()
2911{
2912  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2913  kern_return_t kret; 
2914  if (__exception_port_set == MACH_PORT_NULL) {
2915    kret = mach_port_allocate(mach_task_self(),
2916                              MACH_PORT_RIGHT_PORT_SET,
2917                              &__exception_port_set);
2918    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2919    create_system_thread(0,
2920                         NULL,
2921                         exception_handler_proc, 
2922                         (void *)((natural)__exception_port_set));
2923  }
2924  return __exception_port_set;
2925}
2926
2927/*
2928  Set up a new thread to handle those exceptions specified by
2929  the mask "which".  This involves creating a special Mach
2930  message port, telling the Mach kernel to send exception
2931  messages for the calling thread to that port, and setting
2932  up a handler thread which listens for and responds to
2933  those messages.
2934
2935*/
2936
2937/*
2938  Establish the lisp thread's TCR as its exception port, and determine
2939  whether any other ports have been established by foreign code for
2940  exceptions that lisp cares about.
2941
2942  If this happens at all, it should happen on return from foreign
2943  code and on entry to lisp code via a callback.
2944
2945  This is a lot of trouble (and overhead) to support Java, or other
2946  embeddable systems that clobber their caller's thread exception ports.
2947 
2948*/
2949kern_return_t
2950tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2951{
2952  kern_return_t kret;
2953  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2954  int i;
2955  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2956  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2957  exception_mask_t mask = 0;
2958
2959  kret = thread_swap_exception_ports(thread,
2960                                     LISP_EXCEPTIONS_HANDLED_MASK,
2961                                     lisp_port,
2962                                     EXCEPTION_DEFAULT,
2963                                     THREAD_STATE_NONE,
2964                                     fxs->masks,
2965                                     &n,
2966                                     fxs->ports,
2967                                     fxs->behaviors,
2968                                     fxs->flavors);
2969  if (kret == KERN_SUCCESS) {
2970    fxs->foreign_exception_port_count = n;
2971    for (i = 0; i < n; i ++) {
2972      foreign_port = fxs->ports[i];
2973
2974      if ((foreign_port != lisp_port) &&
2975          (foreign_port != MACH_PORT_NULL)) {
2976        mask |= fxs->masks[i];
2977      }
2978    }
2979    tcr->foreign_exception_status = (int) mask;
2980  }
2981  return kret;
2982}
2983
2984kern_return_t
2985tcr_establish_lisp_exception_port(TCR *tcr)
2986{
2987  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2988}
2989
2990/*
2991  Do this when calling out to or returning from foreign code, if
2992  any conflicting foreign exception ports were established when we
2993  last entered lisp code.
2994*/
2995kern_return_t
2996restore_foreign_exception_ports(TCR *tcr)
2997{
2998  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2999 
3000  if (m) {
3001    MACH_foreign_exception_state *fxs  = 
3002      (MACH_foreign_exception_state *) tcr->native_thread_info;
3003    int i, n = fxs->foreign_exception_port_count;
3004    exception_mask_t tm;
3005
3006    for (i = 0; i < n; i++) {
3007      if ((tm = fxs->masks[i]) & m) {
3008        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
3009                                   tm,
3010                                   fxs->ports[i],
3011                                   fxs->behaviors[i],
3012                                   fxs->flavors[i]);
3013      }
3014    }
3015  }
3016  return KERN_SUCCESS;
}
3017                                   
3018
3019/*
3020  This assumes that a Mach port (to be used as the thread's exception port) whose
3021  "name" matches the TCR's 32-bit address has already been allocated.
3022*/
3023
3024kern_return_t
3025setup_mach_exception_handling(TCR *tcr)
3026{
3027  mach_port_t
3028    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
3029    task_self = mach_task_self();
3030  kern_return_t kret;
3031
3032  kret = mach_port_insert_right(task_self,
3033                                thread_exception_port,
3034                                thread_exception_port,
3035                                MACH_MSG_TYPE_MAKE_SEND);
3036  MACH_CHECK_ERROR("adding send right to exception_port",kret);
3037
3038  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
3039  if (kret == KERN_SUCCESS) {
3040    mach_port_t exception_port_set = mach_exception_port_set();
3041
3042    kret = mach_port_move_member(task_self,
3043                                 thread_exception_port,
3044                                 exception_port_set);
3045  }
3046  return kret;
3047}
3048
3049void
3050darwin_exception_init(TCR *tcr)
3051{
3052  void tcr_monitor_exception_handling(TCR*, Boolean);
3053  kern_return_t kret;
3054  MACH_foreign_exception_state *fxs = 
3055    calloc(1, sizeof(MACH_foreign_exception_state));
3056 
3057  tcr->native_thread_info = (void *) fxs;
3058
3059  if ((kret = setup_mach_exception_handling(tcr))
3060      != KERN_SUCCESS) {
3061    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
3062    terminate_lisp();
3063  }
3064}
3065
3066/*
3067  The tcr is the "name" of the corresponding thread's exception port.
3068  Destroying the port should remove it from all port sets of which it's
3069  a member (notably, the exception port set.)
3070*/
3071void
3072darwin_exception_cleanup(TCR *tcr)
3073{
3074  void *fxs = tcr->native_thread_info;
3075  extern Boolean use_mach_exception_handling;
3076
3077  if (fxs) {
3078    tcr->native_thread_info = NULL;
3079    free(fxs);
3080  }
3081  if (use_mach_exception_handling) {
3082    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3083    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3084  }
3085}
3086
3087
3088Boolean
3089suspend_mach_thread(mach_port_t mach_thread)
3090{
3091  kern_return_t status;
3092  Boolean aborted = false;
3093 
3094  do {
3095    aborted = false;
3096    status = thread_suspend(mach_thread);
3097    if (status == KERN_SUCCESS) {
3098      status = thread_abort_safely(mach_thread);
3099      if (status == KERN_SUCCESS) {
3100        aborted = true;
3101      } else {
3102        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
3103        thread_resume(mach_thread);
3104      }
3105    } else {
3106      return false;
3107    }
3108  } while (! aborted);
3109  return true;
3110}
3111
3112/*
3113  Only do this if pthread_kill indicated that the pthread isn't
3114  listening to signals anymore, as can happen as soon as pthread_exit()
3115  is called on Darwin.  The thread could still call out to lisp as it
3116  is exiting, so we need another way to suspend it in this case.
3117*/
3118Boolean
3119mach_suspend_tcr(TCR *tcr)
3120{
3121  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3122  ExceptionInformation *pseudosigcontext;
3123  Boolean result = false;
3124 
3125  result = suspend_mach_thread(mach_thread);
3126  if (result) {
3127    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3128    pseudosigcontext->uc_onstack = 0;
3129    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3130    tcr->suspend_context = pseudosigcontext;
3131  }
3132  return result;
3133}
3134
3135void
3136mach_resume_tcr(TCR *tcr)
3137{
3138  ExceptionInformation *xp;
3139  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3140 
3141  xp = tcr->suspend_context;
3142#ifdef DEBUG_MACH_EXCEPTIONS
3143  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3144          tcr, tcr->pending_exception_context);
3145#endif
3146  tcr->suspend_context = NULL;
3147  restore_mach_thread_state(mach_thread, xp);
3148#ifdef DEBUG_MACH_EXCEPTIONS
3149  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3150          tcr, tcr->pending_exception_context);
3151#endif
3152  thread_resume(mach_thread);
3153}
3154
3155void
3156fatal_mach_error(char *format, ...)
3157{
3158  va_list args;
3159  char s[512];
3160 
3161
3162  va_start(args, format);
3163  vsnprintf(s, sizeof(s),format, args);
3164  va_end(args);
3165
3166  Fatal("Mach error", s);
3167}
3168
3169void
3170pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3171{
3172  interrupt_handler(signum, NULL, context);
3173}
3174
3175int
3176mach_raise_thread_interrupt(TCR *target)
3177{
3178  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3179  kern_return_t kret;
3180  Boolean result = false;
3181  TCR *current = get_tcr(false);
3182  thread_basic_info_data_t info; 
3183  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3184
3185  LOCK(lisp_global(TCR_AREA_LOCK), current);
3186
3187  if (suspend_mach_thread(mach_thread)) {
3188    if (thread_info(mach_thread,
3189                    THREAD_BASIC_INFO,
3190                    (thread_info_t)&info,
3191                    &info_count) == KERN_SUCCESS) {
3192      if (info.suspend_count == 1) {
3193        if ((target->valence == TCR_STATE_LISP) &&
3194            (!target->unwinding) &&
3195            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3196          kret = setup_signal_frame(mach_thread,
3197                                    (void *)pseudo_interrupt_handler,
3198                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3199                                    0,
3200                                    target);
3201          if (kret == KERN_SUCCESS) {
3202            result = true;
3203          }
3204        }
3205      }
3206    }
3207    if (! result) {
3208      target->interrupt_pending = 1 << fixnumshift;
3209    }
3210    thread_resume(mach_thread);
3211   
3212  }
3213  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3214  return 0;
3215}
3216
3217#endif