source: trunk/source/lisp-kernel/ppc-exceptions.c @ 14261

Last change on this file since 14261 was 14197, checked in by rme, 9 years ago

Rename Threads.h to threads.h (with no capital letter).

/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifdef DARWIN
#include <sys/mman.h>
#define _FPU_RESERVED 0xffffff00
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>

/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif

#include "threads.h"

#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
extern void enable_fp_exceptions(void);
extern void disable_fp_exceptions(void);
#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
#ifndef PR_SET_FPEXC
#define PR_SET_FPEXC 12
#endif
#ifndef PR_FP_EXC_DISABLED
#define PR_FP_EXC_DISABLED 0
#endif
#ifndef PR_FP_EXC_PRECISE
#define PR_FP_EXC_PRECISE 3
#endif

void
enable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}

void
disable_fp_exceptions()
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}

#endif

/*
  Handle exceptions.
*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

int
page_size = 4096;

int
log2_page_size = 12;

/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
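
/* For illustration: the two instruction shapes matched above correspond
   to allocation sequences of roughly this form (register roles are
   symbolic; the actual scratch register is whatever RA names):

     subf allocptr,rSIZE,allocptr   ; variable size: allocptr -= rSIZE
     <ALLOC_TRAP_INSTRUCTION>

     addi allocptr,allocptr,-N      ; small fixed size: allocptr -= N
     <ALLOC_TRAP_INSTRUCTION>

   In the subf case the displacement is fetched from the register named
   by the previous instruction's RA field; in the addi case it's the
   negation of the sign-extended 16-bit immediate. */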

/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
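
/* Sketch of the kind of window emulated above (mnemonics illustrative;
   only the three instruction classes handled by the loop can appear):

     <store to car(allocptr)>      ; STORE_CAR_ALLOCPTR_INSTRUCTION
     <store to cdr(allocptr)>      ; STORE_CDR_ALLOCPTR_INSTRUCTION
     mr rRESULT,allocptr           ; result register <- tagged cons
     <UNTAG_ALLOCPTR_INSTRUCTION>  ; clears the tag, ends the emulation

   The stores and the assignment may appear in any order; emulation
   stops at the untagging instruction, leaving the PC just past it. */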

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}

Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}
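
/* Accounting note: allocation moves allocptr downward, so (last - current)
   is the number of bytes consumed since last_allocptr was recorded.  The
   VOID_ALLOCPTR test skips the case where the allocation region has just
   been voided and the "current" value is meaningless. */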

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}
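
/* Reading the loop above: the conses are initialized in ascending address
   order; each gets the previously-built tail in one word and the initial
   element (from arg_y) in the other, so the net effect is equivalent to
   (make-list nconses :initial-element initial), with "prev" left holding
   the head of the list.  (This reading of deref(current,0/1) as cdr/car
   assumes the PPC cons layout used elsewhere in this kernel.) */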

OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(dbgout, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;

  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {

      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}
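
/* Calling convention, as consumed above: lisp loads a GC_TRAP_FUNCTION_*
   selector into imm0 and an argument (if any) into imm1 (thresholds for
   CONFIGURE_EGC travel in arg_x/arg_y/arg_z), then executes
   GC_TRAP_INSTRUCTION.  Results, when there are any, come back in imm0
   or arg_z, and the PC is advanced past the trap before returning. */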

void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack named by "reg" just overflowed its soft limit.
     Signal an error_stack_overflow condition for it; overflow
     checking stays relaxed until all stacks are back under their
     soft limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with area "a" by one page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
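
/* Worked example of the arithmetic above, assuming a 64KB (2^16)
   allocation quantum: with a->active == 0x10100 and need == 0x30000,
   oldlimit rounds up to 0x20000 and the request to 0x30000, so
   newlimit == 0x50000.  On success allocptr/allocbase are set to the
   new and old limits respectively; lisp then allocates by decrementing
   allocptr toward allocbase, taking the allocation trap when the
   segment is exhausted. */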

void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, sp);
  }
  return bp;
}

void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(dbgout, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(dbgout, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(dbgout, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));
#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);

  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }

  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}

/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(dbgout, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}

protection_handler *protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};

Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies).  So far, we're only looking at the
       siginfo under Linux, and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) ==
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DAR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}

OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }

  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}

OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */

#ifdef EXTEND_VSTACK
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif

Boolean
unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
{
  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel from the cleanup form */
    return true;
  } else {
    return false;
  }
}

Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}

Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  LispObj savefn;
  /* We can't just look at the size of the stack frame under the EABI
     calling sequence, but that's the first thing to check. */
  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
    return false;
  }
  savefn = spPtr->savefn;
  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
}

int ffcall_overflow_count = 0;

/* Find a frame that is neither a catch frame nor one of the
   lexpr_entry frames.  We don't check for non-lisp frames here because
   we'll always stop before we get there due to a dummy lisp frame
   pushed by .SPcallback that masks out the foreign frames.  The one
   exception is that there is a non-lisp frame without a valid VSP
   while in the process of ppc-ff-call. We recognize that because its
   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
   next frame is 0, then we're executing an unwind-protect cleanup
   form, and the top stack frame belongs to its (no longer extant)
   catch frame.  */

#ifdef EXTEND_VSTACK
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif

#ifdef EXTEND_VSTACK
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *db = (LispObj *) lisp_global(DB_LINK),
          *high = (LispObj *) a->high,
          *low = (LispObj *) a->low;
  for (; db; db = (LispObj *) *db) {
    if ((db >= low) && (db < high)) return true;
  };
  return false;
}
#endif

/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}

OSStatus
do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
  area *a = tcr->ts_area;
  protected_area_ptr tsp_soft = a->softprot;
  unprotect_area(tsp_soft);
  signal_stack_soft_overflow(xp,tsp);
  return 0;
}

OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
  */
  lisp_protection_kind which = prot_area->why;
  Boolean on_TSP = (which == kTSPsoftguard);

  if (on_TSP) {
    return do_tsp_overflow(xp, addr);
  } else {
    return do_vsp_overflow(xp, addr);
  }
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}

/*
  We have a couple of choices here.  We can simply unprotect the page
  and let the store happen on return, or we can try to emulate writes
  that we know will involve an intergenerational reference.  Both are
  correct as far as EGC constraints go, but the latter approach is
  probably more efficient.  (This only matters in the case where the
  GC runs after this exception handler returns but before the write
  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
  fault twice.  The whole scenario shouldn't happen very often, but
  (having already taken a fault and committed to an mprotect syscall)
  we might as well emulate stores involving intergenerational
  references, since they're pretty easy to identify.)

  Note that cases involving two or more threads writing to the same
  page (before either of them can run this handler) are benign: one
  invocation of the handler will just unprotect an unprotected page in
  that case.

  If there are GCs (or any other suspensions of the thread between
  the time that the write fault was detected and the time that the
  exception lock is obtained) none of this stuff happens.
*/

/*
  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
     instruction.
  b) RS is a node register (>= fn)
  c) RS is tagged as a cons or vector
  d) RS is in some ephemeral generation.
  This is slightly conservative, since RS may be no younger than the
  EA being written to.
*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      if (rs >= fn) {
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
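
/* Illustrative case: if the faulting instruction is "stw arg_z,4(arg_y)",
   RS is arg_z (a node register), and arg_z holds a tagged cons or uvector
   whose address lies above tenured_area->high (i.e. in some ephemeral
   generation), the store is performed here and the caller can treat the
   write as already done.  A store of a fixnum, or of a pointer into
   tenured space, falls through and is handled by simply unprotecting
   the page, as the comment above describes. */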

OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();

  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}

int
altivec_present = 1;

/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  Clozure CL doesn't generate
     fsqrt. or fsqrts.
  */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}

OSStatus
PMCL_exception_handler(int xnum,
                       ExceptionInformation *xp,
                       TCR *tcr,
                       siginfo_t *info,
                       int old_valence)
{
  OSStatus status = -1;
  pc program_counter;
  opcode instruction = 0;

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (instruction == ALLOC_TRAP_INSTRUCTION) {
    status = handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    status = handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    status = handle_sigfpe(xp, tcr);
  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    if (instruction == GC_TRAP_INSTRUCTION) {
      status = handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      status = handle_uuo(xp, instruction, program_counter);
    } else if (is_conditional_trap(instruction)) {
      status = handle_trap(xp, instruction, program_counter, info);
    } else {
      status = handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
    status = 0;
  }

  return status;
}

void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
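
/* Note: delta is in bytes; since xpPC(xp) is typed as a pointer to
   32-bit opcodes, adding (delta >> 2) advances the saved PC by
   delta/4 instruction words. */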

/*
  This wants to scan backwards until "where" points to an instruction
  whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;

  return err;
}

OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);
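  /* Field layout implied by the extractions above: errnum occupies
     bits 16-25 (10 bits), rb occupies bits 11-15 (a register number),
     and the minor opcode is whatever UUO_MINOR() selects from the
     low-order bits. */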

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;

  case UUO_INTERR:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

/* Callback to lisp to handle a trap.  Need to translate the
   PC (where) into one of two forms of pairs:

   1. If PC is in fn or nfn's code vector, use the register number
      of fn or nfn and the index into that function's code vector.
   2. Otherwise use 0 and the pc itself
*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;
  else
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
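
/* For example: if "where" falls inside fn's code vector, lisp receives
   the pair (fn, word-index), where word-index counts 32-bit words from
   misc_data_offset; if neither fn nor nfn claims the PC, lisp receives
   (0, raw-pc) instead. */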

void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x acquired exception lock after callback\n", tcr);
#endif

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}

/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible).  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(dbgout, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}

/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
   Stop if subtag_code_vector is encountered. */
unsigned
scan_for_instr( unsigned target, unsigned mask, pc where )
{
  int i = TRAP_LOOKUP_TRIES;

  while( i-- ) {
    unsigned instr = *(--where);
    if ( codevec_hdr_p(instr) ) {
      return 0;
    } else if ( match_instr(instr, mask, target) ) {
      return instr;
    }
  }
  return 0;
}

void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}

/* Decide, from the major/minor opcode and the TO mask, whether a trap
   instruction is conditional or will trap unconditionally. */

int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
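
/* Examples of the classification above: a trap whose TO mask is TO_GT
   alone (e.g. the wrong-number-of-args check on nargs) is conditional;
   a "tw" whose TO mask includes TO_EQ and whose two register operands
   are the same, or whose mask includes TO_EQ plus both of TO_LT|TO_GT
   (or TO_LO|TO_HI), always traps and is treated as unconditional. */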

OSStatus
handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
    return(0);
  }

  return(-1);
}

/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}
1799
1800void
1801wait_for_exception_lock_in_handler(TCR *tcr, 
1802                                   ExceptionInformation *context,
1803                                   xframe_list *xf)
1804{
1805
1806  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1807#ifdef DEBUG
1808  fprintf(dbgout, "0x%x has exception lock\n", tcr);
1809#endif
1810  xf->curr = context;
1811  xf->prev = tcr->xframe;
1812  tcr->xframe =  xf;
1813  tcr->pending_exception_context = NULL;
1814  tcr->valence = TCR_STATE_FOREIGN; 
1815}
1816
1817void
1818unlock_exception_lock_in_handler(TCR *tcr)
1819{
1820  tcr->pending_exception_context = tcr->xframe->curr;
1821  tcr->xframe = tcr->xframe->prev;
1822  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1823#ifdef DEBUG
1824  fprintf(dbgout, "0x%x releasing exception lock\n", tcr);
1825#endif
1826  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1827}
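
/* A minimal sketch (not compiled) of the bracketing protocol the
   functions above implement; signal_handler() below is the real
   instance.  Any handler that runs lisp-visible code follows this
   shape: publish the context, take the exception lock, work, then
   unwind in the reverse order. */
#if 0
static void
handler_skeleton(TCR *tcr, ExceptionInformation *context)
{
  xframe_list xframe_link;
  int old_valence = prepare_to_wait_for_exception_lock(tcr, context);

  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  /* ... handle the exception; the GC can now find `context' via
     tcr->xframe ... */
  unlock_exception_lock_in_handler(tcr);
  exit_signal_handler(tcr, old_valence);
  raise_pending_interrupt(tcr);
}
#endif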
1828
1829/*
1830   If an interrupt is pending on exception exit, try to ensure
1831   that the thread sees it as soon as it's able to run.
1832*/
1833void
1834raise_pending_interrupt(TCR *tcr)
1835{
1836  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1837    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1838  }
1839}
1840
1841void
1842exit_signal_handler(TCR *tcr, int old_valence)
1843{
1844  sigset_t mask;
1845  sigfillset(&mask);
1846 
1847  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1848  tcr->valence = old_valence;
1849  tcr->pending_exception_context = NULL;
1850}
1851
1852
1853void
1854signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1855{
1856  xframe_list xframe_link;
1857
1858  if (!use_mach_exception_handling) {
1859   
1860    tcr = (TCR *) get_interrupt_tcr(false);
1861 
1862    /* The signal handler is entered with all signals (notably
1863       the thread_suspend signal) blocked.  Don't allow any other
1864       signal to preempt us until we've set the TCR's xframe slot
1865       to include the current exception
1866       context.
1867    */
1868   
1869    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1870  }
1871
1872  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1873    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1874    pthread_kill(pthread_self(), thread_suspend_signal);
1875  }
1876
1877 
1878  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1879  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
1880    char msg[512];
1881    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
1882    if (lisp_Debugger(context, info, signum, false, msg)) {
1883      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1884    }
1885  }
1886
1887  unlock_exception_lock_in_handler(tcr);
1888
1889  /* This thread now looks like a thread that was suspended while
1890     executing lisp code.  If some other thread gets the exception
1891     lock and GCs, the context (this thread's suspend_context) will
1892     be updated.  (That's only of concern if it happens before we
1893     can return to the kernel/to the Mach exception handler).
1894  */
1895  if (!use_mach_exception_handling) {
1896    exit_signal_handler(tcr, old_valence);
1897    raise_pending_interrupt(tcr);
1898  }
1899}
1900
1901/*
1902  If it looks like we're in the middle of an atomic operation, make
1903  it seem as if that operation is either complete or hasn't started
1904  yet.
1905
1906  The cases handled include:
1907
1908  a) storing into a newly-allocated lisp frame on the stack.
1909  b) marking a newly-allocated TSP frame as containing "raw" data.
1910  c) consing: the GC has its own ideas about how this should be
1911     handled, but other callers would be best advised to back
1912     up or move forward, according to whether we're in the middle
1913     of allocating a cons cell or allocating a uvector.
1914  d) a STMW to the vsp
1915  e) EGC write-barrier subprims.
1916*/
1917
1918extern opcode
1919  egc_write_barrier_start,
1920  egc_write_barrier_end, 
1921  egc_store_node_conditional, 
1922  egc_store_node_conditional_test,
1923  egc_set_hash_key,
1924  egc_gvset,
1925  egc_rplaca,
1926  egc_rplacd,
1927  egc_set_hash_key_conditional,
1928  egc_set_hash_key_conditional_test;
1929
1930
1931extern opcode ffcall_return_window, ffcall_return_window_end;
1932
1933void
1934pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1935{
1936  pc program_counter = xpPC(xp);
1937  opcode instr = *program_counter;
1938  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1939  LispObj cur_allocptr = xpGPR(xp, allocptr);
1940  int allocptr_tag = fulltag_of(cur_allocptr);
1941 
1942
1943
1944  if ((program_counter < &egc_write_barrier_end) && 
1945      (program_counter >= &egc_write_barrier_start)) {
1946    LispObj *ea = 0, val = 0, root = 0;
1947    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1948    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;
1949
1950    if (program_counter >= &egc_set_hash_key_conditional) {
1951      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1952          ((program_counter == &egc_set_hash_key_conditional_test) &&
1953           (! (xpCCR(xp) & 0x20000000)))) {
1954        return;
1955      }
1956      need_store = false;
1957      root = xpGPR(xp,arg_x);
1958      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1959      need_memoize_root = true;
1960    } else if (program_counter >= &egc_store_node_conditional) {
1961      if ((program_counter < &egc_store_node_conditional_test) ||
1962          ((program_counter == &egc_store_node_conditional_test) &&
1963           (! (xpCCR(xp) & 0x20000000)))) {
1964        /* The conditional store either hasn't been attempted yet, or
1965           has failed.  No need to adjust the PC, or do memoization. */
1966        return;
1967      }
1968      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1969      xpGPR(xp,arg_z) = t_value;
1970      need_store = false;
1971    } else if (program_counter >= &egc_set_hash_key) {
1972      root = xpGPR(xp,arg_x);
1973      val = xpGPR(xp,arg_z);
1974      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1975      need_memoize_root = true;
1976    } else if (program_counter >= &egc_gvset) {
1977      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1978      val = xpGPR(xp,arg_z);
1979    } else if (program_counter >= &egc_rplacd) {
1980      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1981      val = xpGPR(xp,arg_z);
1982    } else {                      /* egc_rplaca */
1983      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1984      val = xpGPR(xp,arg_z);
1985    }
1986    if (need_store) {
1987      *ea = val;
1988    }
1989    if (need_check_memo) {
1990      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1991      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1992          ((LispObj)ea < val)) {
1993        atomic_set_bit(refbits, bitnumber);
1994        if (need_memoize_root) {
1995          bitnumber = area_dnode(root, lisp_global(REF_BASE));
1996          atomic_set_bit(refbits, bitnumber);
1997        }
1998      }
1999    }
2000    set_xpPC(xp, xpLR(xp));
2001    return;
2002  }
2003
2004
2005  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
2006    LispObj tsp_val = xpGPR(xp,tsp);
2007   
2008    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
2009    adjust_exception_pc(xp, 4);
2010    return;
2011  }
2012 
2013  if (frame->backlink == (frame+1)) {
2014    if (
2015#ifdef PPC64
2016        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
2017        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
2018#else
2019        (major_opcode_p(instr, major_opcode_STW)) && 
2020#endif
2021        (RA_field(instr) == sp) &&
2022        /* There are a few places in the runtime that store into
2023           a previously-allocated frame atop the stack when
2024           throwing values around.  We only care about the case
2025           where the frame was newly allocated, in which case
2026           there must have been a CREATE_LISP_FRAME_INSTRUCTION
2027           a few instructions before the current program counter.
2028           (The whole point here is that a newly allocated frame
2029           might contain random values that we don't want the
2030           GC to see; a previously allocated frame should already
2031           be completely initialized.)
2032        */
2033        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
2034         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
2035         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
2036#ifdef PPC64
2037      int disp = DS_field(instr);
2038#else     
2039      int disp = D_field(instr);
2040#endif
2041
2042
2043      if (disp < (4*node_size)) {
2044#if 0
2045        fprintf(dbgout, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
2046#endif
2047        frame->savevsp = 0;
2048        if (disp < (3*node_size)) {
2049          frame->savelr = 0;
2050          if (disp == node_size) {
2051            frame->savefn = 0;
2052          }
2053        }
2054      }
2055      return;
2056    }
2057  }
2058
2059  if (allocptr_tag != tag_fixnum) {
2060    signed_natural disp = allocptr_displacement(xp);
2061
2062    if (disp) {
2063      /* Being architecturally "at" the alloc trap doesn't tell
2064         us much: in particular, it doesn't tell us whether
2065         the thread has committed to taking the trap
2066         and is waiting for the exception lock (or waiting
2067         for the Mach exception thread to tell it how bad
2068         things are), or is about to execute a conditional
2069         trap.
2070         Regardless of which case applies, we want the
2071         other thread to take (or finish taking) the
2072         trap, and we don't want it to consider its
2073         current allocptr to be valid.
2074         The difference between this case (suspend other
2075         thread for GC) and the previous case (suspend
2076         current thread for interrupt) is solely a
2077         matter of what happens after we leave this
2078         function: some non-current thread will stay
2079         suspended until the GC finishes, then take
2080         (or start processing) the alloc trap.   The
2081         current thread will go off and do PROCESS-INTERRUPT
2082         or something, and may return from the interrupt
2083         and need to finish the allocation that got interrupted.
2084      */
2085
2086      if (alloc_disp) {
2087        *alloc_disp = disp;
2088        xpGPR(xp,allocptr) += disp;
2089        /* Leave the PC at the alloc trap.  When the interrupt
2090           handler returns, it'll decrement allocptr by disp
2091           and the trap may or may not be taken.
2092        */
2093      } else {
2094        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2095        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2096        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2097      }
2098    } else {
2099#ifdef DEBUG
2100      fprintf(dbgout, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2101#endif
2102      /* If we're already past the alloc_trap, finish allocating
2103         the object. */
2104      if (allocptr_tag == fulltag_cons) {
2105        finish_allocating_cons(xp);
2106#ifdef DEBUG
2107          fprintf(dbgout, "finish allocating cons in TCR = #x%x\n",
2108                  tcr);
2109#endif
2110      } else {
2111        if (allocptr_tag == fulltag_misc) {
2112#ifdef DEBUG
2113          fprintf(dbgout, "finish allocating uvector in TCR = #x%x\n",
2114                  tcr);
2115#endif
2116          finish_allocating_uvector(xp);
2117        } else {
2118          Bug(xp, "what's being allocated here ?");
2119        }
2120      }
2121      /* Whatever we finished allocating, reset allocptr/allocbase to
2122         VOID_ALLOCPTR */
2123      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2124    }
2125    return;
2126  }
2127
2128  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2129    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2130    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2131#if 0
2132        fprintf(dbgout, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2133#endif
2134
2135    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2136      deref(frame,idx) = 0;
2137    }
2138    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2139    return;
2140  }
2141
2142#ifndef PPC64
2143  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2144      (RA_field(instr) == vsp)) {
2145    int r;
2146    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2147   
2148    for (r = RS_field(instr); r <= 31; r++) {
2149      *vspptr++ = xpGPR(xp,r);
2150    }
2151    adjust_exception_pc(xp, 4);
2152  }
2153#endif
2154}
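
/* Sketch (not compiled) of the two calling conventions described
   above.  A GC-side caller passes a NULL alloc_disp and lets
   pc_luser_xp() void the suspended thread's allocptr outright;
   interrupt_handler() below passes a real alloc_disp so that the
   interrupted allocation can be backed out and later resumed. */
#if 0
static void
luser_suspended_thread_for_gc(TCR *other)
{
  pc_luser_xp(other->suspend_context, other, NULL);
}
#endif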
2155
2156void
2157interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
2158{
2159  TCR *tcr = get_interrupt_tcr(false);
2160  if (tcr) {
2161    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
2162      tcr->interrupt_pending = 1 << fixnumshift;
2163    } else {
2164      LispObj cmain = nrs_CMAIN.vcell;
2165
2166      if ((fulltag_of(cmain) == fulltag_misc) &&
2167          (header_subtag(header_of(cmain)) == subtag_macptr)) {
2168        /*
2169           This thread can (allegedly) take an interrupt now.
2170           It's tricky to do that if we're executing
2171           foreign code (especially Linuxthreads code, much
2172           of which isn't reentrant.)
2173           If we're unwinding the stack, we also want to defer
2174           the interrupt.
2175        */
2176        if ((tcr->valence != TCR_STATE_LISP) ||
2177            (tcr->unwinding != 0)) {
2178          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
2179        } else {
2180          xframe_list xframe_link;
2181          int old_valence;
2182          signed_natural disp=0;
2183         
2184          pc_luser_xp(context, tcr, &disp);
2185          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2186          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
2187#ifdef DEBUG
2188          fprintf(dbgout, "[0x%x acquired exception lock for interrupt]\n",tcr);
2189#endif
2190          PMCL_exception_handler(signum, context, tcr, info, old_valence);
2191          if (disp) {
2192            xpGPR(context,allocptr) -= disp;
2193          }
2194          unlock_exception_lock_in_handler(tcr);
2195#ifdef DEBUG
2196          fprintf(dbgout, "[0x%x released exception lock for interrupt]\n",tcr);
2197#endif
2198          exit_signal_handler(tcr, old_valence);
2199        }
2200      }
2201    }
2202  }
2203#ifdef DARWIN
2204  DarwinSigReturn(context);
2205#endif
2206}
2207
2208
2209
2210void
2211install_signal_handler(int signo, void *handler, Boolean system, Boolean on_altstack)
2212{
2213  struct sigaction sa;
2214 
2215  sa.sa_sigaction = (void *)handler;
2216  sigfillset(&sa.sa_mask);
2217  sa.sa_flags = 
2218    0 /* SA_RESTART */
2219    | SA_SIGINFO
2220#ifdef DARWIN
2221#ifdef PPC64
2222    | SA_64REGSET
2223#endif
2224#endif
2225    ;
2226
2227  sigaction(signo, &sa, NULL);
2228  if (system) {
2229    extern sigset_t user_signals_reserved;
2230    sigaddset(&user_signals_reserved, signo);
2231  }
2232
2233}
2234
2235void
2236install_pmcl_exception_handlers()
2237{
2238#ifdef DARWIN
2239  extern Boolean use_mach_exception_handling;
2240#endif
2241
2242  Boolean install_signal_handlers_for_exceptions =
2243#ifdef DARWIN
2244    !use_mach_exception_handling
2245#else
2246    true
2247#endif
2248    ;
2249  if (install_signal_handlers_for_exceptions) {
2250    extern int no_sigtrap;
2251    install_signal_handler(SIGILL, (void *)signal_handler, true, false);
2252    if (no_sigtrap != 1) {
2253      install_signal_handler(SIGTRAP, (void *)signal_handler, true, false);
2254    }
2255    install_signal_handler(SIGBUS,  (void *)signal_handler, true, false);
2256    install_signal_handler(SIGSEGV, (void *)signal_handler, true, false);
2257    install_signal_handler(SIGFPE, (void *)signal_handler, true, false);
2258  }
2259 
2260  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2261                         (void *)interrupt_handler, true, false);
2262  signal(SIGPIPE, SIG_IGN);
2263}
2264
2265void
2266thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2267{
2268  TCR *tcr = get_tcr(false);
2269  area *a;
2270  sigset_t mask;
2271 
2272  sigemptyset(&mask);
2273
2274  if (tcr) {
2275    tcr->valence = TCR_STATE_FOREIGN;
2276    a = tcr->vs_area;
2277    if (a) {
2278      a->active = a->high;
2279    }
2280    a = tcr->ts_area;
2281    if (a) {
2282      a->active = a->high;
2283    }
2284    a = tcr->cs_area;
2285    if (a) {
2286      a->active = a->high;
2287    }
2288  }
2289 
2290  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2291  pthread_exit(NULL);
2292}
2293
2294void
2295thread_signal_setup()
2296{
2297  thread_suspend_signal = SIG_SUSPEND_THREAD;
2298  thread_kill_signal = SIG_KILL_THREAD;
2299
2300  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler, true, false);
2301  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler, true, false);
2302}
2303
2304
2305
2306void
2307unprotect_all_areas()
2308{
2309  protected_area_ptr p;
2310
2311  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2312    unprotect_area(p);
2313  }
2314}
2315
2316/*
2317  A binding subprim has just done "twlle limit_regno,idx_regno" and
2318  the trap's been taken.  Extend the tcr's tlb so that the index will
2319  be in bounds and the new limit will be on a page boundary, filling
2320  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2321  the tcr fields and the registers in the xp and return true if this
2322  all works, false otherwise.
2323
2324  Note that the tlb was allocated via malloc, so realloc can do some
2325  of the hard work.
2326*/
2327Boolean
2328extend_tcr_tlb(TCR *tcr, 
2329               ExceptionInformation *xp, 
2330               unsigned limit_regno,
2331               unsigned idx_regno)
2332{
2333  unsigned
2334    index = (unsigned) (xpGPR(xp,idx_regno)),
2335    old_limit = tcr->tlb_limit,
2336    new_limit = align_to_power_of_2(index+1,12),
2337    new_bytes = new_limit-old_limit;
2338  LispObj
2339    *old_tlb = tcr->tlb_pointer,
2340    *new_tlb = realloc(old_tlb, new_limit),
2341    *work;
2342
2343  if (new_tlb == NULL) {
2344    return false;
2345  }
2346 
2347  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2348
2349  while (new_bytes) {
2350    *work++ = no_thread_local_binding_marker;
2351    new_bytes -= sizeof(LispObj);
2352  }
2353  tcr->tlb_pointer = new_tlb;
2354  tcr->tlb_limit = new_limit;
2355  xpGPR(xp, limit_regno) = new_limit;
2356  return true;
2357}
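
/* Illustrative only: how a handler for the "twlle" binding trap
   described above might invoke extend_tcr_tlb().  The real caller
   decodes the limit and index registers from the trap instruction's
   RA/RB fields; this helper is hypothetical. */
#if 0
static Boolean
handle_tlb_overflow_trap(TCR *tcr, ExceptionInformation *xp, opcode instr)
{
  return extend_tcr_tlb(tcr, xp, RA_field(instr), RB_field(instr));
}
#endif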
2358
2359
2360
2361void
2362exception_init()
2363{
2364  install_pmcl_exception_handlers();
2365}
2366
2367
2368
2369
2370
2371#ifdef DARWIN
2372
2373
2374#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2375#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2376
2377
2378
2379#define LISP_EXCEPTIONS_HANDLED_MASK \
2380 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2381
2382/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2383#define NUM_LISP_EXCEPTIONS_HANDLED 4
2384
2385typedef struct {
2386  int foreign_exception_port_count;
2387  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2388  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2389  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2390  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2391} MACH_foreign_exception_state;
2392
2393
2394
2395
2396/*
2397  Mach's exception mechanism works a little better than its signal
2398  mechanism (and, not incidentally, it gets along with GDB a lot
2399  better.)
2400
2401  Initially, we install an exception handler to handle each native
2402  thread's exceptions.  This process involves creating a distinguished
2403  thread which listens for kernel exception messages on a set of
2404  0 or more thread exception ports.  As threads are created, they're
2405  added to that port set; a thread's exception port is destroyed
2406  (and therefore removed from the port set) when the thread exits.
2407
2408  A few exceptions can be handled directly in the handler thread;
2409  others require that we resume the user thread (and that the
2410  exception thread resumes listening for exceptions.)  The user
2411  thread might eventually want to return to the original context
2412  (possibly modified somewhat.)
2413
2414  As it turns out, the simplest way to force the faulting user
2415  thread to handle its own exceptions is to do pretty much what
2416  signal() does: the exception handling thread sets up a sigcontext
2417  on the user thread's stack and forces the user thread to resume
2418  execution as if a signal handler had been called with that
2419  context as an argument.  We can use a distinguished UUO at a
2420  distinguished address to do something like sigreturn(); that'll
2421  have the effect of resuming the user thread's execution in
2422  the (pseudo-) signal context.
2423
2424  Since:
2425    a) we have miles of code in C and in Lisp that knows how to
2426    deal with Linux sigcontexts
2427    b) Linux sigcontexts contain a little more useful information
2428    (the DAR, DSISR, etc.) than their Darwin counterparts
2429    c) we have to create a sigcontext ourselves when calling out
2430    to the user thread: we aren't really generating a signal, just
2431    leveraging existing signal-handling code.
2432
2433  we create a Linux sigcontext struct.
2434
2435  Simple ?  Hopefully from the outside it is ...
2436
2437  We want the process of passing a thread's own context to it to
2438  appear to be atomic: in particular, we don't want the GC to suspend
2439  a thread that's had an exception but has not yet had its user-level
2440  exception handler called, and we don't want the thread's exception
2441  context to be modified by a GC while the Mach handler thread is
2442  copying it around.  On Linux (and on Jaguar), we avoid this issue
2443  because (a) the kernel sets up the user-level signal handler and
2444  (b) the signal handler blocks signals (including the signal used
2445  by the GC to suspend threads) until tcr->xframe is set up.
2446
2447  The GC and the Mach server thread therefore contend for the lock
2448  "mach_exception_lock".  The Mach server thread holds the lock
2449  when copying exception information between the kernel and the
2450  user thread; the GC holds this lock during most of its execution
2451  (delaying exception processing until it can be done without
2452  GC interference.)
2453
2454*/
2455
2456#ifdef PPC64
2457#define C_REDZONE_LEN           320
2458#define C_STK_ALIGN             32
2459#else
2460#define C_REDZONE_LEN           224
2461#define C_STK_ALIGN             16
2462#endif
2463#define C_PARAMSAVE_LEN         64
2464#define C_LINKAGE_LEN           48
2465
2466#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
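
/* Worked example: with the 32-bit values above (C_REDZONE_LEN = 224,
   C_STK_ALIGN = 16), TRUNC_DOWN(0x1007, C_REDZONE_LEN, C_STK_ALIGN)
   is ((0x1007 - 224) / 16) * 16 = 0xf20: skip the red zone, then
   round down to a 16-byte boundary. */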
2467
2468void
2469fatal_mach_error(char *format, ...);
2470
2471#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
2472
2473
2474void
2475restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2476{
2477  kern_return_t kret;
2478  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
2479
2480  /* Set the thread's FP state from the pseudosigcontext */
2481  kret = thread_set_state(thread,
2482                          PPC_FLOAT_STATE,
2483                          (thread_state_t)&(mc->__fs),
2484                          PPC_FLOAT_STATE_COUNT);
2485
2486  MACH_CHECK_ERROR("setting thread FP state", kret);
2487
2488  /* The thread'll be as good as new ... */
2489#ifdef PPC64
2490  kret = thread_set_state(thread,
2491                          PPC_THREAD_STATE64,
2492                          (thread_state_t)&(mc->__ss),
2493                          PPC_THREAD_STATE64_COUNT);
2494#else
2495  kret = thread_set_state(thread, 
2496                          MACHINE_THREAD_STATE,
2497                          (thread_state_t)&(mc->__ss),
2498                          MACHINE_THREAD_STATE_COUNT);
2499#endif
2500  MACH_CHECK_ERROR("setting thread state", kret);
2501} 
2502
2503/* This code runs in the exception handling thread, in response
2504   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2505   in response to a call to pseudo_sigreturn() from the specified
2506   user thread).
2507   Find that context (the user thread's R3 points to it), then
2508   use that context to set the user thread's state.  When this
2509   function's caller returns, the Mach kernel will resume the
2510   user thread.
2511*/
2512
2513kern_return_t
2514do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2515{
2516  ExceptionInformation *xp;
2517
2518#ifdef DEBUG_MACH_EXCEPTIONS
2519  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
2520#endif
2521  xp = tcr->pending_exception_context;
2522  if (xp) {
2523    tcr->pending_exception_context = NULL;
2524    tcr->valence = TCR_STATE_LISP;
2525    restore_mach_thread_state(thread, xp);
2526    raise_pending_interrupt(tcr);
2527  } else {
2528    Bug(NULL, "no xp here!\n");
2529  }
2530#ifdef DEBUG_MACH_EXCEPTIONS
2531  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
2532#endif
2533  return KERN_SUCCESS;
2534} 
2535
2536ExceptionInformation *
2537create_thread_context_frame(mach_port_t thread, 
2538                            natural *new_stack_top)
2539{
2540#ifdef PPC64
2541  ppc_thread_state64_t ts;
2542#else
2543  ppc_thread_state_t ts;
2544#endif
2545  mach_msg_type_number_t thread_state_count;
2546  kern_return_t result;
2547  ExceptionInformation *pseudosigcontext;
2548  MCONTEXT_T mc;
2549  natural stackp, backlink;
2550
2551#ifdef PPC64
2552  thread_state_count = PPC_THREAD_STATE64_COUNT;
2553  result = thread_get_state(thread,
2554                            PPC_THREAD_STATE64,
2555                            (thread_state_t)&ts,
2556                            &thread_state_count);
2557#else
2558  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2559  result = thread_get_state(thread, 
2560                            PPC_THREAD_STATE,   /* GPRs, some SPRs  */
2561                            (thread_state_t)&ts,
2562                            &thread_state_count);
2563#endif
2564 
2565  if (result != KERN_SUCCESS) {
2566    get_tcr(true);
2567    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2568  }
2569  stackp = ts.__r1;
2570  backlink = stackp;
2571  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
2572  stackp -= sizeof(*pseudosigcontext);
2573  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2574
2575  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
2576  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
2577  memmove(&(mc->__ss),&ts,sizeof(ts));
2578
2579  thread_state_count = PPC_FLOAT_STATE_COUNT;
2580  thread_get_state(thread,
2581                   PPC_FLOAT_STATE,
2582                   (thread_state_t)&(mc->__fs),
2583                   &thread_state_count);
2584
2585
2586#ifdef PPC64
2587  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
2588#else
2589  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
2590#endif
2591  thread_get_state(thread,
2592#ifdef PPC64
2593                   PPC_EXCEPTION_STATE64,
2594#else
2595                   PPC_EXCEPTION_STATE,
2596#endif
2597                   (thread_state_t)&(mc->__es),
2598                   &thread_state_count);
2599
2600
2601  UC_MCONTEXT(pseudosigcontext) = mc;
2602  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
2603  stackp -= C_LINKAGE_LEN;
2604  *(natural *)ptr_from_lispobj(stackp) = backlink;
2605  if (new_stack_top) {
2606    *new_stack_top = stackp;
2607  }
2608  return pseudosigcontext;
2609}
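
/* The (downward-growing) stack layout create_thread_context_frame()
   builds, sketched from the code above:

       high addresses
         caller's frame            <- original __r1 ("backlink")
         red zone (C_REDZONE_LEN)
         ExceptionInformation      <- pseudosigcontext
         MCONTEXT_T                <- mc (__ss/__fs/__es copied in)
         param save + linkage area <- new_stack_top, *top == backlink
       low addresses
*/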
2610
2611/*
2612  This code sets up the user thread so that it executes a "pseudo-signal
2613  handler" function when it resumes.  Create a linux sigcontext struct
2614  on the thread's stack and pass it as an argument to the pseudo-signal
2615  handler.
2616
2617  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2618  which will restore the thread's context.
2619
2620  If the handler invokes code that throws (or otherwise never sigreturn()'s
2621  to the context), that's fine.
2622
2623  Actually, check that: throw (and variants) may need to be careful and
2624  pop the tcr's xframe list until it's younger than any frame being
2625  entered.
2626*/
2627
2628int
2629setup_signal_frame(mach_port_t thread,
2630                   void *handler_address,
2631                   int signum,
2632                   int code,
2633                   TCR *tcr)
2634{
2635#ifdef PPC64
2636  ppc_thread_state64_t ts;
2637#else
2638  ppc_thread_state_t ts;
2639#endif
2640  ExceptionInformation *pseudosigcontext;
2641  int old_valence = tcr->valence;
2642  natural stackp;
2643
2644#ifdef DEBUG_MACH_EXCEPTIONS
2645  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2646#endif
2647  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2648  pseudosigcontext->uc_onstack = 0;
2649  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2650  tcr->pending_exception_context = pseudosigcontext;
2651  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2652 
2653
2654  /*
2655     It seems like we've created a sigcontext on the thread's
2656     stack.  Set things up so that we call the handler (with appropriate
2657     args) when the thread's resumed.
2658  */
2659
2660  ts.__srr0 = (natural) handler_address;
2661  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
2662  ts.__r1 = stackp;
2663  ts.__r3 = signum;
2664  ts.__r4 = (natural)pseudosigcontext;
2665  ts.__r5 = (natural)tcr;
2666  ts.__r6 = (natural)old_valence;
2667  ts.__lr = (natural)pseudo_sigreturn;
2668
2669
2670#ifdef PPC64
2671  ts.__r13 = xpGPR(pseudosigcontext,13);
2672  thread_set_state(thread,
2673                   PPC_THREAD_STATE64,
2674                   (thread_state_t)&ts,
2675                   PPC_THREAD_STATE64_COUNT);
2676#else
2677  thread_set_state(thread, 
2678                   MACHINE_THREAD_STATE,
2679                   (thread_state_t)&ts,
2680                   MACHINE_THREAD_STATE_COUNT);
2681#endif
2682#ifdef DEBUG_MACH_EXCEPTIONS
2683  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2684#endif
2685  return 0;
2686}
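
/* Note the correspondence: setup_signal_frame() loads __r3..__r6 with
   exactly the arguments of pseudo_signal_handler() below, and points
   the link register at pseudo_sigreturn(), so "returning" from the
   handler raises the distinguished UUO that do_pseudo_sigreturn()
   recognizes. */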
2687
2688
2689void
2690pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2691{
2692  signal_handler(signum, NULL, context, tcr, old_valence);
2693} 
2694
2695
2696int
2697thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2698{
2699#ifdef PPC64
2700  ppc_thread_state64_t ts;
2701#else
2702  ppc_thread_state_t ts;
2703#endif
2704  mach_msg_type_number_t thread_state_count;
2705
2706#ifdef PPC64
2707  thread_state_count = PPC_THREAD_STATE64_COUNT;
2708#else
2709  thread_state_count = PPC_THREAD_STATE_COUNT;
2710#endif
2711  thread_get_state(thread, 
2712#ifdef PPC64
2713                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2714#else
2715                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2716#endif
2717                   (thread_state_t)&ts,
2718                   &thread_state_count);
2719  if (enabled) {
2720    ts.__srr1 |= MSR_FE0_FE1_MASK;
2721  } else {
2722    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
2723  }
2724  /*
2725     Hack-o-rama warning (isn't it about time for such a warning?):
2726     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
2727     Our handler for lisp's use of pthread_kill() pushes a phony
2728     lisp frame on the stack and forces the context to resume at
2729     the UUO in enable_fp_exceptions(); the "saveLR" field of that
2730     lisp frame contains the -real- address that process_interrupt
2731     should have returned to, and the fact that it's in a lisp
2732     frame should convince the GC to notice that address if it
2733     runs in the tiny time window between returning from our
2734     interrupt handler and ... here.
2735     If the top frame on the stack is a lisp frame, discard it
2736     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
2737     just adjust ts.srr0 to skip over the UUO.
2738  */
2739  {
2740    lisp_frame *tos = (lisp_frame *)ts.__r1,
2741      *next_frame = tos->backlink;
2742   
2743    if (tos == (next_frame -1)) {
2744      ts.__srr0 = tos->savelr;
2745      ts.__r1 = (LispObj) next_frame;
2746    } else {
2747      ts.__srr0 += 4;
2748    }
2749  }
2750  thread_set_state(thread, 
2751#ifdef PPC64
2752                   PPC_THREAD_STATE64,  /* GPRs, some SPRs  */
2753#else
2754                   PPC_THREAD_STATE,    /* GPRs, some SPRs  */
2755#endif
2756                   (thread_state_t)&ts,
2757#ifdef PPC64
2758                   PPC_THREAD_STATE64_COUNT
2759#else
2760                   PPC_THREAD_STATE_COUNT
2761#endif
2762                   );
2763
2764  return 0;
2765}
2766
2767/*
2768  This function runs in the exception handling thread.  It's
2769  called (by this precise name) from the library function "exc_server()"
2770  when the thread's exception ports are set up.  (exc_server() is called
2771  via mach_msg_server(), which is a function that waits for and dispatches
2772  on exception messages from the Mach kernel.)
2773
2774  This checks to see if the exception was caused by a pseudo_sigreturn()
2775  UUO; if so, it arranges for the thread to have its state restored
2776  from the specified context.
2777
2778  Otherwise, it tries to map the exception to a signal number and
2779  arranges that the thread run a "pseudo signal handler" to handle
2780  the exception.
2781
2782  Some exceptions could and should be handled here directly.
2783*/
2784
2785kern_return_t
2786catch_exception_raise(mach_port_t exception_port,
2787                      mach_port_t thread,
2788                      mach_port_t task, 
2789                      exception_type_t exception,
2790                      exception_data_t code_vector,
2791                      mach_msg_type_number_t code_count)
2792{
2793  int signum = 0, code = *code_vector, code1;
2794  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2795  kern_return_t kret;
2796
2797#ifdef DEBUG_MACH_EXCEPTIONS
2798  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2799#endif
2800
2801  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2802    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2803  } 
2804  if ((exception == EXC_BAD_INSTRUCTION) &&
2805      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
2806      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
2807       (code1 == (int)enable_fp_exceptions) ||
2808       (code1 == (int)disable_fp_exceptions))) {
2809    if (code1 == (int)pseudo_sigreturn) {
2810      kret = do_pseudo_sigreturn(thread, tcr);
2811#if 0
2812      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
2813#endif
2814       
2815    } else if (code1 == (int)enable_fp_exceptions) {
2816      kret = thread_set_fp_exceptions_enabled(thread, true);
2817    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
2818  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2819    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2820    kret = 17;
2821  } else {
2822    switch (exception) {
2823    case EXC_BAD_ACCESS:
2824      signum = SIGSEGV;
2825      break;
2826       
2827    case EXC_BAD_INSTRUCTION:
2828      signum = SIGILL;
2829      break;
2830     
2831    case EXC_SOFTWARE:
2832      if (code == EXC_PPC_TRAP) {
2833        signum = SIGTRAP;
2834      }
2835      break;
2836     
2837    case EXC_ARITHMETIC:
2838      signum = SIGFPE;
2839      break;
2840
2841    default:
2842      break;
2843    }
2844    if (signum) {
2845      kret = setup_signal_frame(thread,
2846                                (void *)pseudo_signal_handler,
2847                                signum,
2848                                code,
2849                                tcr);
2850#if 0
2851      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2852#endif
2853
2854    } else {
2855      kret = 17;
2856    }
2857  }
2858
2859  return kret;
2860}
2861
2862
2863
2864typedef struct {
2865  mach_msg_header_t Head;
2866  /* start of the kernel processed data */
2867  mach_msg_body_t msgh_body;
2868  mach_msg_port_descriptor_t thread;
2869  mach_msg_port_descriptor_t task;
2870  /* end of the kernel processed data */
2871  NDR_record_t NDR;
2872  exception_type_t exception;
2873  mach_msg_type_number_t codeCnt;
2874  integer_t code[2];
2875  mach_msg_trailer_t trailer;
2876} exceptionRequest;
2877
2878
2879boolean_t
2880openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2881{
2882  static NDR_record_t _NDR = {0};
2883  kern_return_t handled;
2884  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2885  exceptionRequest *req = (exceptionRequest *) in;
2886
2887  reply->NDR = _NDR;
2888
2889  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2890  out->msgh_remote_port = in->msgh_remote_port;
2891  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2892  out->msgh_local_port = MACH_PORT_NULL;
2893  out->msgh_id = in->msgh_id+100;
2894
2895  /* Could handle other exception flavors in the range 2401-2403 */
2896
2897
2898  if (in->msgh_id != 2401) {
2899    reply->RetCode = MIG_BAD_ID;
2900    return FALSE;
2901  }
2902  handled = catch_exception_raise(req->Head.msgh_local_port,
2903                                  req->thread.name,
2904                                  req->task.name,
2905                                  req->exception,
2906                                  req->code,
2907                                  req->codeCnt);
2908  reply->RetCode = handled;
2909  return TRUE;
2910}
2911
2912/*
2913  The initial function for an exception-handling thread.
2914*/
2915
2916void *
2917exception_handler_proc(void *arg)
2918{
2919  extern boolean_t exc_server();
2920  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2921
2922  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2923  /* Should never return. */
2924  abort();
2925}
2926
2927
2928
2929mach_port_t
2930mach_exception_port_set()
2931{
2932  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2933  kern_return_t kret; 
2934  if (__exception_port_set == MACH_PORT_NULL) {
2935    kret = mach_port_allocate(mach_task_self(),
2936                              MACH_PORT_RIGHT_PORT_SET,
2937                              &__exception_port_set);
2938    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2939    create_system_thread(0,
2940                         NULL,
2941                         exception_handler_proc, 
2942                         (void *)((natural)__exception_port_set));
2943  }
2944  return __exception_port_set;
2945}
2946
2947/*
2948  Setup a new thread to handle those exceptions specified by
2949  the mask "which".  This involves creating a special Mach
2950  message port, telling the Mach kernel to send exception
2951  messages for the calling thread to that port, and setting
2952  up a handler thread which listens for and responds to
2953  those messages.
2954
2955*/
2956
2957/*
2958  Establish the lisp thread's TCR as its exception port, and determine
2959  whether any other ports have been established by foreign code for
2960  exceptions that lisp cares about.
2961
2962  If this happens at all, it should happen on return from foreign
2963  code and on entry to lisp code via a callback.
2964
2965  This is a lot of trouble (and overhead) to support Java, or other
2966  embeddable systems that clobber their caller's thread exception ports.
2967 
2968*/
2969kern_return_t
2970tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2971{
2972  kern_return_t kret;
2973  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2974  int i;
2975  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2976  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2977  exception_mask_t mask = 0;
2978
2979  kret = thread_swap_exception_ports(thread,
2980                                     LISP_EXCEPTIONS_HANDLED_MASK,
2981                                     lisp_port,
2982                                     EXCEPTION_DEFAULT,
2983                                     THREAD_STATE_NONE,
2984                                     fxs->masks,
2985                                     &n,
2986                                     fxs->ports,
2987                                     fxs->behaviors,
2988                                     fxs->flavors);
2989  if (kret == KERN_SUCCESS) {
2990    fxs->foreign_exception_port_count = n;
2991    for (i = 0; i < n; i ++) {
2992      foreign_port = fxs->ports[i];
2993
2994      if ((foreign_port != lisp_port) &&
2995          (foreign_port != MACH_PORT_NULL)) {
2996        mask |= fxs->masks[i];
2997      }
2998    }
2999    tcr->foreign_exception_status = (int) mask;
3000  }
3001  return kret;
3002}
3003
3004kern_return_t
3005tcr_establish_lisp_exception_port(TCR *tcr)
3006{
3007  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
3008}
3009
3010/*
3011  Do this when calling out to or returning from foreign code, if
3012  any conflicting foreign exception ports were established when we
3013  last entered lisp code.
3014*/
3015kern_return_t
3016restore_foreign_exception_ports(TCR *tcr)
3017{
3018  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
3019 
3020  if (m) {
3021    MACH_foreign_exception_state *fxs  = 
3022      (MACH_foreign_exception_state *) tcr->native_thread_info;
3023    int i, n = fxs->foreign_exception_port_count;
3024    exception_mask_t tm;
3025
3026    for (i = 0; i < n; i++) {
3027      if ((tm = fxs->masks[i]) & m) {
3028        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
3029                                   tm,
3030                                   fxs->ports[i],
3031                                   fxs->behaviors[i],
3032                                   fxs->flavors[i]);
3033      }
3034    }
3035  }
  return KERN_SUCCESS;
3036}
3037                                   
3038
3039/*
3040  This assumes that a Mach port (to be used as the thread's exception port) whose
3041  "name" matches the TCR's 32-bit address has already been allocated.
3042*/
3043
3044kern_return_t
3045setup_mach_exception_handling(TCR *tcr)
3046{
3047  mach_port_t
3048    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
3049    task_self = mach_task_self();
3050  kern_return_t kret;
3051
3052  kret = mach_port_insert_right(task_self,
3053                                thread_exception_port,
3054                                thread_exception_port,
3055                                MACH_MSG_TYPE_MAKE_SEND);
3056  MACH_CHECK_ERROR("adding send right to exception_port",kret);
3057
3058  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
3059  if (kret == KERN_SUCCESS) {
3060    mach_port_t exception_port_set = mach_exception_port_set();
3061
3062    kret = mach_port_move_member(task_self,
3063                                 thread_exception_port,
3064                                 exception_port_set);
3065  }
3066  return kret;
3067}
3068
3069void
3070darwin_exception_init(TCR *tcr)
3071{
3072  void tcr_monitor_exception_handling(TCR*, Boolean);
3073  kern_return_t kret;
3074  MACH_foreign_exception_state *fxs = 
3075    calloc(1, sizeof(MACH_foreign_exception_state));
3076 
3077  tcr->native_thread_info = (void *) fxs;
3078
3079  if ((kret = setup_mach_exception_handling(tcr))
3080      != KERN_SUCCESS) {
3081    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
3082    terminate_lisp();
3083  }
3084}
3085
3086/*
3087  The tcr is the "name" of the corresponding thread's exception port.
3088  Destroying the port should remove it from all port sets of which it's
3089  a member (notably, the exception port set.)
3090*/
3091void
3092darwin_exception_cleanup(TCR *tcr)
3093{
3094  void *fxs = tcr->native_thread_info;
3095  extern Boolean use_mach_exception_handling;
3096
3097  if (fxs) {
3098    tcr->native_thread_info = NULL;
3099    free(fxs);
3100  }
3101  if (use_mach_exception_handling) {
3102    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3103    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
3104  }
3105}
3106
3107
3108Boolean
3109suspend_mach_thread(mach_port_t mach_thread)
3110{
3111  kern_return_t status;
3112  Boolean aborted = false;
3113 
3114  do {
3115    aborted = false;
3116    status = thread_suspend(mach_thread);
3117    if (status == KERN_SUCCESS) {
3118      status = thread_abort_safely(mach_thread);
3119      if (status == KERN_SUCCESS) {
3120        aborted = true;
3121      } else {
3122        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
3123        thread_resume(mach_thread);
3124      }
3125    } else {
3126      return false;
3127    }
3128  } while (! aborted);
3129  return true;
3130}
3131
3132/*
3133  Only do this if pthread_kill indicated that the pthread isn't
3134  listening to signals anymore, as can happen as soon as pthread_exit()
3135  is called on Darwin.  The thread could still call out to lisp as it
3136  is exiting, so we need another way to suspend it in this case.
3137*/
3138Boolean
3139mach_suspend_tcr(TCR *tcr)
3140{
3141  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
3142  ExceptionInformation *pseudosigcontext;
3143  Boolean result = false;
3144 
3145  result = suspend_mach_thread(mach_thread);
3146  if (result) {
3147    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
3148    pseudosigcontext->uc_onstack = 0;
3149    pseudosigcontext->uc_sigmask = (sigset_t) 0;
3150    tcr->suspend_context = pseudosigcontext;
3151  }
3152  return result;
3153}
3154
3155void
3156mach_resume_tcr(TCR *tcr)
3157{
3158  ExceptionInformation *xp;
3159  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
3160 
3161  xp = tcr->suspend_context;
3162#ifdef DEBUG_MACH_EXCEPTIONS
3163  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
3164          tcr, tcr->pending_exception_context);
3165#endif
3166  tcr->suspend_context = NULL;
3167  restore_mach_thread_state(mach_thread, xp);
3168#ifdef DEBUG_MACH_EXCEPTIONS
3169  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
3170          tcr, tcr->pending_exception_context);
3171#endif
3172  thread_resume(mach_thread);
3173}
3174
3175void
3176fatal_mach_error(char *format, ...)
3177{
3178  va_list args;
3179  char s[512];
3180 
3181
3182  va_start(args, format);
3183  vsnprintf(s, sizeof(s),format, args);
3184  va_end(args);
3185
3186  Fatal("Mach error", s);
3187}
3188
3189void
3190pseudo_interrupt_handler(int signum, ExceptionInformation *context)
3191{
3192  interrupt_handler(signum, NULL, context);
3193}
3194
3195int
3196mach_raise_thread_interrupt(TCR *target)
3197{
3198  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
3199  kern_return_t kret;
3200  Boolean result = false;
3201  TCR *current = get_tcr(false);
3202  thread_basic_info_data_t info; 
3203  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
3204
3205  LOCK(lisp_global(TCR_AREA_LOCK), current);
3206
3207  if (suspend_mach_thread(mach_thread)) {
3208    if (thread_info(mach_thread,
3209                    THREAD_BASIC_INFO,
3210                    (thread_info_t)&info,
3211                    &info_count) == KERN_SUCCESS) {
3212      if (info.suspend_count == 1) {
3213        if ((target->valence == TCR_STATE_LISP) &&
3214            (!target->unwinding) &&
3215            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
3216          kret = setup_signal_frame(mach_thread,
3217                                    (void *)pseudo_interrupt_handler,
3218                                    SIGNAL_FOR_PROCESS_INTERRUPT,
3219                                    0,
3220                                    target);
3221          if (kret == KERN_SUCCESS) {
3222            result = true;
3223          }
3224        }
3225      }
3226    }
3227    if (! result) {
3228      target->interrupt_pending = 1 << fixnumshift;
3229    }
3230    thread_resume(mach_thread);
3231   
3232  }
3233  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
3234  return 0;
3235}
3236
3237#endif