source: branches/gb-egc/lisp-kernel/ppc-exceptions.c @ 15831

Last change on this file since 15831 was 15831, checked in by gb, 8 years ago

Zero dnodes when allocating segments, not in GC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 64.9 KB
Line 
1/*
2   Copyright (C) 2009 Clozure Associates
3   Copyright (C) 1994-2001 Digitool, Inc
4   This file is part of Clozure CL. 
5
6   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
7   License , known as the LLGPL and distributed with Clozure CL as the
8   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
9   which is distributed with Clozure CL as the file "LGPL".  Where these
10   conflict, the preamble takes precedence. 
11
12   Clozure CL is referenced in the preamble as the "LIBRARY."
13
14   The LLGPL is also available online at
15   http://opensource.franz.com/preamble.html
16*/
17
18#include "lisp.h"
19#include "lisp-exceptions.h"
20#include "lisp_globals.h"
21#include <ctype.h>
22#include <stdio.h>
23#include <stddef.h>
24#include <string.h>
25#include <stdarg.h>
26#include <errno.h>
27#include <stdio.h>
28#ifdef LINUX
29#include <strings.h>
30#include <sys/mman.h>
31#include <fpu_control.h>
32#include <linux/prctl.h>
33#endif
34
35#ifdef DARWIN
36#include <sys/mman.h>
37#define _FPU_RESERVED 0xffffff00
38#ifndef SA_NODEFER
39#define SA_NODEFER 0
40#endif
41#include <sysexits.h>
42
43/* a distinguished UUO at a distinguished address */
44extern void pseudo_sigreturn(ExceptionInformation *);
45#endif
46
47
48#include "threads.h"
49
50#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
51#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
52#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
53extern void enable_fp_exceptions(void);
54extern void disable_fp_exceptions(void);
55
56#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
61#ifndef PR_SET_FPEXC
62#define PR_SET_FPEXC 12
63#endif
64#ifndef PR_FP_EXC_DISABLED
65#define PR_FP_EXC_DISABLED 0
66#endif
67#ifndef PR_FP_EXC_PRECISE
68#define PR_FP_EXC_PRECISE 3
69#endif
70
/* Put this thread's FPU into "precise exceptions" mode via prctl().
   If the kernel doesn't support PR_SET_FPEXC the call simply fails
   and we run with whatever mode the kernel provides. */
void
enable_fp_exceptions(void)
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}
76
/* Disable delivery of FP exceptions for this thread via prctl().
   Harmless no-op on kernels that don't support PR_SET_FPEXC. */
void
disable_fp_exceptions(void)
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}
82
83#endif
84
85/*
86  Handle exceptions.
87
88*/
89
90extern LispObj lisp_nil;
91
92extern natural lisp_heap_gc_threshold;
93extern Boolean grow_dynamic_area(natural);
94
95
96
97
98
99
/* VM page size in bytes and its base-2 logarithm.  Initialized for
   the common 4K-page case; NOTE(review): presumably overridden at
   startup if the platform reports a different page size -- confirm. */
int
page_size = 4096;

int
log2_page_size = 12;
105
106
107
108
109
110/*
111  If the PC is pointing to an allocation trap, the previous instruction
112  must have decremented allocptr.  Return the non-zero amount by which
113  allocptr was decremented.
114*/
115signed_natural
116allocptr_displacement(ExceptionInformation *xp)
117{
118  pc program_counter = xpPC(xp);
119  opcode instr = *program_counter, prev_instr = *(program_counter-1);
120
121  if (instr == ALLOC_TRAP_INSTRUCTION) {
122    if (match_instr(prev_instr, 
123                    XO_MASK | RT_MASK | RB_MASK,
124                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
125                    RT(allocptr) |
126                    RB(allocptr))) {
127      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
128    }
129    if (match_instr(prev_instr,
130                    OP_MASK | RT_MASK | RA_MASK,
131                    OP(major_opcode_ADDI) | 
132                    RT(allocptr) |
133                    RA(allocptr))) {
134      return (signed_natural) -((short) prev_instr);
135    }
136    Bug(xp, "Can't determine allocation displacement");
137  }
138  return 0;
139}
140
141
142/*
143  A cons cell's been successfully allocated, but the allocptr's
144  still tagged (as fulltag_cons, of course.)  Emulate any instructions
145  that might follow the allocation (stores to the car or cdr, an
146  assignment to the "result" gpr) that take place while the allocptr's
147  tag is non-zero, advancing over each such instruction.  When we're
148  done, the cons cell will be allocated and initialized, the result
149  register will point to it, the allocptr will be untagged, and
150  the PC will point past the instruction that clears the allocptr's
151  tag.
152*/
153void
154finish_allocating_cons(ExceptionInformation *xp)
155{
156  pc program_counter = xpPC(xp);
157  opcode instr;
158  LispObj cur_allocptr = xpGPR(xp, allocptr);
159  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
160  int target_reg;
161
162  while (1) {
163    instr = *program_counter++;
164
165    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
166      xpGPR(xp, allocptr) = untag(cur_allocptr);
167      xpPC(xp) = program_counter;
168      return;
169    }
170   
171    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
172    case STORE_CAR_ALLOCPTR_INSTRUCTION:
173      c->car = xpGPR(xp,RT_field(instr));
174      break;
175    case STORE_CDR_ALLOCPTR_INSTRUCTION:
176      c->cdr = xpGPR(xp,RT_field(instr));
177      break;
178    default:
179      /* Assume that this is an assignment: {rt/ra} <- allocptr.
180         There are several equivalent instruction forms
181         that might have that effect; just assign to target here.
182      */
183      if (major_opcode_p(instr,major_opcode_X31)) {
184        target_reg = RA_field(instr);
185      } else {
186        target_reg = RT_field(instr);
187      }
188      xpGPR(xp,target_reg) = cur_allocptr;
189      break;
190    }
191  }
192}
193
194/*
195  We were interrupted in the process of allocating a uvector; we
196  survived the allocation trap, and allocptr is tagged as fulltag_misc.
197  Emulate any instructions which store a header into the uvector,
198  assign the value of allocptr to some other register, and clear
199  allocptr's tag.  Don't expect/allow any other instructions in
200  this environment.
201*/
202void
203finish_allocating_uvector(ExceptionInformation *xp)
204{
205  pc program_counter = xpPC(xp);
206  opcode instr;
207  LispObj cur_allocptr = xpGPR(xp, allocptr);
208  int target_reg;
209
210  while (1) {
211    instr = *program_counter++;
212    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
213      xpGPR(xp, allocptr) = untag(cur_allocptr);
214      xpPC(xp) = program_counter;
215      return;
216    }
217    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
218        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
219      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
220    } else {
221      /* assume that this is an assignment */
222
223      if (major_opcode_p(instr,major_opcode_X31)) {
224        target_reg = RA_field(instr);
225      } else {
226        target_reg = RT_field(instr);
227      }
228      xpGPR(xp,target_reg) = cur_allocptr;
229    }
230  }
231}
232
233
/*
  Try to make "bytes_needed" bytes available for allocation, possibly
  triggering an EGC or a full GC, and bump allocptr by
  disp_from_allocptr (negative plus a tag, per the callers) so it
  addresses the new object.  Returns true on success, false if even a
  full GC plus heap extension couldn't satisfy the request.

  NOTE(review): new_heap_segment is called here with 5 arguments, but
  the definition later in this file takes only 4 parameters -- confirm
  which prototype the headers declare.
*/
Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC: only when there's an older generation and
     ephemeral GC is in effect, and the area's already past its
     threshold. */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr, NULL)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr, NULL)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }
 
  return false;
}
282
283#ifndef XNOMEM
284#define XNOMEM 10
285#endif
286
287void
288update_bytes_allocated(TCR* tcr, void *cur_allocptr)
289{
290  BytePtr
291    last = (BytePtr) tcr->last_allocptr, 
292    current = (BytePtr) cur_allocptr;
293  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
294    tcr->bytes_allocated += last-current;
295  }
296  tcr->last_allocptr = 0;
297}
298
299void
300lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
301{
302  /* Couldn't allocate the object.  If it's smaller than some arbitrary
303     size (say 128K bytes), signal a "chronically out-of-memory" condition;
304     else signal a "allocation request failed" condition.
305  */
306  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
307  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
308}
309
310/*
311  Allocate a large list, where "large" means "large enough to
312  possibly trigger the EGC several times if this was done
313  by individually allocating each CONS."  The number of
314  ocnses in question is in arg_z; on successful return,
315  the list will be in arg_z
316*/
317
318Boolean
319allocate_list(ExceptionInformation *xp, TCR *tcr)
320{
321  natural
322    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
323    bytes_needed = (nconses << dnode_shift);
324  LispObj
325    prev = lisp_nil,
326    current,
327    initial = xpGPR(xp,arg_y);
328
329  if (nconses == 0) {
330    /* Silly case */
331    xpGPR(xp,arg_z) = lisp_nil;
332    xpGPR(xp,allocptr) = lisp_nil;
333    return true;
334  }
335  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
336  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
337    for (current = xpGPR(xp,allocptr);
338         nconses;
339         prev = current, current+= dnode_size, nconses--) {
340      deref(current,0) = prev;
341      deref(current,1) = initial;
342    }
343    xpGPR(xp,arg_z) = prev;
344    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
345    xpGPR(xp,allocptr)-=fulltag_cons;
346  } else {
347    lisp_allocation_failure(xp,tcr,bytes_needed);
348  }
349  return true;
350}
351
/*
  Handle an allocation trap: allocptr was decremented past allocbase.
  Decode the decrement from allocptr's tag (cons allocations are a
  fixed size; misc allocations recover the size from the preceding
  subf/addi instruction), make the space available, and resume past
  the trap.  Returns 0 on success, -1 if the trap can't be decoded or
  allocation fails.
*/
OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    /* Cons allocation: fixed size, displacement restores the tag. */
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    /* Uvector allocation: the previous instruction decremented
       allocptr either by a register amount (subf) ... */
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      /* ... or by a negative 16-bit immediate (addi). */
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(dbgout, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      /* Skip over the trap instruction and resume. */
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}
414
/* gc_deferred counts GC requests deferred because GC was inhibited
   (see gc_like_from_xp); full_gc_deferred records that a full GC was
   among the deferred requests (see handle_gc_trap). */
natural gc_deferred = 0, full_gc_deferred = 0;
416
/* Do-nothing callback passed to gc_like_from_xp for
   GC_TRAP_FUNCTION_FLASH_FREEZE: the value of running it is that all
   other threads get suspended and normalized; the actual freeze
   bookkeeping is done by the caller in handle_gc_trap. */
signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}
422
/*
  Handle a GC trap.  The selector (a GC_TRAP_FUNCTION_* value) is in
  imm0 and an argument (meaning depends on the selector) is in imm1.
  Results are returned in imm0 or arg_z as appropriate.  Always
  returns 0 after advancing the PC past the trap instruction.
*/
OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Enable/disable the EGC; return the previous state (T or NIL). */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* Set per-generation thresholds from arg_x/arg_y/arg_z. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    /* Round the requested threshold up to a whole heap segment. */
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    /* Suspend/normalize all threads (via the no-op flash_freeze
       callback), then mark everything currently allocated as static. */
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    /* The remaining selectors all involve running a full GC; bits
       above GC_TRAP_FUNCTION_GC select purify/impurify/save-application
       post-processing. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* A previous full-GC request was deferred while GC was
         disabled; promote this immediate GC to a full GC. */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;
       
        /* Stash the toplevel function (top of the value stack)
           before saving the image; exit if the save succeeded. */
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        /* Mark everything currently allocated as static. */
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
   
  }

  adjust_exception_pc(xp,4);
  return 0;
}
557
558
559
/* Signal a soft overflow of the stack identified by "reg" by raising
   a lisp stack-overflow error from the faulting context. */
void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The cstack just overflowed.  Force the current thread's
     control stack to do so until all stacks are well under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}
573
574/*
575  Lower (move toward 0) the "end" of the soft protected area associated
576  with a by a page, if we can.
577*/
578
579void
580adjust_soft_protection_limit(area *a)
581{
582  char *proposed_new_soft_limit = a->softlimit - 4096;
583  protected_area_ptr p = a->softprot;
584 
585  if (proposed_new_soft_limit >= (p->start+16384)) {
586    p->end = proposed_new_soft_limit;
587    p->protsize = p->end-p->start;
588    a->softlimit = proposed_new_soft_limit;
589  }
590  protect_area(p);
591}
592
593void
594restore_soft_stack_limit(unsigned stkreg)
595{
596  area *a;
597  TCR *tcr = get_tcr(true);
598
599  switch (stkreg) {
600  case sp:
601    a = tcr->cs_area;
602    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
603      a->softlimit -= 4096;
604    }
605    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
606    break;
607  case vsp:
608    a = tcr->vs_area;
609    adjust_soft_protection_limit(a);
610    break;
611  case tsp:
612    a = tcr->ts_area;
613    adjust_soft_protection_limit(a);
614  }
615}
616
/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception.
   Abandon the current lisp stack state: unwind to the topmost catch
   frame, record the allocation registers in the TCR, and re-enter
   lisp via start_lisp. */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  /* Preserve the allocation state from the faulting context. */
  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  /* Reset the value and temp stacks to the topmost catch frame's
     saved state. */
  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}
633
634/*
635  This doesn't GC; it returns true if it made enough room, false
636  otherwise.
637  If "extend" is true, it can try to extend the dynamic area to
638  satisfy the request.
639*/
640
641Boolean
642new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
643{
644  area *a;
645  natural newlimit, oldlimit;
646  natural log2_allocation_quantum = tcr->log2_allocation_quantum;
647
648  a  = active_dynamic_area;
649  oldlimit = (natural) a->active;
650  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
651              align_to_power_of_2(need, log2_allocation_quantum));
652  if (newlimit > (natural) (a->high)) {
653    if (extend) {
654      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
655      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
656      do {
657        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
658          break;
659        }
660        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
661        if (extend_by < 4<<20) {
662          return false;
663        }
664      } while (1);
665    } else {
666      return false;
667    }
668  }
669  a->active = (BytePtr) newlimit;
670
671  return true;
672}
673
674void
675platform_new_heap_segment(ExceptionInformation *xp, TCR *tcr, BytePtr low, BytePtr high)
676{
677  tcr->last_allocptr = (void *)high;
678  xpGPR(xp,allocptr) = (LispObj) high;
679  xpGPR(xp,allocbase) = (LispObj) low;
680}
681
682 
683void
684update_area_active (area **aptr, BytePtr value)
685{
686  area *a = *aptr;
687  for (; a; a = a->older) {
688    if ((a->low <= value) && (a->high >= value)) break;
689  };
690  if (a == NULL) Bug(NULL, "Can't find active area");
691  a->active = value;
692  *aptr = a;
693
694  for (a = a->younger; a; a = a->younger) {
695    a->active = a->high;
696  }
697}
698
699LispObj *
700tcr_frame_ptr(TCR *tcr)
701{
702  ExceptionInformation *xp;
703  LispObj *bp = NULL;
704
705  if (tcr->pending_exception_context)
706    xp = tcr->pending_exception_context;
707  else {
708    xp = tcr->suspend_context;
709  }
710  if (xp) {
711    bp = (LispObj *) xpGPR(xp, sp);
712  }
713  return bp;
714}
715
/*
  Bring a thread's TCR into a state the GC can examine: record its
  stack areas' active pointers (from the exception/suspend context xp
  if in lisp code, or from the TCR's saved values if in an ff-call),
  account for bytes it has allocated, and void its allocation
  registers.  "is_other_tcr" is true when normalizing some thread
  other than the one driving the GC.
*/
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      /* Advance the other thread past any partially-emulated
         allocation sequence before reading its allocptr. */
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(dbgout, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(dbgout, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    /* NOTE(review): xp is NULL on this branch, so the xpPC(xp) below
       would dereference NULL if DEBUG were defined -- confirm. */
    fprintf(dbgout, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(dbgout, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }


  /* Void the saved allocation state; credit any bytes consumed. */
  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}
772
/* TCR on whose behalf a GC-like operation is currently running;
   NULL when none is in progress. */
TCR *gc_tcr = NULL;
774
/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned.  If GC is inhibited (GC_INHIBIT_COUNT nonzero),
   the request is recorded as deferred and 0 is returned without
   running "fun". */

signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* Negate a positive count to record that a GC was requested
       while inhibited; then defer. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  /* Pick the context the GC should use for each other thread and
     normalize it. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }
   


  result = fun(tcr, param);

  /* Clear the per-thread GC contexts before resuming anyone. */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
839
840
841
842/* Returns #bytes freed by invoking GC */
843
844signed_natural
845gc_from_tcr(TCR *tcr, signed_natural param)
846{
847  area *a;
848  BytePtr oldfree, newfree;
849  BytePtr oldend, newend;
850
851#ifdef DEBUG
852  fprintf(dbgout, "Start GC  in 0x%lx\n", tcr);
853#endif
854  a = active_dynamic_area;
855  oldend = a->high;
856  oldfree = a->active;
857  gc(tcr, param);
858  newfree = a->active;
859  newend = a->high;
860#if 0
861  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
862#endif
863  return ((oldfree-newfree)+(newend-oldend));
864}
865
866signed_natural
867gc_from_xp(ExceptionInformation *xp, signed_natural param)
868{
869  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
870
871  freeGCptrs();
872  return status;
873}
874
/* Run the "purify" operation with all threads suspended and
   normalized; returns whatever purify returns. */
signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}
880
/* Run the "impurify" operation with all threads suspended and
   normalized; returns whatever impurify returns. */
signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}
886
887
888
889
890
891
/* Write-fault dispatch table, indexed by a protected_area's "why"
   field (see handle_protection_violation).  NOTE(review): entry
   order presumably mirrors the "why" enum (one spurious, three
   soft-overflow, three hard-overflow kinds) -- confirm against the
   enum's declaration. */
protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };
902
903
/* Return true if the memory fault described by xp/info was a write
   access.  Prefers the siginfo when present; otherwise falls back on
   the DSISR "store" bit and the trap number from the context. */
Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 8 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  /* DSISR bit 25 (big-endian numbering) distinguishes stores; the
     expected trap value is platform-specific (note the Darwin
     constant is the expression 0x0300/0x100, i.e. 3). */
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DSR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
964
/*
  Central handler for memory protection faults.  In order, it checks
  for: a fault at the thread's registered "safe reference" address, a
  fault inside the touch_page probe, a write into a registered
  protected area (dispatched through protection_handlers), and a
  write into the readonly area (which is simply unprotected).
  Anything else is passed to the lisp callback (when the thread was
  in lisp) and reported as unhandled (-1).
*/
OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  /* Faulting address: siginfo when available, else the DAR SPR. */
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    /* A "safe" reference faulted: return 0 in imm0 and skip it. */
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    /* touch_page probe faulted: report failure and resume at its
       end marker. */
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        /* Write into the mapped readonly area: quietly unprotect
           the containing page and retry. */
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}
1013
1014
1015
1016
1017
/* A write hit a hard stack-overflow guard page: there's no room left
   to recover, so abandon the current lisp state and re-enter lisp
   via reset_lisp_process. */
OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}
1027
1028extern area*
1029allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
1030
1031extern area*
1032allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
1033
#ifdef EXTEND_VSTACK
/* True if spPtr is the control-stack frame of some catch frame on
   the current catch chain. */
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame *frame;

  for (frame = (catch_frame *) untag(lisp_global(CATCH_TOP));
       frame != NULL;
       frame = (catch_frame *) untag(frame->link)) {
    if (spPtr == ((lisp_frame *) frame->csp)) {
      return true;
    }
  }
  return false;
}
#endif
1048
1049Boolean
1050unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
1051{
1052  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
1053      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel  from the cleanup form */
1054    return true;
1055  } else {
1056    return false;
1057  }
1058}
1059
1060Boolean
1061lexpr_entry_frame_p(lisp_frame *spPtr)
1062{
1063  LispObj savelr = spPtr->savelr;
1064  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
1065  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
1066  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);
1067
1068  return
1069    (savelr == lexpr_return1v) ||
1070    (savelr == lexpr_return) ||
1071    ((savelr == ret1valn) &&
1072     (((spPtr->backlink)->savelr) == lexpr_return));
1073}
1074
1075Boolean
1076lisp_frame_p(lisp_frame *spPtr)
1077{
1078  LispObj savefn;
1079  /* We can't just look at the size of the stack frame under the EABI
1080     calling sequence, but that's the first thing to check. */
1081  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
1082    return false;
1083  }
1084  savefn = spPtr->savefn;
1085  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
1086 
1087}
1088
1089
1090int ffcall_overflow_count = 0;
1091
1092/* Find a frame that is neither a catch frame nor one of the
1093   lexpr_entry frames We don't check for non-lisp frames here because
1094   we'll always stop before we get there due to a dummy lisp frame
1095   pushed by .SPcallback that masks out the foreign frames.  The one
1096   exception is that there is a non-lisp frame without a valid VSP
1097   while in the process of ppc-ff-call. We recognize that because its
1098   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
1099   next frame is 0, then we're executing an unwind-protect cleanup
1100   form, and the top stack frame belongs to its (no longer extant)
1101   catch frame.  */
1102
#ifdef EXTEND_VSTACK
/* Walk backlinks from the context's SP until we find a frame that is
   a plain lisp frame (see the long comment above for the rationale). */
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif
1120
#ifdef EXTEND_VSTACK
/* Return true iff any link of the special-binding (DB) chain lies
   within area a's [low, high) address range. */
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *lo = (LispObj *) a->low;
  LispObj *hi = (LispObj *) a->high;
  LispObj *link;

  for (link = (LispObj *) lisp_global(DB_LINK);
       link;
       link = (LispObj *) *link) {
    if ((link >= lo) && (link < hi)) {
      return true;
    }
  }
  return false;
}
#endif
1134
1135
1136
1137
1138/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
1139  the current value of VSP (TSP) or an older area.  */
1140
1141OSStatus
1142do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
1143{
1144  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1145  area *a = tcr->vs_area;
1146  protected_area_ptr vsp_soft = a->softprot;
1147  unprotect_area(vsp_soft);
1148  signal_stack_soft_overflow(xp,vsp);
1149  return 0;
1150}
1151
1152
1153OSStatus
1154do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
1155{
1156  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1157  area *a = tcr->ts_area;
1158  protected_area_ptr tsp_soft = a->softprot;
1159  unprotect_area(tsp_soft);
1160  signal_stack_soft_overflow(xp,tsp);
1161  return 0;
1162}
1163
1164OSStatus
1165do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
1166{
1167  /* Trying to write into a guard page on the vstack or tstack.
1168     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
1169     signal an error_stack_overflow condition.
1170      */
1171  lisp_protection_kind which = prot_area->why;
1172  Boolean on_TSP = (which == kTSPsoftguard);
1173
1174  if (on_TSP) {
1175    return do_tsp_overflow(xp, addr);
1176   } else {
1177    return do_vsp_overflow(xp, addr);
1178   }
1179}
1180
/* A write-protect fault that no specific handler claims.  Nothing to
   do here; return -1 so the fault is treated as unhandled. */
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
1189
1190
1191/*
1192  We have a couple of choices here.  We can simply unprotect the page
1193  and let the store happen on return, or we can try to emulate writes
1194  that we know will involve an intergenerational reference.  Both are
1195  correct as far as EGC constraints go, but the latter approach is
1196  probably more efficient.  (This only matters in the case where the
1197  GC runs after this exception handler returns but before the write
1198  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
1201  resumes, the page'll be re-protected, and we'll have taken this
1202  fault twice.  The whole scenario shouldn't happen very often, but
1203  (having already taken a fault and committed to an mprotect syscall)
1204  we might as well emulate stores involving intergenerational references,
1205  since they're pretty easy to identify.
1206
1207  Note that cases involving two or more threads writing to the same
1208  page (before either of them can run this handler) is benign: one
1209  invocation of the handler will just unprotect an unprotected page in
1210  that case.
1211
1212  If there are GCs (or any other suspensions of the thread between
1213  the time that the write fault was detected and the time that the
1214  exception lock is obtained) none of this stuff happens.
1215*/
1216
1217/*
1218  Return true (and emulate the instruction) iff:
1219  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra.rb"
1220     instruction.
1221  b) RS is a node register (>= fn)
1222  c) RS is tagged as a cons or vector
1223  d) RS is in some ephemeral generation.
1224  This is slightly conservative, since RS may be no younger than the
1225  EA being written to.
1226*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  /* Only consider word-aligned effective addresses. */
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);
   
    /* (a) must be an "stwx" or "stw" instruction */
    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);
     
      /* (b) RS must be a node register (>= fn) */
      if (rs >= fn) {
        /* (c) RS must be tagged as a uvector or cons */
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          /* (d) RS must point between the end of tenured space and the
             end of the active dynamic area, i.e. into an ephemeral
             generation. */
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            /* Perform the store here, so the caller can memoize the
               intergenerational reference and skip the instruction. */
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
1253
1254     
1255
1256
1257
1258
1259
/* Handle a floating-point exception: clear the FPSCR (saving lisp's
   view of it, minus reserved bits, in the TCR), re-enable FP
   exceptions, and report the offending FPU operation to lisp. */
OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}
1274
1275   
/* Nonzero if AltiVec is presumed available.  Defaults to 1;
   NOTE(review): nothing in this file clears it -- presumably
   platform-detection code elsewhere may. */
int
altivec_present = 1;
1278
1279
1280/* This only tries to implement the "optional" fsqrt and fsqrts
1281   instructions, which were generally implemented on IBM hardware
1282   but generally not available on Motorola/Freescale systems.
1283*/               
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  Clozure CL doesn't generate
     fsqrt. or fsqrts.
  */
  /* Check for an FPU major opcode whose extended-opcode bits (1..5)
     equal 22, i.e. fsqrt/fsqrts; the mask ignores the rc bit. */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    /* Preserve the reserved FPSCR bits from the live FPSCR. */
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);  /* skip the emulated instruction */
    return 0;
  }

  return -1;
}
1311
1312OSStatus
1313PMCL_exception_handler(int xnum, 
1314                       ExceptionInformation *xp, 
1315                       TCR *tcr, 
1316                       siginfo_t *info,
1317                       int old_valence)
1318{
1319  OSStatus status = -1;
1320  pc program_counter;
1321  opcode instruction = 0;
1322
1323
1324  program_counter = xpPC(xp);
1325 
1326  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
1327    instruction = *program_counter;
1328  }
1329
1330  if (instruction == ALLOC_TRAP_INSTRUCTION) {
1331    status = handle_alloc_trap(xp, tcr);
1332  } else if ((xnum == SIGSEGV) ||
1333             (xnum == SIGBUS)) {
1334    status = handle_protection_violation(xp, info, tcr, old_valence);
1335  } else if (xnum == SIGFPE) {
1336    status = handle_sigfpe(xp, tcr);
1337  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
1338    if (instruction == GC_TRAP_INSTRUCTION) {
1339      status = handle_gc_trap(xp, tcr);
1340    } else if (IS_UUO(instruction)) {
1341      status = handle_uuo(xp, instruction, program_counter);
1342    } else if (is_conditional_trap(instruction)) {
1343      status = handle_trap(xp, instruction, program_counter, info);
1344    } else {
1345      status = handle_unimplemented_instruction(xp,instruction,tcr);
1346    }
1347  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1348    tcr->interrupt_pending = 0;
1349    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
1350    status = 0;
1351  }
1352
1353  return status;
1354}
1355
/* Advance the exception PC by "delta" bytes.  xpPC is a pointer to
   32-bit opcodes, so the byte delta is converted to a word count via
   the shift; delta must be a multiple of 4 and may be negative (to
   back the PC up). */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
1361
1362
1363/*
1364  This wants to scan backwards until "where" points to an instruction
1365   whose major opcode is either 63 (double-float) or 59 (single-float)
1366*/
1367
OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  /* delta accumulates the (negative) byte distance scanned backwards */
  int i = TRAP_LOOKUP_TRIES, delta = 0;
 
  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      /* Reached the code vector header without finding an FPU op. */
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
 
  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */
 
  /* Back the PC up to the FPU instruction and clear FPSCR exception bits. */
  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;
 
  return err;

}
1401
/* Handle a UUO (an illegal instruction used as a kernel trap).
   Decodes the UUO's minor opcode, a register field (rb) and a 10-bit
   error number from the instruction, dispatches, and advances the PC
   past the UUO when the operation succeeded and wants the PC bumped.
   Returns 0 on success, -1 (or handle_error's status) otherwise. */
OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),        /* register field, bits 11..15 */
    errnum = 0x3ff & (the_uuo >> 16);   /* error number, bits 16..25 */

  OSStatus status = -1;

  int bump = 4;                 /* bytes to advance the PC on success */

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    {
      /* Thread-control operations pass the target TCR in arg_z and
         return their result in imm0. */
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        /* Non-continuable error: report to lisp. */
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    /* Continuable error. */
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    /* handle_fpux_binop adjusts the PC itself. */
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }
 
  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}
1485
/* If "where" points into the code vector of the function object
   "lisp_function", return that code vector; otherwise return 0. */
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);  /* first slot of the function */
    size = header_element_count(header_of(code_vector)) << 2;  /* words -> bytes */
    if ((untag(code_vector) < (natural)where) && 
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
1502
1503/* Callback to lisp to handle a trap. Need to translate the
1504   PC (where) into one of two forms of pairs:
1505
1506   1. If PC is in fn or nfn's code vector, use the register number
1507      of fn or nfn and the index into that function's code vector.
1508   2. Otherwise use 0 and the pc itself
1509*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  /* If the PC isn't inside fn's code vector, try nfn. */
  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    register_number = 0;        /* in neither: pass the raw PC (case 2) */
  else
    /* Word index of the PC within the code vector's data (case 1). */
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
1528
/* Call the foreign-callable function wrapped in callback_macptr with
   the machine context and five arguments.  Saves the stack pointers
   and allocation registers into the TCR before the call and restores
   the allocation registers afterwards, and releases the exception
   lock around the callback so other threads can take exceptions/GC. */
void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

#ifdef DARWIN
  enable_fp_exceptions();
#endif


  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock for callback\n", tcr);
#endif
  /* Drop the exception lock for the duration of the callback and
     reacquire it before touching the context again. */
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x acquired exception lock after callback\n", tcr);
#endif



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}
1574
1575area *
1576allocate_no_stack (natural size)
1577{
1578#ifdef SUPPORT_PRAGMA_UNUSED
1579#pragma unused(size)
1580#endif
1581
1582  return (area *) NULL;
1583}
1584
1585
1586
1587
1588
1589
1590/* callback to (symbol-value cmain) if it is a macptr,
1591   otherwise report cause and function name to console.
1592   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:
     
     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.
     
     b) If we're already in "yellow zone" mode, then:
     
     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible. If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.
     
     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.
     
     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;
     
    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        /* Enter yellow zone mode and tell lisp about the soft overflow. */
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);       
        signal_stack_soft_overflow(xp, sp);
      }
    }
   
    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    /* Otherwise, report the trap to lisp via cmain (if installed). */
    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(dbgout, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}
1712
1713
1714/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
1715   Stop if subtag_code_vector is encountered. */
1716unsigned
1717scan_for_instr( unsigned target, unsigned mask, pc where )
1718{
1719  int i = TRAP_LOOKUP_TRIES;
1720
1721  while( i-- ) {
1722    unsigned instr = *(--where);
1723    if ( codevec_hdr_p(instr) ) {
1724      return 0;
1725    } else if ( match_instr(instr, mask, target) ) {
1726      return instr;
1727    }
1728  }
1729  return 0;
1730}
1731
1732
1733void non_fatal_error( char *msg )
1734{
1735  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
1736  fflush( dbgout );
1737}
1738
/* Examine a trap instruction's major opcode and TO mask to classify it.  */
1740
/* Return 1 if "instr" is a conditional trap (or not a trap at all, in
   which case the debugger should see it), 0 if it will trap
   unconditionally. */
int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  /* The break instructions are treated as conditional here. */
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
1773
1774OSStatus
1775handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
1776{
1777  LispObj   errdisp = nrs_ERRDISP.vcell;
1778
1779  if ((fulltag_of(errdisp) == fulltag_misc) &&
1780      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1781    /* errdisp is a macptr, we can call back to lisp */
1782    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
1783    return(0);
1784    }
1785
1786  return(-1);
1787}
1788               
1789
1790/*
1791   Current thread has all signals masked.  Before unmasking them,
1792   make it appear that the current thread has been suspended.
1793   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
1795*/
/* Record the pending exception context and mark the thread as waiting
   for the exception lock BEFORE unblocking signals, so a suspending
   thread sees a consistent state.  Returns the previous valence so
   the caller can restore it on exit. */
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
} 
1807
/* Acquire the global exception lock, then push this exception context
   onto the TCR's xframe list and mark the thread as running foreign
   code (the handler itself is foreign code). */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
1824
/* Undo wait_for_exception_lock_in_handler: pop the xframe list entry
   (re-exposing its context as the pending exception context), mark
   the thread as returning from the exception, and release the lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
1836
1837/*
1838   If an interrupt is pending on exception exit, try to ensure
1839   that the thread sees it as soon as it's able to run.
1840*/
1841void
1842raise_pending_interrupt(TCR *tcr)
1843{
1844  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
1845    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1846  }
1847}
1848
/* Leave signal-handler state: block all signals, restore the thread's
   saved valence and clear the pending exception context. */
void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);
 
  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}
1859
1860
/* Top-level synchronous signal handler: publishes the exception
   context, honors any pending suspend request, acquires the exception
   lock, dispatches to PMCL_exception_handler, and falls into the
   kernel debugger if the exception wasn't handled. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
{
  TCR *tcr;
  int old_valence;
  xframe_list xframe_link;

  tcr = (TCR *) get_interrupt_tcr(false);
 
  /* The signal handler's entered with all signals (notably the
     thread_suspend signal) blocked.  Don't allow any other signals
     (notably the thread_suspend signal) to preempt us until we've
     set the TCR's xframe slot to include the current exception
     context.
  */
   
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);

  /* If a suspend was requested while signals were blocked, deliver
     the suspend signal to ourselves now. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

 
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  exit_signal_handler(tcr, old_valence);
  raise_pending_interrupt(tcr);
}
1905
1906/*
1907  If it looks like we're in the middle of an atomic operation, make
1908  it seem as if that operation is either complete or hasn't started
1909  yet.
1910
1911  The cases handled include:
1912
1913  a) storing into a newly-allocated lisp frame on the stack.
1914  b) marking a newly-allocated TSP frame as containing "raw" data.
1915  c) consing: the GC has its own ideas about how this should be
1916     handled, but other callers would be best advised to back
1917     up or move forward, according to whether we're in the middle
1918     of allocating a cons cell or allocating a uvector.
1919  d) a STMW to the vsp
1920  e) EGC write-barrier subprims.
1921*/
1922
1923extern opcode
1924  egc_write_barrier_start,
1925  egc_write_barrier_end, 
1926  egc_store_node_conditional, 
1927  egc_store_node_conditional_test,
1928  egc_set_hash_key, egc_set_hash_key_did_store,
1929  egc_gvset, egc_gvset_did_store,
1930  egc_rplaca, egc_rplaca_did_store,
1931  egc_rplacd, egc_rplacd_did_store,
1932  egc_set_hash_key_conditional,
1933  egc_set_hash_key_conditional_test;
1934
1935
1936extern opcode ffcall_return_window, ffcall_return_window_end;
1937
1938void
1939pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1940{
1941  pc program_counter = xpPC(xp);
1942  opcode instr = *program_counter;
1943  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1944  LispObj cur_allocptr = xpGPR(xp, allocptr);
1945  int allocptr_tag = fulltag_of(cur_allocptr);
1946 
1947
1948
1949  if ((program_counter < &egc_write_barrier_end) && 
1950      (program_counter >= &egc_write_barrier_start)) {
1951    LispObj *ea = 0, val = 0, root = 0;
1952    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1953    Boolean need_check_memo = true, need_memoize_root = false;
1954
1955    if (program_counter >= &egc_set_hash_key_conditional) {
1956      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1957          ((program_counter == &egc_set_hash_key_conditional_test) &&
1958           (! (xpCCR(xp) & 0x20000000)))) {
1959        return;
1960      }
1961      root = xpGPR(xp,arg_x);
1962      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1963      need_memoize_root = true;
1964    } else if (program_counter >= &egc_store_node_conditional) {
1965      if ((program_counter < &egc_store_node_conditional_test) ||
1966          ((program_counter == &egc_store_node_conditional_test) &&
1967           (! (xpCCR(xp) & 0x20000000)))) {
1968        /* The conditional store either hasn't been attempted yet, or
1969           has failed.  No need to adjust the PC, or do memoization. */
1970        return;
1971      }
1972      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1973      xpGPR(xp,arg_z) = t_value;
1974    } else if (program_counter >= &egc_set_hash_key) {
1975      if (program_counter < &egc_set_hash_key_did_store) {
1976        return;
1977      }
1978      root = xpGPR(xp,arg_x);
1979      val = xpGPR(xp,arg_z);
1980      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1981      need_memoize_root = true;
1982    } else if (program_counter >= &egc_gvset) {
1983      if (program_counter < &egc_gvset_did_store) {
1984        return;
1985      }
1986      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1987      val = xpGPR(xp,arg_z);
1988    } else if (program_counter >= &egc_rplacd) {
1989      if (program_counter < &egc_rplacd_did_store) {
1990        return;
1991      }
1992      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1993      val = xpGPR(xp,arg_z);
1994    } else {                      /* egc_rplaca */
1995      if (program_counter < &egc_rplaca_did_store) {
1996        return;
1997      }
1998      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1999      val = xpGPR(xp,arg_z);
2000    }
2001    if (need_check_memo) {
2002      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
2003      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
2004          ((LispObj)ea < val)) {
2005        atomic_set_bit(refbits, bitnumber);
2006        atomic_set_bit(global_refidx, bitnumber>>8);
2007        if (need_memoize_root) {
2008          bitnumber = area_dnode(root, lisp_global(REF_BASE));
2009          atomic_set_bit(refbits, bitnumber);
2010          atomic_set_bit(global_refidx,bitnumber>>8);
2011        }
2012      }
2013    }
2014    set_xpPC(xp, xpLR(xp));
2015    return;
2016  }
2017
2018
2019  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
2020    LispObj tsp_val = xpGPR(xp,tsp);
2021   
2022    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
2023    adjust_exception_pc(xp, 4);
2024    return;
2025  }
2026 
2027  if (frame->backlink == (frame+1)) {
2028    if (
2029#ifdef PPC64
2030        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
2031        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
2032#else
2033        (major_opcode_p(instr, major_opcode_STW)) && 
2034#endif
2035        (RA_field(instr) == sp) &&
2036        /* There are a few places in the runtime that store into
2037           a previously-allocated frame atop the stack when
2038           throwing values around.  We only care about the case
2039           where the frame was newly allocated, in which case
2040           there must have been a CREATE_LISP_FRAME_INSTRUCTION
2041           a few instructions before the current program counter.
2042           (The whole point here is that a newly allocated frame
2043           might contain random values that we don't want the
2044           GC to see; a previously allocated frame should already
2045           be completely initialized.)
2046        */
2047        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
2048         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
2049         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
2050#ifdef PPC64
2051      int disp = DS_field(instr);
2052#else     
2053      int disp = D_field(instr);
2054#endif
2055
2056
2057      if (disp < (4*node_size)) {
2058#if 0
2059        fprintf(dbgout, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
2060#endif
2061        frame->savevsp = 0;
2062        if (disp < (3*node_size)) {
2063          frame->savelr = 0;
2064          if (disp == node_size) {
2065            frame->savefn = 0;
2066          }
2067        }
2068      }
2069      return;
2070    }
2071  }
2072
2073  if (allocptr_tag != tag_fixnum) {
2074    signed_natural disp = allocptr_displacement(xp);
2075
2076    if (disp) {
2077      /* Being architecturally "at" the alloc trap doesn't tell
2078         us much (in particular, it doesn't tell us whether
2079         or not the thread has committed to taking the trap
2080         and is waiting for the exception lock (or waiting
2081         for the Mach exception thread to tell it how bad
2082         things are) or is about to execute a conditional
2083         trap.
2084         Regardless of which case applies, we want the
2085         other thread to take (or finish taking) the
2086         trap, and we don't want it to consider its
2087         current allocptr to be valid.
2088         The difference between this case (suspend other
2089         thread for GC) and the previous case (suspend
2090         current thread for interrupt) is solely a
2091         matter of what happens after we leave this
2092         function: some non-current thread will stay
2093         suspended until the GC finishes, then take
2094         (or start processing) the alloc trap.   The
2095         current thread will go off and do PROCESS-INTERRUPT
2096         or something, and may return from the interrupt
2097         and need to finish the allocation that got interrupted.
2098      */
2099
2100      if (alloc_disp) {
2101        *alloc_disp = disp;
2102        xpGPR(xp,allocptr) += disp;
2103        /* Leave the PC at the alloc trap.  When the interrupt
2104           handler returns, it'll decrement allocptr by disp
2105           and the trap may or may not be taken.
2106        */
2107      } else {
2108        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2109        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2110        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2111      }
2112    } else {
2113#ifdef DEBUG
2114      fprintf(dbgout, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2115#endif
2116      /* If we're already past the alloc_trap, finish allocating
2117         the object. */
2118      if (allocptr_tag == fulltag_cons) {
2119        finish_allocating_cons(xp);
2120#ifdef DEBUG
2121          fprintf(dbgout, "finish allocating cons in TCR = #x%x\n",
2122                  tcr);
2123#endif
2124      } else {
2125        if (allocptr_tag == fulltag_misc) {
2126#ifdef DEBUG
2127          fprintf(dbgout, "finish allocating uvector in TCR = #x%x\n",
2128                  tcr);
2129#endif
2130          finish_allocating_uvector(xp);
2131        } else {
2132          Bug(xp, "what's being allocated here ?");
2133        }
2134      }
2135      /* Whatever we finished allocating, reset allocptr/allocbase to
2136         VOID_ALLOCPTR */
2137      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2138    }
2139    return;
2140  }
2141
2142  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2143    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2144    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2145#if 0
2146        fprintf(dbgout, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2147#endif
2148
2149    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2150      deref(frame,idx) = 0;
2151    }
2152    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2153    return;
2154  }
2155
2156#ifndef PC64
2157  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2158      (RA_field(instr) == vsp)) {
2159    int r;
2160    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2161   
2162    for (r = RS_field(instr); r <= 31; r++) {
2163      *vspptr++ = xpGPR(xp,r);
2164    }
2165    adjust_exception_pc(xp, 4);
2166  }
2167#endif
2168}
2169
/*
  Signal handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If the thread's
  interrupt level says interrupts are disabled, just record that an
  interrupt is pending; otherwise (when it's safe) acquire the
  exception lock and dispatch to the lisp-level handler via CMAIN.
*/
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupts disabled: flag it as pending (a fixnum 1) and
         let the thread notice later. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      /* Only dispatch if CMAIN holds a macptr (i.e. the lisp-level
         exception callback has been installed). */
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          /* Defer: re-flag as pending at the TCR's interrupt level. */
          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
         
          /* Nudge the interrupted context out of any uninterruptible
             uninstruction sequence (allocation, memoization, frame
             construction); disp records any allocptr adjustment that
             must be undone after the handler runs. */
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
#ifdef DEBUG
          fprintf(dbgout, "[0x%x acquired exception lock for interrupt]\n",tcr);
#endif
          PMCL_exception_handler(signum, context, tcr, info, old_valence);
          if (disp) {
            /* Restore allocptr so the interrupted allocation can
               resume (and possibly re-take the alloc trap). */
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
#ifdef DEBUG
          fprintf(dbgout, "[0x%x released exception lock for interrupt]\n",tcr);
#endif
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
    DarwinSigReturn(context);
#endif
}
2221
2222
2223
2224void
2225install_signal_handler(int signo, void *handler, unsigned flags)
2226{
2227  struct sigaction sa;
2228  int err;
2229 
2230  sa.sa_sigaction = (void *)handler;
2231  sigfillset(&sa.sa_mask);
2232  sa.sa_flags = SA_SIGINFO;
2233
2234  if (flags & RESTART_SYSCALLS)
2235    sa.sa_flags |= SA_RESTART;
2236  if (flags & RESERVE_FOR_LISP) {
2237    extern sigset_t user_signals_reserved;
2238    sigaddset(&user_signals_reserved, signo);
2239  }
2240
2241  err = sigaction(signo, &sa, NULL);
2242  if (err) {
2243    perror("sigaction");
2244    exit(1);
2245  }
2246}
2247
2248void
2249install_pmcl_exception_handlers()
2250{
2251
2252  extern int no_sigtrap;
2253  install_signal_handler(SIGILL, (void *)signal_handler, RESERVE_FOR_LISP);
2254  if (no_sigtrap != 1) {
2255    install_signal_handler(SIGTRAP, (void *)signal_handler, RESERVE_FOR_LISP);
2256  }
2257  install_signal_handler(SIGBUS,  (void *)signal_handler, RESERVE_FOR_LISP);
2258  install_signal_handler(SIGSEGV, (void *)signal_handler, RESERVE_FOR_LISP);
2259  install_signal_handler(SIGFPE, (void *)signal_handler, RESERVE_FOR_LISP);
2260
2261 
2262  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2263                         (void *)interrupt_handler, RESERVE_FOR_LISP);
2264  signal(SIGPIPE, SIG_IGN);
2265}
2266
2267void
2268thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
2269{
2270  TCR *tcr = get_tcr(false);
2271  area *a;
2272  sigset_t mask;
2273 
2274  sigemptyset(&mask);
2275
2276  if (tcr) {
2277    tcr->valence = TCR_STATE_FOREIGN;
2278    a = tcr->vs_area;
2279    if (a) {
2280      a->active = a->high;
2281    }
2282    a = tcr->ts_area;
2283    if (a) {
2284      a->active = a->high;
2285    }
2286    a = tcr->cs_area;
2287    if (a) {
2288      a->active = a->high;
2289    }
2290  }
2291 
2292  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2293  pthread_exit(NULL);
2294}
2295
2296void
2297thread_signal_setup()
2298{
2299  thread_suspend_signal = SIG_SUSPEND_THREAD;
2300  thread_kill_signal = SIG_KILL_THREAD;
2301
2302  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
2303                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
2304  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler,
2305                         RESERVE_FOR_LISP);
2306}
2307
2308
2309
2310void
2311unprotect_all_areas()
2312{
2313  protected_area_ptr p;
2314
2315  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2316    unprotect_area(p);
2317  }
2318}
2319
2320/*
2321  A binding subprim has just done "twlle limit_regno,idx_regno" and
2322  the trap's been taken.  Extend the tcr's tlb so that the index will
2323  be in bounds and the new limit will be on a page boundary, filling
2324  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2325  the tcr fields and the registers in the xp and return true if this
2326  all works, false otherwise.
2327
2328  Note that the tlb was allocated via malloc, so realloc can do some
2329  of the hard work.
2330*/
2331Boolean
2332extend_tcr_tlb(TCR *tcr, 
2333               ExceptionInformation *xp, 
2334               unsigned limit_regno,
2335               unsigned idx_regno)
2336{
2337  unsigned
2338    index = (unsigned) (xpGPR(xp,idx_regno)),
2339    old_limit = tcr->tlb_limit,
2340    new_limit = align_to_power_of_2(index+1,12),
2341    new_bytes = new_limit-old_limit;
2342  LispObj
2343    *old_tlb = tcr->tlb_pointer,
2344    *new_tlb = realloc(old_tlb, new_limit),
2345    *work;
2346
2347  if (new_tlb == NULL) {
2348    return false;
2349  }
2350 
2351  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2352
2353  while (new_bytes) {
2354    *work++ = no_thread_local_binding_marker;
2355    new_bytes -= sizeof(LispObj);
2356  }
2357  tcr->tlb_pointer = new_tlb;
2358  tcr->tlb_limit = new_limit;
2359  xpGPR(xp, limit_regno) = new_limit;
2360  return true;
2361}
2362
2363
2364
/*
  One-time initialization of the exception subsystem: install the
  process-wide signal handlers used by the lisp runtime.
*/
void
exception_init()
{
  install_pmcl_exception_handlers();
}
2370
2371
Note: See TracBrowser for help on using the repository browser.