source: release/1.9/source/lisp-kernel/ppc-exceptions.c @ 16083

Last change on this file since 16083 was 15755, checked in by gb, 6 years ago

Recent changes from trunk.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 64.7 KB
Line 
1/*
2   Copyright (C) 2009 Clozure Associates
3   Copyright (C) 1994-2001 Digitool, Inc
4   This file is part of Clozure CL. 
5
6   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
7   License , known as the LLGPL and distributed with Clozure CL as the
8   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
9   which is distributed with Clozure CL as the file "LGPL".  Where these
10   conflict, the preamble takes precedence. 
11
12   Clozure CL is referenced in the preamble as the "LIBRARY."
13
14   The LLGPL is also available online at
15   http://opensource.franz.com/preamble.html
16*/
17
18#include "lisp.h"
19#include "lisp-exceptions.h"
20#include "lisp_globals.h"
21#include <ctype.h>
22#include <stdio.h>
23#include <stddef.h>
24#include <string.h>
25#include <stdarg.h>
26#include <errno.h>
27#include <stdio.h>
28#ifdef LINUX
29#include <strings.h>
30#include <sys/mman.h>
31#include <fpu_control.h>
32#include <linux/prctl.h>
33#endif
34
35#ifdef DARWIN
36#include <sys/mman.h>
37#define _FPU_RESERVED 0xffffff00
38#ifndef SA_NODEFER
39#define SA_NODEFER 0
40#endif
41#include <sysexits.h>
42
43/* a distinguished UUO at a distinguished address */
44extern void pseudo_sigreturn(ExceptionInformation *);
45#endif
46
47
48#include "threads.h"
49
50#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
51#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
52#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
53extern void enable_fp_exceptions(void);
54extern void disable_fp_exceptions(void);
55
56#ifdef LINUX
/* Some relatively recent kernels support this interface.
   If this prctl isn't supported, assume that we're always
   running with exceptions enabled and "precise".
*/
61#ifndef PR_SET_FPEXC
62#define PR_SET_FPEXC 12
63#endif
64#ifndef PR_FP_EXC_DISABLED
65#define PR_FP_EXC_DISABLED 0
66#endif
67#ifndef PR_FP_EXC_PRECISE
68#define PR_FP_EXC_PRECISE 3
69#endif
70
/* Ask the kernel to deliver floating-point exceptions precisely.
   If PR_SET_FPEXC isn't supported, the prctl fails silently and we
   assume exceptions are already enabled and precise (see comment
   above).  Definition now uses "(void)" to match the prototype
   declared earlier in this file. */
void
enable_fp_exceptions(void)
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
}
76
/* Ask the kernel to stop delivering floating-point exceptions.
   A silent failure of the prctl is tolerated (older kernels).
   Definition now uses "(void)" to match the prototype declared
   earlier in this file. */
void
disable_fp_exceptions(void)
{
  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
}
82
83#endif
84
85/*
86  Handle exceptions.
87
88*/
89
90extern LispObj lisp_nil;
91
92extern natural lisp_heap_gc_threshold;
93extern Boolean grow_dynamic_area(natural);
94
95
96
97
98
99
/* Size in bytes of a virtual-memory page, and its base-2 logarithm
   (4096 == 1<<12). */
int
page_size = 4096;

int
log2_page_size = 12;
105
106
107
108
109
/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    /* Variable-size case: "subf allocptr,RA,allocptr"; the amount
       subtracted is in the register named by the RA field. */
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    /* Constant-size case: "addi allocptr,allocptr,-disp"; the (negative)
       displacement is the instruction's low 16 bits, so negate it. */
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) | 
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    /* An allocation trap not preceded by a recognized decrement is a
       kernel bug. */
    Bug(xp, "Can't determine allocation displacement");
  }
  /* PC isn't at an allocation trap. */
  return 0;
}
140
141
/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      /* Reached the tag-clearing instruction: emulate it and resume
         execution at the instruction after it. */
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
   
    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      /* Emulate a store into the new cell's car; the value is in the
         instruction's RT register. */
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
193
/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      /* Reached the tag-clearing instruction: emulate it and resume
         at the following instruction. */
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      /* Emulate the store of the uvector's header word. */
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment: {rt/ra} <- allocptr */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}
232
233
/*
  Try to make bytes_needed bytes available at the thread's allocation
  pointer: first by simply claiming a new heap segment, then by GCing
  (forcing a full GC unless the request dwarfs everything allocated so
  far) and retrying with heap growth allowed.  On success, allocptr is
  adjusted by disp_from_allocptr and true is returned; false means the
  request could not be satisfied.
*/
Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed, 
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
#endif
    return true;
  }
 
  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }
 
  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
#ifdef DEBUG
    fprintf(dbgout, "New heap segment for #x%x after GC: #x%x/#x%x\n",
            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
#endif
    return true;
  }
 
  return false;
}
282
283#ifndef XNOMEM
284#define XNOMEM 10
285#endif
286
287void
288update_bytes_allocated(TCR* tcr, void *cur_allocptr)
289{
290  BytePtr
291    last = (BytePtr) tcr->last_allocptr, 
292    current = (BytePtr) cur_allocptr;
293  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
294    tcr->bytes_allocated += last-current;
295  }
296  tcr->last_allocptr = 0;
297}
298
299void
300lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
301{
302  /* Couldn't allocate the object.  If it's smaller than some arbitrary
303     size (say 128K bytes), signal a "chronically out-of-memory" condition;
304     else signal a "allocation request failed" condition.
305  */
306  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
307  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
308}
309
/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z
*/
317
318Boolean
319allocate_list(ExceptionInformation *xp, TCR *tcr)
320{
321  natural
322    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
323    bytes_needed = (nconses << dnode_shift);
324  LispObj
325    prev = lisp_nil,
326    current,
327    initial = xpGPR(xp,arg_y);
328
329  if (nconses == 0) {
330    /* Silly case */
331    xpGPR(xp,arg_z) = lisp_nil;
332    xpGPR(xp,allocptr) = lisp_nil;
333    return true;
334  }
335  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
336  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
337    for (current = xpGPR(xp,allocptr);
338         nconses;
339         prev = current, current+= dnode_size, nconses--) {
340      deref(current,0) = prev;
341      deref(current,1) = initial;
342    }
343    xpGPR(xp,arg_z) = prev;
344    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
345    xpGPR(xp,allocptr)-=fulltag_cons;
346  } else {
347    lisp_allocation_failure(xp,tcr,bytes_needed);
348  }
349  return true;
350}
351
/*
  The thread hit an allocation trap.  Determine the size of the failed
  allocation from allocptr's tag (cons vs. uvector) and the preceding
  decrement instruction, make room for it, and resume execution past
  the trap.  Returns 0 on success, -1 if the trap can't be interpreted
  or the allocation ultimately fails.
*/
OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    /* A cons cell: fixed size, fixed displacement. */
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    /* A uvector: recover the size from the decrement instruction that
       preceded the trap (register-sized "subf" or immediate "addi";
       compare allocptr_displacement() above). */
    if (match_instr(prev_instr, 
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) | 
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(dbgout, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      /* Skip the trap instruction and resume. */
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}
414
415natural gc_deferred = 0, full_gc_deferred = 0;
416
417signed_natural
418flash_freeze(TCR *tcr, signed_natural param)
419{
420  return 0;
421}
422
/*
  Dispatch for the GC trap.  The selector (in imm0) chooses the
  operation; arg (in imm1) is the operation's argument.  Results are
  returned to lisp in imm0 or arg_z.  Always advances the PC past the
  trap instruction and returns 0.
*/
OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0), 
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Enable (arg != 0) or disable the EGC; return T/NIL reflecting
       the EGC's previous state. */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* Set the three ephemeral generations' thresholds from
       arg_x/arg_y/arg_z. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    /* Round the requested threshold up to a whole number of heap
       segments; non-positive requests are ignored. */
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold = 
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    /* Make everything currently allocated static (up to a page
       boundary) without copying; return the new static size in
       bytes via imm0. */
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  default:
    /* Some flavor of full GC, possibly followed by impurify/purify,
       save-application, and/or freeze. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }
   
    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    /* If this GC was deferred (GC inhibited), remember that a full GC
       is still wanted. */
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;
       
        /* Stash the toplevel function (the value at the bottom of the
           value stack) where the saved image expects to find it. */
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }
   
    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;
   
  }

  /* Skip the trap instruction. */
  adjust_exception_pc(xp,4);
  return 0;
}
557
558
559
/* A stack (identified by the register number "reg") crossed its soft
   overflow limit; raise a lisp stack-overflow error so lisp can unwind
   while there's still guard space left. */
void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The cstack just overflowed.  Force the current thread's
     control stack to do so until all stacks are well under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
}
573
574/*
575  Lower (move toward 0) the "end" of the soft protected area associated
576  with a by a page, if we can.
577*/
578
579void
580adjust_soft_protection_limit(area *a)
581{
582  char *proposed_new_soft_limit = a->softlimit - 4096;
583  protected_area_ptr p = a->softprot;
584 
585  if (proposed_new_soft_limit >= (p->start+16384)) {
586    p->end = proposed_new_soft_limit;
587    p->protsize = p->end-p->start;
588    a->softlimit = proposed_new_soft_limit;
589  }
590  protect_area(p);
591}
592
/* Re-establish the soft (guard) limit for one of the current thread's
   stacks after a soft overflow has been handled.  stkreg names the
   stack register: sp (control stack), vsp (value stack) or tsp (temp
   stack). */
void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case sp:
    a = tcr->cs_area;
    /* Lower the control stack's soft limit by a page, but never to
       within 16KB of its hard limit; mirror the limit in the TCR. */
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  case tsp:
    /* Last case: no break needed. */
    a = tcr->ts_area;
    adjust_soft_protection_limit(a);
  }
}
616
/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  /* Capture the thread's current allocation state ... */
  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  /* ... and unwind the value/temp stacks back to the topmost catch
     frame before restarting lisp. */
  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}
633
/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  /* Both the segment base and the requested size are rounded up to the
     thread's allocation quantum. */
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      /* Try to grow the heap with progressively smaller amounts of
         extra headroom; give up once the headroom would shrink below
         4MB. */
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  /* Hand the segment [oldlimit, newlimit) to the thread: allocbase
     gets the old limit, allocptr the new. */
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
676
677 
678void
679update_area_active (area **aptr, BytePtr value)
680{
681  area *a = *aptr;
682  for (; a; a = a->older) {
683    if ((a->low <= value) && (a->high >= value)) break;
684  };
685  if (a == NULL) Bug(NULL, "Can't find active area");
686  a->active = value;
687  *aptr = a;
688
689  for (a = a->younger; a; a = a->younger) {
690    a->active = a->high;
691  }
692}
693
694LispObj *
695tcr_frame_ptr(TCR *tcr)
696{
697  ExceptionInformation *xp;
698  LispObj *bp = NULL;
699
700  if (tcr->pending_exception_context)
701    xp = tcr->pending_exception_context;
702  else {
703    xp = tcr->suspend_context;
704  }
705  if (xp) {
706    bp = (LispObj *) xpGPR(xp, sp);
707  }
708  return bp;
709}
710
/*
  Bring tcr's stack-area bookkeeping and allocation accounting up to
  date before a GC-like operation.  xp is the context the thread
  stopped in (NULL if it was in a foreign-function call);
  is_other_tcr is true when tcr is not the thread driving the GC.
*/
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      /* Nudge the suspended thread out of any allocation-in-progress
         state before trusting its registers. */
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      /* Only an untagged allocptr represents a real allocation
         pointer. */
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(dbgout, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(dbgout, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call.  No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(dbgout, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(dbgout, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }


  /* Invalidate the saved allocation pointers and fold any outstanding
     allocation into the thread's byte count. */
  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}
767
768TCR *gc_tcr = NULL;
769
770/* Suspend and "normalize" other tcrs, then call a gc-like function
771   in that context.  Resume the other tcrs, then return what the
772   function returned */
773
774signed_natural
775gc_like_from_xp(ExceptionInformation *xp, 
776                signed_natural(*fun)(TCR *, signed_natural), 
777                signed_natural param)
778{
779  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
780  int result;
781  signed_natural inhibit;
782
783  suspend_other_threads(true);
784  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
785  if (inhibit != 0) {
786    if (inhibit > 0) {
787      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
788    }
789    resume_other_threads(true);
790    gc_deferred++;
791    return 0;
792  }
793  gc_deferred = 0;
794
795  gc_tcr = tcr;
796
797  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
798  xpGPR(xp, allocbase) = VOID_ALLOCPTR;
799
800  normalize_tcr(xp, tcr, false);
801
802
803  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
804    if (other_tcr->pending_exception_context) {
805      other_tcr->gc_context = other_tcr->pending_exception_context;
806    } else if (other_tcr->valence == TCR_STATE_LISP) {
807      other_tcr->gc_context = other_tcr->suspend_context;
808    } else {
809      /* no pending exception, didn't suspend in lisp state:
810         must have executed a synchronous ff-call.
811      */
812      other_tcr->gc_context = NULL;
813    }
814    normalize_tcr(other_tcr->gc_context, other_tcr, true);
815  }
816   
817
818
819  result = fun(tcr, param);
820
821  other_tcr = tcr;
822  do {
823    other_tcr->gc_context = NULL;
824    other_tcr = other_tcr->next;
825  } while (other_tcr != tcr);
826
827  gc_tcr = NULL;
828
829  resume_other_threads(true);
830
831  return result;
832
833}
834
835
836
/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

#ifdef DEBUG
  fprintf(dbgout, "Start GC  in 0x%lx\n", tcr);
#endif
  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  /* Freed space = drop in the free pointer plus any growth of the
     area's high bound during the GC. */
  return ((oldfree-newfree)+(newend-oldend));
}
860
861signed_natural
862gc_from_xp(ExceptionInformation *xp, signed_natural param)
863{
864  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
865
866  freeGCptrs();
867  return status;
868}
869
/* Run purify in the context of the exception xp, with all other
   threads suspended and normalized. */
signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}
875
/* Run impurify in the context of the exception xp, with all other
   threads suspended and normalized. */
signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}
881
882
883
884
885
886
/* Write-fault dispatch table, indexed by a protected_area's "why"
   field (see handle_protection_violation()). */
protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,   
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };
897
898
/* Decide whether the memory fault described by xp/info was caused by a
   write access (as opposed to a read or an unrelated fault). */
Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 16 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  /* No siginfo: fall back on the PPC DSISR "store" bit, qualified by
     the DSI trap number. */
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) == 
#ifdef LINUX
0x0300
#endif
#ifdef DARWIN
0x0300/0x100  /* i.e. 3 -- NOTE(review): Darwin apparently reports the
                 trap code scaled down by 0x100; confirm */
#endif
)
         );
#if 0 
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DSR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
959
/*
  Handle a memory-protection fault: recognize the distinguished
  safe-reference and touch_page probes, dispatch genuine write faults
  on known protected areas to protection_handlers[], transparently
  unprotect lazily-protected read-only space, and otherwise report the
  fault to lisp (when it happened in lisp code).  Returns 0 if handled,
  -1 otherwise.
*/
OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  /* Prefer the fault address from siginfo; fall back on the DAR. */
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    /* A "safe reference" probe faulted: skip the faulting instruction
       and report failure (0) in imm0. */
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    /* A touch_page() probe faulted: report failure and resume at its
       end. */
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      /* Writes into the mapped-but-protected portion of readonly
         space just unprotect the containing page. */
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    /* Let lisp's callback (nrs_CMAIN) see the fault as a SIGBUS. */
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}
1008
1009
1010
1011
1012
/* A stack overflowed into its hard guard pages: there's no room left
   to recover gracefully, so reset the lisp process.
   (NOTE(review): reset_lisp_process() calls start_lisp() and is
   presumably not expected to return; -1 is returned for form's sake.) */
OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}
1022
1023extern area*
1024allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
1025
1026extern area*
1027allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
1028
#ifdef EXTEND_VSTACK
/* True if spPtr is the control-stack frame of some catch frame on the
   current catch chain. */
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));

  for (; catch; catch = (catch_frame *) untag(catch->link)) {
    if (spPtr == ((lisp_frame *) catch->csp)) {
      return true;
    }
  }
  return false;
}
#endif
1043
1044Boolean
1045unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
1046{
1047  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
1048      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel  from the cleanup form */
1049    return true;
1050  } else {
1051    return false;
1052  }
1053}
1054
/* True if spPtr's frame was pushed by a lexpr entry sequence: its saved
   LR is one of the distinguished lexpr-return addresses, or it's the
   ret1valn return address with the parent frame returning through
   lexpr_return. */
Boolean
lexpr_entry_frame_p(lisp_frame *spPtr)
{
  LispObj savelr = spPtr->savelr;
  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);

  return
    (savelr == lexpr_return1v) ||
    (savelr == lexpr_return) ||
    ((savelr == ret1valn) &&
     (((spPtr->backlink)->savelr) == lexpr_return));
}
1069
1070Boolean
1071lisp_frame_p(lisp_frame *spPtr)
1072{
1073  LispObj savefn;
1074  /* We can't just look at the size of the stack frame under the EABI
1075     calling sequence, but that's the first thing to check. */
1076  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
1077    return false;
1078  }
1079  savefn = spPtr->savefn;
1080  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
1081 
1082}
1083
1084
/* Count of frames whose backlink didn't point just past the frame,
   noticed in find_non_catch_frame_from_xp; per the comment there it
   exists mostly as a breakpoint target. */
int ffcall_overflow_count = 0;
1086
1087/* Find a frame that is neither a catch frame nor one of the
1088   lexpr_entry frames We don't check for non-lisp frames here because
1089   we'll always stop before we get there due to a dummy lisp frame
1090   pushed by .SPcallback that masks out the foreign frames.  The one
1091   exception is that there is a non-lisp frame without a valid VSP
1092   while in the process of ppc-ff-call. We recognize that because its
1093   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
1094   next frame is 0, then we're executing an unwind-protect cleanup
1095   form, and the top stack frame belongs to its (no longer extant)
1096   catch frame.  */
1097
#ifdef EXTEND_VSTACK
/* Walk backlinks from the exception frame's SP until we reach a
   frame that is a plain lisp frame: not mid-ppc-ff-call, not an
   unwind-protect cleanup, not a catch frame, and not a lexpr entry
   frame.  See the block comment above for the rationale. */
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
  }
  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(spPtr) ||
         catch_frame_p(spPtr) ||
         lexpr_entry_frame_p(spPtr) ; ) {
     spPtr = spPtr->backlink;
     };
  return spPtr;
}
#endif
1115
#ifdef EXTEND_VSTACK
/* True iff any link of the special-binding (DB_LINK) chain lies
   within area "a"'s [low, high) bounds. */
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *link = (LispObj *) lisp_global(DB_LINK);
  LispObj *low = (LispObj *) a->low;
  LispObj *high = (LispObj *) a->high;

  while (link) {
    if ((link >= low) && (link < high)) {
      return true;
    }
    link = (LispObj *) *link;
  }
  return false;
}
#endif
1129
1130
1131
1132
1133/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
1134  the current value of VSP (TSP) or an older area.  */
1135
1136OSStatus
1137do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
1138{
1139  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1140  area *a = tcr->vs_area;
1141  protected_area_ptr vsp_soft = a->softprot;
1142  unprotect_area(vsp_soft);
1143  signal_stack_soft_overflow(xp,vsp);
1144  return 0;
1145}
1146
1147
1148OSStatus
1149do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
1150{
1151  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
1152  area *a = tcr->ts_area;
1153  protected_area_ptr tsp_soft = a->softprot;
1154  unprotect_area(tsp_soft);
1155  signal_stack_soft_overflow(xp,tsp);
1156  return 0;
1157}
1158
1159OSStatus
1160do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
1161{
1162  /* Trying to write into a guard page on the vstack or tstack.
1163     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
1164     signal an error_stack_overflow condition.
1165      */
1166  lisp_protection_kind which = prot_area->why;
1167  Boolean on_TSP = (which == kTSPsoftguard);
1168
1169  if (on_TSP) {
1170    return do_tsp_overflow(xp, addr);
1171   } else {
1172    return do_vsp_overflow(xp, addr);
1173   }
1174}
1175
/* A write-protect fault that matches no protected area we manage;
   decline it (return -1) so the caller treats it as unhandled. */
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
1184
1185
1186/*
1187  We have a couple of choices here.  We can simply unprotect the page
1188  and let the store happen on return, or we can try to emulate writes
1189  that we know will involve an intergenerational reference.  Both are
1190  correct as far as EGC constraints go, but the latter approach is
1191  probably more efficient.  (This only matters in the case where the
1192  GC runs after this exception handler returns but before the write
1193  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
1196  resumes, the page'll be re-protected, and we'll have taken this
1197  fault twice.  The whole scenario shouldn't happen very often, but
1198  (having already taken a fault and committed to an mprotect syscall)
1199  we might as well emulate stores involving intergenerational references,
1200  since they're pretty easy to identify.
1201
1202  Note that cases involving two or more threads writing to the same
1203  page (before either of them can run this handler) is benign: one
1204  invocation of the handler will just unprotect an unprotected page in
1205  that case.
1206
1207  If there are GCs (or any other suspensions of the thread between
1208  the time that the write fault was detected and the time that the
1209  exception lock is obtained) none of this stuff happens.
1210*/
1211
1212/*
1213  Return true (and emulate the instruction) iff:
1214  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra.rb"
1215     instruction.
1216  b) RS is a node register (>= fn)
1217  c) RS is tagged as a cons or vector
1218  d) RS is in some ephemeral generation.
1219  This is slightly conservative, since RS may be no younger than the
1220  EA being written to.
1221*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  /* Only consider word-aligned effective addresses. */
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);
   
    /* Faulting instruction must be stwx or stw (see criteria (a) in
       the comment above). */
    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);
     
      /* RS must be a node register (>= fn) ... */
      if (rs >= fn) {
        /* ... tagged as a vector or cons ... */
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          /* ... and pointing into an ephemeral generation (above
             tenured space, below the active dynamic area's high
             bound).  If so, perform the store here and report that
             it was emulated. */
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
1248
1249     
1250
1251
1252
1253
1254
/* SIGFPE: clear the FPSCR, re-enable FP exceptions, capture the
   lisp thread's view of the FPSCR (minus reserved bits), and report
   the failing FP operation via handle_fpux_binop. */
OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}
1269
1270   
/* Nonzero if AltiVec is treated as present.  NOTE(review):
   initialized to 1 unconditionally here; presumably cleared
   elsewhere if the hardware lacks AltiVec — confirm. */
int
altivec_present = 1;
1273
1274
/* This only tries to implement the "optional" fsqrt and fsqrts
   instructions, which were generally implemented on IBM hardware
   but generally not available on Motorola/Freescale systems.
*/               
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  Clozure CL doesn't generate
     fsqrt. or fsqrts.
  */
  /* Accept a single- or double-precision FPU major opcode whose
     extended-opcode field (bits 1-5, Rc bit masked off) is 22,
     i.e. fsqrt/fsqrts. */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    /* sqrt is declared locally rather than via <math.h>. */
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    /* Merge the host FPU's reserved bits into the context's FPSCR. */
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}
1306
1307OSStatus
1308PMCL_exception_handler(int xnum, 
1309                       ExceptionInformation *xp, 
1310                       TCR *tcr, 
1311                       siginfo_t *info,
1312                       int old_valence)
1313{
1314  OSStatus status = -1;
1315  pc program_counter;
1316  opcode instruction = 0;
1317
1318
1319  program_counter = xpPC(xp);
1320 
1321  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
1322    instruction = *program_counter;
1323  }
1324
1325  if (instruction == ALLOC_TRAP_INSTRUCTION) {
1326    status = handle_alloc_trap(xp, tcr);
1327  } else if ((xnum == SIGSEGV) ||
1328             (xnum == SIGBUS)) {
1329    status = handle_protection_violation(xp, info, tcr, old_valence);
1330  } else if (xnum == SIGFPE) {
1331    status = handle_sigfpe(xp, tcr);
1332  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
1333    if (instruction == GC_TRAP_INSTRUCTION) {
1334      status = handle_gc_trap(xp, tcr);
1335    } else if (IS_UUO(instruction)) {
1336      status = handle_uuo(xp, instruction, program_counter);
1337    } else if (is_conditional_trap(instruction)) {
1338      status = handle_trap(xp, instruction, program_counter, info);
1339    } else {
1340      status = handle_unimplemented_instruction(xp,instruction,tcr);
1341    }
1342  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
1343    tcr->interrupt_pending = 0;
1344    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
1345    status = 0;
1346  }
1347
1348  return status;
1349}
1350
/* Advance the exception context's PC by "delta" bytes.  xpPC is an
   instruction (opcode) pointer, so pointer arithmetic scales by 4;
   convert the byte delta to a word count first.  delta must be a
   multiple of 4 (callers pass 4 or word-multiples). */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
1356
1357
1358/*
1359  This wants to scan backwards until "where" points to an instruction
1360   whose major opcode is either 63 (double-float) or 59 (single-float)
1361*/
1362
/* Scan backwards from "where" (up to TRAP_LOOKUP_TRIES words, giving
   up at a code-vector header) for the single- or double-precision
   FPU instruction that raised the exception, then report the
   corresponding error to lisp and back the PC up to that
   instruction.  NOTE(review): if the scan exhausts its tries without
   a match, errnum remains 0 and handle_error is still called —
   confirm that's intended. */
OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;
 
  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
 
  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */
 
  /* Point the PC back at the FPU instruction and clear the FPSCR
     exception/status bits (keep only the low-order bits). */
  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;
 
  return err;

}
1396
/* Dispatch an "undefined user operation" (UUO) instruction.  The
   UUO's minor opcode selects the operation; rb (bits 11-15) names a
   register operand and errnum (bits 16-25) an error code, both
   decoded from the instruction itself.  On success the PC is bumped
   past the UUO unless the case says otherwise.  Returns 0 if
   handled, otherwise -1 or handle_error's result. */
OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    /* Kernel-service errors: most of these operate on the TCR passed
       in arg_z and return a result in imm0. */
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        /* Not a kernel service: report a non-continuable error. */
        status = handle_error(xp, errnum, rb, 0,  where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    /* Continuable error. */
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    /* handle_fpux_binop adjusts the PC itself. */
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }
 
  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}
1480
1481natural
1482register_codevector_contains_pc (natural lisp_function, pc where)
1483{
1484  natural code_vector, size;
1485
1486  if ((fulltag_of(lisp_function) == fulltag_misc) &&
1487      (header_subtag(header_of(lisp_function)) == subtag_function)) {
1488    code_vector = deref(lisp_function, 1);
1489    size = header_element_count(header_of(code_vector)) << 2;
1490    if ((untag(code_vector) < (natural)where) && 
1491        ((natural)where < (code_vector + size)))
1492      return(code_vector);
1493  }
1494
1495  return(0);
1496}
1497
1498/* Callback to lisp to handle a trap. Need to translate the
1499   PC (where) into one of two forms of pairs:
1500
1501   1. If PC is in fn or nfn's code vector, use the register number
1502      of fn or nfn and the index into that function's code vector.
1503   2. Otherwise use 0 and the pc itself
1504*/
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  /* Try fn first, then nfn, to find which register's function
     contains the PC (form 1 in the comment above). */
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    /* Form 2: register 0 and the raw PC. */
    register_number = 0;
  else
    /* Form 1: word index of the PC within the code vector's data. */
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
1523
/* Call the foreign function named by callback_macptr with the
   exception context and five arguments, after saving the context's
   stack pointers and allocation registers into the TCR.  The
   exception lock is released around the call and re-acquired
   afterwards; the allocation registers are then copied back into
   the context (lisp may have consed during the callback). */
void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));

#ifdef DARWIN
  enable_fp_exceptions();
#endif


  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x acquired exception lock after callback\n", tcr);
#endif



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}
1569
/* Stack-area allocator stub: never allocates anything; always
   returns NULL.  The size argument is ignored. */
area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}
1579
1580
1581
1582
1583
1584
1585/* callback to (symbol-value cmain) if it is a macptr,
1586   otherwise report cause and function name to console.
1587   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:
     
     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.
     
     b) If we're already in "yellow zone" mode, then:
     
     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible. If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.
     
     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.
     
     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;
     
    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        /* Enter yellow zone mode and tell lisp about it. */
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);       
        signal_stack_soft_overflow(xp, sp);
      }
    }
   
    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      /* Breakpoint with an optional message string in r3; resume at
         the saved LR. */
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    /* Anything else: forward to lisp's CMAIN callback if it's bound
       to a macptr. */
    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(dbgout, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}
1707
1708
1709/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
1710   Stop if subtag_code_vector is encountered. */
1711unsigned
1712scan_for_instr( unsigned target, unsigned mask, pc where )
1713{
1714  int i = TRAP_LOOKUP_TRIES;
1715
1716  while( i-- ) {
1717    unsigned instr = *(--where);
1718    if ( codevec_hdr_p(instr) ) {
1719      return 0;
1720    } else if ( match_instr(instr, mask, target) ) {
1721      return instr;
1722    }
1723  }
1724  return 0;
1725}
1726
1727
/* Report a non-fatal error message on the debug output stream and
   flush it immediately. */
void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}
1733
/* Classify trap instructions (tw/twi) as conditional or unconditional.  */
1735
/* Return 1 if "instr" is a trap instruction that might not trap
   (i.e. is conditional), 0 if it's unconditional or not a trap at
   all.  The lisp breakpoint instructions are reported as
   conditional (non-MACOS) so they're handled rather than treated
   as unconditional traps. */
int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;                 /* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;                 /* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;                 /* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;                   /* must be conditional */
  }
  return 0;                     /* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
1768
1769OSStatus
1770handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
1771{
1772  LispObj   errdisp = nrs_ERRDISP.vcell;
1773
1774  if ((fulltag_of(errdisp) == fulltag_misc) &&
1775      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
1776    /* errdisp is a macptr, we can call back to lisp */
1777    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
1778    return(0);
1779    }
1780
1781  return(-1);
1782}
1783               
1784
1785/*
1786   Current thread has all signals masked.  Before unmasking them,
1787   make it appear that the current thread has been suspended.
1788   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
1790*/
/* Record the pending exception context and mark this thread as
   waiting for the exception lock BEFORE unmasking signals, so that
   a suspending/GCing thread sees consistent state (see the comment
   above).  Returns the thread's previous valence so the caller can
   restore it. */
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
} 
1802
/* Acquire the global exception lock, then push "context" onto the
   TCR's xframe list and mark the thread as running foreign code.
   The pending_exception_context is cleared once the context is
   reachable via the xframe list. */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
1819
/* Undo wait_for_exception_lock_in_handler: pop the top xframe back
   into pending_exception_context, mark the thread as returning from
   an exception, and release the global exception lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
1831
1832/*
1833   If an interrupt is pending on exception exit, try to ensure
1834   that the thread sees it as soon as it's able to run.
1835*/
/* If the TCR's interrupt level is positive, send the thread the
   process-interrupt signal so it's delivered as soon as the thread
   can run (see the comment above). */
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}
1843
/* Leave the signal handler: block all signals again, restore the
   thread's pre-handler valence, and clear the pending exception
   context. */
void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);
 
  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}
1854
1855
/* Top-level synchronous signal handler: establish the exception
   context on the TCR, honor any deferred suspend request, take the
   exception lock, and dispatch to PMCL_exception_handler.  If the
   exception isn't handled, drop into the kernel debugger. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
{
  TCR *tcr;
  int old_valence;
  xframe_list xframe_link;

  tcr = (TCR *) get_interrupt_tcr(false);
 
  /* The signal handler's entered with all signals (notably the
     thread_suspend signal) blocked.  Don't allow any other signals
     (notably the thread_suspend signal) to preempt us until we've
     set the TCR's xframe slot to include the current exception
     context.
  */
   
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);

  /* A suspend arrived while we had signals blocked: deliver it to
     ourselves now that it can be handled. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

 
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  exit_signal_handler(tcr, old_valence);
  raise_pending_interrupt(tcr);
}
1900
1901/*
1902  If it looks like we're in the middle of an atomic operation, make
1903  it seem as if that operation is either complete or hasn't started
1904  yet.
1905
1906  The cases handled include:
1907
1908  a) storing into a newly-allocated lisp frame on the stack.
1909  b) marking a newly-allocated TSP frame as containing "raw" data.
1910  c) consing: the GC has its own ideas about how this should be
1911     handled, but other callers would be best advised to back
1912     up or move forward, according to whether we're in the middle
1913     of allocating a cons cell or allocating a uvector.
1914  d) a STMW to the vsp
1915  e) EGC write-barrier subprims.
1916*/
1917
1918extern opcode
1919  egc_write_barrier_start,
1920  egc_write_barrier_end, 
1921  egc_store_node_conditional, 
1922  egc_store_node_conditional_test,
1923  egc_set_hash_key, egc_set_hash_key_did_store,
1924  egc_gvset, egc_gvset_did_store,
1925  egc_rplaca, egc_rplaca_did_store,
1926  egc_rplacd, egc_rplacd_did_store,
1927  egc_set_hash_key_conditional,
1928  egc_set_hash_key_conditional_test;
1929
1930
1931extern opcode ffcall_return_window, ffcall_return_window_end;
1932
1933void
1934pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
1935{
1936  pc program_counter = xpPC(xp);
1937  opcode instr = *program_counter;
1938  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
1939  LispObj cur_allocptr = xpGPR(xp, allocptr);
1940  int allocptr_tag = fulltag_of(cur_allocptr);
1941 
1942
1943
1944  if ((program_counter < &egc_write_barrier_end) && 
1945      (program_counter >= &egc_write_barrier_start)) {
1946    LispObj *ea = 0, val = 0, root = 0;
1947    bitvector refbits = (bitvector)(lisp_global(REFBITS));
1948    Boolean need_check_memo = true, need_memoize_root = false;
1949
1950    if (program_counter >= &egc_set_hash_key_conditional) {
1951      if ((program_counter < &egc_set_hash_key_conditional_test) ||
1952          ((program_counter == &egc_set_hash_key_conditional_test) &&
1953           (! (xpCCR(xp) & 0x20000000)))) {
1954        return;
1955      }
1956      root = xpGPR(xp,arg_x);
1957      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1958      need_memoize_root = true;
1959    } else if (program_counter >= &egc_store_node_conditional) {
1960      if ((program_counter < &egc_store_node_conditional_test) ||
1961          ((program_counter == &egc_store_node_conditional_test) &&
1962           (! (xpCCR(xp) & 0x20000000)))) {
1963        /* The conditional store either hasn't been attempted yet, or
1964           has failed.  No need to adjust the PC, or do memoization. */
1965        return;
1966      }
1967      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
1968      xpGPR(xp,arg_z) = t_value;
1969    } else if (program_counter >= &egc_set_hash_key) {
1970      if (program_counter < &egc_set_hash_key_did_store) {
1971        return;
1972      }
1973      root = xpGPR(xp,arg_x);
1974      val = xpGPR(xp,arg_z);
1975      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
1976      need_memoize_root = true;
1977    } else if (program_counter >= &egc_gvset) {
1978      if (program_counter < &egc_gvset_did_store) {
1979        return;
1980      }
1981      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
1982      val = xpGPR(xp,arg_z);
1983    } else if (program_counter >= &egc_rplacd) {
1984      if (program_counter < &egc_rplacd_did_store) {
1985        return;
1986      }
1987      ea = (LispObj *) untag(xpGPR(xp,arg_y));
1988      val = xpGPR(xp,arg_z);
1989    } else {                      /* egc_rplaca */
1990      if (program_counter < &egc_rplaca_did_store) {
1991        return;
1992      }
1993      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
1994      val = xpGPR(xp,arg_z);
1995    }
1996    if (need_check_memo) {
1997      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
1998      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
1999          ((LispObj)ea < val)) {
2000        atomic_set_bit(refbits, bitnumber);
2001        if (need_memoize_root) {
2002          bitnumber = area_dnode(root, lisp_global(REF_BASE));
2003          atomic_set_bit(refbits, bitnumber);
2004        }
2005      }
2006    }
2007    set_xpPC(xp, xpLR(xp));
2008    return;
2009  }
2010
2011
2012  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
2013    LispObj tsp_val = xpGPR(xp,tsp);
2014   
2015    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
2016    adjust_exception_pc(xp, 4);
2017    return;
2018  }
2019 
2020  if (frame->backlink == (frame+1)) {
2021    if (
2022#ifdef PPC64
2023        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
2024        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
2025#else
2026        (major_opcode_p(instr, major_opcode_STW)) && 
2027#endif
2028        (RA_field(instr) == sp) &&
2029        /* There are a few places in the runtime that store into
2030           a previously-allocated frame atop the stack when
2031           throwing values around.  We only care about the case
2032           where the frame was newly allocated, in which case
2033           there must have been a CREATE_LISP_FRAME_INSTRUCTION
2034           a few instructions before the current program counter.
2035           (The whole point here is that a newly allocated frame
2036           might contain random values that we don't want the
2037           GC to see; a previously allocated frame should already
2038           be completely initialized.)
2039        */
2040        ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
2041         (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
2042         (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
2043#ifdef PPC64
2044      int disp = DS_field(instr);
2045#else     
2046      int disp = D_field(instr);
2047#endif
2048
2049
2050      if (disp < (4*node_size)) {
2051#if 0
2052        fprintf(dbgout, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
2053#endif
2054        frame->savevsp = 0;
2055        if (disp < (3*node_size)) {
2056          frame->savelr = 0;
2057          if (disp == node_size) {
2058            frame->savefn = 0;
2059          }
2060        }
2061      }
2062      return;
2063    }
2064  }
2065
2066  if (allocptr_tag != tag_fixnum) {
2067    signed_natural disp = allocptr_displacement(xp);
2068
2069    if (disp) {
2070      /* Being architecturally "at" the alloc trap doesn't tell
2071         us much (in particular, it doesn't tell us whether
2072         or not the thread has committed to taking the trap
2073         and is waiting for the exception lock (or waiting
2074         for the Mach exception thread to tell it how bad
2075         things are) or is about to execute a conditional
2076         trap.
2077         Regardless of which case applies, we want the
2078         other thread to take (or finish taking) the
2079         trap, and we don't want it to consider its
2080         current allocptr to be valid.
2081         The difference between this case (suspend other
2082         thread for GC) and the previous case (suspend
2083         current thread for interrupt) is solely a
2084         matter of what happens after we leave this
2085         function: some non-current thread will stay
2086         suspended until the GC finishes, then take
2087         (or start processing) the alloc trap.   The
2088         current thread will go off and do PROCESS-INTERRUPT
2089         or something, and may return from the interrupt
2090         and need to finish the allocation that got interrupted.
2091      */
2092
2093      if (alloc_disp) {
2094        *alloc_disp = disp;
2095        xpGPR(xp,allocptr) += disp;
2096        /* Leave the PC at the alloc trap.  When the interrupt
2097           handler returns, it'll decrement allocptr by disp
2098           and the trap may or may not be taken.
2099        */
2100      } else {
2101        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
2102        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
2103        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
2104      }
2105    } else {
2106#ifdef DEBUG
2107      fprintf(dbgout, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
2108#endif
2109      /* If we're already past the alloc_trap, finish allocating
2110         the object. */
2111      if (allocptr_tag == fulltag_cons) {
2112        finish_allocating_cons(xp);
2113#ifdef DEBUG
2114          fprintf(dbgout, "finish allocating cons in TCR = #x%x\n",
2115                  tcr);
2116#endif
2117      } else {
2118        if (allocptr_tag == fulltag_misc) {
2119#ifdef DEBUG
2120          fprintf(dbgout, "finish allocating uvector in TCR = #x%x\n",
2121                  tcr);
2122#endif
2123          finish_allocating_uvector(xp);
2124        } else {
2125          Bug(xp, "what's being allocated here ?");
2126        }
2127      }
2128      /* Whatever we finished allocating, reset allocptr/allocbase to
2129         VOID_ALLOCPTR */
2130      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
2131    }
2132    return;
2133  }
2134
2135  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
2136    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
2137    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
2138#if 0
2139        fprintf(dbgout, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
2140#endif
2141
2142    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
2143      deref(frame,idx) = 0;
2144    }
2145    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
2146    return;
2147  }
2148
2149#ifndef PC64
2150  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
2151      (RA_field(instr) == vsp)) {
2152    int r;
2153    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
2154   
2155    for (r = RS_field(instr); r <= 31; r++) {
2156      *vspptr++ = xpGPR(xp,r);
2157    }
2158    adjust_exception_pc(xp, 4);
2159  }
2160#endif
2161}
2162
/*
   Handler for SIGNAL_FOR_PROCESS_INTERRUPT: try to deliver a lisp
   interrupt to the thread that received the signal.

   If the thread's interrupt level is negative, don't take the
   interrupt now; just record that one is pending.  Otherwise, if the
   callback in nrs_CMAIN is a valid macptr, either defer the interrupt
   (thread not running lisp code, or unwinding) or take it by calling
   PMCL_exception_handler under the exception lock.
*/
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupt level negative: just note that an interrupt is
         pending rather than processing it here. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      /* Only proceed if the lisp-side callback exists (a macptr). */
      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
         
          /* pc_luser_xp() nudges the interrupted context past any
             partially-executed uninterruptible sequence; if the thread
             was in the middle of an allocation, disp receives the
             allocptr displacement so we can restore it below. */
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
#ifdef DEBUG
          fprintf(dbgout, "[0x%x acquired exception lock for interrupt]\n",tcr);
#endif
          PMCL_exception_handler(signum, context, tcr, info, old_valence);
          if (disp) {
            /* Undo the allocptr adjustment made by pc_luser_xp(), so
               the thread can resume (and possibly retake) its
               interrupted allocation trap. */
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
#ifdef DEBUG
          fprintf(dbgout, "[0x%x released exception lock for interrupt]\n",tcr);
#endif
          exit_signal_handler(tcr, old_valence);
        }
      }
    }
  }
#ifdef DARWIN
    DarwinSigReturn(context);
#endif
}
2214
2215
2216
2217void
2218install_signal_handler(int signo, void *handler, unsigned flags)
2219{
2220  struct sigaction sa;
2221  int err;
2222 
2223  sa.sa_sigaction = (void *)handler;
2224  sigfillset(&sa.sa_mask);
2225  sa.sa_flags = SA_SIGINFO;
2226
2227  if (flags & RESTART_SYSCALLS)
2228    sa.sa_flags |= SA_RESTART;
2229  if (flags & RESERVE_FOR_LISP) {
2230    extern sigset_t user_signals_reserved;
2231    sigaddset(&user_signals_reserved, signo);
2232  }
2233
2234  err = sigaction(signo, &sa, NULL);
2235  if (err) {
2236    perror("sigaction");
2237    exit(1);
2238  }
2239}
2240
2241void
2242install_pmcl_exception_handlers()
2243{
2244
2245  extern int no_sigtrap;
2246  install_signal_handler(SIGILL, (void *)signal_handler, RESERVE_FOR_LISP);
2247  if (no_sigtrap != 1) {
2248    install_signal_handler(SIGTRAP, (void *)signal_handler, RESERVE_FOR_LISP);
2249  }
2250  install_signal_handler(SIGBUS,  (void *)signal_handler, RESERVE_FOR_LISP);
2251  install_signal_handler(SIGSEGV, (void *)signal_handler, RESERVE_FOR_LISP);
2252  install_signal_handler(SIGFPE, (void *)signal_handler, RESERVE_FOR_LISP);
2253
2254 
2255  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
2256                         (void *)interrupt_handler, RESERVE_FOR_LISP);
2257  signal(SIGPIPE, SIG_IGN);
2258}
2259
2260void
2261thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
2262{
2263  TCR *tcr = get_tcr(false);
2264  area *a;
2265  sigset_t mask;
2266 
2267  sigemptyset(&mask);
2268
2269  if (tcr) {
2270    tcr->valence = TCR_STATE_FOREIGN;
2271    a = tcr->vs_area;
2272    if (a) {
2273      a->active = a->high;
2274    }
2275    a = tcr->ts_area;
2276    if (a) {
2277      a->active = a->high;
2278    }
2279    a = tcr->cs_area;
2280    if (a) {
2281      a->active = a->high;
2282    }
2283  }
2284 
2285  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2286  pthread_exit(NULL);
2287}
2288
2289void
2290thread_signal_setup()
2291{
2292  thread_suspend_signal = SIG_SUSPEND_THREAD;
2293  thread_kill_signal = SIG_KILL_THREAD;
2294
2295  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
2296                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
2297  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler,
2298                         RESERVE_FOR_LISP);
2299}
2300
2301
2302
2303void
2304unprotect_all_areas()
2305{
2306  protected_area_ptr p;
2307
2308  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2309    unprotect_area(p);
2310  }
2311}
2312
2313/*
2314  A binding subprim has just done "twlle limit_regno,idx_regno" and
2315  the trap's been taken.  Extend the tcr's tlb so that the index will
2316  be in bounds and the new limit will be on a page boundary, filling
2317  in the new page(s) with 'no_thread_local_binding_marker'.  Update
2318  the tcr fields and the registers in the xp and return true if this
2319  all works, false otherwise.
2320
2321  Note that the tlb was allocated via malloc, so realloc can do some
2322  of the hard work.
2323*/
2324Boolean
2325extend_tcr_tlb(TCR *tcr, 
2326               ExceptionInformation *xp, 
2327               unsigned limit_regno,
2328               unsigned idx_regno)
2329{
2330  unsigned
2331    index = (unsigned) (xpGPR(xp,idx_regno)),
2332    old_limit = tcr->tlb_limit,
2333    new_limit = align_to_power_of_2(index+1,12),
2334    new_bytes = new_limit-old_limit;
2335  LispObj
2336    *old_tlb = tcr->tlb_pointer,
2337    *new_tlb = realloc(old_tlb, new_limit),
2338    *work;
2339
2340  if (new_tlb == NULL) {
2341    return false;
2342  }
2343 
2344  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2345
2346  while (new_bytes) {
2347    *work++ = no_thread_local_binding_marker;
2348    new_bytes -= sizeof(LispObj);
2349  }
2350  tcr->tlb_pointer = new_tlb;
2351  tcr->tlb_limit = new_limit;
2352  xpGPR(xp, limit_regno) = new_limit;
2353  return true;
2354}
2355
2356
2357
/*
   One-time initialization of the exception subsystem: install the
   lisp kernel's signal handlers.
*/
void
exception_init()
{
  install_pmcl_exception_handlers();
}
2363
2364
Note: See TracBrowser for help on using the repository browser.