source: trunk/source/lisp-kernel/arm-exceptions.c @ 15158

Last change on this file since 15158 was 15158, checked in by gb, 9 years ago

Define another 3-register-argument UUO ('uuo_error_array_axis_bounds');
use it to report array bounds errors for multidimensional array access
(incorporating the axis/dimension in the UUO and therefore the error
message.)

File size: 73.7 KB


/*
   Copyright (C) 2010 Clozure Associates
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <sys/mman.h>
#ifndef ANDROID
#include <fpu_control.h>
#include <linux/prctl.h>
#endif
#endif

#ifdef DARWIN
#include <sys/mman.h>
#ifndef SA_NODEFER
#define SA_NODEFER 0
#endif
#include <sysexits.h>


/* a distinguished UUO at a distinguished address */
extern void pseudo_sigreturn(ExceptionInformation *);
#endif


#include "threads.h"

#ifdef ANDROID
#define pthread_sigmask(how,in,out) rt_sigprocmask(how,in,out,8)
#endif

#ifdef LINUX

void
enable_fp_exceptions()
{
}

void
disable_fp_exceptions()
{
}
#endif

/*
  Handle exceptions.

*/

extern LispObj lisp_nil;

extern natural lisp_heap_gc_threshold;
extern Boolean grow_dynamic_area(natural);

Boolean allocation_enabled = true;




int
page_size = 4096;

int
log2_page_size = 12;





/*
  If the PC is pointing to an allocation trap, the previous instruction
  must have decremented allocptr.  Return the non-zero amount by which
  allocptr was decremented.
*/
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr;
  int delta = -3;

  if (IS_ALLOC_TRAP(instr)) {
    /* The alloc trap must have been preceded by a cmp and a
       load from tcr.allocbase. */
    if (IS_BRANCH_AROUND_ALLOC_TRAP(program_counter[-1])) {
      delta = -4;
    }
    prev_instr = program_counter[delta];

    if (IS_SUB_RM_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)xpGPR(xp,RM_field(prev_instr)));
    }

    if (IS_SUB_LO_FROM_ALLOCPTR(prev_instr)) {
      return -((signed_natural)(prev_instr & 0xff));
    }

    if (IS_SUB_FROM_ALLOCPTR(prev_instr)) {
      natural disp = ror(prev_instr&0xff,(prev_instr&0xf00)>>7);

      instr = program_counter[delta-1];
      if (IS_SUB_LO_FROM_ALLOCPTR(instr)) {
        return -((signed_natural)(disp | (instr & 0xff)));
      }
    }
    Bug(xp, "Can't determine allocation displacement");
  }
  return 0;
}
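
/* For illustration only (a sketch, not generated code): a typical
   inline allocation sequence looks something like

       sub   allocptr, allocptr, #<bytes>      @ decrement allocptr
       ldr   allocbase, [rcontext, #tcr.save_allocbase]
       cmp   allocptr, allocbase
       uuo_alloc_trap                          @ traps if allocptr < allocbase

   so with the PC at the trap, the decrement is normally at PC[-3],
   or at PC[-4] when a branch around the trap intervenes; that's the
   instruction allocptr_displacement() decodes above. */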


/*
  A cons cell's been successfully allocated, but the allocptr's
  still tagged (as fulltag_cons, of course.)  Emulate any instructions
  that might follow the allocation (stores to the car or cdr, an
  assignment to the "result" gpr) that take place while the allocptr's
  tag is non-zero, advancing over each such instruction.  When we're
  done, the cons cell will be allocated and initialized, the result
  register will point to it, the allocptr will be untagged, and
  the PC will point past the instruction that clears the allocptr's
  tag.
*/
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    } else if (IS_SET_ALLOCPTR_CAR_RD(instr)) {
      c->car = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_CDR_RD(instr)) {
      c->cdr = xpGPR(xp,RD_field(instr));
    } else {
      /* assert(IS_SET_ALLOCPTR_RESULT_RD(instr)) */
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    }
  }
}
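
/* A sketch (register names illustrative) of the kind of sequence
   finish_allocating_cons() emulates:

       str   rY, [allocptr, #cons.cdr]         @ initialize cdr
       str   rX, [allocptr, #cons.car]         @ initialize car
       mov   result, allocptr                  @ result := tagged cons
       bic   allocptr, allocptr, #fulltagmask  @ clear allocptr's tag

   Each emulated store/move updates the cons cell or register state and
   advances the saved PC; emulation stops just past the instruction that
   clears the tag. */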

/*
  We were interrupted in the process of allocating a uvector; we
  survived the allocation trap, and allocptr is tagged as fulltag_misc.
  Emulate any instructions which store a header into the uvector,
  assign the value of allocptr to some other register, and clear
  allocptr's tag.  Don't expect/allow any other instructions in
  this environment.
*/
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    if (IS_CLR_ALLOCPTR_TAG(instr)) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if (IS_SET_ALLOCPTR_HEADER_RD(instr)) {
      header_of(cur_allocptr) = xpGPR(xp,RD_field(instr));
    } else if (IS_SET_ALLOCPTR_RESULT_RD(instr)) {
      xpGPR(xp,RD_field(instr)) = cur_allocptr;
    } else {
      Bug(xp, "Unexpected instruction following alloc trap at " LISP ":",program_counter);
    }
  }
}

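/* Try to make bytes_needed bytes available at allocptr: maybe EGC
   first, then try to grab a heap segment; if that fails (and the
   request isn't bigger than everything allocated so far), force a
   full GC and try again, this time growing the heap if necessary. */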
Boolean
allocate_object(ExceptionInformation *xp,
                natural bytes_needed,
                signed_natural disp_from_allocptr,
                TCR *tcr)
{
  area *a = active_dynamic_area;

  /* Maybe do an EGC */
  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
    if (((a->active)-(a->low)) >= a->threshold) {
      gc_from_xp(xp, 0L);
    }
  }

  /* Life is pretty simple if we can simply grab a segment
     without extending the heap.
  */
  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  /* It doesn't make sense to try a full GC if the object
     we're trying to allocate is larger than everything
     allocated so far.
  */
  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
    untenure_from_area(tenured_area); /* force a full GC */
    gc_from_xp(xp, 0L);
  }

  /* Try again, growing the heap if necessary */
  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
    xpGPR(xp, allocptr) += disp_from_allocptr;
    return true;
  }

  return false;
}

#ifndef XNOMEM
#define XNOMEM 10
#endif

void
update_bytes_allocated(TCR* tcr, void *cur_allocptr)
{
  BytePtr
    last = (BytePtr) tcr->last_allocptr,
    current = (BytePtr) cur_allocptr;
  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
    tcr->bytes_allocated += last-current;
  }
  tcr->last_allocptr = 0;
}

void
lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
{
  /* Couldn't allocate the object.  If it's smaller than some arbitrary
     size (say 128K bytes), signal a "chronically out-of-memory" condition;
     else signal an "allocation request failed" condition.
  */
  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed,0, NULL);
}

/*
  Allocate a large list, where "large" means "large enough to
  possibly trigger the EGC several times if this was done
  by individually allocating each CONS."  The number of
  conses in question is in arg_z; on successful return,
  the list will be in arg_z.
*/

Boolean
allocate_list(ExceptionInformation *xp, TCR *tcr)
{
  natural
    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
    bytes_needed = (nconses << dnode_shift);
  LispObj
    prev = lisp_nil,
    current,
    initial = xpGPR(xp,arg_y);

  if (nconses == 0) {
    /* Silly case */
    xpGPR(xp,arg_z) = lisp_nil;
    xpGPR(xp,allocptr) = lisp_nil;
    return true;
  }
  update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
    for (current = xpGPR(xp,allocptr);
         nconses;
         prev = current, current+= dnode_size, nconses--) {
      deref(current,0) = prev;
      deref(current,1) = initial;
    }
    xpGPR(xp,arg_z) = prev;
    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
    xpGPR(xp,allocptr)-=fulltag_cons;
  } else {
    lisp_allocation_failure(xp,tcr,bytes_needed);
  }
  return true;
}

Boolean
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  if (!allocation_enabled) {
    /* Back up before the alloc_trap, then let pc_luser_xp() back
       up some more. */
    xpPC(xp)-=1;
    pc_luser_xp(xp,tcr, NULL);
    allocation_enabled = true;
    tcr->save_allocbase = (void *)VOID_ALLOCPTR;
    handle_error(xp, error_allocation_disabled,0,NULL);
    return true;
  }

  cur_allocptr = xpGPR(xp,allocptr);

  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_misc:
    disp = allocptr_displacement(xp);
    bytes_needed = (-disp) + fulltag_misc;
    break;

  default:
    return false;
  }

  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
  if (allocate_object(xp, bytes_needed, disp, tcr)) {
    adjust_exception_pc(xp,4);
    return true;
  }
  lisp_allocation_failure(xp,tcr,bytes_needed);
  return true;
}

natural gc_deferred = 0, full_gc_deferred = 0;

signed_natural
flash_freeze(TCR *tcr, signed_natural param)
{
  return 0;
}

Boolean
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /*  Try to put the current threshold in effect.  This may
        need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
    ensure_static_conses(xp,tcr,32768);
    break;

  case GC_TRAP_FUNCTION_FLASH_FREEZE:
    untenure_from_area(tenured_area);
    gc_like_from_xp(xp,flash_freeze,0);
    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
    tenured_area->static_dnodes = area_dnode(a->active, a->low);
    if (egc_was_enabled) {
      tenure_to_area(tenured_area);
    }
    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
    break;

  case GC_TRAP_FUNCTION_ALLOCATION_CONTROL:
    switch(arg) {
    case 0: /* disable if allocation enabled */
      xpGPR(xp, arg_z) = lisp_nil;
      if (allocation_enabled) {
        TCR *other_tcr;
        ExceptionInformation *other_context;
        suspend_other_threads(true);
        normalize_tcr(xp,tcr,false);
        for (other_tcr=tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
          other_context = other_tcr->pending_exception_context;
          if (other_context == NULL) {
            other_context = other_tcr->suspend_context;
          }
          normalize_tcr(other_context, other_tcr, true);
        }
        allocation_enabled = false;
        xpGPR(xp, arg_z) = t_value;
        resume_other_threads(true);
      }
      break;

    case 1:                     /* enable if disabled */
      xpGPR(xp, arg_z) = lisp_nil;
      if (!allocation_enabled) {
        allocation_enabled = true;
        xpGPR(xp, arg_z) = t_value;
      }
      break;

    default:
      xpGPR(xp, arg_z) = lisp_nil;
      if (allocation_enabled) {
        xpGPR(xp, arg_z) = t_value;
      }
      break;
    }
    break;


  default:
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned, Boolean);
        TCR *tcr = get_tcr(true);
        area *vsarea = tcr->vs_area;

        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg, egc_was_enabled);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {


      case GC_TRAP_FUNCTION_FREEZE:
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return true;
}



void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The stack indexed by "reg" just hit its soft overflow limit;
     report a stack-overflow error.  The guard page stays unprotected
     until the error's been handled and the stack is back under its
     limit.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, NULL);
}

/*
  Lower (move toward 0) the "end" of the soft protected area associated
  with the area "a" by a page, if we can.
*/

void
adjust_soft_protection_limit(area *a)
{
  char *proposed_new_soft_limit = a->softlimit - 4096;
  protected_area_ptr p = a->softprot;

  if (proposed_new_soft_limit >= (p->start+16384)) {
    p->end = proposed_new_soft_limit;
    p->protsize = p->end-p->start;
    a->softlimit = proposed_new_soft_limit;
  }
  protect_area(p);
}

void
restore_soft_stack_limit(unsigned stkreg)
{
  area *a;
  TCR *tcr = get_tcr(true);

  switch (stkreg) {
  case Rsp:
    a = tcr->cs_area;
    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
      a->softlimit -= 4096;
    }
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
    break;
  case vsp:
    a = tcr->vs_area;
    adjust_soft_protection_limit(a);
    break;
  }
}

/* Maybe this'll work someday.  We may have to do something to
   make the thread look like it's not handling an exception */
void
reset_lisp_process(ExceptionInformation *xp)
{
#if 0
  TCR *tcr = get_tcr(true);
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));

  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);

  start_lisp(tcr, 1);
#endif
}

/*
  This doesn't GC; it returns true if it made enough room, false
  otherwise.
  If "extend" is true, it can try to extend the dynamic area to
  satisfy the request.
*/

Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a  = active_dynamic_area;
  oldlimit = (natural) a->active;
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  tcr->save_allocbase = (void*) oldlimit;

  return true;
}

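/* Walk the chain starting at *aptr to the area containing "value",
   record "value" as that area's active pointer, and reset any younger
   areas' active pointers to their high ends. */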
void
update_area_active (area **aptr, BytePtr value)
{
  area *a = *aptr;
  for (; a; a = a->older) {
    if ((a->low <= value) && (a->high >= value)) break;
  };
  if (a == NULL) Bug(NULL, "Can't find active area");
  a->active = value;
  *aptr = a;

  for (a = a->younger; a; a = a->younger) {
    a->active = a->high;
  }
}

LispObj *
tcr_frame_ptr(TCR *tcr)
{
  ExceptionInformation *xp;
  LispObj *bp = NULL;

  if (tcr->pending_exception_context)
    xp = tcr->pending_exception_context;
  else {
    xp = tcr->suspend_context;
  }
  if (xp) {
    bp = (LispObj *) xpGPR(xp, Rsp);
  }
  return bp;
}

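/* Get a TCR into a state the GC can deal with: for another (suspended)
   thread, complete or back out any interrupted atomic sequence via
   pc_luser_xp(); record the thread's stack pointers in its stack
   areas; and flush allocptr/allocbase, crediting bytes_allocated. */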
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
  } else {
    /* In ff-call. */
    cur_allocptr = (void *) (tcr->save_allocptr);
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->cs_area, (BytePtr) tcr->last_lisp_frame);
  }

  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
    }
  }
}

TCR *gc_tcr = NULL;

/* Suspend and "normalize" other tcrs, then call a gc-like function
   in that context.  Resume the other tcrs, then return what the
   function returned. */

signed_natural
gc_like_from_xp(ExceptionInformation *xp,
                signed_natural(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = get_tcr(true), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  xpGPR(xp, allocptr) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }



  result = fun(tcr, param);

  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;
}



/* Returns #bytes freed by invoking GC */

signed_natural
gc_from_tcr(TCR *tcr, signed_natural param)
{
  area *a;
  BytePtr oldfree, newfree;
  BytePtr oldend, newend;

  a = active_dynamic_area;
  oldend = a->high;
  oldfree = a->active;
  gc(tcr, param);
  newfree = a->active;
  newend = a->high;
#if 0
  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
#endif
  return ((oldfree-newfree)+(newend-oldend));
}

signed_natural
gc_from_xp(ExceptionInformation *xp, signed_natural param)
{
  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);

  freeGCptrs();
  return status;
}

signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}

signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}






protection_handler
 * protection_handlers[] = {
   do_spurious_wp_fault,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_soft_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow,
   do_hard_stack_overflow
   };


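/* Assuming a VMSA-style fault status word, bit 11 is the WnR bit:
   set when the aborted access was a write. */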
Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  return ((xpFaultStatus(xp) & 0x800) != 0);
}

Boolean
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

#ifdef LINUX
  addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
#else
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpFaultAddress(xp)));
  }
#endif

  if (addr && (addr == tcr->safe_ref_address)) {
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return true;
  }

  if (xpPC(xp) == (pc)touch_page) {
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return true;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return true;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    LispObj cmain = nrs_CMAIN.vcell;

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {

      callback_for_trap(nrs_CMAIN.vcell, xp, is_write_fault(xp,info)?SIGBUS:SIGSEGV, (natural)addr, NULL);
    }
  }
  return false;
}




OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}

extern area*
allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */

extern area*
allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */






Boolean
lisp_frame_p(lisp_frame *spPtr)
{
  return (spPtr->marker == lisp_frame_marker);
}


int ffcall_overflow_count = 0;






/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
  the current value of VSP (TSP) or an older area.  */

OSStatus
do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
{
  TCR* tcr = get_tcr(true);
  area *a = tcr->vs_area;
  protected_area_ptr vsp_soft = a->softprot;
  unprotect_area(vsp_soft);
  signal_stack_soft_overflow(xp,vsp);
  return 0;
}



OSStatus
do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
{
  /* Trying to write into a guard page on the vstack or tstack.
     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
     signal an error_stack_overflow condition.
      */
  if (prot_area->why == kVSPsoftguard) {
    return do_vsp_overflow(xp,addr);
  }
  unprotect_area(prot_area);
  signal_stack_soft_overflow(xp,Rsp);
  return 0;
}

OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}




Boolean
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  return false;
}


Boolean
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  return false;
}

Boolean
handle_exception(int xnum,
                 ExceptionInformation *xp,
                 TCR *tcr,
                 siginfo_t *info,
                 int old_valence)
{
  pc program_counter;
  opcode instruction = 0;

  if (old_valence != TCR_STATE_LISP) {
    return false;
  }

  program_counter = xpPC(xp);

  if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
    instruction = *program_counter;
  }

  if (IS_ALLOC_TRAP(instruction)) {
    return handle_alloc_trap(xp, tcr);
  } else if ((xnum == SIGSEGV) ||
             (xnum == SIGBUS)) {
    return handle_protection_violation(xp, info, tcr, old_valence);
  } else if (xnum == SIGFPE) {
    return handle_sigfpe(xp, tcr);
  } else if (xnum == SIGILL) {
    if (IS_GC_TRAP(instruction)) {
      return handle_gc_trap(xp, tcr);
    } else if (IS_UUO(instruction)) {
      return handle_uuo(xp, info, instruction);
    } else {
      return handle_unimplemented_instruction(xp,instruction,tcr);
    }
  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
    tcr->interrupt_pending = 0;
    callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
    return true;
  }

  return false;
}

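/* "delta" is a byte count; xpPC() is typed as a pointer to 32-bit
   opcodes, so advance the PC by delta>>2 instructions. */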
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}


/*
  This wants to scan backwards until "where" points to an instruction
   whose major opcode is either 63 (double-float) or 59 (single-float)
*/

OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err = -1;
  opcode *there = (opcode *) where, instr, errnum = 0;
  return err;
}

Boolean
handle_uuo(ExceptionInformation *xp, siginfo_t *info, opcode the_uuo)
{
  unsigned
    format = UUO_FORMAT(the_uuo);
  Boolean handled = false;
  int bump = 4;
  TCR *tcr = get_tcr(true);

  switch (format) {
  case uuo_format_kernel_service:
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      int service = UUO_UNARY_field(the_uuo);

      switch (service) {
      case error_propagate_suspend:
        handled = true;
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        handled = true;
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        handled = true;
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        handled = true;
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        handled = true;
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        handled = true;
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        handled = true;
        break;
      case error_allocate_list:
        allocate_list(xp,tcr);
        handled = true;
        break;
      default:
        handled = false;
        break;
      }
      break;
    }

  case uuo_format_unary:
    switch(UUO_UNARY_field(the_uuo)) {
    case 3:
      if (extend_tcr_tlb(tcr,xp,UUOA_field(the_uuo))) {
        handled = true;
        bump = 4;
        break;
      }
      /* fall in */
    default:
      handled = false;
      break;

    }
    break;

  case uuo_format_nullary:
    switch (UUOA_field(the_uuo)) {
    case 3:
      adjust_exception_pc(xp, bump);
      bump = 0;
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      handled = true;
      break;

    case 4:
      tcr->interrupt_pending = 0;
      callback_for_trap(nrs_CMAIN.vcell, xp, 0, 0, NULL);
      handled = true;
      break;
    default:
      handled = false;
      break;
    }
    break;


  case uuo_format_error_lisptag:
  case uuo_format_error_fulltag:
  case uuo_format_error_xtype:
  case uuo_format_cerror_lisptag:
  case uuo_format_cerror_fulltag:
  case uuo_format_cerror_xtype:
  case uuo_format_nullary_error:
  case uuo_format_unary_error:
  case uuo_format_binary_error:
  case uuo_format_ternary:
  case uuo_format_ternary2:
    handled = handle_error(xp,0,the_uuo, &bump);
    break;

  default:
    handled = false;
    bump = 0;
  }

  if (handled && bump) {
    adjust_exception_pc(xp, bump);
  }
  return handled;
}

natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 2);
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}

Boolean
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, natural info,natural arg, int *bumpP)
{
  return callback_to_lisp(callback_macptr, xp, info,arg, bumpP);
}

Boolean
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, int *bumpP)
{
  natural  callback_ptr;
  area *a;
  natural fnreg = Rfn,  codevector, offset;
  pc where = xpPC(xp);
  int delta;

  codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
  if (codevector == 0) {
    fnreg = nfn;
    codevector = register_codevector_contains_pc(xpGPR(xp,fnreg), where);
    if (codevector == 0) {
      fnreg = 0;
    }
  }
  if (codevector) {
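    /* Express "where" as a byte offset from the code vector's first
       instruction; subtracting (fulltag_misc - node_size) from the
       tagged pointer yields the untagged address plus one node for
       the header. */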
    offset = (natural)where - (codevector - (fulltag_misc-node_size));
  } else {
    offset = (natural)where;
  }


  TCR *tcr = get_tcr(true);

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, Rsp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, fnreg, offset);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);

  if (bumpP) {
    *bumpP = delta;
  }

  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
  return true;
}

area *
allocate_no_stack (natural size)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(size)
#endif

  return (area *) NULL;
}





/* callback to (symbol-value cmain) if it is a macptr,
   otherwise report cause and function name to console.
   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Nothing's implemented here (yet); return an error status so the
     function has a well-defined return value. */
  return -1;
}




void non_fatal_error( char *msg )
{
  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
  fflush( dbgout );
}



Boolean
handle_error(ExceptionInformation *xp, unsigned arg1, unsigned arg2, int *bumpP)
{
  LispObj   errdisp = nrs_ERRDISP.vcell;

  if ((fulltag_of(errdisp) == fulltag_misc) &&
      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
    /* errdisp is a macptr, we can call back to lisp */
    return callback_for_trap(errdisp, xp, arg1, arg2, bumpP);
  }

  return false;
}


/*
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  return old_valence;
}

void
wait_for_exception_lock_in_handler(TCR *tcr,
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}

void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}

/*
   If an interrupt is pending on exception exit, try to ensure
   that the thread sees it as soon as it's able to run.
*/
void
raise_pending_interrupt(TCR *tcr)
{
  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
  }
}

void
exit_signal_handler(TCR *tcr, int old_valence, natural old_last_lisp_frame)
{
#ifndef ANDROID
  sigset_t mask;
  sigfillset(&mask);
#else
  int mask [] = {-1,-1};
#endif

  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
  tcr->last_lisp_frame = old_last_lisp_frame;
}


void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context
#ifdef DARWIN
, TCR *tcr, int old_valence, natural old_last_lisp_frame
#endif
)
{
  xframe_list xframe_link;
#ifndef DARWIN

    TCR *tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    natural  old_last_lisp_frame = tcr->last_lisp_frame;
    int old_valence;

    tcr->last_lisp_frame = xpGPR(context,Rsp);
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }


  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((!handle_exception(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, (old_valence != TCR_STATE_LISP), msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
    raise_pending_interrupt(tcr);
  }
}


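/* A conditional UUO encodes a standard ARM condition in its high
   bits: bits 31:29 select the flag test below and bit 28 ("flip")
   inverts its sense, mirroring the ARM condition-code table (EQ/NE,
   CS/CC, MI/PL, VS/VC, HI/LS, GE/LT, GT/LE, AL).  If the condition
   is false, the UUO is a no-op and is simply skipped. */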
void
sigill_handler(int signum, siginfo_t *info, ExceptionInformation  *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;

  if (IS_UUO(instr)) {
    natural psr = xpPSR(xp);
    Boolean opcode_matched_condition = false,
      flip = ((instr & (1<<28)) != 0);


    switch (instr >> 29) {
    case 0:
      opcode_matched_condition = ((psr & PSR_Z_MASK) != 0);
      break;
    case 1:
      opcode_matched_condition = ((psr & PSR_C_MASK) != 0);
      break;
    case 2:
      opcode_matched_condition = ((psr & PSR_N_MASK) != 0);
      break;
    case 3:
      opcode_matched_condition = ((psr & PSR_V_MASK) != 0);
      break;
    case 4:
      opcode_matched_condition = (((psr & PSR_C_MASK) != 0) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 5:
      opcode_matched_condition = (((psr & PSR_N_MASK) != 0) ==
                                  ((psr & PSR_V_MASK) != 0));
      break;
    case 6:
      opcode_matched_condition = ((((psr & PSR_N_MASK) != 0) ==
                                   ((psr & PSR_V_MASK) != 0)) &&
                                  ((psr & PSR_Z_MASK) == 0));
      break;
    case 7:
      opcode_matched_condition = true;
      flip = false;
      break;
    }
    if (flip) {
      opcode_matched_condition = !opcode_matched_condition;
    }
    if (!opcode_matched_condition) {
      adjust_exception_pc(xp,4);
      return;
    }
  }
  signal_handler(signum,info,xp);
}


#ifdef USE_SIGALTSTACK
void
invoke_handler_on_main_stack(int signo, siginfo_t *info, ExceptionInformation *xp, void *return_address, void *handler)
{
  ExceptionInformation *xp_copy;
  siginfo_t *info_copy;
  extern void call_handler_on_main_stack(int, siginfo_t *, ExceptionInformation *,void *, void *);

  BytePtr target_sp= (BytePtr)xpGPR(xp,Rsp);
  target_sp -= sizeof(ucontext_t);
  xp_copy = (ExceptionInformation *)target_sp;
  memmove(target_sp,xp,sizeof(*xp));
  xp_copy->uc_stack.ss_sp = 0;
  xp_copy->uc_stack.ss_size = 0;
  xp_copy->uc_stack.ss_flags = 0;
  xp_copy->uc_link = NULL;
  target_sp -= sizeof(siginfo_t);
  info_copy = (siginfo_t *)target_sp;
  memmove(target_sp,info,sizeof(*info));
  call_handler_on_main_stack(signo, info_copy, xp_copy, return_address, handler);
}

void
altstack_signal_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
{
  TCR *tcr=get_tcr(true);

  if (signo == SIGBUS) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;
    if (((BytePtr)truncate_to_power_of_2(addr,log2_page_size)) == a->softlimit) {
      if (mmap(a->softlimit,
               page_size,
               PROT_READ|PROT_WRITE|PROT_EXEC,
               MAP_PRIVATE|MAP_ANON|MAP_FIXED,
               -1,
               0) == a->softlimit) {
        return;
      }
    }
  } else if (signo == SIGSEGV) {
    BytePtr addr = (BytePtr)(xp->uc_mcontext.fault_address);
    area *a = tcr->cs_area;

    if ((addr >= a->low) &&
        (addr < a->softlimit)) {
      if (addr < a->hardlimit) {
        Bug(xp, "hard stack overflow");
      } else {
        UnProtectMemory(a->hardlimit,a->softlimit-a->hardlimit);
      }
    }
  }
  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), signal_handler);
}
#endif

/*
  If it looks like we're in the middle of an atomic operation, make
  it seem as if that operation is either complete or hasn't started
  yet.

  The cases handled include:

  a) storing into a newly-allocated lisp frame on the stack.
  b) marking a newly-allocated TSP frame as containing "raw" data.
  c) consing: the GC has its own ideas about how this should be
     handled, but other callers would be best advised to back
     up or move forward, according to whether we're in the middle
     of allocating a cons cell or allocating a uvector.
  d) a STMW to the vsp
  e) EGC write-barrier subprims.
*/
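
/* pc_luser_xp() below implements those cases: if the interrupted PC
   lies inside one of the egc_* subprims declared here, the pending
   store (and any needed memoization) is completed by hand and the PC
   advanced to the subprim's return address; if it lies in an inline
   allocation sequence, the thread is either backed up to the start of
   the sequence or the allocation is finished, as appropriate. */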

extern opcode
  egc_write_barrier_start,
  egc_write_barrier_end,
  egc_store_node_conditional,
  egc_store_node_conditional_test,
  egc_set_hash_key,
  egc_gvset,
  egc_rplaca,
  egc_rplacd,
  egc_set_hash_key_conditional,
  egc_set_hash_key_conditional_test,
  swap_lr_lisp_frame_temp0,
  swap_lr_lisp_frame_arg_z;


extern opcode ffcall_return_window, ffcall_return_window_end;

void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,Rsp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);



  if ((program_counter < &egc_write_barrier_end) &&
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
          ((program_counter == &egc_set_hash_key_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
          ((program_counter == &egc_store_node_conditional_test) &&
           (! (xpPSR(xp) & PSR_Z_MASK)))) {
        /* The conditional store either hasn't been attempted yet, or
           has failed.  No need to adjust the PC, or do memoization. */
        return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm0));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    xpPC(xp) = xpLR(xp);
    return;
  }



  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) -= disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        Boolean ok = false;
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr - disp));
        xpGPR(xp, allocptr) = VOID_ALLOCPTR + disp;
        instr = program_counter[-1];
        if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
          instr = program_counter[-2];
          if (IS_COMPARE_ALLOCPTR_TO_RM(instr)){
            xpGPR(xp,RM_field(instr)) = VOID_ALLOCPTR;
            ok = true;
          }
        }
        if (ok) {
          /* Clear the carry bit, so that the trap will be taken. */
          xpPSR(xp) &= ~PSR_C_MASK;
        } else {
          Bug(NULL, "unexpected instruction preceding alloc trap.");
        }
      }
    } else {
      /* we may be before or after the alloc trap.  If before, set
         allocptr to VOID_ALLOCPTR and back up to the start of the
         instruction sequence; if after, finish the allocation. */
      Boolean before_alloc_trap = false;

      if (IS_BRANCH_AROUND_ALLOC_TRAP(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_COMPARE_ALLOCPTR_TO_RM(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_LOAD_RD_FROM_ALLOCBASE(instr)) {
        before_alloc_trap = true;
        --program_counter;
        instr = *program_counter;
      }
      if (IS_SUB_HI_FROM_ALLOCPTR(instr)) {
        before_alloc_trap = true;
        --program_counter;
      }
      if (before_alloc_trap) {
        xpPC(xp) = program_counter;
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      } else {
        /* If we're already past the alloc_trap, finish allocating
           the object. */
        if (allocptr_tag == fulltag_cons) {
          finish_allocating_cons(xp);
        } else {
          if (allocptr_tag == fulltag_misc) {
            finish_allocating_uvector(xp);
          } else {
            Bug(xp, "what's being allocated here ?");
          }
        }
        /* Whatever we finished allocating, reset allocptr/allocbase to
           VOID_ALLOCPTR */
        xpGPR(xp,allocptr) = VOID_ALLOCPTR;
      }
      return;
    }
    return;
  }
  {
    lisp_frame *swap_frame = NULL;
    pc base = &swap_lr_lisp_frame_temp0;

    if ((program_counter > base)             /* sic */
        && (program_counter < (base+3))) {
      swap_frame = (lisp_frame *)xpGPR(xp,temp0);
    } else {
      base = &swap_lr_lisp_frame_arg_z;
      if ((program_counter > base) && (program_counter < (base+3))) {
        swap_frame = (lisp_frame *)xpGPR(xp,arg_z);
      }
    }
    if (swap_frame) {
      if (program_counter == (base+1)) {
        swap_frame->savelr = xpGPR(xp,Rlr);
      }
      xpGPR(xp,Rlr) = xpGPR(xp,imm0);
      xpPC(xp) = base+3;
      return;
    }
  }
}

void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
        /*
           This thread can (allegedly) take an interrupt now.
           It's tricky to do that if we're executing
           foreign code (especially Linuxthreads code, much
           of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
        */
        if ((tcr->valence != TCR_STATE_LISP) ||
            (tcr->unwinding != 0)) {
          tcr->interrupt_pending = 1 << fixnumshift;
        } else {
          xframe_list xframe_link;
          int old_valence;
          signed_natural disp=0;
          natural old_last_lisp_frame = tcr->last_lisp_frame;

          tcr->last_lisp_frame = xpGPR(context,Rsp);
          pc_luser_xp(context, tcr, &disp);
          old_valence = prepare_to_wait_for_exception_lock(tcr, context);
          wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
          handle_exception(signum, context, tcr, info, old_valence);
          if (disp) {
            xpGPR(context,allocptr) -= disp;
          }
          unlock_exception_lock_in_handler(tcr);
          exit_signal_handler(tcr, old_valence, old_last_lisp_frame);
        }
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}
1859
1860#ifdef USE_SIGALTSTACK
1861void
1862altstack_interrupt_handler(int signum, siginfo_t *info, ExceptionInformation *context)
1863{
1864  invoke_handler_on_main_stack(signum, info, context, __builtin_return_address(0),interrupt_handler);
1865}
1866#endif
1867
1868
1869void
1870install_signal_handler(int signo, void *handler, unsigned flags)
1871{
1872  struct sigaction sa;
1873  int err;
1874
1875  sigfillset(&sa.sa_mask);
1876 
1877  sa.sa_sigaction = (void *)handler;
1878  sigfillset(&sa.sa_mask);
1879  sa.sa_flags = SA_SIGINFO;
1880
1881#ifdef ANDROID
1882  sa.sa_flags |= SA_NODEFER;
1883#endif
1884#ifdef USE_SIGALTSTACK
1885  if (flags & ON_ALTSTACK)
1886    sa.sa_flags |= SA_ONSTACK;
1887#endif
1888  if (flags & RESTART_SYSCALLS)
1889    sa.sa_flags |= SA_RESTART;
1890  if (flags & RESERVE_FOR_LISP) {
1891    extern sigset_t user_signals_reserved;
1892    sigaddset(&user_signals_reserved, signo);
1893  }
1894
1895  err = sigaction(signo, &sa, NULL);
1896  if (err) {
1897    perror("sigaction");
1898    exit(1);
1899  }
1900}
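
/* A usage sketch (editorial; not part of the original source): how a
   hypothetical handler would be installed through this interface.
   The signal number and handler here are examples only; the real
   handlers are installed in install_pmcl_exception_handlers() and
   thread_signal_setup() below. */
#if 0
static void
example_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
  /* ... inspect info/xp, update TCR state, etc. ... */
}

static void
install_example_handler()
{
  /* Reserve the signal for lisp and restart interrupted syscalls. */
  install_signal_handler(SIGUSR2, (void *)example_handler,
                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
}
#endif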
1901
1902
1903void
1904install_pmcl_exception_handlers()
1905{
1906#ifdef DARWIN
1907  extern Boolean use_mach_exception_handling;
1908#endif
1909
1910  Boolean install_signal_handlers_for_exceptions =
1911#ifdef DARWIN
1912    !use_mach_exception_handling
1913#else
1914    true
1915#endif
1916    ;
1917  if (install_signal_handlers_for_exceptions) {
1918    install_signal_handler(SIGILL, (void *)sigill_handler, RESERVE_FOR_LISP);
1919    install_signal_handler(SIGSEGV, (void *)ALTSTACK(signal_handler),
1920                           RESERVE_FOR_LISP|ON_ALTSTACK);
1921    install_signal_handler(SIGBUS, (void *)ALTSTACK(signal_handler),
1922                           RESERVE_FOR_LISP|ON_ALTSTACK);
1923
1924  }
1925 
1926  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
1927                         (void *)interrupt_handler, RESERVE_FOR_LISP);
1928  signal(SIGPIPE, SIG_IGN);
1929}
1930
1931
1932#ifdef USE_SIGALTSTACK
1933void
1934setup_sigaltstack(area *a)
1935{
1936  stack_t stack;
1937#if 0
1938  stack.ss_sp = a->low;
1939  a->low += SIGSTKSZ*8;
1940#endif
1941  stack.ss_size = SIGSTKSZ*8;
1942  stack.ss_flags = 0;
1943  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
  if (stack.ss_sp == MAP_FAILED) {
    perror("mmap");
    exit(-1);
  }
1944  if (sigaltstack(&stack, NULL) != 0) {
1945    perror("sigaltstack");
1946    exit(-1);
1947  }
1948}
1949#endif
1950
1951void
1952thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
1953{
1954  TCR *tcr = get_tcr(false);
1955  area *a;
1956#ifndef ANDROID
1957  sigset_t mask;
1958 
1959  sigemptyset(&mask);
1960#else
1961  int mask[] = {0,0};
1962#endif
1963
1964  if (tcr) {
1965    tcr->valence = TCR_STATE_FOREIGN;
1966    a = tcr->vs_area;
1967    if (a) {
1968      a->active = a->high;
1969    }
1970    a = tcr->cs_area;
1971    if (a) {
1972      a->active = a->high;
1973    }
1974  }
1975 
1976  pthread_sigmask(SIG_SETMASK,(sigset_t *)&mask,NULL);
1977  pthread_exit(NULL);
1978}
1979
1980#ifdef USE_SIGALTSTACK
1981void
1982altstack_thread_kill_handler(int signo, siginfo_t *info, ExceptionInformation *xp)
1983{
1984  invoke_handler_on_main_stack(signo, info, xp, __builtin_return_address(0), thread_kill_handler);
1985}
1986#endif
1987
1988void
1989thread_signal_setup()
1990{
1991  thread_suspend_signal = SIG_SUSPEND_THREAD;
1992  thread_kill_signal = SIG_KILL_THREAD;
1993
1994  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
1995                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
1996  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler,
1997                         RESERVE_FOR_LISP);
1998}
1999
2000
2001
2002void
2003unprotect_all_areas()
2004{
2005  protected_area_ptr p;
2006
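  /* Detach the whole list up front (AllProtectedAreas = NULL), then
     walk the detached chain, unprotecting each area. */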
2007  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
2008    unprotect_area(p);
2009  }
2010}
2011
2012/*
2013  A binding subprim has just trapped because the tlb index in
2014  idx_regno is out of bounds.  Extend the tcr's tlb so that the index
2015  will be in bounds and the new limit will be on a page boundary,
2016  filling in the new page(s) with 'no_thread_local_binding_marker'.
2017  Update the tcr fields and return true if this all works, false
2018  otherwise.
2019
2020  Note that the tlb was allocated via malloc, so realloc can do some
2021  of the hard work.
2022*/
2023Boolean
2024extend_tcr_tlb(TCR *tcr, 
2025               ExceptionInformation *xp, 
2026               unsigned idx_regno)
2027{
2028  unsigned
2029    index = (unsigned) (xpGPR(xp,idx_regno)),
2030    old_limit = tcr->tlb_limit,
2031    new_limit = align_to_power_of_2(index+1,12),
2032    new_bytes = new_limit-old_limit;
2033  LispObj
2034    *old_tlb = tcr->tlb_pointer,
2035    *new_tlb = realloc(old_tlb, new_limit),
2036    *work;
2037
2038  if (new_tlb == NULL) {
2039    return false;
2040  }
2041 
2042  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
2043
2044  while (new_bytes) {
2045    *work++ = no_thread_local_binding_marker;
2046    new_bytes -= sizeof(LispObj);
2047  }
2048  tcr->tlb_pointer = new_tlb;
2049  tcr->tlb_limit = new_limit;
2050  return true;
2051}
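
/* An illustrative sketch (not part of the original source) of a
   caller of extend_tcr_tlb().  TLB_IDX_REGNO is a hypothetical
   register number standing in for the register named by the trapping
   instruction.  Note that align_to_power_of_2(index+1,12) rounds the
   new limit up to a 4K page boundary: an index of 0x1234 yields a
   new limit of 0x2000. */
#if 0
static void
handle_tlb_bounds_trap(TCR *tcr, ExceptionInformation *xp)
{
  if (!extend_tcr_tlb(tcr, xp, TLB_IDX_REGNO)) {
    Bug(xp, "Can't extend thread-local binding vector");
  }
}
#endif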
2052
2053
2054
2055void
2056exception_init()
2057{
2058  install_pmcl_exception_handlers();
2059}
2060
2061
2062
2063
2064
2065#ifdef DARWIN
2066
2067
2068#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
2069#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
2070
2071
2072
2073#define LISP_EXCEPTIONS_HANDLED_MASK \
2074 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2075
2076/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
2077#define NUM_LISP_EXCEPTIONS_HANDLED 4
2078
2079typedef struct {
2080  int foreign_exception_port_count;
2081  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
2082  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
2083  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
2084  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
2085} MACH_foreign_exception_state;
2086
2087
2088
2089
2090/*
2091  Mach's exception mechanism works a little better than its signal
2092  mechanism (and, not incidentally, it gets along with GDB a lot
2093  better.)
2094
2095  Initially, we install an exception handler to handle each native
2096  thread's exceptions.  This process involves creating a distinguished
2097  thread which listens for kernel exception messages on a set of
2098  0 or more thread exception ports.  As threads are created, they're
2099  added to that port set; a thread's exception port is destroyed
2100  (and therefore removed from the port set) when the thread exits.
2101
2102  A few exceptions can be handled directly in the handler thread;
2103  others require that we resume the user thread (and that the
2104  exception thread resumes listening for exceptions.)  The user
2105  thread might eventually want to return to the original context
2106  (possibly modified somewhat.)
2107
2108  As it turns out, the simplest way to force the faulting user
2109  thread to handle its own exceptions is to do pretty much what
2110  signal() does: the exception handling thread sets up a sigcontext
2111  on the user thread's stack and forces the user thread to resume
2112  execution as if a signal handler had been called with that
2113  context as an argument.  We can use a distinguished UUO at a
2114  distinguished address to do something like sigreturn(); that'll
2115  have the effect of resuming the user thread's execution in
2116  the (pseudo-) signal context.
2117
2118  Since:
2119    a) we have miles of code in C and in Lisp that knows how to
2120    deal with Linux sigcontexts,
2121    b) Linux sigcontexts contain a little more useful information
2122    (the DAR, DSISR, etc.) than their Darwin counterparts, and
2123    c) we have to create a sigcontext ourselves when calling out
2124    to the user thread: we aren't really generating a signal, just
2125    leveraging existing signal-handling code,
2126
2127  we create a Linux sigcontext struct.
2128
2129  Simple?  Hopefully from the outside it is ...
2130
2131  We want the process of passing a thread's own context to it to
2132  appear to be atomic: in particular, we don't want the GC to suspend
2133  a thread that's had an exception but has not yet had its user-level
2134  exception handler called, and we don't want the thread's exception
2135  context to be modified by a GC while the Mach handler thread is
2136  copying it around.  On Linux (and on Jaguar), we avoid this issue
2137  because (a) the kernel sets up the user-level signal handler and
2138  (b) the signal handler blocks signals (including the signal used
2139  by the GC to suspend threads) until tcr->xframe is set up.
2140
2141  The GC and the Mach server thread therefore contend for the lock
2142  "mach_exception_lock".  The Mach server thread holds the lock
2143  when copying exception information between the kernel and the
2144  user thread; the GC holds this lock during most of its execution
2145  (delaying exception processing until it can be done without
2146  GC interference.)
2147
2148*/
2149
2150
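/* A minimal sketch (editorial; not from this file) of the lock
   discipline described above.  The mutex and function names here are
   placeholders; the real "mach_exception_lock" is defined elsewhere
   in the runtime. */
#if 0
static pthread_mutex_t mach_exception_lock_sketch = PTHREAD_MUTEX_INITIALIZER;

static void
gc_epoch_sketch(void)
{
  /* The GC holds the lock for most of its execution, so exception
     processing is deferred until the GC can't interfere with it. */
  pthread_mutex_lock(&mach_exception_lock_sketch);
  /* ... suspend threads, trace/move objects, resume threads ... */
  pthread_mutex_unlock(&mach_exception_lock_sketch);
}

static void
server_copy_context_sketch(mach_port_t thread)
{
  /* The Mach server thread holds the lock while copying exception
     state between the kernel and the user thread. */
  pthread_mutex_lock(&mach_exception_lock_sketch);
  /* ... thread_get_state()/thread_set_state() traffic ... */
  pthread_mutex_unlock(&mach_exception_lock_sketch);
}
#endif
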
2151#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
2152
2153void
2154fatal_mach_error(char *format, ...);
2155
2156#define MACH_CHECK_ERROR(context,x) do {if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}} while (0)
2157
2158
2159void
2160restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
2161{
2162  kern_return_t kret;
2163  _STRUCT_MCONTEXT *mc = UC_MCONTEXT(pseudosigcontext);
2164
2165  /* Set the thread's FP state from the pseudosigcontext */
2166  kret = thread_set_state(thread,
2167                          ARM_VFP_STATE,
2168                          (thread_state_t)&(mc->__fs),
2169                          ARM_VFP_STATE_COUNT);
2170
2171  MACH_CHECK_ERROR("setting thread FP state", kret);
2172
2173  /* The thread'll be as good as new ... */
2174  kret = thread_set_state(thread, 
2175                          MACHINE_THREAD_STATE,
2176                          (thread_state_t)&(mc->__ss),
2177                          MACHINE_THREAD_STATE_COUNT);
2178  MACH_CHECK_ERROR("setting thread state", kret);
2179} 
2180
2181/* This code runs in the exception handling thread, in response
2182   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
2183   in response to a call to pseudo_sigreturn() from the specified
2184   user thread.)
2185   Find the thread's pending exception context (the TCR points to it),
2186   then use that context to set the user thread's state.  When this
2187   function's caller returns, the Mach kernel will resume the
2188   user thread.
2189*/
2190
2191kern_return_t
2192do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
2193{
2194  ExceptionInformation *xp;
2195
2196#ifdef DEBUG_MACH_EXCEPTIONS
2197  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
2198#endif
2199  tcr->last_lisp_frame = *((natural *)(tcr->last_lisp_frame));
2200  xp = tcr->pending_exception_context;
2201  if (xp) {
2202    tcr->pending_exception_context = NULL;
2203    tcr->valence = TCR_STATE_LISP;
2204    restore_mach_thread_state(thread, xp);
2205    raise_pending_interrupt(tcr);
2206  } else {
2207    Bug(NULL, "no xp here!\n");
2208  }
2209#ifdef DEBUG_MACH_EXCEPTIONS
2210  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
2211#endif
2212  return KERN_SUCCESS;
2213} 
2214
2215ExceptionInformation *
2216create_thread_context_frame(mach_port_t thread, 
2217                            natural *new_stack_top)
2218{
2219  arm_thread_state_t ts;
2220  mach_msg_type_number_t thread_state_count;
2221  kern_return_t result;
2222  ExceptionInformation *pseudosigcontext;
2223  _STRUCT_MCONTEXT *mc;
2224  natural stackp, backlink;
2225
2226  thread_state_count = MACHINE_THREAD_STATE_COUNT;
2227  result = thread_get_state(thread, 
2228                            ARM_THREAD_STATE,   /* GPRs, some SPRs  */
2229                            (thread_state_t)&ts,
2230                            &thread_state_count);
2231 
2232  if (result != KERN_SUCCESS) {
2233    get_tcr(true);
2234    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
2235  }
2236  stackp = ts.__sp;
2237  backlink = stackp;
2238
2239  stackp -= sizeof(*pseudosigcontext);
2240  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
2241
2242  stackp -= sizeof(*mc);
2243  mc = (_STRUCT_MCONTEXT *) ptr_from_lispobj(stackp);
2244  memmove(&(mc->__ss),&ts,sizeof(ts));
2245
2246  thread_state_count = ARM_VFP_STATE_COUNT;
2247  thread_get_state(thread,
2248                   ARM_VFP_STATE,
2249                   (thread_state_t)&(mc->__fs),
2250                   &thread_state_count);
2251
2252
2253  thread_state_count = ARM_EXCEPTION_STATE_COUNT;
2254  thread_get_state(thread,
2255                   ARM_EXCEPTION_STATE,
2256                   (thread_state_t)&(mc->__es),
2257                   &thread_state_count);
2258
2259
2260  UC_MCONTEXT(pseudosigcontext) = mc;
2261  if (new_stack_top) {
2262    *new_stack_top = stackp;
2263  }
2264  return pseudosigcontext;
2265}
2266
2267/*
2268  This code sets up the user thread so that it executes a "pseudo-signal
2269  handler" function when it resumes.  Create a Linux sigcontext struct
2270  on the thread's stack and pass it as an argument to the pseudo-signal
2271  handler.
2272
2273  Things are set up so that the handler "returns to" pseudo_sigreturn(),
2274  which will restore the thread's context.
2275
2276  If the handler invokes code that throws (or otherwise never sigreturn()'s
2277  to the context), that's fine.
2278
2279  Actually, check that: throw (and variants) may need to be careful and
2280  pop the tcr's xframe list until it's younger than any frame being
2281  entered.
2282*/
2283
2284int
2285setup_signal_frame(mach_port_t thread,
2286                   void *handler_address,
2287                   int signum,
2288                   int code,
2289                   TCR *tcr)
2290{
2291  arm_thread_state_t ts;
2292  ExceptionInformation *pseudosigcontext;
2293  int old_valence = tcr->valence;
2294  natural stackp, *pstackp;
2295
2296#ifdef DEBUG_MACH_EXCEPTIONS
2297  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
2298#endif
2299  pseudosigcontext = create_thread_context_frame(thread, &stackp);
2300  pstackp = (natural *)stackp;
2301  *--pstackp = tcr->last_lisp_frame;
2302  stackp = (natural)pstackp;
2303  tcr->last_lisp_frame = stackp;
2304  pseudosigcontext->uc_onstack = 0;
2305  pseudosigcontext->uc_sigmask = (sigset_t) 0;
2306  pseudosigcontext->uc_mcsize = ARM_MCONTEXT_SIZE;
2307  tcr->pending_exception_context = pseudosigcontext;
2308  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
2309 
2310
2311  /*
2312     At this point, we've created a sigcontext on the thread's
2313     stack.  Set things up so that we call the handler (with appropriate
2314     args) when the thread's resumed.
2315  */
2316
2317  ts.__pc = (natural) handler_address;
2318  ts.__sp = stackp;
2319  ts.__r[0] = signum;
2320  ts.__r[1] = (natural)pseudosigcontext;
2321  ts.__r[2] = (natural)tcr;
2322  ts.__r[3] = (natural)old_valence;
2323  ts.__lr = (natural)pseudo_sigreturn;
2324  ts.__cpsr = xpPSR(pseudosigcontext);
2325
2326
2327  thread_set_state(thread, 
2328                   MACHINE_THREAD_STATE,
2329                   (thread_state_t)&ts,
2330                   MACHINE_THREAD_STATE_COUNT);
2331#ifdef DEBUG_MACH_EXCEPTIONS
2332  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
2333#endif
2334  return 0;
2335}
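
/* For reference (an editorial sketch, not from the original source):
   the frame that create_thread_context_frame() and setup_signal_frame()
   build on the user thread's stack grows downward from the interrupted
   thread's SP:

     interrupted SP ->  +-------------------------------+
                        | pseudosigcontext              |
                        +-------------------------------+
                        | mcontext (__ss, __fs, __es)   |
                        +-------------------------------+
                        | saved tcr->last_lisp_frame    |
     new SP --------->  +-------------------------------+

   The handler is entered with (signum, pseudosigcontext, tcr,
   old_valence) in r0-r3, and "returns" through lr to
   pseudo_sigreturn(). */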
2336
2337
2338void
2339pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
2340{
2341  signal_handler(signum, NULL, context, tcr, old_valence, 0);
2342} 
2343
2344
2345int
2346thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
2347{
2348  /* Likely hopeless. */
2349  return 0;
2350}
2351
2352/*
2353  This function runs in the exception handling thread.  It's
2354  called (by this precise name) from the library function "exc_server()",
2355  or, in this file, from openmcl_exc_server().  (The server function is
2356  called via mach_msg_server(), which waits for and dispatches on
2357  exception messages from the Mach kernel.)
2358
2359  This checks to see if the exception was caused by a pseudo_sigreturn()
2360  UUO; if so, it arranges for the thread to have its state restored
2361  from the specified context.
2362
2363  Otherwise, it tries to map the exception to a signal number and
2364  arranges that the thread run a "pseudo signal handler" to handle
2365  the exception.
2366
2367  Some exceptions could and should be handled here directly.
2368*/
2369
2370kern_return_t
2371catch_exception_raise(mach_port_t exception_port,
2372                      mach_port_t thread,
2373                      mach_port_t task, 
2374                      exception_type_t exception,
2375                      exception_data_t code_vector,
2376                      mach_msg_type_number_t code_count)
2377{
2378  int signum = 0, code = *code_vector;
2379  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
2380  kern_return_t kret;
2381
2382#ifdef DEBUG_MACH_EXCEPTIONS
2383  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
2384#endif
2385
2386  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
2387    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
2388  } 
2389  /* On the ARM, code_vector[1] contains the undefined instruction
2390     in this case, not its address.  */
2391  if ((exception == EXC_BAD_INSTRUCTION) &&
2392      (code_vector[0] == EXC_ARM_UNDEFINED) &&
2393      (code_vector[1] == PSEUDO_SIGRETURN_UUO)) {
2394    kret = do_pseudo_sigreturn(thread, tcr);
2395  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
2396    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
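    /* 17 is an arbitrary non-KERN_SUCCESS value; it becomes the
       reply's RetCode in openmcl_exc_server(), telling the kernel
       that the exception wasn't handled here so that it can be
       propagated to other handlers. */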
2397    kret = 17;
2398  } else {
2399    switch (exception) {
2400    case EXC_BAD_ACCESS:
2401      signum = SIGSEGV;
2402      break;
2403       
2404    case EXC_BAD_INSTRUCTION:
2405      signum = SIGILL;
2406      break;
2407     
2410    case EXC_ARITHMETIC:
2411      signum = SIGFPE;
2412      break;
2413
2414    default:
2415      break;
2416    }
2417    if (signum) {
2418      kret = setup_signal_frame(thread,
2419                                (void *)pseudo_signal_handler,
2420                                signum,
2421                                code,
2422                                tcr);
2423#if 0
2424      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
2425#endif
2426
2427    } else {
2428      kret = 17;
2429    }
2430  }
2431
2432  return kret;
2433}
2434
2435
2436
2437typedef struct {
2438  mach_msg_header_t Head;
2439  /* start of the kernel processed data */
2440  mach_msg_body_t msgh_body;
2441  mach_msg_port_descriptor_t thread;
2442  mach_msg_port_descriptor_t task;
2443  /* end of the kernel processed data */
2444  NDR_record_t NDR;
2445  exception_type_t exception;
2446  mach_msg_type_number_t codeCnt;
2447  integer_t code[2];
2448  mach_msg_trailer_t trailer;
2449} exceptionRequest;
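
/* This mirrors the MIG-generated request message for exception_raise
   (msgh_id 2401); by Mach convention, the reply's msgh_id is the
   request's id + 100. */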
2450
2451
2452boolean_t
2453openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
2454{
2455  static NDR_record_t _NDR = {0};
2456  kern_return_t handled;
2457  mig_reply_error_t *reply = (mig_reply_error_t *) out;
2458  exceptionRequest *req = (exceptionRequest *) in;
2459
2460  reply->NDR = _NDR;
2461
2462  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
2463  out->msgh_remote_port = in->msgh_remote_port;
2464  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
2465  out->msgh_local_port = MACH_PORT_NULL;
2466  out->msgh_id = in->msgh_id+100;
2467
2468  /* Could handle other exception flavors in the range 2401-2403 */
2469
2470
2471  if (in->msgh_id != 2401) {
2472    reply->RetCode = MIG_BAD_ID;
2473    return FALSE;
2474  }
2475  handled = catch_exception_raise(req->Head.msgh_local_port,
2476                                  req->thread.name,
2477                                  req->task.name,
2478                                  req->exception,
2479                                  req->code,
2480                                  req->codeCnt);
2481  reply->RetCode = handled;
2482  return TRUE;
2483}
2484
2485/*
2486  The initial function for an exception-handling thread.
2487*/
2488
2489void *
2490exception_handler_proc(void *arg)
2491{
2492  extern boolean_t exc_server();
2493  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
2494
2495  mach_msg_server(openmcl_exc_server, 2048, p, 0);
2496  /* Should never return. */
2497  abort();
2498}
2499
2500
2501
2502mach_port_t
2503mach_exception_port_set()
2504{
2505  static mach_port_t __exception_port_set = MACH_PORT_NULL;
2506  kern_return_t kret; 
2507  if (__exception_port_set == MACH_PORT_NULL) {
2508    kret = mach_port_allocate(mach_task_self(),
2509                              MACH_PORT_RIGHT_PORT_SET,
2510                              &__exception_port_set);
2511    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
2512    create_system_thread(0,
2513                         NULL,
2514                         exception_handler_proc, 
2515                         (void *)((natural)__exception_port_set));
2516  }
2517  return __exception_port_set;
2518}
2519
2520/*
2521  Set up a new thread to handle those exceptions specified by
2522  the mask "which".  This involves creating a special Mach
2523  message port, telling the Mach kernel to send exception
2524  messages for the calling thread to that port, and setting
2525  up a handler thread which listens for and responds to
2526  those messages.
2527
2528*/
2529
2530/*
2531  Establish the lisp thread's TCR as its exception port, and determine
2532  whether any other ports have been established by foreign code for
2533  exceptions that lisp cares about.
2534
2535  If this happens at all, it should happen on return from foreign
2536  code and on entry to lisp code via a callback.
2537
2538  This is a lot of trouble (and overhead) to support Java, or other
2539  embeddable systems that clobber their caller's thread exception ports.
2540 
2541*/
2542kern_return_t
2543tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
2544{
2545  kern_return_t kret;
2546  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
2547  int i;
2548  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
2549  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
2550  exception_mask_t mask = 0;
2551
2552  kret = thread_swap_exception_ports(thread,
2553                                     LISP_EXCEPTIONS_HANDLED_MASK,
2554                                     lisp_port,
2555                                     EXCEPTION_DEFAULT,
2556                                     THREAD_STATE_NONE,
2557                                     fxs->masks,
2558                                     &n,
2559                                     fxs->ports,
2560                                     fxs->behaviors,
2561                                     fxs->flavors);
2562  if (kret == KERN_SUCCESS) {
2563    fxs->foreign_exception_port_count = n;
2564    for (i = 0; i < n; i ++) {
2565      foreign_port = fxs->ports[i];
2566
2567      if ((foreign_port != lisp_port) &&
2568          (foreign_port != MACH_PORT_NULL)) {
2569        mask |= fxs->masks[i];
2570      }
2571    }
2572    tcr->foreign_exception_status = (int) mask;
2573  }
2574  return kret;
2575}
2576
2577kern_return_t
2578tcr_establish_lisp_exception_port(TCR *tcr)
2579{
2580  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
2581}
2582
2583/*
2584  Do this when calling out to or returning from foreign code, if
2585  any conflicting foreign exception ports were established when we
2586  last entered lisp code.
2587*/
2588kern_return_t
2589restore_foreign_exception_ports(TCR *tcr)
2590{
2591  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
2592 
2593  if (m) {
2594    MACH_foreign_exception_state *fxs  = 
2595      (MACH_foreign_exception_state *) tcr->native_thread_info;
2596    int i, n = fxs->foreign_exception_port_count;
2597    exception_mask_t tm;
2598
2599    for (i = 0; i < n; i++) {
2600      if ((tm = fxs->masks[i]) & m) {
2601        thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
2602                                   tm,
2603                                   fxs->ports[i],
2604                                   fxs->behaviors[i],
2605                                   fxs->flavors[i]);
2606      }
2607    }
2608  }
  return KERN_SUCCESS;
2609}
2610                                   
2611
2612/*
2613  This assumes that a Mach port (to be used as the thread's exception port) whose
2614  "name" matches the TCR's 32-bit address has already been allocated.
2615*/
2616
2617kern_return_t
2618setup_mach_exception_handling(TCR *tcr)
2619{
2620  mach_port_t
2621    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
2622    task_self = mach_task_self();
2623  kern_return_t kret;
2624
2625  kret = mach_port_insert_right(task_self,
2626                                thread_exception_port,
2627                                thread_exception_port,
2628                                MACH_MSG_TYPE_MAKE_SEND);
2629  MACH_CHECK_ERROR("adding send right to exception_port",kret);
2630
2631  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
2632  if (kret == KERN_SUCCESS) {
2633    mach_port_t exception_port_set = mach_exception_port_set();
2634
2635    kret = mach_port_move_member(task_self,
2636                                 thread_exception_port,
2637                                 exception_port_set);
2638  }
2639  return kret;
2640}
2641
2642void
2643darwin_exception_init(TCR *tcr)
2644{
2645  void tcr_monitor_exception_handling(TCR*, Boolean);
2646  kern_return_t kret;
2647  MACH_foreign_exception_state *fxs = 
2648    calloc(1, sizeof(MACH_foreign_exception_state));
2649 
2650  tcr->native_thread_info = (void *) fxs;
2651
2652  if ((kret = setup_mach_exception_handling(tcr))
2653      != KERN_SUCCESS) {
2654    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
2655    terminate_lisp();
2656  }
2657}
2658
2659/*
2660  The tcr is the "name" of the corresponding thread's exception port.
2661  Destroying the port should remove it from all port sets of which it's
2662  a member (notably, the exception port set.)
2663*/
2664void
2665darwin_exception_cleanup(TCR *tcr)
2666{
2667  void *fxs = tcr->native_thread_info;
2668  extern Boolean use_mach_exception_handling;
2669
2670  if (fxs) {
2671    tcr->native_thread_info = NULL;
2672    free(fxs);
2673  }
2674  if (use_mach_exception_handling) {
2675    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2676    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
2677  }
2678}
2679
2680
2681Boolean
2682suspend_mach_thread(mach_port_t mach_thread)
2683{
2684  kern_return_t status;
2685  Boolean aborted = false;
2686 
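  /* Suspend the thread, then abort any interruptible wait or syscall
     it may be blocked in (so that its saved user-level state is
     accurate).  If the abort can't be done safely, resume the thread
     and try again. */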
2687  do {
2688    aborted = false;
2689    status = thread_suspend(mach_thread);
2690    if (status == KERN_SUCCESS) {
2691      status = thread_abort_safely(mach_thread);
2692      if (status == KERN_SUCCESS) {
2693        aborted = true;
2694      } else {
2695        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
2696        thread_resume(mach_thread);
2697      }
2698    } else {
2699      return false;
2700    }
2701  } while (! aborted);
2702  return true;
2703}
2704
2705/*
2706  Only do this if pthread_kill indicated that the pthread isn't
2707  listening to signals anymore, as can happen as soon as pthread_exit()
2708  is called on Darwin.  The thread could still call out to lisp as it
2709  is exiting, so we need another way to suspend it in this case.
2710*/
2711Boolean
2712mach_suspend_tcr(TCR *tcr)
2713{
2714  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
2715  ExceptionInformation *pseudosigcontext;
2716  Boolean result = false;
2717 
2718  result = suspend_mach_thread(mach_thread);
2719  if (result) {
2720    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
2721    pseudosigcontext->uc_onstack = 0;
2722    pseudosigcontext->uc_sigmask = (sigset_t) 0;
2723    tcr->suspend_context = pseudosigcontext;
2724  }
2725  return result;
2726}
2727
2728void
2729mach_resume_tcr(TCR *tcr)
2730{
2731  ExceptionInformation *xp;
2732  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
2733 
2734  xp = tcr->suspend_context;
2735#ifdef DEBUG_MACH_EXCEPTIONS
2736  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
2737          tcr, tcr->pending_exception_context);
2738#endif
2739  tcr->suspend_context = NULL;
2740  restore_mach_thread_state(mach_thread, xp);
2741#ifdef DEBUG_MACH_EXCEPTIONS
2742  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
2743          tcr, tcr->pending_exception_context);
2744#endif
2745  thread_resume(mach_thread);
2746}
2747
2748void
2749fatal_mach_error(char *format, ...)
2750{
2751  va_list args;
2752  char s[512];
2753 
2754
2755  va_start(args, format);
2756  vsnprintf(s, sizeof(s),format, args);
2757  va_end(args);
2758
2759  Fatal("Mach error", s);
2760}
2761
2762void
2763pseudo_interrupt_handler(int signum, ExceptionInformation *context)
2764{
2765  interrupt_handler(signum, NULL, context);
2766}
2767
2768int
2769mach_raise_thread_interrupt(TCR *target)
2770{
2771  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
2772  kern_return_t kret;
2773  Boolean result = false;
2774  TCR *current = get_tcr(false);
2775  thread_basic_info_data_t info; 
2776  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
2777
2778  LOCK(lisp_global(TCR_AREA_LOCK), current);
2779
2780  if (suspend_mach_thread(mach_thread)) {
2781    if (thread_info(mach_thread,
2782                    THREAD_BASIC_INFO,
2783                    (thread_info_t)&info,
2784                    &info_count) == KERN_SUCCESS) {
2785      if (info.suspend_count == 1) {
2786        if ((target->valence == TCR_STATE_LISP) &&
2787            (!target->unwinding) &&
2788            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
2789          kret = setup_signal_frame(mach_thread,
2790                                    (void *)pseudo_interrupt_handler,
2791                                    SIGNAL_FOR_PROCESS_INTERRUPT,
2792                                    0,
2793                                    target);
2794          if (kret == KERN_SUCCESS) {
2795            result = true;
2796          }
2797        }
2798      }
2799    }
2800    if (! result) {
2801      target->interrupt_pending = 1 << fixnumshift;
2802    }
2803    thread_resume(mach_thread);
2804   
2805  }
2806  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
2807  return 0;
2808}
2809
2810#endif