source: trunk/source/lisp-kernel/x86-exceptions.c @ 16364

Last change on this file since 16364 was 16364, checked in by gb, 5 years ago

for 64-bit Linux: copy_fpregs() copies the secret AVX state correctly.
fixes ticket:1271 in the trunk. TODO: look at x8632, Darwin, Solaris,
and Windows.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 107.6 KB
1/*
2   Copyright (C) 2005-2009 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include "x86-utils.h"
21#include "threads.h"
22#include <ctype.h>
23#include <stdio.h>
24#include <stddef.h>
25#include <string.h>
26#include <stdarg.h>
27#include <errno.h>
28#include <stdio.h>
29#ifdef LINUX
30#include <strings.h>
31#include <sys/mman.h>
32#include <fpu_control.h>
33#include <linux/prctl.h>
34#endif
35#ifdef DARWIN
36#include <sysexits.h>
37#include <dlfcn.h>
38#include <mach/machine/vm_types.h>
39#endif
40#ifndef WINDOWS
41#include <sys/syslog.h>
42#endif
43#ifdef WINDOWS
44#include <windows.h>
45#ifdef WIN_64
46#include <winternl.h>
47#include <ntstatus.h>
48#endif
49#ifndef EXCEPTION_WRITE_FAULT
50#define EXCEPTION_WRITE_FAULT 1
51#endif
52#endif
53
54void
55normalize_tcr(ExceptionInformation *, TCR *, Boolean);
56
57/*
58  We do all kinds of funky things to avoid handling a signal on the lisp
59  stack.  One of those funky things involves using the  __builtin_return_address()
60  intrinsic so that the real handler returns to the right place when it exits,
61  even if it returns on a different stack.  Code at "the right place" is presumed
62  to just do a sigreturn, however the OS does that.
63
64  Sadly, some JVMs (at least) do what they call "signal chaining": they install
65  their own handlers for signals that we expect to handle, and call our handler
66  when they realize that they don't know how to handle what we raise.  They
67  don't observe sigaltstack, and they don't necessarily call our handler
68  tail-recursively (so our stack-switching code would cause our handler to
69  return to the JVM's, running on the wrong stack).
70
71  Try to work around this by setting up an "early" signal handler (before any
72  of this JVM nonsense can take effect) and noting the address it'd return to.
73*/
74
75pc
76real_sigreturn = (pc)0;
77
78#define SIGRETURN_ADDRESS() (real_sigreturn ? real_sigreturn : __builtin_return_address(0))
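/* SIGRETURN_ADDRESS() is consulted when we hand a signal off to a handler
   running on another stack (see altstack_signal_handler below): if the
   early handler managed to record the real sigreturn return address, we
   arrange to return there directly rather than to whatever a
   signal-chaining JVM may have interposed. */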
79
80#ifndef WINDOWS
81#ifndef DARWIN
82void
83early_intn_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
84{
85  real_sigreturn = (pc) __builtin_return_address(0);
86  xpPC(xp) += 2;
87}
88
89#endif
90#endif
91
92void
93do_intn()
94{
95  __asm volatile("int $0xcd");
96}
97
98void
99x86_early_exception_init()
100{
101#ifndef WINDOWS
102#ifndef DARWIN
103  struct sigaction action, oaction;
104
105  action.sa_sigaction = (void *) early_intn_handler;
106  sigfillset(&action.sa_mask);
107  action.sa_flags = SA_SIGINFO;
108  sigaction(SIGNUM_FOR_INTN_TRAP,&action,&oaction);
109 
110  do_intn();
111  sigaction(SIGNUM_FOR_INTN_TRAP,&oaction, NULL);
112#endif
113#endif
114}
115
116int
117page_size = 4096;
118
119int
120log2_page_size = 12;
121
122Boolean
123did_gc_notification_since_last_full_gc = false;
124
125Boolean
126allocation_enabled = true;
127
128void
129update_bytes_allocated(TCR* tcr, void *cur_allocptr)
130{
131  char *last = tcr->last_allocptr;
132  char *current = cur_allocptr;
133  uint64_t *bytes_allocated = (uint64_t *)&TCR_AUX(tcr)->bytes_allocated;
134
135  if (last && (tcr->save_allocbase != ((void *)VOID_ALLOCPTR))) {
136    *bytes_allocated += last - current;
137  }
138  tcr->last_allocptr = 0;
139}
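/* Allocation on x86 proceeds by decrementing the allocation pointer, so
   (last - current) above is the number of bytes consumed since the last
   time this bookkeeping was done. */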
140
141
142
143//  new_heap_segment() doesn't GC; it returns true if it made enough
144//  room, false otherwise.
145//  If "extend" is true, it can try to extend the dynamic area to
146//  satisfy the request.
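//  (new_heap_segment() is defined elsewhere; as called from allocate_object()
//  below, its signature is roughly:
//    Boolean new_heap_segment(ExceptionInformation *xp, natural bytes_needed,
//                             Boolean extend, TCR *tcr, Boolean *crossed_threshold);
//  This is inferred from the call sites, not copied from its declaration.)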
147
148
149
150void
151platform_new_heap_segment(ExceptionInformation *xp, TCR *tcr, BytePtr low, BytePtr high)
152{
153  tcr->last_allocptr = (void *)high;
154  tcr->save_allocptr = (void *)high;
155  xpGPR(xp,Iallocptr) = (LispObj) high;
156  tcr->save_allocbase = (void *) low;
157}
158
159Boolean
160allocate_object(ExceptionInformation *xp,
161                natural bytes_needed, 
162                signed_natural disp_from_allocptr,
163                TCR *tcr,
164                Boolean *crossed_threshold)
165{
166  area *a = active_dynamic_area;
167
168  /* Maybe do an EGC */
169  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
170    if (((a->active)-(a->low)) >= a->threshold) {
171      gc_from_xp(xp, 0L);
172    }
173  }
174
175  /* Life is pretty simple if we can simply grab a segment
176     without extending the heap.
177  */
178  if (new_heap_segment(xp, bytes_needed, false, tcr, crossed_threshold)) {
179    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
180    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
181    return true;
182  }
183 
184  /* It doesn't make sense to try a full GC if the object
185     we're trying to allocate is larger than everything
186     allocated so far.
187  */
188  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
189    untenure_from_area(tenured_area); /* force a full GC */
190    gc_from_xp(xp, 0L);
191    did_gc_notification_since_last_full_gc = false;
192  }
193 
194  /* Try again, growing the heap if necessary */
195  if (new_heap_segment(xp, bytes_needed, true, tcr, NULL)) {
196    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
197    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
198    return true;
199  }
200 
201  return false;
202}
203
204natural gc_deferred = 0, full_gc_deferred = 0;
205
206signed_natural
207flash_freeze(TCR *tcr, signed_natural param)
208{
209  return 0;
210}
211
212
213Boolean
214handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
215{
216  LispObj selector = xpGPR(xp,Iimm0);
217#ifdef X8664
218  LispObj arg = xpGPR(xp,Iimm1);
219#else
220  LispObj arg = xpMMXreg(xp,Imm0);
221#endif
222  area *a = active_dynamic_area;
223  Boolean egc_was_enabled = (a->older != NULL);
224 
225  natural gc_previously_deferred = gc_deferred;
226
227  switch (selector) {
228  case GC_TRAP_FUNCTION_EGC_CONTROL:
229    egc_control(arg != 0, a->active);
230    xpGPR(xp,Iarg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
231    break;
232
233  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
234#ifdef X8664
235    a->threshold = unbox_fixnum(xpGPR(xp, Iarg_x));
236#else
237    a->threshold = unbox_fixnum(xpGPR(xp, Itemp0));
238#endif
239    g1_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_y));
240    g2_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_z));
241    xpGPR(xp,Iarg_z) = lisp_nil+t_offset;
242    break;
243
244  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
245    if (((signed_natural) arg) > 0) {
246      lisp_heap_gc_threshold = 
247        align_to_power_of_2((arg-1) +
248                            (heap_segment_size - 1),
249                            log2_heap_segment_size);
250    }
251    /* fall through */
252  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
253    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
254    break;
255
256  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
257    /*  Try to put the current threshold in effect.  This may
258        need to disable/reenable the EGC. */
259    untenure_from_area(tenured_area);
260    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
261    if (egc_was_enabled) {
262      if ((a->high - a->active) >= a->threshold) {
263        tenure_to_area(tenured_area);
264      }
265    }
266    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
267    break;
268
269  case GC_TRAP_FUNCTION_SET_GC_NOTIFICATION_THRESHOLD:
270    if ((signed_natural)arg >= 0) {
271      lisp_heap_notify_threshold = arg;
272      did_gc_notification_since_last_full_gc = false;
273    }
274    /* fall through */
275
276  case GC_TRAP_FUNCTION_GET_GC_NOTIFICATION_THRESHOLD:
277    xpGPR(xp, Iimm0) = lisp_heap_notify_threshold;
278    break;
279
280  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
281    ensure_static_conses(xp, tcr, 32768);
282    break;
283
284  case GC_TRAP_FUNCTION_FLASH_FREEZE: /* Like freeze below, but no GC */
285    untenure_from_area(tenured_area);
286    gc_like_from_xp(xp,flash_freeze,0);
287    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
288    tenured_area->static_dnodes = area_dnode(a->active, a->low);
289    if (egc_was_enabled) {
290      tenure_to_area(tenured_area);
291    }
292    xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
293    break;
294
295  case GC_TRAP_FUNCTION_ALLOCATION_CONTROL:
296    switch(arg) {
297    case 0: /* disable if allocation enabled */
298      xpGPR(xp, Iarg_z) = lisp_nil;
299      if (allocation_enabled) {
300        TCR *other_tcr;
301        ExceptionInformation *other_context;
302        suspend_other_threads(true);
303        normalize_tcr(xp,tcr,false);
304        for (other_tcr=TCR_AUX(tcr)->next; other_tcr != tcr; other_tcr = TCR_AUX(other_tcr)->next) {
305          other_context = other_tcr->pending_exception_context;
306          if (other_context == NULL) {
307            other_context = TCR_AUX(other_tcr)->suspend_context;
308          }
309          normalize_tcr(other_context, other_tcr, true);
310        }
311        allocation_enabled = false;
312        xpGPR(xp, Iarg_z) = t_value;
313        resume_other_threads(true);
314      }
315      break;
316
317    case 1:                     /* enable if disabled */
318      xpGPR(xp, Iarg_z) = lisp_nil;
319      if (!allocation_enabled) {
320        allocation_enabled = true;
321        xpGPR(xp, Iarg_z) = t_value;
322      }
323      break;
324
325    default:
326      xpGPR(xp, Iarg_z) = lisp_nil;
327      if (allocation_enabled) {
328        xpGPR(xp, Iarg_z) = t_value;
329      }
330      break;
331    }
332    break;
333
334  default:
335    update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
336
337    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
338      if (!full_gc_deferred) {
339        gc_from_xp(xp, 0L);
340        did_gc_notification_since_last_full_gc = false;
341        break;
342      }
343      /* Tried to do a full GC when gc was disabled.  That failed,
344         so try full GC now */
345      selector = GC_TRAP_FUNCTION_GC;
346    }
347   
348    if (egc_was_enabled) {
349      egc_control(false, (BytePtr) a->active);
350    }
351    gc_from_xp(xp, 0L);
352    did_gc_notification_since_last_full_gc = false;
353    if (gc_deferred > gc_previously_deferred) {
354      full_gc_deferred = 1;
355    } else {
356      full_gc_deferred = 0;
357    }
358    if (selector > GC_TRAP_FUNCTION_GC) {
359      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
360        impurify_from_xp(xp, 0L);
361        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
362        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
363        gc_from_xp(xp, 0L);
364      }
365      if (selector & GC_TRAP_FUNCTION_PURIFY) {
366        purify_from_xp(xp, 1);
367        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
368        gc_from_xp(xp, 0L);
369      }
370      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
371        OSErr err;
372        extern OSErr save_application(int, Boolean);
373        area *vsarea = tcr->vs_area;
374
375#ifdef WINDOWS 
376        arg = _open_osfhandle(arg,0);
377#endif
378        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
379        err = save_application((int)arg, egc_was_enabled);
380        if (err == noErr) {
381          _exit(0);
382        }
383        fatal_oserr(": save_application", err);
384      }
385      switch (selector) {
386      case GC_TRAP_FUNCTION_FREEZE:
387        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
388        tenured_area->static_dnodes = area_dnode(a->active, a->low);
389        xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
390        break;
391      default:
392        break;
393      }
394    }
395    if (egc_was_enabled) {
396      egc_control(true, NULL);
397    }
398    break;
399  }
400  return true;
401}
402
403 
404
405
406
407void
408push_on_lisp_stack(ExceptionInformation *xp, LispObj value)
409{
410  LispObj *vsp = (LispObj *)xpGPR(xp,Isp);
411  *--vsp = value;
412  xpGPR(xp,Isp) = (LispObj)vsp;
413}
414
415
416/* Hard to know whether this is necessary in general.  For now,
417   do it when we get a "wrong number of arguments" trap.
418*/
419void
420finish_function_entry(ExceptionInformation *xp)
421{
422  natural nargs = xpGPR(xp,Inargs)>>fixnumshift;
423  signed_natural disp = nargs - nargregs;
424  LispObj *vsp =  (LispObj *) xpGPR(xp,Isp), ra = *vsp++;
425   
426  xpGPR(xp,Isp) = (LispObj) vsp;
427
428  if (disp > 0) {               /* implies that nargs > nargregs */
429    vsp[disp] = xpGPR(xp,Ifp);
430    vsp[disp+1] = ra;
431    xpGPR(xp,Ifp) = (LispObj)(vsp+disp);
432#ifdef X8664
433    push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
434#endif
435    push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
436    push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
437  } else {
438    push_on_lisp_stack(xp,ra);
439    push_on_lisp_stack(xp,xpGPR(xp,Ifp));
440    xpGPR(xp,Ifp) = xpGPR(xp,Isp);
441#ifdef X8664
442    if (nargs == 3) {
443      push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
444    }
445#endif
446    if (nargs >= 2) {
447      push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
448    }
449    if (nargs >= 1) {
450      push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
451    }
452  }
453}
454
455Boolean
456object_contains_pc(LispObj container, LispObj addr)
457{
458  if (fulltag_of(container) >= fulltag_misc) {
459    natural elements = header_element_count(header_of(container));
460    if ((addr >= container) &&
461        (addr < ((LispObj)&(deref(container,1+elements))))) {
462      return true;
463    }
464  }
465  return false;
466}
467
468LispObj
469create_exception_callback_frame(ExceptionInformation *xp, TCR *tcr)
470{
471  LispObj containing_uvector = 0, 
472    relative_pc = lisp_nil,
473    nominal_function = lisp_nil, 
474    f, tra, tra_f = 0, abs_pc;
475  LispObj pc_low, pc_high;
476
477  f = xpGPR(xp,Ifn);
478  tra = *(LispObj*)(xpGPR(xp,Isp));
479  if (tra_p(tra)) {
480    char *p = (char *)tra;
481    extern char *spentry_start, *spentry_end;
482
483    if (ptr_in_area(p, tcr->ts_area) ||
484        (p > spentry_start && p < spentry_end) ||
485        in_any_consing_area(tra))
486      tra_f = tra_function(tra);
487    else
488      Bug(xp, "martian tra %p\n", tra);
489  }
490  abs_pc = (LispObj)xpPC(xp);
491#if WORD_SIZE == 64
492  pc_high = ((abs_pc >> 32) & 0xffffffff) << fixnumshift;
493  pc_low = (abs_pc & 0xffffffff) << fixnumshift;
494#else
495  pc_high = ((abs_pc >> 16) & 0xffff) << fixnumshift;
496  pc_low = (abs_pc & 0xffff) << fixnumshift;
497#endif
498
499
500  if (functionp(f))
501    nominal_function = f;
502  else if (tra_f)
503    nominal_function = tra_f;
504 
505  f = xpGPR(xp,Ifn);
506  if (object_contains_pc(f, abs_pc)) {
507    containing_uvector = untag(f)+fulltag_misc;
508  } else {
509    f = xpGPR(xp,Ixfn);
510    if (object_contains_pc(f, abs_pc)) {
511      containing_uvector = untag(f)+fulltag_misc;
512    } else {
513      if (tra_f) {
514        f = tra_f;
515        if (object_contains_pc(f, abs_pc)) {
516          containing_uvector = untag(f)+fulltag_misc;
517          relative_pc = (abs_pc - f) << fixnumshift;
518        }
519      }
520    }
521  }
522  if (containing_uvector) {
523    relative_pc = (abs_pc - (LispObj)&(deref(containing_uvector,1))) << fixnumshift;
524  } else {
525    containing_uvector = lisp_nil;
526  }
527  push_on_lisp_stack(xp, pc_high);
528  push_on_lisp_stack(xp, pc_low);
529  push_on_lisp_stack(xp,(LispObj)(tcr->xframe->prev));
530  push_on_lisp_stack(xp,(LispObj)(tcr->foreign_sp));
531  push_on_lisp_stack(xp,tra);
532  push_on_lisp_stack(xp,(LispObj)xp);
533  push_on_lisp_stack(xp,containing_uvector); 
534  push_on_lisp_stack(xp,relative_pc);
535  push_on_lisp_stack(xp,nominal_function);
536  push_on_lisp_stack(xp,0);
537  push_on_lisp_stack(xp,xpGPR(xp,Ifp));
538  xpGPR(xp,Ifp) = xpGPR(xp,Isp);
539  return xpGPR(xp,Isp);
540}
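/* The push order above must stay in sync with the xcf ("exception callback
   frame") structure type, declared elsewhere, that handle_error() below uses
   to read the containing_uvector and relative_pc slots back out. */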
541
542#ifndef XMEMFULL
543#define XMEMFULL (76)
544#endif
545
546void
547lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed )
548{
549  LispObj xcf = create_exception_callback_frame(xp, tcr),
550    cmain = nrs_CMAIN.vcell;
551  int skip;
552   
553  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
554  xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
555
556  skip = callback_to_lisp(tcr, cmain, xp, xcf, -1, XMEMFULL, 0, 0);
557  xpPC(xp) += skip;
558}
559
560#ifndef SIGTRAP
561#define SIGTRAP 5
562#endif
563
564void
565callback_for_gc_notification(ExceptionInformation *xp, TCR *tcr)
566{
567  LispObj cmain = nrs_CMAIN.vcell;
568  if ((fulltag_of(cmain) == fulltag_misc) &&
569      (header_subtag(header_of(cmain)) == subtag_macptr)) {
570    LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
571      word_beyond_vsp = save_vsp[-1],
572      save_fp = xpGPR(xp,Ifp),
573      xcf = create_exception_callback_frame(xp, tcr);
574
575    callback_to_lisp(tcr, cmain, xp, xcf, SIGTRAP, 0, 0, 0);
576    did_gc_notification_since_last_full_gc = true;
577    xpGPR(xp,Ifp) = save_fp;
578    xpGPR(xp,Isp) = (LispObj)save_vsp;
579    save_vsp[-1] = word_beyond_vsp;
580  }
581}
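/* Note that the word at save_vsp[-1] (the first slot a push onto the value
   stack would occupy, and therefore the base of the callback frame built
   above) is saved before the callback and restored verbatim after it. */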
582
583
584/*
585  Allocate a large list, where "large" means "large enough to
586  possibly trigger the EGC several times if this was done
587  by individually allocating each CONS."  The number of
588  conses in question is in arg_z; on successful return,
589  the list will be in arg_z.
590*/
591
592Boolean
593allocate_list(ExceptionInformation *xp, TCR *tcr)
594{
595  natural
596    nconses = (unbox_fixnum(xpGPR(xp,Iarg_z))),
597    bytes_needed = (nconses << dnode_shift);
598  LispObj
599    prev = lisp_nil,
600    current,
601    initial = xpGPR(xp,Iarg_y);
602  Boolean notify_pending_gc = false;
603
604  if (nconses == 0) {
605    /* Silly case */
606    xpGPR(xp,Iarg_z) = lisp_nil;
607    return true;
608  }
609  update_bytes_allocated(tcr, (void *)tcr->save_allocptr);
610  if (allocate_object(xp,bytes_needed,bytes_needed-fulltag_cons,tcr, &notify_pending_gc)) {
611    tcr->save_allocptr -= fulltag_cons;
612    for (current = xpGPR(xp,Iallocptr);
613         nconses;
614         prev = current, current+= dnode_size, nconses--) {
615      deref(current,0) = prev;
616      deref(current,1) = initial;
617    }
618    xpGPR(xp,Iarg_z) = prev;
619    if (notify_pending_gc && !did_gc_notification_since_last_full_gc) {
620      callback_for_gc_notification(xp,tcr);
621    }
622  } else {
623    lisp_allocation_failure(xp,tcr,bytes_needed);
624  }
625  return true;
626}
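/* The loop above initializes the new cells in place, working from the low
   end of the freshly allocated block upward: each cell's first word is
   linked to the previously initialized cell, its second word is set to the
   initial element, and the highest-addressed cell is returned in %arg_z as
   the head of the list. */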
627
628Boolean
629handle_alloc_trap(ExceptionInformation *xp, TCR *tcr, Boolean *notify)
630{
631  natural cur_allocptr, bytes_needed;
632  unsigned allocptr_tag;
633  signed_natural disp;
634
635  if (!allocation_enabled) {
636    LispObj xcf,
637      save_sp = xpGPR(xp,Isp),
638      save_fp = xpGPR(xp,Ifp),
639      cmain = nrs_CMAIN.vcell;
640    xcf = create_exception_callback_frame(xp, tcr);
641    pc_luser_xp(xp,tcr,NULL);
642    allocation_enabled = true;
643    tcr->save_allocbase = (void *)VOID_ALLOCPTR;
644    callback_to_lisp(tcr, cmain, xp, xcf, -1, 0, 0, error_allocation_disabled);
645    xpGPR(xp,Isp) = save_sp;
646    xpGPR(xp,Ifp) = save_fp;
647    return true;
648  }
649  cur_allocptr = xpGPR(xp,Iallocptr);
650  allocptr_tag = fulltag_of(cur_allocptr);
651  if (allocptr_tag == fulltag_misc) {
652#ifdef X8664
653    disp = xpGPR(xp,Iimm1);
654#else
655    disp = xpGPR(xp,Iimm0);
656#endif
657  } else {
658    disp = dnode_size-fulltag_cons;
659  }
660  bytes_needed = disp+allocptr_tag;
661
662  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr+disp)));
663  if (allocate_object(xp, bytes_needed, disp, tcr, notify)) {
664    if (notify && *notify) {
665      xpPC(xp)+=2;
666      /* Finish the allocation: add a header if necessary,
667         clear the tag bits in tcr.save_allocptr. */
668      pc_luser_xp(xp,tcr,NULL);
669      callback_for_gc_notification(xp,tcr);
670    }
671    return true;
672  }
673 
674  lisp_allocation_failure(xp,tcr,bytes_needed);
675
676  return true;
677}
678
679 
680int
681callback_to_lisp (TCR * tcr, LispObj callback_macptr, ExceptionInformation *xp,
682                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
683{
684  natural  callback_ptr;
685  int delta;
686  unsigned old_mxcsr = get_mxcsr();
687#ifdef X8632
688  natural saved_node_regs_mask = tcr->node_regs_mask;
689  natural saved_unboxed0 = tcr->unboxed0;
690  natural saved_unboxed1 = tcr->unboxed1;
691  LispObj *vsp = (LispObj *)xpGPR(xp, Isp);
692#endif
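  /* 0x1f80 is the processor's default MXCSR: all exception flags clear,
     all SSE exceptions masked, round-to-nearest, FTZ/DAZ off. */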
693  set_mxcsr(0x1f80);
694
695  /* Put the active stack pointers where .SPcallback expects them */
696#ifdef X8632
697  tcr->node_regs_mask = X8632_DEFAULT_NODE_REGS_MASK;
698
699  *--vsp = tcr->save0;
700  *--vsp = tcr->save1;
701  *--vsp = tcr->save2;
702  *--vsp = tcr->save3;
703  *--vsp = tcr->next_method_context;
704  xpGPR(xp, Isp) = (LispObj)vsp;
705#endif
706  tcr->save_vsp = (LispObj *)xpGPR(xp, Isp);
707  tcr->save_fp = (LispObj *)xpGPR(xp, Ifp);
708
709  /* Call back.  The caller of this function may have modified stack/frame
710     pointers (and at least should have called prepare_for_callback()).
711  */
712  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
713  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
714  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
715  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
716
717#ifdef X8632
718  tcr->next_method_context = *vsp++;
719  tcr->save3 = *vsp++;
720  tcr->save2 = *vsp++;
721  tcr->save1 = *vsp++;
722  tcr->save0 = *vsp++;
723  xpGPR(xp, Isp) = (LispObj)vsp;
724
725  tcr->node_regs_mask = saved_node_regs_mask;
726  tcr->unboxed0 = saved_unboxed0;
727  tcr->unboxed1 = saved_unboxed1;
728#endif
729  set_mxcsr(old_mxcsr);
730  return delta;
731}
732
733void
734callback_for_interrupt(TCR *tcr, ExceptionInformation *xp)
735{
736  LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
737    word_beyond_vsp = save_vsp[-1],
738    save_fp = xpGPR(xp,Ifp),
739    xcf = create_exception_callback_frame(xp, tcr);
740  int save_errno = errno;
741
742  callback_to_lisp(tcr, nrs_CMAIN.vcell,xp, xcf, 0, 0, 0, 0);
743  xpGPR(xp,Ifp) = save_fp;
744  xpGPR(xp,Isp) = (LispObj)save_vsp;
745  save_vsp[-1] = word_beyond_vsp;
746  errno = save_errno;
747}
748
749Boolean
750handle_error(TCR *tcr, ExceptionInformation *xp)
751{
752  pc program_counter = (pc)xpPC(xp);
753  unsigned char op0 = program_counter[0], op1 = program_counter[1];
754  LispObj rpc, errdisp = nrs_ERRDISP.vcell,
755    save_vsp = xpGPR(xp,Isp), xcf0,
756    save_fp = xpGPR(xp,Ifp);
757  int skip;
758
759  if ((fulltag_of(errdisp) == fulltag_misc) &&
760      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
761
762    if ((op0 == 0xcd) && (op1 >= 0xc0) && (op1 <= 0xc2)) {
763      finish_function_entry(xp);
764    }
765    xcf0 = create_exception_callback_frame(xp, tcr);
766    skip = callback_to_lisp(tcr, errdisp, xp, xcf0, 0, 0, 0, 0);
767    if (skip == -1) {
768      xcf *xcf1 = (xcf *)xcf0;
769      LispObj container = xcf1->containing_uvector;
770     
771      rpc = xcf1->relative_pc >> fixnumshift;
772      if (container == lisp_nil) {
773        xpPC(xp) = rpc;
774      } else {
775        xpPC(xp) = (LispObj)(&(deref(container,
776#ifdef X8664
777                                     1
778#else
779                                     0
780#endif
781)))+rpc;
782      }
783       
784      skip = 0;
785    }
786    xpGPR(xp,Ifp) = save_fp;
787    xpGPR(xp,Isp) = save_vsp;
788    if ((op0 == 0xcd) && (op1 == 0xc7)) {
789      /* Continue after an undefined function call. The function
790         that had been undefined has already been called (in the
791         break loop), and a list of the values that it returned
792         is in the xp's %arg_z.  A function that returns those
793         values is in the xp's %fn; we just have to adjust the
794         stack (keeping the return address in the right place
795         and discarding any stack args/reserved stack frame),
796         then set nargs and the PC so that that function's
797         called when we resume.
798      */
799      LispObj *vsp =(LispObj *)save_vsp, ra = *vsp;
800      int nargs = xpGPR(xp, Inargs)>>fixnumshift;
801
802#ifdef X8664
803      if (nargs > 3) {
804        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 3)));
805        push_on_lisp_stack(xp,ra);
806      }
807#else
808      if (nargs > 2) {
809        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 2)));
810        push_on_lisp_stack(xp,ra);
811      }
812#endif
813      xpPC(xp) = xpGPR(xp,Ifn);
814      xpGPR(xp,Inargs) = 1<<fixnumshift;
815    } else {
816      xpPC(xp) += skip;
817    }
818    return true;
819  } else {
820    return false;
821  }
822}
823
824
825protection_handler
826* protection_handlers[] = {
827  do_spurious_wp_fault,
828  do_soft_stack_overflow,
829  do_soft_stack_overflow,
830  do_soft_stack_overflow,
831  do_hard_stack_overflow,   
832  do_hard_stack_overflow,
833  do_hard_stack_overflow,
834};
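/* protection_handlers is indexed by the protected area's "why" field
   (a lisp_protection_kind): entry 0 handles spurious write-protect faults,
   the next three entries the soft (recoverable) stack guard pages, and the
   last three the hard guard pages. */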
835
836
837/* Maybe this'll work someday.  We may have to do something to
838   make the thread look like it's not handling an exception */
839void
840reset_lisp_process(ExceptionInformation *xp)
841{
842}
843
844Boolean
845do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
846{
847  /*  reset_lisp_process(xp); */
848  Bug(xp, "Unrecoverable stack overflow.");
849  return false;
850}
851
852
853Boolean
854do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
855{
856
857  return false;
858}
859
860Boolean
861do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
862{
863  /* Trying to write into a guard page on the vstack or tstack.
864     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
865     signal an error_stack_overflow condition.
866      */
867  lisp_protection_kind which = prot_area->why;
868  Boolean on_TSP = (which == kTSPsoftguard);
869  LispObj save_fp = xpGPR(xp,Ifp);
870  LispObj save_vsp = xpGPR(xp,Isp), 
871    xcf,
872    cmain = nrs_CMAIN.vcell;
873  area *a;
874  protected_area_ptr soft;
875  TCR *tcr = get_tcr(false);
876  int skip;
877
878  if ((fulltag_of(cmain) == fulltag_misc) &&
879      (header_subtag(header_of(cmain)) == subtag_macptr)) {
880    if (on_TSP) {
881      a = tcr->ts_area;
882    } else {
883      a = tcr->vs_area;
884    }
885    soft = a->softprot;
886    unprotect_area(soft);
887    xcf = create_exception_callback_frame(xp, tcr);
888    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, on_TSP, 0, 0);
889    xpGPR(xp,Ifp) = save_fp;
890    xpGPR(xp,Isp) = save_vsp;
891    xpPC(xp) += skip;
892    return true;
893  }
894  return false;
895}
896
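/* Bit 1 of the x86 page-fault error code is set when the faulting access
   was a write; each platform below just exposes that error code (or an
   equivalent flag) in a different place. */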
897Boolean
898is_write_fault(ExceptionInformation *xp, siginfo_t *info)
899{
900#ifdef DARWIN
901  return (UC_MCONTEXT(xp)->__es.__err & 0x2) != 0;
902#endif
903#if defined(LINUX) || defined(SOLARIS)
904  return (xpGPR(xp,REG_ERR) & 0x2) != 0;
905#endif
906#ifdef FREEBSD
907  return (xp->uc_mcontext.mc_err & 0x2) != 0;
908#endif
909#ifdef WINDOWS
910  return (info->ExceptionFlags == EXCEPTION_WRITE_FAULT);
911#endif
912}
913
914Boolean
915handle_fault(TCR *tcr, ExceptionInformation *xp, siginfo_t *info, int old_valence)
916{
917#ifdef FREEBSD
918#ifdef X8664
919  BytePtr addr = (BytePtr) xp->uc_mcontext.mc_addr;
920#else
921  BytePtr addr = (BytePtr) info->si_addr;
922#endif
923#else
924#ifdef WINDOWS
925  BytePtr addr = (BytePtr) info->ExceptionInformation[1];
926#else
927  BytePtr addr = (BytePtr) info->si_addr;
928#endif
929#endif
930  Boolean valid = IS_PAGE_FAULT(info,xp);
931
932  if (TCR_AUX(tcr)->safe_ref_address != NULL) {
933    xpGPR(xp,Iimm0) = 0;
934    xpPC(xp) = xpGPR(xp,Ira0);
935    TCR_AUX(tcr)->safe_ref_address = NULL;
936    return true;
937  }
938
939  if (valid) {
940    {
941      protected_area *a = find_protected_area(addr);
942      protection_handler *handler;
943     
944      if (a) {
945        handler = protection_handlers[a->why];
946        return handler(xp, a, addr);
947      }
948    }
949
950    if ((addr >= readonly_area->low) &&
951        (addr < readonly_area->active)) {
952      UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
953                      page_size);
954      return true;
955    }
956
957    {
958      area *a = area_containing(addr);
959
960      if (a && a->code == AREA_WATCHED && addr < a->high) {
961        /* caught a write to a watched object */
962        LispObj *p = (LispObj *)a->low;
963        LispObj node = *p;
964        unsigned tag_n = fulltag_of(node);
965        LispObj cmain = nrs_CMAIN.vcell;
966        LispObj obj;
967
968        if (immheader_tag_p(tag_n) || nodeheader_tag_p(tag_n))
969          obj = (LispObj)p + fulltag_misc;
970        else
971          obj = (LispObj)p + fulltag_cons;
972
973        if ((fulltag_of(cmain) == fulltag_misc) &&
974            (header_subtag(header_of(cmain)) == subtag_macptr)) {
975          LispObj save_vsp = xpGPR(xp, Isp);
976          LispObj save_fp = xpGPR(xp, Ifp);
977          LispObj xcf;
978          natural offset = (LispObj)addr - obj;
979          int skip;
980
981          push_on_lisp_stack(xp, obj);
982          xcf = create_exception_callback_frame(xp, tcr);
983
984          /* The magic 2 means this was a write to a watched object */
985          skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, 2,
986                                  (natural)addr, offset);
987          xpPC(xp) += skip;
988          xpGPR(xp, Ifp) = save_fp;
989          xpGPR(xp, Isp) = save_vsp;
990          return true;
991        }
992      }
993    }
994  }
995
996  if (old_valence == TCR_STATE_LISP) {
997    LispObj cmain = nrs_CMAIN.vcell,
998      xcf;
999    if ((fulltag_of(cmain) == fulltag_misc) &&
1000      (header_subtag(header_of(cmain)) == subtag_macptr)) {
1001      xcf = create_exception_callback_frame(xp, tcr);
1002      callback_to_lisp(tcr, cmain, xp, xcf, SIGBUS, valid ? is_write_fault(xp,info) : (natural)-1, valid ? (natural)addr : 0, 0);
1003    }
1004  }
1005  return false;
1006}
1007
1008Boolean
1009handle_foreign_fpe(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
1010{
1011#ifdef X8632
1012  return false;
1013#else
1014  int code;
1015
1016#ifdef WINDOWS
1017  if (info->ExceptionCode == EXCEPTION_INT_DIVIDE_BY_ZERO)
1018    return false;
1019#else
1020  if (info->si_code == FPE_INTDIV)
1021    return false;
1022#endif
1023
1024  /*
1025   * Cooperate with .SPffcall to avoid saving and restoring the MXCSR
1026   * around every foreign call.
1027   */
1028    if (! (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN_FPE))) {
1029      tcr->flags |= (1<<TCR_FLAG_BIT_FOREIGN_FPE);
1030      tcr->lisp_mxcsr = xpMXCSR(xp) & ~MXCSR_STATUS_MASK;
1031    }
1032    xpMXCSR(xp) &= ~MXCSR_STATUS_MASK;
1033    xpMXCSR(xp) |= MXCSR_CONTROL_MASK;
1034    return true;
1035#endif
1036}
1037
1038Boolean
1039handle_floating_point_exception(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
1040{
1041  int code,skip;
1042  LispObj  xcf, cmain = nrs_CMAIN.vcell,
1043    save_vsp = xpGPR(xp,Isp),
1044    save_fp = xpGPR(xp,Ifp);
1045#ifdef DARWIN /* bug in <= 10.5 */
1046  void decode_vector_fp_exception(siginfo_t *, uint32_t);
1047
1048  decode_vector_fp_exception(info, (uint32_t)(UC_MCONTEXT(xp)->__fs.__fpu_mxcsr));
1049#endif
1050
1051#ifdef WINDOWS
1052  code = info->ExceptionCode;
1053#else
1054  code = info->si_code;
1055#endif 
1056
1057  if ((fulltag_of(cmain) == fulltag_misc) &&
1058      (header_subtag(header_of(cmain)) == subtag_macptr)) {
1059    xcf = create_exception_callback_frame(xp, tcr);
1060    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGFPE, code, 0, 0);
1061    xpPC(xp) += skip;
1062    xpGPR(xp,Ifp) = save_fp;
1063    xpGPR(xp,Isp) = save_vsp;
1064    return true;
1065  } else {
1066    return false;
1067  }
1068}
1069
1070
1071Boolean
1072extend_tcr_tlb(TCR *tcr, ExceptionInformation *xp)
1073{
1074  LispObj index, old_limit = tcr->tlb_limit, new_limit, new_bytes;
1075  LispObj *old_tlb = tcr->tlb_pointer, *new_tlb, *work, *tos;
1076
1077  tos = (LispObj*)(xpGPR(xp,Isp));
1078  index = *tos++;
1079  (xpGPR(xp,Isp))=(LispObj)tos;
1080 
1081  new_limit = align_to_power_of_2(index+1,12);
1082  new_bytes = new_limit-old_limit;
1083  new_tlb = realloc(old_tlb, new_limit);
1084
1085  if (new_tlb == NULL) {
1086    return false;
1087  }
1088  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1089
1090  while (new_bytes) {
1091    *work++ = no_thread_local_binding_marker;
1092    new_bytes -= sizeof(LispObj);
1093  }
1094  tcr->tlb_pointer = new_tlb;
1095  tcr->tlb_limit = new_limit;
1096  return true;
1097}
1098
1099
1100#if defined(FREEBSD) || defined(DARWIN)
1101static
1102char mxcsr_bit_to_fpe_code[] = {
1103  FPE_FLTINV,                   /* ie */
1104  0,                            /* de */
1105  FPE_FLTDIV,                   /* ze */
1106  FPE_FLTOVF,                   /* oe */
1107  FPE_FLTUND,                   /* ue */
1108  FPE_FLTRES                    /* pe */
1109};
1110
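/* In the MXCSR, the exception flag bits occupy bits 0-5 (IE, DE, ZE, OE,
   UE, PE) and the corresponding mask bits occupy bits 7-12.  For example,
   an unmasked SSE divide-by-zero leaves ZE (bit 2) set with ZM (bit 9)
   clear, which the loop below maps to FPE_FLTDIV. */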
1111void
1112decode_vector_fp_exception(siginfo_t *info, uint32_t mxcsr)
1113{
1114  /* If the exception appears to be an XMM FP exception, try to
1115     determine what it was by looking at bits in the mxcsr.
1116  */
1117  int xbit, maskbit;
1118 
1119  for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
1120    if ((mxcsr & (1 << xbit)) &&
1121        !(mxcsr & (1 << maskbit))) {
1122      info->si_code = mxcsr_bit_to_fpe_code[xbit];
1123      return;
1124    }
1125  }
1126  /* Nothing enabled and set in the mxcsr, assume integer /0 */
1127  info->si_code = FPE_INTDIV;
1128}
1129
1130#ifdef FREEBSD
1131void
1132freebsd_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
1133{
1134  if (info->si_code == 0) {
1135#ifdef X8664
1136    struct savefpu *fpu = (struct savefpu *) &(xp->uc_mcontext.mc_fpstate);
1137#else
1138    struct savexmm *fpu = (struct savexmm *) &(xp->uc_mcontext.mc_fpstate);
1139#endif
1140    uint32_t mxcsr = fpu->sv_env.en_mxcsr;
1141
1142    decode_vector_fp_exception(info, mxcsr);
1143  }
1144}
1145#endif
1146
1147#ifdef DARWIN
1148void
1149darwin_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
1150{
1151  if (info->si_code == EXC_I386_SSEEXTERR) {
1152    uint32_t mxcsr = UC_MCONTEXT(xp)->__fs.__fpu_mxcsr;
1153
1154    decode_vector_fp_exception(info, mxcsr);
1155  }
1156}
1157
1158#endif
1159
1160#endif
1161
1162void
1163get_lisp_string(LispObj lisp_string, char *c_string, natural max)
1164{
1165  lisp_char_code *src = (lisp_char_code *)  (ptr_from_lispobj(lisp_string + misc_data_offset));
1166  natural i, n = header_element_count(header_of(lisp_string));
1167
1168  if (n > max) {
1169    n = max;
1170  }
1171
1172  for (i = 0; i < n; i++) {
1173    c_string[i] = 0xff & (src[i]);
1174  }
1175  c_string[n] = 0;
1176}
1177
1178Boolean handle_watch_trap(ExceptionInformation *xp, TCR *tcr);
1179
1180Boolean
1181handle_exception(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1182{
1183  pc program_counter = (pc)xpPC(context);
1184
1185  if (old_valence != TCR_STATE_LISP) {
1186    if (old_valence == TCR_STATE_FOREIGN && signum == SIGFPE) {
1187      return handle_foreign_fpe(tcr, context, info);
1188    } else {
1189      return false;
1190    }
1191  }
1192
1193  switch (signum) {
1194  case SIGNUM_FOR_INTN_TRAP:
1195    if (IS_MAYBE_INT_TRAP(info,context)) {
1196      /* Something mapped to SIGSEGV/SIGBUS that has nothing to do with
1197         a memory fault.  On x86, an "int n" instruction that's
1198         not otherwise implemented causes a "protection fault".  Of
1199         course that has nothing to do with accessing protected
1200         memory, but most Unices act as if it did. */
1201      if ((program_counter != NULL) &&
1202          (*program_counter == INTN_OPCODE)) {
1203        program_counter++;
1204        switch (*program_counter) {
1205        case UUO_ALLOC_TRAP:
1206          {
1207            Boolean did_notify = false,
1208              allocation_disabled = !allocation_enabled,
1209              *notify_ptr = &did_notify;
1210            if (did_gc_notification_since_last_full_gc) {
1211              notify_ptr = NULL;
1212            }
1213            if (handle_alloc_trap(context, tcr, notify_ptr)) {
1214              if (! did_notify) {
1215                if (! allocation_disabled) {
1216                  xpPC(context) += 2;   /* we might have GCed. */
1217                }
1218              }
1219              return true;
1220            }
1221          }
1222          break;
1223        case UUO_GC_TRAP:
1224          if (handle_gc_trap(context, tcr)) {
1225            xpPC(context) += 2;
1226            return true;
1227          }
1228          break;
1229        case UUO_WATCH_TRAP:
1230          /* add or remove watched object */
1231          if (handle_watch_trap(context, tcr)) {
1232            xpPC(context) += 2;
1233            return true;
1234          }
1235          break;
1236        case UUO_DEBUG_TRAP:
1237          xpPC(context) = (natural) (program_counter+1);
1238          lisp_Debugger(context, info, debug_entry_dbg, false, "Lisp Breakpoint");
1239          return true;
1240           
1241        case UUO_DEBUG_TRAP_WITH_STRING:
1242          xpPC(context) = (natural) (program_counter+1);
1243          {
1244            char msg[512];
1245
1246            get_lisp_string(xpGPR(context,Iarg_z),msg, sizeof(msg)-1);
1247            lisp_Debugger(context, info, debug_entry_dbg, false, msg);
1248          }
1249          return true;
1250         
1251        default:
1252          return handle_error(tcr, context);
1253        }
1254      } else {
1255        return false;
1256      }
1257
1258    } else {
1259      return handle_fault(tcr, context, info, old_valence);
1260    }
1261    break;
1262
1263  case SIGNAL_FOR_PROCESS_INTERRUPT:
1264    tcr->interrupt_pending = 0;
1265    callback_for_interrupt(tcr, context);
1266    return true;
1267    break;
1268
1269
1270  case SIGILL:
1271    if ((program_counter[0] == XUUO_OPCODE_0) &&
1272        (program_counter[1] == XUUO_OPCODE_1)) {
1273      TCR *target = (TCR *)xpGPR(context, Iarg_z);
1274
1275      switch (program_counter[2]) {
1276      case XUUO_TLB_TOO_SMALL:
1277        if (extend_tcr_tlb(tcr,context)) {
1278          xpPC(context)+=3;
1279          return true;
1280        }
1281        break;
1282       
1283      case XUUO_INTERRUPT_NOW:
1284        callback_for_interrupt(tcr,context);
1285        xpPC(context)+=3;
1286        return true;
1287
1288      case XUUO_SUSPEND_NOW:
1289        xpPC(context)+=3;
1290        return true;
1291
1292      case XUUO_INTERRUPT:
1293        raise_thread_interrupt(target);
1294        xpPC(context)+=3;
1295        return true;
1296
1297      case XUUO_SUSPEND:
1298        xpGPR(context,Iimm0) = (LispObj) lisp_suspend_tcr(target);
1299        xpPC(context)+=3;
1300        return true;
1301
1302      case XUUO_SUSPEND_ALL:
1303        lisp_suspend_other_threads();
1304        xpPC(context)+=3;
1305        return true;
1306
1307
1308      case XUUO_RESUME:
1309        xpGPR(context,Iimm0) = (LispObj) lisp_resume_tcr(target);
1310        xpPC(context)+=3;
1311        return true;
1312       
1313      case XUUO_RESUME_ALL:
1314        lisp_resume_other_threads();
1315        xpPC(context)+=3;
1316        return true;
1317       
1318      case XUUO_KILL:
1319        xpGPR(context,Iimm0) = (LispObj)kill_tcr(target);
1320        xpPC(context)+=3;
1321        return true;
1322
1323      case XUUO_ALLOCATE_LIST:
1324        allocate_list(context,tcr);
1325        xpPC(context)+=3;
1326        return true;
1327
1328      default:
1329        return false;
1330      }
1331    } else {
1332      return false;
1333    }
1334    break;
1335   
1336  case SIGFPE:
1337#ifdef FREEBSD
1338    /* As of 6.1, FreeBSD/AMD64 doesn't seem real comfortable
1339       with this newfangled XMM business (and therefore info->si_code
1340       is often 0 on an XMM FP exception).
1341       Try to figure out what really happened by decoding mxcsr
1342       bits.
1343    */
1344    freebsd_decode_vector_fp_exception(info,context);
1345#endif
1346#ifdef DARWIN
1347    /* Same general problem with Darwin as of 8.7.2 */
1348    darwin_decode_vector_fp_exception(info,context);
1349#endif
1350
1351    return handle_floating_point_exception(tcr, context, info);
1352
1353#if SIGBUS != SIGNUM_FOR_INTN_TRAP
1354  case SIGBUS:
1355    return handle_fault(tcr, context, info, old_valence);
1356#endif
1357   
1358#if SIGSEGV != SIGNUM_FOR_INTN_TRAP
1359  case SIGSEGV:
1360    return handle_fault(tcr, context, info, old_valence);
1361#endif   
1362   
1363  default:
1364    return false;
1365  }
1366  return false;
1367}
1368
1369
1370/*
1371   Current thread has all signals masked.  Before unmasking them,
1372   make it appear that the current thread has been suspended.
1373   (This is to handle the case where another thread is trying
1374   to GC before this thread is able to seize the exception lock.)
1375*/
1376int
1377prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1378{
1379  int old_valence = tcr->valence;
1380
1381  tcr->pending_exception_context = context;
1382  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1383
1384#ifdef WINDOWS
1385  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1386    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1387    SEM_RAISE(TCR_AUX(tcr)->suspend);
1388    SEM_WAIT_FOREVER(TCR_AUX(tcr)->resume);
1389  }
1390#else
1391  ALLOW_EXCEPTIONS(context);
1392#endif
1393  return old_valence;
1394} 
1395
1396void
1397wait_for_exception_lock_in_handler(TCR *tcr, 
1398                                   ExceptionInformation *context,
1399                                   xframe_list *xf)
1400{
1401
1402  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1403#if 0
1404  fprintf(dbgout, "0x" LISP " has exception lock\n", tcr);
1405#endif
1406  xf->curr = context;
1407#ifdef X8632
1408  xf->node_regs_mask = tcr->node_regs_mask;
1409#endif
1410  xf->prev = tcr->xframe;
1411  tcr->xframe =  xf;
1412  tcr->pending_exception_context = NULL;
1413  tcr->valence = TCR_STATE_FOREIGN; 
1414}
1415
1416void
1417unlock_exception_lock_in_handler(TCR *tcr)
1418{
1419  tcr->pending_exception_context = tcr->xframe->curr;
1420#ifdef X8632
1421  tcr->node_regs_mask = tcr->xframe->node_regs_mask;
1422#endif
1423  tcr->xframe = tcr->xframe->prev;
1424  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1425  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1426#if 0
1427  fprintf(dbgout, "0x" LISP " released exception lock\n", tcr);
1428#endif
1429}
1430
1431/*
1432   If an interrupt is pending on exception exit, try to ensure
1433   that the thread sees it as soon as it's able to run.
1434*/
1435#ifdef WINDOWS
1436void
1437raise_pending_interrupt(TCR *tcr)
1438{
1439}
1440void
1441exit_signal_handler(TCR *tcr, int old_valence)
1442{
1443}
1444void
1445signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1446{
1447}
1448#else
1449void
1450raise_pending_interrupt(TCR *tcr)
1451{
1452  if ((TCR_INTERRUPT_LEVEL(tcr) >= 0) &&
1453      (tcr->interrupt_pending)) {
1454    pthread_kill((pthread_t)(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1455  }
1456}
1457
1458void
1459exit_signal_handler(TCR *tcr, int old_valence)
1460{
1461  sigset_t mask;
1462  sigfillset(&mask);
1463#ifdef FREEBSD
1464  sigdelset(&mask,SIGTRAP);
1465#endif
1466 
1467  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1468  tcr->valence = old_valence;
1469  tcr->pending_exception_context = NULL;
1470}
1471
1472void
1473signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context
1474#ifdef DARWIN
1475               , TCR *tcr, int old_valence
1476#endif
1477)
1478{
1479  xframe_list xframe_link;
1480#ifndef DARWIN
1481  TCR *tcr = get_tcr(false);
1482
1483  int old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1484#endif
1485  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1486    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1487    pthread_kill(pthread_self(), thread_suspend_signal);
1488  }
1489  wait_for_exception_lock_in_handler(tcr,context, &xframe_link);
1490
1491
1492  if (! handle_exception(signum, info, context, tcr, old_valence)) {
1493    char msg[512];
1494    Boolean foreign = (old_valence != TCR_STATE_LISP);
1495
1496    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x" LISP ", context->regs at #x" LISP "", signum, xpPC(context), (natural)xpGPRvector(context));
1497   
1498    if (lisp_Debugger(context, info, signum,  foreign, msg)) {
1499      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1500    }
1501  }
1502  unlock_exception_lock_in_handler(tcr);
1503#ifndef DARWIN_USE_PSEUDO_SIGRETURN
1504  exit_signal_handler(tcr, old_valence);
1505#endif
1506  /* raise_pending_interrupt(tcr); */
1507#ifndef DARWIN_USE_PSEUDO_SIGRETURN
1508  SIGRETURN(context);
1509#endif
1510}
1511#endif
1512
1513
1514
1515
1516#ifdef LINUX
1517/* type of pointer to saved fp state */
1518
1519typedef struct _fpstate *FPREGS;
1520#define FPREGSsize_in_bytes(f) linux_fpstate_size_in_bytes(f)
1521
1522size_t
1523linux_fpstate_size_in_bytes(FPREGS state)
1524{
1525  if (WORD_SIZE == 64){
1526    /* see <asm/sigcontext.h> It would be way too useful if we could
1527       include that file without conflicting with <bits/sigcontext.h> */
1528    /* I didn't make this stuff up */
1529    struct _fpx_sw_bytes * sw = (struct _fpx_sw_bytes *) (((char *)state)+464);
1530    if (sw->magic1 == FP_XSTATE_MAGIC1) {
1531      return sw->extended_size;
1532    }
1533
1534  }
1535  return (sizeof (*state));
1536}
1537
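/* copy_fpregs() copies the saved FP state (which, per the check above, may
   be the larger XSAVE image that includes the AVX ymm-high registers rather
   than just the 512-byte legacy fxsave area) to a 64-byte-aligned spot below
   "current" on the foreign stack; copy_ucontext() later points the copied
   ucontext's fpregs at this copy. */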
1538LispObj *
1539copy_fpregs(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
1540{
1541  FPREGS src = (FPREGS)(xp->uc_mcontext.fpregs), dest;
1542  size_t nbytes = FPREGSsize_in_bytes(src);
1543  if (src) {
1544    BytePtr bc = (BytePtr)current - nbytes;
1545    dest = (FPREGS) (truncate_to_power_of_2(bc,6));
1546    memcpy(dest,src,nbytes);
1547    *destptr = dest;
1548    current = (LispObj *)dest;
1549  }
1550  return current;
1551}
1552
1553#endif
1554
1555
1556#ifdef FREEBSD
1557typedef void *FPREGS;
1558
1559
1560LispObj *
1561copy_avx(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
1562{
1563  natural sp;
1564
1565  *destptr = (FPREGS)AVX_CONTEXT_PTR(xp);
1566
1567  if (AVX_CONTEXT_PRESENT(xp)) {
1568    sp = (natural)current;
1569    sp -= AVX_CONTEXT_SIZE(xp);
1570    sp = truncate_to_power_of_2(sp,6);
1571    memcpy((void *)sp,(void *)AVX_CONTEXT_PTR(xp),AVX_CONTEXT_SIZE(xp));
1572    current = (LispObj *)sp;
1573    *destptr = (FPREGS)current;
1574  }
1575  return current;
1576}
1577#endif
1578
1579#ifdef DARWIN
1580LispObj *
1581copy_darwin_mcontext(MCONTEXT_T context, 
1582                     LispObj *current, 
1583                     MCONTEXT_T *out)
1584{
1585  MCONTEXT_T dest = ((MCONTEXT_T)current)-1;
1586  dest = (MCONTEXT_T) (((LispObj)dest) & ~15);
1587
1588  *dest = *context;
1589  *out = dest;
1590  return (LispObj *)dest;
1591}
1592#endif
1593
1594LispObj *
1595copy_siginfo(siginfo_t *info, LispObj *current)
1596{
1597  siginfo_t *dest = ((siginfo_t *)current) - 1;
1598#if !defined(LINUX) || !defined(X8632)
1599  dest = (siginfo_t *) (((LispObj)dest)&~15);
1600#endif
1601  *dest = *info;
1602  return (LispObj *)dest;
1603}
1604
1605#ifdef LINUX
1606typedef FPREGS copy_ucontext_last_arg_t;
1607#else
1608typedef void * copy_ucontext_last_arg_t;
1609#endif
1610
1611#ifndef WINDOWS
1612LispObj *
1613copy_ucontext(ExceptionInformation *context, LispObj *current, copy_ucontext_last_arg_t fp)
1614{
1615  ExceptionInformation *dest = ((ExceptionInformation *)current)-1;
1616#if !defined(LINUX) || !defined(X8632)
1617  dest = (ExceptionInformation *) (((LispObj)dest) & ~15);
1618#endif
1619
1620  *dest = *context;
1621  /* Fix it up a little; where's the signal mask allocated, if indeed
1622     it is "allocated" ? */
1623#ifdef LINUX
1624  dest->uc_mcontext.fpregs = (fpregset_t)fp;
1625#endif
1626#ifdef FREEBSD
1627  if (AVX_CONTEXT_PRESENT(context)) {
1628    AVX_CONTEXT_PTR(context) = (natural)fp;
1629  }
1630#endif
1631  dest->uc_stack.ss_sp = 0;
1632  dest->uc_stack.ss_size = 0;
1633  dest->uc_stack.ss_flags = 0;
1634  dest->uc_link = NULL;
1635  return (LispObj *)dest;
1636}
1637#endif
1638
1639
1640LispObj *
1641tcr_frame_ptr(TCR *tcr)
1642{
1643  ExceptionInformation *xp;
1644  LispObj *fp;
1645
1646  if (tcr->pending_exception_context)
1647    xp = tcr->pending_exception_context;
1648  else if (tcr->valence == TCR_STATE_LISP) {
1649    xp = TCR_AUX(tcr)->suspend_context;
1650  } else {
1651    xp = NULL;
1652  }
1653  if (xp) {
1654    fp = (LispObj *)xpGPR(xp, Ifp);
1655  } else {
1656    fp = tcr->save_fp;
1657  }
1658  return fp;
1659}
1660
1661
1662LispObj *
1663find_foreign_rsp(LispObj rsp, area *foreign_area, TCR *tcr)
1664{
1665
1666  if (((BytePtr)rsp < foreign_area->low) ||
1667      ((BytePtr)rsp > foreign_area->high)) {
1668    rsp = (LispObj)(tcr->foreign_sp);
1669  }
1670  return (LispObj *) (((rsp-128) & ~15));
1671}
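/* The 128-byte drop above keeps the copied frames clear of the System V
   x86-64 red zone below the interrupted stack pointer (it's harmless where
   no red zone exists), and the mask re-establishes 16-byte alignment. */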
1672
1673#ifdef X8632
1674#ifdef LINUX
1675/* This is here for debugging.  On entry to a signal handler that
1676   receives info and context arguments, the stack should look exactly
1677   like this.  The "pretcode field" of the structure is the address
1678   of code that does an rt_sigreturn syscall, and rt_sigreturn expects
1679   %esp at the time of that syscall to be pointing just past the
1680   pretcode field.
1681   handle_signal_on_foreign_stack() and helpers have to be very
1682   careful to duplicate this "structure" exactly.
1683   Note that on x8664 Linux, rt_sigreturn expects a ucontext to
1684   be on top of the stack (with a siginfo_t underneath it.)
1685   It sort of half-works to do sigreturn via setcontext() on
1686   x8632 Linux, but (a) it may not be available on some distributions
1687   and (b) even a relatively modern version of it uses "fldenv" to
1688   restore FP context, and "fldenv" isn't nearly good enough.
1689*/
1690
1691struct rt_sigframe {
1692        char *pretcode;
1693        int sig;
1694        siginfo_t  *pinfo;
1695        void  *puc;
1696        siginfo_t info;
1697        struct ucontext uc;
1698        struct _fpstate fpstate;
1699        char retcode[8];
1700};
1701struct rt_sigframe *rtsf = 0;
1702
1703#endif
1704#endif
1705
1706
1707#ifndef WINDOWS
1708/* x8632 Linux requires that the stack-allocated siginfo be nearer
1709   the top of stack than the stack-allocated ucontext.  If other
1710   platforms care, they expect the ucontext to be nearer the top
1711   of stack.
1712*/
1713
1714#if defined(LINUX) && defined(X8632)
1715#define UCONTEXT_ON_TOP_OF_STACK 0
1716#else
1717#define UCONTEXT_ON_TOP_OF_STACK 1
1718#endif
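/* handle_signal_on_foreign_stack() rebuilds, on the foreign stack, the
   material a signal handler expects to find: the return address at the new
   top of stack, the siginfo and ucontext copies just above it (in the
   platform-dependent order chosen above), and the copied FP state or
   mcontext above those (Linux, FreeBSD, Darwin respectively), before
   switching stacks and calling the real handler, which will return to
   return_address. */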
1719void
1720handle_signal_on_foreign_stack(TCR *tcr,
1721                               void *handler, 
1722                               int signum, 
1723                               siginfo_t *info, 
1724                               ExceptionInformation *context,
1725                               LispObj return_address
1726                               )
1727{
1728#ifdef LINUX
1729  FPREGS fpregs = NULL;
1730#else
1731  void *fpregs = NULL;
1732#endif
1733#ifdef DARWIN
1734  MCONTEXT_T mcontextp = NULL;
1735#endif
1736  siginfo_t *info_copy = NULL;
1737  ExceptionInformation *xp = NULL;
1738  LispObj *foreign_rsp = find_foreign_rsp(xpGPR(context,Isp), tcr->cs_area, tcr);
1739
1740#ifdef LINUX
1741  foreign_rsp = copy_fpregs(context, foreign_rsp, &fpregs);
1742#endif
1743#ifdef FREEBSD
1744  foreign_rsp = copy_avx(context, foreign_rsp, &fpregs);
1745#endif
1746#ifdef DARWIN
1747  foreign_rsp = copy_darwin_mcontext(UC_MCONTEXT(context), foreign_rsp, &mcontextp);
1748#endif
1749#if UCONTEXT_ON_TOP_OF_STACK
1750  /* copy info first */
1751  foreign_rsp = copy_siginfo(info, foreign_rsp);
1752  info_copy = (siginfo_t *)foreign_rsp;
1753  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
1754  xp = (ExceptionInformation *)foreign_rsp;
1755#else
1756  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
1757  xp = (ExceptionInformation *)foreign_rsp;
1758  foreign_rsp = copy_siginfo(info, foreign_rsp);
1759  info_copy = (siginfo_t *)foreign_rsp;
1760#endif
1761#ifdef DARWIN
1762  UC_MCONTEXT(xp) = mcontextp;
1763#endif
1764  *--foreign_rsp = return_address;
1765  switch_to_foreign_stack(foreign_rsp,handler,signum,info_copy,xp);
1766}
1767#endif
1768
1769
1770#ifndef WINDOWS
1771#ifndef USE_SIGALTSTACK
1772void
1773arbstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation *context)
1774{
1775  TCR *tcr = get_interrupt_tcr(false);
1776#if 1
1777  if (tcr->valence != TCR_STATE_LISP) {
1778    lisp_Debugger(context, info, signum, true, "exception in foreign context");
1779  }
1780#endif
1781  {
1782    area *vs = tcr->vs_area;
1783    BytePtr current_sp = (BytePtr) current_stack_pointer();
1784
1785
1786    if ((current_sp >= vs->low) &&
1787        (current_sp < vs->high)) {
1788      handle_signal_on_foreign_stack(tcr,
1789                                     signal_handler,
1790                                     signum,
1791                                     info,
1792                                     context,
1793                                     (LispObj)__builtin_return_address(0)
1794
1795                                     );
1796    } else {
1797      signal_handler(signum, info, context, tcr, 0);
1798    }
1799  }
1800}
1801
1802#else
1803void
1804altstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
1805{
1806  TCR* tcr = get_tcr(true);
1807  Boolean do_stack_switch = false;
1808  stack_t ss;
1809
1810  if ((tcr->valence != TCR_STATE_LISP) &&
1811      (tcr->safe_ref_address) &&
1812      ((signum == SIGBUS) || (signum == SIGSEGV))) {
1813    extern opcode ffcall_return;
1814    xpPC(context) = (natural)&ffcall_return;
1815    xpGPR(context,Iimm0) = 0;
1816    xpGPR(context,Isp) = (natural)(tcr->foreign_sp);
1817    return;
1818  }
1819
1820
1821
1822
1823
1824#if WORD_SIZE==64
1825  if ((signum == SIGFPE) && (tcr->valence != TCR_STATE_LISP)) {
1826    if (handle_foreign_fpe(tcr,context,info)) {
1827      return;
1828    }
1829  }
1830#endif
1831     
1832  /* Because of signal chaining (and the possibility that libraries
1833     that use it ignore sigaltstack-related issues), we have to check
1834     whether we're actually on the altstack.
1835
1836     When OpenJDK VMs overwrite preinstalled signal handlers (that're
1837     there for a reason ...), they're also casual about SA_RESTART.
1838     We care about SA_RESTART (mostly) in the PROCESS-INTERRUPT case,
1839     and whether a JVM steals the signal used for PROCESS-INTERRUPT
1840     is platform-dependent.  On those platforms where the same signal
1841     is used, we should strongly consider trying to use another one.
1842  */
1843  sigaltstack(NULL, &ss);
1844  if (ss.ss_flags == SS_ONSTACK) {
1845    do_stack_switch = true;
1846  } else {
1847    area *vs = tcr->vs_area;
1848    BytePtr current_sp = (BytePtr) current_stack_pointer();
1849
1850    if ((current_sp >= vs->low) &&
1851        (current_sp < vs->high)) {
1852      do_stack_switch = true;
1853    }
1854  }
1855  if (do_stack_switch) {
1856    handle_signal_on_foreign_stack(tcr,signal_handler,signum,info,context,(LispObj)SIGRETURN_ADDRESS());
1857  } else {
1858    signal_handler(signum,info,context);
1859  }
1860}
1861#endif
1862#endif
1863
1864Boolean
1865stack_pointer_on_vstack_p(LispObj stack_pointer, TCR *tcr)
1866{
1867  area *a = tcr->vs_area;
1868 
1869  return (((BytePtr)stack_pointer <= a->high) &&
1870          ((BytePtr)stack_pointer > a->low));
1871}
1872
1873
1874#ifdef WINDOWS
1875extern DWORD restore_windows_context(ExceptionInformation *, TCR *, int);
1876#endif
1877
1878void
1879interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1880{
1881  TCR *tcr = get_interrupt_tcr(false);
1882  int old_valence = tcr->valence;
1883
1884  if (tcr) {
1885    if ((TCR_INTERRUPT_LEVEL(tcr) < 0) ||
1886        (tcr->valence != TCR_STATE_LISP) ||
1887        (tcr->unwinding != 0) ||
1888        ! stack_pointer_on_vstack_p(xpGPR(context,Isp), tcr) ||
1889        ! stack_pointer_on_vstack_p(xpGPR(context,Ifp), tcr)) {
1890      tcr->interrupt_pending = (((natural) 1)<< (nbits_in_word - ((natural)1)));
1891    } else {
1892      LispObj cmain = nrs_CMAIN.vcell;
1893     
1894      if ((fulltag_of(cmain) == fulltag_misc) &&
1895          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1896        /*
1897           This thread can (allegedly) take an interrupt now.
1898        */
1899
1900        xframe_list xframe_link;
1901        signed_natural alloc_displacement = 0;
1902        LispObj
1903          *next_tsp = tcr->next_tsp,
1904          *save_tsp = tcr->save_tsp,
1905          *p,
1906          q;
1907        natural old_foreign_exception = tcr->flags & (1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
1908
1909        tcr->flags &= ~(1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
1910           
1911        if (next_tsp != save_tsp) {
1912          tcr->next_tsp = save_tsp;
1913        } else {
1914          next_tsp = NULL;
1915        }
1916        /* have to do this before allowing interrupts */
1917        pc_luser_xp(context, tcr, &alloc_displacement);
1918        old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1919        wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1920        handle_exception(signum, info, context, tcr, old_valence);
1921        if (alloc_displacement) {
1922          tcr->save_allocptr -= alloc_displacement;
1923        }
1924        if (next_tsp) {
1925          tcr->next_tsp = next_tsp;
1926          p = next_tsp;
1927          while (p != save_tsp) {
1928            *p++ = 0;
1929          }
1930          q = (LispObj)save_tsp;
1931          *next_tsp = q;
1932        }
1933        tcr->flags |= old_foreign_exception;
1934        unlock_exception_lock_in_handler(tcr);
1935#ifndef WINDOWS
1936        exit_signal_handler(tcr, old_valence);
1937#endif
1938      }
1939    }
1940  }
1941#ifdef WINDOWS
1942  restore_windows_context(context,tcr,old_valence);
1943#else
1944  SIGRETURN(context);
1945#endif
1946}
1947
1948
1949#ifndef WINDOWS
1950#ifndef USE_SIGALTSTACK
1951void
1952arbstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1953{
1954  TCR *tcr = get_interrupt_tcr(false);
1955  area *vs = tcr->vs_area;
1956  BytePtr current_sp = (BytePtr) current_stack_pointer();
1957
1958  if ((current_sp >= vs->low) &&
1959      (current_sp < vs->high)) {
1960    handle_signal_on_foreign_stack(tcr,
1961                                   interrupt_handler,
1962                                   signum,
1963                                   info,
1964                                   context,
1965                                   (LispObj)__builtin_return_address(0)
1966                                   );
1967  } else {
1968    /* If we're not on the value stack, we pretty much have to be on
1969       the C stack.  Just run the handler. */
1970    interrupt_handler(signum, info, context);
1971  }
1972}
1973
1974#else /* altstack works */
1975
1976/*
1977   There aren't likely to be any JVM-related signal-chaining issues here, since
1978   on platforms where that could be an issue we use either an RT signal
1979   or an unused synchronous hardware signal to raise interrupts.
1980*/
1981void
1982altstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1983{
1984  TCR *tcr = get_interrupt_tcr(false);
1985  handle_signal_on_foreign_stack(tcr,interrupt_handler,signum,info,context,(LispObj)__builtin_return_address(0)
1986                                 );
1987}
1988
1989#endif
1990#endif
1991
1992#ifndef WINDOWS
1993void
1994install_signal_handler(int signo, void *handler, unsigned flags)
1995{
1996  struct sigaction sa;
1997  int err;
1998 
1999  sa.sa_sigaction = (void *)handler;
2000  sigfillset(&sa.sa_mask);
2001#ifdef FREEBSD
2002  /* Strange FreeBSD behavior wrt synchronous signals */
2003  sigdelset(&sa.sa_mask,SIGTRAP);  /* let GDB work */
2004#endif
2005  sa.sa_flags = SA_SIGINFO;
2006
2007#ifdef USE_SIGALTSTACK
2008  if (flags & ON_ALTSTACK)
2009    sa.sa_flags |= SA_ONSTACK;
2010#endif
2011  if (flags & RESTART_SYSCALLS)
2012    sa.sa_flags |= SA_RESTART;
2013  if (flags & RESERVE_FOR_LISP) {
2014    extern sigset_t user_signals_reserved;
2015    sigaddset(&user_signals_reserved, signo);
2016  }
2017
2018  err = sigaction(signo, &sa, NULL);
2019  if (err) {
2020    perror("sigaction");
2021    exit(1);
2022  }
2023}
2024#endif
2025
2026#ifdef WINDOWS
2027BOOL
2028CALLBACK ControlEventHandler(DWORD event)
2029{
2030  switch(event) {
2031  case CTRL_C_EVENT:
2032    lisp_global(INTFLAG) = (1 << fixnumshift);
2033    return TRUE;
2034    break;
2035  case CTRL_BREAK_EVENT:
2036    lisp_global(INTFLAG) = (2 << fixnumshift);
2037    return TRUE;
2038    break;
2039  default:
2040    return FALSE;
2041  }
2042}
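/* ControlEventHandler matches the Win32 PHANDLER_ROUTINE signature; it is
   presumably registered with the console subsystem elsewhere in the runtime.
   An illustrative registration sketch (not compiled, not the runtime's
   actual call site):

#if 0
  if (!SetConsoleCtrlHandler(ControlEventHandler, TRUE)) {
    // Registration failed; Ctrl-C/Ctrl-Break would get default handling.
  }
#endif
*/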
2043
2044static
2045DWORD mxcsr_bit_to_fpe_code[] = {
2046  EXCEPTION_FLT_INVALID_OPERATION, /* ie */
2047  0,                            /* de */
2048  EXCEPTION_FLT_DIVIDE_BY_ZERO, /* ze */
2049  EXCEPTION_FLT_OVERFLOW,       /* oe */
2050  EXCEPTION_FLT_UNDERFLOW,      /* ue */
2051  EXCEPTION_FLT_INEXACT_RESULT  /* pe */
2052};
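/* For reference (standard IA-32/Intel 64 MXCSR layout): the exception
   *flag* bits scanned by the loop below are bits 0..5 (IE, DE, ZE, OE,
   UE, PE) and the corresponding *mask* bits are bits 7..12 (IM, DM, ZM,
   OM, UM, PM); bit 6 is DAZ.  The loop pairs flag bit i with mask bit
   MXCSR_IM_BIT + i, and reports the first exception that is both raised
   and unmasked. */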
2053
2054#ifndef STATUS_FLOAT_MULTIPLE_FAULTS
2055#define STATUS_FLOAT_MULTIPLE_FAULTS 0xc00002b4
2056#endif
2057
2058#ifndef STATUS_FLOAT_MULTIPLE_TRAPS
2059#define  STATUS_FLOAT_MULTIPLE_TRAPS 0xc00002b5
2060#endif
2061
2062int
2063map_windows_exception_code_to_posix_signal(DWORD code, siginfo_t *info, ExceptionInformation *context)
2064{
2065  switch (code) {
2066#ifdef WIN_32
2067  case STATUS_FLOAT_MULTIPLE_FAULTS:
2068  case STATUS_FLOAT_MULTIPLE_TRAPS:
2069    {
2070      int xbit, maskbit;
2071      DWORD mxcsr = *(xpMXCSRptr(context));
2072
2073      for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
2074        if ((mxcsr & (1 << xbit)) &&
2075            !(mxcsr & (1 << maskbit))) {
2076          info->ExceptionCode = mxcsr_bit_to_fpe_code[xbit];
2077          break;
2078        }
2079      }
2080    }
2081    return SIGFPE;
2082#endif
2083     
2084  case EXCEPTION_ACCESS_VIOLATION:
2085    return SIGSEGV;
2086  case EXCEPTION_FLT_DENORMAL_OPERAND:
2087  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2088  case EXCEPTION_FLT_INEXACT_RESULT:
2089  case EXCEPTION_FLT_INVALID_OPERATION:
2090  case EXCEPTION_FLT_OVERFLOW:
2091  case EXCEPTION_FLT_STACK_CHECK:
2092  case EXCEPTION_FLT_UNDERFLOW:
2093  case EXCEPTION_INT_DIVIDE_BY_ZERO:
2094  case EXCEPTION_INT_OVERFLOW:
2095    return SIGFPE;
2096  case EXCEPTION_PRIV_INSTRUCTION:
2097  case EXCEPTION_ILLEGAL_INSTRUCTION:
2098    return SIGILL;
2099  case EXCEPTION_IN_PAGE_ERROR:
2100  case STATUS_GUARD_PAGE_VIOLATION:
2101    return SIGBUS;
2102  default:
2103    return -1;
2104  }
2105}
2106
2107
2108LONG
2109windows_exception_handler(EXCEPTION_POINTERS *exception_pointers, TCR *tcr, int signal_number)
2110{
2111  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
2112  int old_valence;
2113  ExceptionInformation *context = exception_pointers->ContextRecord;
2114  siginfo_t *info = exception_pointers->ExceptionRecord;
2115  xframe_list xframes;
2116
2117  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2118  wait_for_exception_lock_in_handler(tcr, context, &xframes);
2119
2120
2121 
2122  if (!handle_exception(signal_number, info, context, tcr, old_valence)) {
2123    char msg[512];
2124    Boolean foreign = (old_valence != TCR_STATE_LISP);
2125
2126    snprintf(msg, sizeof(msg), "Unhandled exception %d (windows code 0x%x) at 0x%Ix, context->regs at 0x%Ix", signal_number, code, xpPC(context), (natural)xpGPRvector(context));
2127   
2128    if (lisp_Debugger(context, info, signal_number,  foreign, msg)) {
2129      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2130    }
2131  }
2132  unlock_exception_lock_in_handler(tcr);
2133  return restore_windows_context(context, tcr, old_valence);
2134}
2135
2136void
2137setup_exception_handler_call(CONTEXT *context,
2138                             LispObj new_sp,
2139                             void *handler,
2140                             EXCEPTION_POINTERS *new_ep,
2141                             TCR *tcr,
2142                             int signal_number)
2143{
2144  extern void windows_halt(void);
2145  LispObj *p = (LispObj *)new_sp;
2146#ifdef WIN_64
2147  p-=4;                         /* win64 abi argsave nonsense: the 32-byte register-arg home area the caller must reserve */
2148  *(--p) = (LispObj)windows_halt;
2149  context->Rsp = (DWORD64)p;
2150  context->Rip = (DWORD64)handler;
2151  context->Rcx = (DWORD64)new_ep;
2152  context->Rdx = (DWORD64)tcr;
2153  context->R8 = (DWORD64)signal_number;
2154#else
2155  p-=4;                          /* args on stack, stack aligned */
2156  p[0] = (LispObj)new_ep;
2157  p[1] = (LispObj)tcr;
2158  p[2] = signal_number;
2159  *(--p) = (LispObj)windows_halt;
2160  context->Esp = (DWORD)p;
2161  context->Eip = (DWORD)handler;
2162#endif
2163  context->EFlags &= ~0x400;  /* clear direction flag */
2164}
2165
2166void
2167prepare_to_handle_windows_exception_on_foreign_stack(TCR *tcr,
2169                                                     CONTEXT *context,
2170                                                     void *handler,
2171                                                     EXCEPTION_POINTERS *original_ep, 
2172                                                     int signal_number)
2173{
2174  LispObj foreign_rsp = 
2175    (LispObj) (tcr->foreign_sp - 128) & ~15;
2176  CONTEXT *new_context;
2177  siginfo_t *new_info;
2178  EXCEPTION_POINTERS *new_ep;
2179
2180  new_context = ((CONTEXT *)(foreign_rsp&~15))-1;
2181  *new_context = *context;
2182  foreign_rsp = (LispObj)new_context;
2183  new_info = ((siginfo_t *)(foreign_rsp&~15))-1;
2184  *new_info = *original_ep->ExceptionRecord;
2185  foreign_rsp = (LispObj)new_info;
2186  new_ep = ((EXCEPTION_POINTERS *)(foreign_rsp&~15))-1;
2187  foreign_rsp = (LispObj)new_ep & ~15;
2188  new_ep->ContextRecord = new_context;
2189  new_ep->ExceptionRecord = new_info;
2190  setup_exception_handler_call(context,foreign_rsp,handler,new_ep, tcr, signal_number);
2191}
2192
2193LONG CALLBACK
2194windows_arbstack_exception_handler(EXCEPTION_POINTERS *exception_pointers)
2195{
2196  extern void ensure_safe_for_string_operations(void);
2197  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
2198  siginfo_t *info = exception_pointers->ExceptionRecord;
2199  ExceptionInformation *context = exception_pointers->ContextRecord;
2200  int signal_number = map_windows_exception_code_to_posix_signal(code, info, context);
2201
2202
2203  if (signal_number <= 0) {
2204    return EXCEPTION_CONTINUE_SEARCH;
2205  } else {
2206    TCR *tcr = get_interrupt_tcr(false);
2207    area *cs = TCR_AUX(tcr)->cs_area;
2208    BytePtr current_sp = (BytePtr) current_stack_pointer();
2209   
2210    ensure_safe_for_string_operations();
2211
2212    if ((current_sp >= cs->low) &&
2213        (current_sp < cs->high)) {
2214      debug_show_registers(context, exception_pointers->ExceptionRecord, 0);
2215      FBug(context, "Exception on foreign stack\n");
2216      return EXCEPTION_CONTINUE_EXECUTION;
2217    }
2218
2219    prepare_to_handle_windows_exception_on_foreign_stack(tcr,
2220                                                         context,
2221                                                         windows_exception_handler,
2222                                                         exception_pointers,
2223                                                         signal_number);
2224    return EXCEPTION_CONTINUE_EXECUTION;
2225  }
2226}
2227
2228
2229void
2230install_pmcl_exception_handlers()
2231{
2232  AddVectoredExceptionHandler(1,windows_arbstack_exception_handler);
2233}
2234#else
2235void
2236install_pmcl_exception_handlers()
2237{
2238  void *handler, *interrupt_handler;
2239
2240#ifdef USE_SIGALTSTACK
2241  handler = (void *)altstack_signal_handler;
2242  interrupt_handler = (void *)altstack_interrupt_handler;
2243#else
2244  handler = (void *)arbstack_signal_handler;
2245  interrupt_handler = (void *)arbstack_interrupt_handler;
2246#endif
2247
2248#ifndef DARWIN
2249  install_signal_handler(SIGILL, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2250  install_signal_handler(SIGBUS, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2251  install_signal_handler(SIGSEGV, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2252  install_signal_handler(SIGFPE, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2253#endif
2254 
2255  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT, interrupt_handler,
2256                         RESERVE_FOR_LISP|ON_ALTSTACK);
2257  signal(SIGPIPE, SIG_IGN);
2258}
2259#endif
2260
2261
2262
2263/* This should only be called when the tcr_area_lock is held */
2264void
2265empty_tcr_stacks(TCR *tcr)
2266{
2267  if (tcr) {
2268    area *a;
2269
2270    tcr->valence = TCR_STATE_FOREIGN;
2271    a = tcr->vs_area;
2272    if (a) {
2273      a->active = a->high;
2274    }
2275    a = tcr->ts_area;
2276    if (a) {
2277      a->active = a->high;
2278    }
2279    a = TCR_AUX(tcr)->cs_area;
2280    if (a) {
2281      a->active = a->high;
2282    }
2283  }
2284}
2285
2286#ifdef WINDOWS
2287void
2288thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2289{
2290}
2291#else
2292void
2293thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2294{
2295  TCR *tcr = get_tcr(false);
2296  sigset_t mask;
2297
2298  sigemptyset(&mask);
2299
2300  empty_tcr_stacks(tcr);
2301
2302  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2303  pthread_exit(NULL);
2304}
2305#endif
2306
2307#ifndef WINDOWS
2308#ifndef USE_SIGALTSTACK
2309void
2310arbstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
2311{
2312  TCR *tcr = get_interrupt_tcr(false);
2313  area *vs = tcr->vs_area;
2314  BytePtr current_sp = (BytePtr) current_stack_pointer();
2315
2316  if ((current_sp >= vs->low) &&
2317      (current_sp < vs->high)) {
2318    handle_signal_on_foreign_stack(tcr,
2319                                   thread_kill_handler,
2320                                   signum,
2321                                   info,
2322                                   context,
2323                                   (LispObj)__builtin_return_address(0)
2324                                   );
2325  } else {
2326    /* If we're not on the value stack, we pretty much have to be on
2327       the C stack.  Just run the handler. */
2328    thread_kill_handler(signum, info, context);
2329  }
2330}
2331
2332
2333#else
2334void
2335altstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
2336{
2337  TCR* tcr = get_tcr(true);
2338  handle_signal_on_foreign_stack(tcr,
2339                                 thread_kill_handler,
2340                                 signum,
2341                                 info,
2342                                 context,
2343                                 (LispObj)__builtin_return_address(0)
2344                                 );
2345}
2346#endif
2347#endif
2348
2349#ifdef USE_SIGALTSTACK
2350#define THREAD_KILL_HANDLER altstack_thread_kill_handler
2351#else
2352#define THREAD_KILL_HANDLER arbstack_thread_kill_handler
2353#endif
2354
2355#ifdef WINDOWS
2356void
2357thread_signal_setup()
2358{
2359}
2360#else
2361void
2362thread_signal_setup()
2363{
2364  thread_suspend_signal = SIG_SUSPEND_THREAD;
2365  thread_kill_signal = SIG_KILL_THREAD;
2366
2367  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
2368                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
2369  install_signal_handler(thread_kill_signal, (void *)THREAD_KILL_HANDLER,
2370                         RESERVE_FOR_LISP|ON_ALTSTACK);
2371}
2372#endif
2373
2374void
2375enable_fp_exceptions()
2376{
2377}
2378
2379void
2380exception_init()
2381{
2382  x86_early_exception_init();
2383  install_pmcl_exception_handlers();
2384}
2385
2386void
2387adjust_exception_pc(ExceptionInformation *xp, int delta)
2388{
2389  xpPC(xp) += delta;
2390}
2391
2392/*
2393  Lower (move toward 0) the "end" of the soft protected area associated
2394  with the area 'a' by one page, if we can.
2395*/
2396
2397void
2398adjust_soft_protection_limit(area *a)
2399{
2400  char *proposed_new_soft_limit = a->softlimit - 4096;
2401  protected_area_ptr p = a->softprot;
2402 
2403  if (proposed_new_soft_limit >= (p->start+16384)) {
2404    p->end = proposed_new_soft_limit;
2405    p->protsize = p->end-p->start;
2406    a->softlimit = proposed_new_soft_limit;
2407  }
2408  protect_area(p);
2409}
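/* Worked example (illustrative numbers): if a->softlimit is currently
   p->start + 20480, the proposed limit p->start + 16384 passes the
   (>= p->start + 16384) test, so the protected region shrinks by one
   4K page.  A further call would propose p->start + 12288, fail the
   test, and leave the limits alone - though protect_area() is still
   re-applied either way.  The net effect is that at least 16KB of the
   soft guard region is always preserved. */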
2410
2411void
2412restore_soft_stack_limit(unsigned restore_tsp)
2413{
2414  TCR *tcr = get_tcr(false);
2415  area *a;
2416 
2417  if (restore_tsp) {
2418    a = tcr->ts_area;
2419  } else {
2420    a = tcr->vs_area;
2421  }
2422  adjust_soft_protection_limit(a);
2423}
2424
2425
2426#ifdef USE_SIGALTSTACK
2427void
2428setup_sigaltstack(area *a)
2429{
2430  stack_t stack;
2431
2432  stack.ss_size = SIGSTKSZ*8;
2433  stack.ss_flags = 0;
2434  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
2435#ifdef LINUX
2436  /* The ucontext pushed on the altstack may not contain the (largish)
2437     __fpregs_mem field; copy_ucontext() wants to copy what it thinks
2438     is a pointer to a full ucontext.  That'll touch a page beyond the
2439     bottom of the altstack, and when this happens on the initial
2440     thread's stack on a recent (2.6.32+?) kernel, we'll SIGBUS instead
2441     of mapping that page.
2442     It's easier to just reserve that page here than it would be to
2443     change copy_ucontext().
2444  */
2445  stack.ss_size -= sizeof(struct ucontext);
2446#endif
2447  if (sigaltstack(&stack, NULL) != 0) {
2448    perror("sigaltstack");
2449    exit(-1);
2450  }
2451}
2452#endif
2453
2454extern opcode egc_write_barrier_start, egc_write_barrier_end,
2455  egc_set_hash_key_conditional, egc_set_hash_key_conditional_success_test,
2456  egc_set_hash_key_conditional_retry,
2457  egc_store_node_conditional_success_end, egc_store_node_conditional_retry,
2458  egc_store_node_conditional_success_test,egc_store_node_conditional,
2459  egc_set_hash_key, egc_gvset, egc_rplacd, egc_rplaca;
2460
2461/* We use (extremely) rigidly defined instruction sequences for consing,
2462   mostly so that 'pc_luser_xp()' knows what to do if a thread is interrupted
2463   while consing.
2464
2465   Note that we can usually identify which of these instructions is about
2466   to be executed by a stopped thread without comparing all of the bytes
2467   to those at the stopped program counter, but we generally need to
2468   know the sizes of each of these instructions.
2469*/
2470
2471#ifdef X8664
2472opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
2473#ifdef TCR_IN_GPR
2474  {0x49,0x8b,0x9b,0xd8,0x00,0x00,0x00}
2475#else
2476  {0x65,0x48,0x8b,0x1c,0x25,0xd8,0x00,0x00,0x00}
2477#endif
2478;
2479opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
2480#ifdef TCR_IN_GPR
2481  {0x49,0x3b,0x9b,0xe0,0x00,0x00,0x00}
2482#else
2483  {0x65,0x48,0x3b,0x1c,0x25,0xe0,0x00,0x00,0x00}
2484#endif
2485
2486;
2487opcode branch_around_alloc_trap_instruction[] =
2488  {0x77,0x02};
2489opcode alloc_trap_instruction[] =
2490  {0xcd,0xc5};
2491opcode clear_tcr_save_allocptr_tag_instruction[] =
2492#ifdef TCR_IN_GPR
2493  {0x41,0x80,0xa3,0xd8,0x00,0x00,0x00,0xf0}
2494#else
2495  {0x65,0x80,0x24,0x25,0xd8,0x00,0x00,0x00,0xf0}
2496#endif
2497;
2498opcode set_allocptr_header_instruction[] =
2499  {0x48,0x89,0x43,0xf3};
2500
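/* A best-effort decoding of the non-TCR_IN_GPR byte sequences above
   (AT&T syntax), for reference when reading pc_luser_xp() below; the
   %gs displacements 0xd8/0xe0 are the tcr save_allocptr/save_allocbase
   offsets the array names refer to:

     movq %gs:0xd8, %rbx        ; load allocptr reg from tcr.save_allocptr
     cmpq %gs:0xe0, %rbx        ; compare against tcr.save_allocbase
     ja   1f                    ; branch around the alloc trap (0x77 0x02)
     int  $0xc5                 ; alloc trap
  1: movq %rax, -13(%rbx)       ; set header of new uvector (uvectors only)
     andb $0xf0, %gs:0xd8       ; clear the tag bits in tcr.save_allocptr
*/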
2501
2502alloc_instruction_id
2503recognize_alloc_instruction(pc program_counter)
2504{
2505  switch(program_counter[0]) {
2506  case 0xcd: return ID_alloc_trap_instruction;
2507  /* 0x7f is jg, which we used to use here instead of ja */
2508  case 0x7f:
2509  case 0x77: return ID_branch_around_alloc_trap_instruction;
2510  case 0x48: return ID_set_allocptr_header_instruction;
2511#ifdef TCR_IN_GPR
2512  case 0x41: return ID_clear_tcr_save_allocptr_tag_instruction;
2513  case 0x49:
2514    switch(program_counter[1]) {
2515    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2516    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2517    }
2518#else
2519  case 0x65: 
2520    switch(program_counter[1]) {
2521    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
2522    case 0x48:
2523      switch(program_counter[2]) {
2524      case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2525      case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2526      }
2527    }
2528#endif
2529  default: break;
2530  }
2531  return ID_unrecognized_alloc_instruction;
2532}
2533#endif
2534#ifdef X8632
2535#define TCR_SEG_PREFIX 0x64
2536
2537#ifdef WIN_32
2538#define SAVE_ALLOCPTR 0x9c,0x0e,0x0,0x0
2539#define SAVE_ALLOCBASE 0x98,0x0e,0x0,0x0
2540#else
2541#define SAVE_ALLOCPTR 0x84,0x0,0x0,0x0
2542#define SAVE_ALLOCBASE 0x88,0x0,0x0,0x0
2543#endif
2544
2545opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
2546  {TCR_SEG_PREFIX,0x8b,0x0d,SAVE_ALLOCPTR};
2547opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
2548  {TCR_SEG_PREFIX,0x3b,0x0d,SAVE_ALLOCBASE};
2549opcode branch_around_alloc_trap_instruction[] =
2550  {0x77,0x02};
2551opcode alloc_trap_instruction[] =
2552  {0xcd,0xc5};
2553opcode clear_tcr_save_allocptr_tag_instruction[] =
2554  {TCR_SEG_PREFIX,0x80,0x25,SAVE_ALLOCPTR,0xf8};
2555opcode set_allocptr_header_instruction[] =
2556  {0x0f,0x7e,0x41,0xfa};
2557
2558alloc_instruction_id
2559recognize_alloc_instruction(pc program_counter)
2560{
2561  switch(program_counter[0]) {
2562  case 0xcd: return ID_alloc_trap_instruction;
2563  case 0x77: return ID_branch_around_alloc_trap_instruction;
2564  case 0x0f: return ID_set_allocptr_header_instruction;
2565  case 0x64: 
2566    switch(program_counter[1]) {
2567    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
2568    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2569    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2570    }
2571  }
2572  return ID_unrecognized_alloc_instruction;
2573}
2574#endif     
2575
2576void
2577pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *interrupt_displacement)
2578{
2579  pc program_counter = (pc)xpPC(xp);
2580  int allocptr_tag = fulltag_of((LispObj)(tcr->save_allocptr));
2581
2582  if (allocptr_tag != 0) {
2583    alloc_instruction_id state = recognize_alloc_instruction(program_counter);
2584    signed_natural
2585      disp = (allocptr_tag == fulltag_cons) ?
2586      sizeof(cons) - fulltag_cons :
2587#ifdef X8664
2588      xpGPR(xp,Iimm1)
2589#else
2590      xpGPR(xp,Iimm0)
2591#endif
2592      ;
2593    LispObj new_vector;
2594
2595    if ((state == ID_unrecognized_alloc_instruction) ||
2596        ((state == ID_set_allocptr_header_instruction) &&
2597         (allocptr_tag != fulltag_misc))) {
2598      Bug(xp, "Can't determine state of thread 0x" LISP ", interrupted during memory allocation", tcr);
2599    }
2600    switch(state) {
2601    case ID_set_allocptr_header_instruction:
2602      /* We were consing a vector and we won.  Set the header of the
2603         new vector (in the allocptr register) to the header in %rax
2604         (%mm0 on ia32) and skip over this instruction, then fall into
2605         the next case. */
2606      new_vector = xpGPR(xp,Iallocptr);
2607      deref(new_vector,0) = 
2608#ifdef X8664
2609        xpGPR(xp,Iimm0)
2610#else
2611        xpMMXreg(xp,Imm0)
2612#endif
2613        ;
2614     
2615      xpPC(xp) += sizeof(set_allocptr_header_instruction);
2616
2617      /* Fall thru */
2618    case ID_clear_tcr_save_allocptr_tag_instruction:
2619      tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
2620      xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
2621
2622      break;
2623    case ID_alloc_trap_instruction:
2624      /* If we're looking at another thread, we're pretty much committed to
2625         taking the trap.  We don't want the allocptr register to be pointing
2626         into the heap, so make it point to (- VOID_ALLOCPTR disp), where 'disp'
2627         was determined above.
2628      */
2629      if (interrupt_displacement == NULL) {
2630        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR - disp;
2631        tcr->save_allocptr = (void *)(VOID_ALLOCPTR - disp);
2632      } else {
2633        /* Back out, and tell the caller how to resume the allocation attempt */
2634        *interrupt_displacement = disp;
2635        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2636        tcr->save_allocptr += disp;
2637        xpPC(xp) -= (sizeof(branch_around_alloc_trap_instruction)+
2638                     sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
2639                     sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
2640      }
2641      break;
2642    case ID_branch_around_alloc_trap_instruction:
2643      /* If we'd take the branch - which is a "ja" - around the alloc trap,
2644         we might as well finish the allocation.  Otherwise, back out of the
2645         attempt. */
2646      {
2647        int flags = (int)eflags_register(xp);
2648       
2649        if ((!(flags & (1 << X86_ZERO_FLAG_BIT))) &&
2650            (!(flags & (1 << X86_CARRY_FLAG_BIT)))) {
2651          /* The branch (ja) would have been taken.  Emulate taking it. */
2652          xpPC(xp) += (sizeof(branch_around_alloc_trap_instruction)+
2653                       sizeof(alloc_trap_instruction));
2654          if (allocptr_tag == fulltag_misc) {
2655            /* Slap the header on the new uvector */
2656            new_vector = xpGPR(xp,Iallocptr);
2657#ifdef X8664
2658            deref(new_vector,0) = xpGPR(xp,Iimm0);
2659#else
2660            deref(new_vector,0) = xpMMXreg(xp,Imm0);
2661#endif
2662            xpPC(xp) += sizeof(set_allocptr_header_instruction);
2663          }
2664          tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
2665          xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
2666        } else {
2667          /* Back up */
2668          xpPC(xp) -= (sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
2669                       sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
2670          xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2671          if (interrupt_displacement) {
2672            *interrupt_displacement = disp;
2673            tcr->save_allocptr += disp;
2674          } else {
2675            tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
2676          }
2677        }
2678      }
2679      break;
2680    case ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction:
2681      xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2682      xpPC(xp) -= sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction);
2683      /* Fall through */
2684    case ID_load_allocptr_reg_from_tcr_save_allocptr_instruction:
2685      if (interrupt_displacement) {
2686        tcr->save_allocptr += disp;
2687        *interrupt_displacement = disp;
2688      } else {
2689        tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
2690      }
2691      break;
2692    default: 
2693      break;
2694    }
2695    return;
2696  }
2697  if ((program_counter >= &egc_write_barrier_start) &&
2698      (program_counter < &egc_write_barrier_end)) {
2699    LispObj *ea = 0, val, root = 0;
2700    bitvector refbits = (bitvector)(lisp_global(REFBITS));
2701    Boolean  need_check_memo = true, need_memoize_root = false;
2702
2703    if (program_counter >= &egc_set_hash_key_conditional) {
2704      if (program_counter <= &egc_set_hash_key_conditional_retry) {
2705        return;
2706      }
2707      if (program_counter < &egc_set_hash_key_conditional_success_test) {
2708        /* Back up the PC, try again.  This is necessary since a pending
2709           GC may cause the value in %eax/%rax to move and backing up
2710           will reload %eax/%rax from a node register before trying the
2711           cmpxchg.
2712        */
2713        xpPC(xp) = (LispObj) &egc_set_hash_key_conditional_retry;
2714        return;
2715      }
2716      if ((program_counter == &egc_set_hash_key_conditional_success_test) &&
2717          !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT))) {
2718        /* Conditional store failed.  Return NIL. */
2719        LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2720        xpGPR(xp,Iarg_z) = lisp_nil;
2721        xpPC(xp) = ra;
2722        xpGPR(xp,Isp)=(LispObj)sp;
2723        return;
2724      }
2725      /* The conditional store succeeded.  Set the refbit, return to ra0 */
2726      val = xpGPR(xp,Iarg_z);
2727#ifdef X8664
2728      root = xpGPR(xp,Iarg_x);
2729      ea = (LispObj*)(root + (unbox_fixnum((signed_natural) xpGPR(xp,Itemp0))));
2730#else
2731      root = xpGPR(xp,Itemp1);
2732      ea = (LispObj *)(root + misc_data_offset + xpGPR(xp,Itemp0));
2733#endif
2734      need_memoize_root = true;
2735      xpGPR(xp,Iarg_z) = t_value;
2736    } else if (program_counter >= &egc_store_node_conditional) {
2737      if (program_counter <= &egc_store_node_conditional_retry) {
2738        return;
2739      }
2740      if (program_counter < &egc_store_node_conditional_success_test) {
2741        /* Back up the PC, try again.  Again, this is necessary because
2742           we're possibly keeping a node in %eax/%rax and haven't completed
2743           the cmpxchg yet. */
2744        xpPC(xp) = (LispObj) &egc_store_node_conditional_retry;
2745        return;
2746      }
2747      if ((program_counter == &egc_store_node_conditional_success_test) &&
2748           !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT))) {
2749        /* cmpxchg failed.  Return NIL. */
2750        LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2751        xpGPR(xp,Iarg_z) = lisp_nil;
2752        xpPC(xp) = ra;
2753        xpGPR(xp,Isp)=(LispObj)sp;
2754        return;
2755      }
2756
2757      if (program_counter >= &egc_store_node_conditional_success_end) {
2758        return;
2759      }
2760
2761      /* The conditional store succeeded.  Set the refbit, return to ra0 */
2762      val = xpGPR(xp,Iarg_z);
2763#ifdef X8664
2764      ea = (LispObj*)(xpGPR(xp,Iarg_x) + (unbox_fixnum((signed_natural)
2765                                                       xpGPR(xp,Itemp0))));
2766#else
2767      ea = (LispObj *)(misc_data_offset + xpGPR(xp,Itemp1) + xpGPR(xp,Itemp0));
2768#endif
2769      xpGPR(xp,Iarg_z) = t_value;
2770    } else if (program_counter >= &egc_set_hash_key) {
2771      if (program_counter == &egc_set_hash_key) {
2772        return;
2773      }
2774#ifdef X8664
2775      root = xpGPR(xp,Iarg_x);
2776#else
2777      root = xpGPR(xp,Itemp0);
2778#endif
2779      ea = (LispObj *) (root+xpGPR(xp,Iarg_y)+misc_data_offset);
2780      val = xpGPR(xp,Iarg_z);
2781      need_memoize_root = true;
2782    } else if (program_counter >= &egc_gvset) {
2783      /* This assumes that the store is the first instruction at _SPgvset.
2784         As of late February 2013 (just before the 1.9 release), that's
2785         a relatively recent change.
2786         If the store has already completed, don't do it again.
2787         See ticket:1058 for an example showing why this matters.
2788      */
2789      if (program_counter == &egc_gvset) {
2790        return;
2791      }
2792#ifdef X8664
2793      ea = (LispObj *) (xpGPR(xp,Iarg_x)+xpGPR(xp,Iarg_y)+misc_data_offset);
2794#else
2795      ea = (LispObj *) (xpGPR(xp,Itemp0)+xpGPR(xp,Iarg_y)+misc_data_offset);
2796#endif
2797      val = xpGPR(xp,Iarg_z);
2798    } else if (program_counter >= &egc_rplacd) {
2799      if (program_counter == &egc_rplacd) {
2800        return;
2801      }
2802      ea = (LispObj *) untag(xpGPR(xp,Iarg_y));
2803      val = xpGPR(xp,Iarg_z);
2804    } else {                      /* egc_rplaca */
2805      if (program_counter == &egc_rplaca) {
2806        return;
2807      }
2808      ea =  ((LispObj *) untag(xpGPR(xp,Iarg_y)))+1;
2809      val = xpGPR(xp,Iarg_z);
2810    }
2811    if (need_check_memo) {
2812      if ((LispObj)ea < val) {
2813        natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE)),
2814          rootbitnumber = area_dnode(root, lisp_global(REF_BASE));
2815        if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT))) {
2816          atomic_set_bit(refbits, bitnumber);
2817          atomic_set_bit(global_refidx,bitnumber>>8);
2818          if (need_memoize_root) {
2819            atomic_set_bit(refbits, rootbitnumber);
2820            atomic_set_bit(global_refidx,rootbitnumber>>8);
2821          }
2822        }
2823        if (bitnumber < lisp_global(MANAGED_STATIC_DNODES)) {
2824          atomic_set_bit(managed_static_refbits,bitnumber);
2825          atomic_set_bit(managed_static_refidx,bitnumber>>8);
2826          if (need_memoize_root) {
2827            atomic_set_bit(managed_static_refbits, rootbitnumber);
2828            atomic_set_bit(managed_static_refidx,rootbitnumber>>8);
2829          }
2830        }
2831      }
2832    }
2833    {
2834      /* These subprimitives are called via CALL/RET; need
2835         to pop the return address off the stack and set
2836         the PC there. */
2837      LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2838      xpPC(xp) = ra;
2839      xpGPR(xp,Isp)=(LispObj)sp;
2840    }
2841    return;
2842  }
2843}
2844
2845
2846void
2847normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
2848{
2849  void *cur_allocptr = (void *)(tcr->save_allocptr);
2850  LispObj lisprsp;
2851  area *a;
2852
2853  if (xp) {
2854    if (is_other_tcr) {
2855      pc_luser_xp(xp, tcr, NULL);
2856    }
2857    a = tcr->vs_area;
2858    lisprsp = xpGPR(xp, Isp);
2859    if (((BytePtr)lisprsp >= a->low) &&
2860        ((BytePtr)lisprsp < a->high)) {
2861      a->active = (BytePtr)lisprsp;
2862    } else {
2863      a->active = (BytePtr) tcr->save_vsp;
2864    }
2865    a = tcr->ts_area;
2866    a->active = (BytePtr) tcr->save_tsp;
2867  } else {
2868    /* In ff-call; get area active pointers from tcr */
2869    tcr->vs_area->active = (BytePtr) tcr->save_vsp;
2870    tcr->ts_area->active = (BytePtr) tcr->save_tsp;
2871  }
2872  if (cur_allocptr) {
2873    update_bytes_allocated(tcr, cur_allocptr);
2874  }
2875  tcr->save_allocbase = (void *)VOID_ALLOCPTR;
2876  if (fulltag_of((LispObj)(tcr->save_allocptr)) == 0) {
2877    tcr->save_allocptr = (void *)VOID_ALLOCPTR;
2878  }
2879}
2880
2881
2882/* Suspend and "normalize" other tcrs, then call a gc-like function
2883   in that context.  Resume the other tcrs, then return what the
2884   function returned */
2885
2886TCR *gc_tcr = NULL;
2887
2888
2889signed_natural
2890gc_like_from_xp(ExceptionInformation *xp, 
2891                signed_natural(*fun)(TCR *, signed_natural), 
2892                signed_natural param)
2893{
2894  TCR *tcr = get_tcr(false), *other_tcr;
2895  int result;
2896  signed_natural inhibit, barrier = 0;
2897
2898  atomic_incf(&barrier);
2899  suspend_other_threads(true);
2900  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
2901  if (inhibit != 0) {
2902    if (inhibit > 0) {
2903      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
2904    }
2905    atomic_decf(&barrier);
2906    resume_other_threads(true);
2907    gc_deferred++;
2908    return 0;
2909  }
2910  gc_deferred = 0;
2911
2912  gc_tcr = tcr;
2913
2914  /* This is generally necessary if the current thread invoked the GC
2915     via an alloc trap, and harmless if the GC was invoked via a GC
2916     trap.  (It's necessary in the first case because the "allocptr"
2917     register - %rbx - may be pointing into the middle of something
2918     below tcr->save_allocbase, and we wouldn't want the GC to see
2919     that bogus pointer.) */
2920  xpGPR(xp, Iallocptr) = VOID_ALLOCPTR; 
2921
2922  normalize_tcr(xp, tcr, false);
2923
2924
2925  for (other_tcr = TCR_AUX(tcr)->next; other_tcr != tcr;
2926       other_tcr = TCR_AUX(other_tcr)->next) {
2927    if (other_tcr->pending_exception_context) {
2928      TCR_AUX(other_tcr)->gc_context = other_tcr->pending_exception_context;
2929    } else if (other_tcr->valence == TCR_STATE_LISP) {
2930      TCR_AUX(other_tcr)->gc_context = TCR_AUX(other_tcr)->suspend_context;
2931    } else {
2932      /* no pending exception, didn't suspend in lisp state:
2933         must have executed a synchronous ff-call.
2934      */
2935      TCR_AUX(other_tcr)->gc_context = NULL;
2936    }
2937    normalize_tcr(TCR_AUX(other_tcr)->gc_context, other_tcr, true);
2938  }
2939   
2940
2941
2942  result = fun(tcr, param);
2943
2944  other_tcr = tcr;
2945  do {
2946    TCR_AUX(other_tcr)->gc_context = NULL;
2947    other_tcr = TCR_AUX(other_tcr)->next;
2948  } while (other_tcr != tcr);
2949
2950  gc_tcr = NULL;
2951
2952  atomic_decf(&barrier);
2953  resume_other_threads(true);
2954
2955  return result;
2956
2957}
2958
2959signed_natural
2960purify_from_xp(ExceptionInformation *xp, signed_natural param)
2961{
2962  return gc_like_from_xp(xp, purify, param);
2963}
2964
2965signed_natural
2966impurify_from_xp(ExceptionInformation *xp, signed_natural param)
2967{
2968  return gc_like_from_xp(xp, impurify, param);
2969}
2970
2971/* Returns #bytes freed by invoking GC */
2972
2973signed_natural
2974gc_from_tcr(TCR *tcr, signed_natural param)
2975{
2976  area *a;
2977  BytePtr oldfree, newfree;
2978  BytePtr oldend, newend;
2979
2980#if 0
2981  fprintf(dbgout, "Start GC  in 0x" LISP "\n", tcr);
2982#endif
2983  a = active_dynamic_area;
2984  oldend = a->high;
2985  oldfree = a->active;
2986  gc(tcr, param);
2987  newfree = a->active;
2988  newend = a->high;
2989#if 0
2990  fprintf(dbgout, "End GC  in 0x" LISP "\n", tcr);
2991#endif
2992  return ((oldfree-newfree)+(newend-oldend));
2993}
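/* Example of the accounting above (illustrative numbers): if the GC moved
   the active pointer down by 60MB (oldfree - newfree) and the dynamic
   area's high end also grew by 4MB (newend - oldend), the value returned
   is 64MB - i.e. space reclaimed within the old extent plus any space the
   area gained. */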
2994
2995signed_natural
2996gc_from_xp(ExceptionInformation *xp, signed_natural param)
2997{
2998  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
2999
3000  freeGCptrs();
3001  return status;
3002}
3003
3004#ifdef DARWIN
3005
3006#define TCR_FROM_EXCEPTION_PORT(p) find_tcr_from_exception_port(p)
3007#define TCR_TO_EXCEPTION_PORT(t) (mach_port_name_t)((natural) (((TCR *)t)->io_datum))
3008
3009
3010extern void pseudo_sigreturn(void);
3011
3012
3013
3014#define LISP_EXCEPTIONS_HANDLED_MASK \
3015 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
3016
3017/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
3018#define NUM_LISP_EXCEPTIONS_HANDLED 4
3019
3020typedef struct {
3021  int foreign_exception_port_count;
3022  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
3023  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
3024  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
3025  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
3026} MACH_foreign_exception_state;
3027
3028
3029
3030
3031/*
3032  Mach's exception mechanism works a little better than its signal
3033  mechanism (and, not incidentally, it gets along with GDB a lot
3034  better.)
3035
3036  Initially, we install an exception handler to handle each native
3037  thread's exceptions.  This process involves creating a distinguished
3038  thread which listens for kernel exception messages on a set of
3039  0 or more thread exception ports.  As threads are created, they're
3040  added to that port set; a thread's exception port is destroyed
3041  (and therefore removed from the port set) when the thread exits.
3042
3043  A few exceptions can be handled directly in the handler thread;
3044  others require that we resume the user thread (and that the
3045  exception thread resumes listening for exceptions.)  The user
3046  thread might eventually want to return to the original context
3047  (possibly modified somewhat.)
3048
3049  As it turns out, the simplest way to force the faulting user
3050  thread to handle its own exceptions is to do pretty much what
3051  signal() does: the exception handling thread sets up a sigcontext
3052  on the user thread's stack and forces the user thread to resume
3053  execution as if a signal handler had been called with that
3054  context as an argument.  We can use a distinguished UUO at a
3055  distinguished address to do something like sigreturn(); that'll
3056  have the effect of resuming the user thread's execution in
3057  the (pseudo-) signal context.
3058
3059  Since:
3060    a) we have miles of code in C and in Lisp that knows how to
3061    deal with Linux sigcontexts
3062    b) Linux sigcontexts contain a little more useful information
3063    (the DAR, DSISR, etc.) than their Darwin counterparts
3064    c) we have to create a sigcontext ourselves when calling out
3065    to the user thread: we aren't really generating a signal, just
3066    leveraging existing signal-handling code.
3067
3068  we create a Linux sigcontext struct.
3069
3070  Simple?  Hopefully from the outside it is ...
3071
3072  We want the process of passing a thread's own context to it to
3073  appear to be atomic: in particular, we don't want the GC to suspend
3074  a thread that's had an exception but has not yet had its user-level
3075  exception handler called, and we don't want the thread's exception
3076  context to be modified by a GC while the Mach handler thread is
3077  copying it around.  On Linux (and on Jaguar), we avoid this issue
3078  because (a) the kernel sets up the user-level signal handler and
3079  (b) the signal handler blocks signals (including the signal used
3080  by the GC to suspend threads) until tcr->xframe is set up.
3081
3082  The GC and the Mach server thread therefore contend for the lock
3083  "mach_exception_lock".  The Mach server thread holds the lock
3084  when copying exception information between the kernel and the
3085  user thread; the GC holds this lock during most of its execution
3086  (delaying exception processing until it can be done without
3087  GC interference.)
3088
3089*/
3090
3091#ifdef PPC64
3092#define C_REDZONE_LEN           320
3093#define C_STK_ALIGN             32
3094#else
3095#define C_REDZONE_LEN           224
3096#define C_STK_ALIGN             16
3097#endif
3098#define C_PARAMSAVE_LEN         64
3099#define C_LINKAGE_LEN           48
3100
3101#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
3102
3103void
3104fatal_mach_error(char *format, ...);
3105
3106#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
3107
3108
3109void
3110restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext, native_thread_state_t *ts)
3111{
3112  kern_return_t kret;
3113#if WORD_SIZE == 64
3114  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
3115#else
3116  mcontext_t mc = UC_MCONTEXT(pseudosigcontext);
3117#endif
3118
3119  /* Set the thread's FP state from the pseudosigcontext */
3120  kret = thread_set_state(thread,
3121                          NATIVE_FLOAT_STATE_FLAVOR,
3122                          (thread_state_t)&(mc->__fs),
3123                          NATIVE_FLOAT_STATE_COUNT);
3124  MACH_CHECK_ERROR("setting thread FP state", kret);
3125  *ts = mc->__ss;
3126} 
3127
3128kern_return_t
3129do_pseudo_sigreturn(mach_port_t thread, TCR *tcr, native_thread_state_t *out)
3130{
3131  ExceptionInformation *xp;
3132
3133#ifdef DEBUG_MACH_EXCEPTIONS
3134  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
3135#endif
3136  xp = tcr->pending_exception_context;
3137  if (xp) {
3138    tcr->pending_exception_context = NULL;
3139    tcr->valence = TCR_STATE_LISP;
3140    restore_mach_thread_state(thread, xp, out);
3141    raise_pending_interrupt(tcr);
3142  } else {
3143    Bug(NULL, "no xp here!\n");
3144  }
3145#ifdef DEBUG_MACH_EXCEPTIONS
3146  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
3147#endif
3148  return KERN_SUCCESS;
3149} 
3150
3151ExceptionInformation *
3152create_thread_context_frame(mach_port_t thread, 
3153                            natural *new_stack_top,
3154                            siginfo_t **info_ptr,
3155                            TCR *tcr,
3156                            native_thread_state_t *ts
3157                            )
3158{
3159  mach_msg_type_number_t thread_state_count;
3160  ExceptionInformation *pseudosigcontext;
3161#ifdef X8664
3162  MCONTEXT_T mc;
3163#else
3164  mcontext_t mc;
3165#endif
3166  natural stackp;
3167
3168#ifdef X8664 
3169  stackp = (LispObj) find_foreign_rsp(ts->__rsp,tcr->cs_area,tcr);
3170  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
3171#else
3172  stackp = (LispObj) find_foreign_rsp(ts->__esp, tcr->cs_area, tcr);
3173#endif
3174  stackp = TRUNC_DOWN(stackp, sizeof(siginfo_t), C_STK_ALIGN);
3175  if (info_ptr) {
3176    *info_ptr = (siginfo_t *)stackp;
3177  }
3178  stackp = TRUNC_DOWN(stackp,sizeof(*pseudosigcontext), C_STK_ALIGN);
3179  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
3180
3181  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
3182  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
3183 
3184  memmove(&(mc->__ss),ts,sizeof(*ts));
3185
3186  thread_state_count = NATIVE_FLOAT_STATE_COUNT;
3187  thread_get_state(thread,
3188                   NATIVE_FLOAT_STATE_FLAVOR,
3189                   (thread_state_t)&(mc->__fs),
3190                   &thread_state_count);
3191
3192  thread_state_count = NATIVE_EXCEPTION_STATE_COUNT;
3193  thread_get_state(thread,
3194                   NATIVE_EXCEPTION_STATE_FLAVOR,
3195                   (thread_state_t)&(mc->__es),
3196                   &thread_state_count);
3197
3198
3199  UC_MCONTEXT(pseudosigcontext) = mc;
3200  if (new_stack_top) {
3201    *new_stack_top = stackp;
3202  }
3203  return pseudosigcontext;
3204}
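/* Sketch of what create_thread_context_frame() lays out on the faulting
   thread's foreign stack (each item truncated down to C_STK_ALIGN;
   addresses decrease going down):

     find_foreign_rsp(...)             <- original foreign SP
     [C_REDZONE_LEN red zone]             (x8664 only)
     siginfo_t                         <- *info_ptr
     ExceptionInformation (ucontext)   <- pseudosigcontext
     mcontext                          <- mc, UC_MCONTEXT(pseudosigcontext),
                                          and *new_stack_top
*/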
3205
3206/*
3207  This code sets up the user thread so that it executes a "pseudo-signal
3208  handler" function when it resumes.  Create a fake ucontext struct
3209  on the thread's stack and pass it as an argument to the pseudo-signal
3210  handler.
3211
3212  Things are set up so that the handler "returns to" pseudo_sigreturn(),
3213  which will restore the thread's context.
3214
3215  If the handler invokes code that throws (or otherwise never sigreturn()'s
3216  to the context), that's fine.
3217
3218  Actually, check that: throw (and variants) may need to be careful and
3219  pop the tcr's xframe list until it's younger than any frame being
3220  entered.
3221*/
3222
3223int
3224setup_signal_frame(mach_port_t thread,
3225                   void *handler_address,
3226                   int signum,
3227                   int code,
3228                   TCR *tcr,
3229                   native_thread_state_t *ts,
3230                   native_thread_state_t *new_ts
3231                   )
3232{
3233  ExceptionInformation *pseudosigcontext;
3234  int  old_valence = tcr->valence;
3235  natural stackp, *stackpp;
3236  siginfo_t *info;
3237
3238#ifdef DEBUG_MACH_EXCEPTIONS
3239  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
3240#endif
3241  pseudosigcontext = create_thread_context_frame(thread, &stackp, &info, tcr,  ts);
3242  bzero(info, sizeof(*info));
3243  info->si_code = code;
3244  info->si_addr = (void *)(UC_MCONTEXT(pseudosigcontext)->__es.__faultvaddr);
3245  info->si_signo = signum;
3246  pseudosigcontext->uc_onstack = 0;
3247  pseudosigcontext->uc_sigmask = (sigset_t) 0;
3248  pseudosigcontext->uc_stack.ss_sp = 0;
3249  pseudosigcontext->uc_stack.ss_size = 0;
3250  pseudosigcontext->uc_stack.ss_flags = 0;
3251  pseudosigcontext->uc_link = NULL;
3252  pseudosigcontext->uc_mcsize = sizeof(*UC_MCONTEXT(pseudosigcontext));
3253  tcr->pending_exception_context = pseudosigcontext;
3254  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
3255 
3256
3257  /*
3258     It seems like we've created a sigcontext on the thread's
3259     stack.  Set things up so that we call the handler (with appropriate
3260     args) when the thread's resumed.
3261  */
3262
3263#ifdef X8664
3264  new_ts->__rip = (natural) handler_address;
3265  stackpp = (natural *)stackp;
3266  *--stackpp = (natural)pseudo_sigreturn;
3267  stackp = (natural)stackpp;
3268  new_ts->__rdi = signum;
3269  new_ts->__rsi = (natural)info;
3270  new_ts->__rdx = (natural)pseudosigcontext;
3271  new_ts->__rcx = (natural)tcr;
3272  new_ts->__r8 = (natural)old_valence;
3273  new_ts->__rsp = stackp;
3274  new_ts->__rflags = ts->__rflags;
3275#else
3276  bzero(new_ts, sizeof(*new_ts));
3277  new_ts->__cs = ts->__cs;
3278  new_ts->__ss = ts->__ss;
3279  new_ts->__ds = ts->__ds;
3280  new_ts->__es = ts->__es;
3281  new_ts->__fs = ts->__fs;
3282  new_ts->__gs = ts->__gs;
3283
3284  new_ts->__eip = (natural)handler_address;
3285  stackpp = (natural *)stackp;
3286  *--stackpp = 0;               /* alignment */
3287  *--stackpp = 0;
3288  *--stackpp = 0;
3289  *--stackpp = (natural)old_valence;
3290  *--stackpp = (natural)tcr;
3291  *--stackpp = (natural)pseudosigcontext;
3292  *--stackpp = (natural)info;
3293  *--stackpp = (natural)signum;
3294  *--stackpp = (natural)pseudo_sigreturn;
3295  stackp = (natural)stackpp;
3296  new_ts->__esp = stackp;
3297  new_ts->__eflags = ts->__eflags;
3298#endif
3299#ifdef DEBUG_MACH_EXCEPTIONS
3300  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
3301#endif
3302  return 0;
3303}
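/* The net effect of setup_signal_frame(): when the thread is resumed it
   behaves as if the pseudo-signal handler had been called as
   handler_address(signum, info, pseudosigcontext, tcr, old_valence)
   - via %rdi/%rsi/%rdx/%rcx/%r8 on x8664, via stack arguments on ia32 -
   with pseudo_sigreturn as the return address, so that "returning" from
   the handler restores the saved thread state. */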
3304
3305
3306
3307
3308
3309
3310/*
3311  This function runs in the exception handling thread.  It's
3312  called (by this precise name) from the library function "exc_server()"
3313  when the thread's exception ports are set up.  (exc_server() is called
3314  via mach_msg_server(), which is a function that waits for and dispatches
3315  on exception messages from the Mach kernel.)
3316
3317  This checks to see if the exception was caused by a pseudo_sigreturn()
3318  UUO; if so, it arranges for the thread to have its state restored
3319  from the specified context.
3320
3321  Otherwise, it tries to map the exception to a signal number and
3322  arranges that the thread run a "pseudo signal handler" to handle
3323  the exception.
3324
3325  Some exceptions could and should be handled here directly.
3326*/
3327
3328#define DARWIN_EXCEPTION_HANDLER signal_handler
3329
3330TCR *
3331find_tcr_from_exception_port(mach_port_t port)
3332{
3333    mach_vm_address_t context = 0;
3334    kern_return_t kret;
3335
3336    kret = mach_port_get_context(mach_task_self(), port, &context);
3337    MACH_CHECK_ERROR("finding TCR from exception port", kret);
3338    return (TCR *)context;
3339}
3340
3341void
3342associate_tcr_with_exception_port(mach_port_t port, TCR *tcr)
3343{
3344    kern_return_t kret;
3345   
3346    kret = mach_port_set_context(mach_task_self(),
3347                                 port, (mach_vm_address_t)tcr);
3348    MACH_CHECK_ERROR("associating TCR with exception port", kret);
3349}
3350
3351void
3352disassociate_tcr_from_exception_port(mach_port_t port)
3353{
3354  kern_return_t kret;
3355
3356  kret = mach_port_set_context(mach_task_self(), port, 0);
3357  MACH_CHECK_ERROR("disassociating TCR with exception port", kret);
3358}
3359
3360kern_return_t
3361catch_mach_exception_raise(mach_port_t exception_port,
3362                           mach_port_t thread,
3363                           mach_port_t task,
3364                           exception_type_t exception,
3365                           mach_exception_data_t code,
3366                           mach_msg_type_number_t code_count)
3367{
3368  abort();
3369  return KERN_FAILURE;
3370}
3371
3372kern_return_t
3373catch_mach_exception_raise_state(mach_port_t exception_port,
3374                                 exception_type_t exception,
3375                                 mach_exception_data_t code,
3376                                 mach_msg_type_number_t code_count,
3377                                 int *flavor,
3378                                 thread_state_t in_state,
3379                                 mach_msg_type_number_t in_state_count,
3380                                 thread_state_t out_state,
3381                                 mach_msg_type_number_t *out_state_count)
3382{
3383  int64_t code0 = code[0];
3384  int signum = 0;
3385  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
3386  mach_port_t thread = (mach_port_t)((natural)tcr->native_thread_id);
3387  kern_return_t kret, call_kret;
3388
3389  native_thread_state_t
3390    *ts = (native_thread_state_t *)in_state,
3391    *out_ts = (native_thread_state_t*)out_state;
3392  mach_msg_type_number_t thread_state_count;
3393
3394  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
3395    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
3396  } 
3397  if ((code0 == EXC_I386_GPFLT) &&
3398      ((natural)(ts_pc(ts)) == (natural)pseudo_sigreturn)) {
3399    kret = do_pseudo_sigreturn(thread, tcr, out_ts);
3400#if 0
3401    fprintf(dbgout, "Exception return in 0x%x\n",tcr);
3402#endif
3403  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
3404    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
3405    kret = 17;
3406  } else {
3407    switch (exception) {
3408    case EXC_BAD_ACCESS:
3409      if (code0 == EXC_I386_GPFLT) {
3410        signum = SIGSEGV;
3411      } else {
3412        signum = SIGBUS;
3413      }
3414      break;
3415     
3416    case EXC_BAD_INSTRUCTION:
3417      if (code0 == EXC_I386_GPFLT) {
3418        signum = SIGSEGV;
3419      } else {
3420        signum = SIGILL;
3421      }
3422      break;
3423     
3424    case EXC_SOFTWARE:
3425      signum = SIGILL;
3426      break;
3427     
3428    case EXC_ARITHMETIC:
3429      signum = SIGFPE;
3430      if (code0 == EXC_I386_DIV)
3431        code0 = FPE_INTDIV;
3432      break;
3433     
3434    default:
3435      break;
3436    }
3437#if WORD_SIZE==64
3438    if ((signum==SIGFPE) && 
3439        (code0 != FPE_INTDIV) && 
3440        (tcr->valence != TCR_STATE_LISP)) {
3441      mach_msg_type_number_t thread_state_count = x86_FLOAT_STATE64_COUNT;
3442      x86_float_state64_t fs;
3443     
3444      thread_get_state(thread,
3445                       x86_FLOAT_STATE64,
3446                       (thread_state_t)&fs,
3447                       &thread_state_count);
3448     
3449      if (! (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN_FPE))) {
3450        tcr->flags |= (1<<TCR_FLAG_BIT_FOREIGN_FPE);
3451        tcr->lisp_mxcsr = (fs.__fpu_mxcsr & ~MXCSR_STATUS_MASK);
3452      }
3453      fs.__fpu_mxcsr &= ~MXCSR_STATUS_MASK;
3454      fs.__fpu_mxcsr |= MXCSR_CONTROL_MASK;
3455      thread_set_state(thread,
3456                       x86_FLOAT_STATE64,
3457                       (thread_state_t)&fs,
3458                       x86_FLOAT_STATE64_COUNT);
3459      *out_state_count = NATIVE_THREAD_STATE_COUNT;
3460      *out_ts = *ts;
3461      return KERN_SUCCESS;
3462    }
3463#endif
3464    if (signum) {
3465      kret = setup_signal_frame(thread,
3466                                (void *)DARWIN_EXCEPTION_HANDLER,
3467                                signum,
3468                                code0,
3469                                tcr, 
3470                                ts,
3471                                out_ts);
3472     
3473    } else {
3474      kret = 17;
3475    }
3476  }
3477 
3478  if (kret) {
3479    *out_state_count = 0;
3480    *flavor = 0;
3481  } else {
3482    *out_state_count = NATIVE_THREAD_STATE_COUNT;
3483  }
3484  return kret;
3485}
3486
3487kern_return_t
3488catch_mach_exception_raise_state_identity(mach_port_t exception_port,
3489                                          mach_port_t thread,
3490                                          mach_port_t task,
3491                                          exception_type_t exception,
3492                                          mach_exception_data_t code,
3493                                          mach_msg_type_number_t code_count,
3494                                          int *flavor,
3495                                          thread_state_t old_state,
3496                                          mach_msg_type_number_t old_count,
3497                                          thread_state_t new_state,
3498                                          mach_msg_type_number_t *new_count)
3499{
3500  abort();
3501  return KERN_FAILURE;
3502}
3503
3504
3505
3506
3507
3508static mach_port_t mach_exception_thread = (mach_port_t)0;
3509
3510
3511/*
3512  The initial function for an exception-handling thread.
3513*/
3514
3515void *
3516exception_handler_proc(void *arg)
3517{
3518  extern boolean_t mach_exc_server();
3519  mach_port_t p = (mach_port_t)((natural)arg);
3520
3521  mach_exception_thread = pthread_mach_thread_np(pthread_self());
3522  mach_msg_server(mach_exc_server, 256, p, 0);
3523  /* Should never return. */
3524  abort();
3525}
3526
3527
3528
3529void
3530mach_exception_thread_shutdown()
3531{
3532  kern_return_t kret;
3533
3534  fprintf(dbgout, "terminating Mach exception thread, 'cause exit can't\n");
3535  kret = thread_terminate(mach_exception_thread);
3536  if (kret != KERN_SUCCESS) {
3537    fprintf(dbgout, "Couldn't terminate exception thread, kret = %d\n",kret);
3538  }
3539}
3540
3541
3542mach_port_t
3543mach_exception_port_set()
3544{
3545  static mach_port_t __exception_port_set = MACH_PORT_NULL;
3546  kern_return_t kret; 
3547  if (__exception_port_set == MACH_PORT_NULL) {
3548
3549    kret = mach_port_allocate(mach_task_self(),
3550                              MACH_PORT_RIGHT_PORT_SET,
3551                              &__exception_port_set);
3552    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
3553    create_system_thread(0,
3554                         NULL,
3555                         exception_handler_proc, 
3556                         (void *)((natural)__exception_port_set));
3557  }
3558  return __exception_port_set;
3559}
3560
3561/*
3562  Set up a new thread to handle those exceptions specified by
3563  the mask "which".  This involves creating a special Mach
3564  message port, telling the Mach kernel to send exception
3565  messages for the calling thread to that port, and setting
3566  up a handler thread which listens for and responds to
3567  those messages.
3568
3569*/
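
/*
  An illustrative sketch (kept under #if 0; not part of the build) of the
  bare Mach calls the comment above describes, stripped of the TCR
  bookkeeping that the real code below performs.  The function name and
  error strings are made up for the example.
*/
#if 0
static void
example_per_thread_exception_setup(exception_mask_t which)
{
  mach_port_t task = mach_task_self(), port;
  kern_return_t kret;

  /* Create a receive right to which the kernel can queue exception
     messages for this thread. */
  kret = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, &port);
  MACH_CHECK_ERROR("allocating example exception port", kret);

  /* Give the port a send right so the kernel can actually post to it. */
  kret = mach_port_insert_right(task, port, port, MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to example port", kret);

  /* Route the calling thread's exceptions (those selected by "which")
     to that port, asking for thread state to accompany the message. */
  kret = thread_set_exception_ports(mach_thread_self(), which, port,
                                    EXCEPTION_STATE | MACH_EXCEPTION_CODES,
                                    x86_THREAD_STATE64); /* x86_THREAD_STATE32 on a 32-bit lisp */
  MACH_CHECK_ERROR("setting example thread exception ports", kret);

  /* A dedicated thread then services the port -- e.g. by calling
     mach_msg_server(mach_exc_server, 256, port, 0) -- just as
     exception_handler_proc() above does for the shared port set. */
}
#endif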
3570
3571/*
3572  Establish the lisp thread's TCR as its exception port, and determine
3573  whether any other ports have been established by foreign code for
3574  exceptions that lisp cares about.
3575
3576  If this happens at all, it should happen on return from foreign
3577  code and on entry to lisp code via a callback.
3578
3579  This is a lot of trouble (and overhead) to support Java, or other
3580  embeddable systems that clobber their caller's thread exception ports.
3581 
3582*/
3583kern_return_t
3584tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
3585{
3586  kern_return_t kret;
3587  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
3588  int i;
3589  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
3590  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
3591  exception_mask_t mask = 0;
3592
3593  kret = thread_swap_exception_ports(thread,
3594                                     LISP_EXCEPTIONS_HANDLED_MASK,
3595                                     lisp_port,
3596                                     MACH_EXCEPTION_CODES | EXCEPTION_STATE,
3597#if WORD_SIZE==64
3598                                     x86_THREAD_STATE64,
3599#else
3600                                     x86_THREAD_STATE32,
3601#endif
3602                                     fxs->masks,
3603                                     &n,
3604                                     fxs->ports,
3605                                     fxs->behaviors,
3606                                     fxs->flavors);
3607  if (kret == KERN_SUCCESS) {
3608    fxs->foreign_exception_port_count = n;
3609    for (i = 0; i < n; i ++) {
3610      foreign_port = fxs->ports[i];
3611
3612      if ((foreign_port != lisp_port) &&
3613          (foreign_port != MACH_PORT_NULL)) {
3614        mask |= fxs->masks[i];
3615      }
3616    }
3617    tcr->foreign_exception_status = (int) mask;
3618  }
3619  return kret;
3620}
3621
3622kern_return_t
3623tcr_establish_lisp_exception_port(TCR *tcr)
3624{
3625  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
3626}
3627
3628/*
3629  Do this when calling out to or returning from foreign code, if
3630  any conflicting foreign exception ports were established when we
3631  last entered lisp code.
3632*/
3633kern_return_t
3634restore_foreign_exception_ports(TCR *tcr)
3635{
3636  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
3637  kern_return_t kret;
3638
3639  if (m) {
3640    MACH_foreign_exception_state *fxs  = 
3641      (MACH_foreign_exception_state *) tcr->native_thread_info;
3642    int i, n = fxs->foreign_exception_port_count;
3643    exception_mask_t tm;
3644
3645    for (i = 0; i < n; i++) {
3646      if ((tm = fxs->masks[i]) & m) {
3647        kret = thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
3648                                   tm,
3649                                   fxs->ports[i],
3650                                   fxs->behaviors[i],
3651                                   fxs->flavors[i]);
3652        MACH_CHECK_ERROR("restoring thread exception ports", kret);
3653      }
3654    }
3655  }
3656  return KERN_SUCCESS;
3657}
3658                                   
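
/*
  A hedged sketch of how the two entry points above pair up around a
  callout from lisp to foreign code; the wrapper and the foreign function
  are hypothetical, and the real callers live in the threads/callback code.
*/
#if 0
static void
example_callout(TCR *tcr)
{
  /* Leaving lisp: give any conflicting foreign handlers their ports back. */
  restore_foreign_exception_ports(tcr);

  some_foreign_function();      /* hypothetical foreign call */

  /* Re-entering lisp: claim the thread's exceptions for the lisp handler
     again and record whatever ports the foreign code may have installed. */
  tcr_establish_lisp_exception_port(tcr);
}
#endif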
3659
3660/*
3661  This assumes that a Mach port (to be used as the thread's exception port) whose
3662  "name" matches the TCR's 32-bit address has already been allocated.
3663*/
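
/*
  A minimal sketch (under #if 0, with a made-up function name) of how a
  port with a caller-chosen name could be allocated, assuming the TCR
  address fits in a 32-bit mach_port_name_t as the comment above requires;
  the actual allocation happens elsewhere, at thread-creation time.
*/
#if 0
static kern_return_t
example_allocate_tcr_named_port(TCR *tcr)
{
  /* Unlike mach_port_allocate(), mach_port_allocate_name() lets the
     caller pick the port's name in the task's IPC space. */
  return mach_port_allocate_name(mach_task_self(),
                                 MACH_PORT_RIGHT_RECEIVE,
                                 (mach_port_name_t)((natural)tcr));
}
#endif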
3664
3665kern_return_t
3666setup_mach_exception_handling(TCR *tcr)
3667{
3668  mach_port_t
3669    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
3670    task_self = mach_task_self();
3671  kern_return_t kret;
3672
3673  kret = mach_port_insert_right(task_self,
3674                                thread_exception_port,
3675                                thread_exception_port,
3676                                MACH_MSG_TYPE_MAKE_SEND);
3677  MACH_CHECK_ERROR("adding send right to exception_port",kret);
3678
3679  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
3680  if (kret == KERN_SUCCESS) {
3681    mach_port_t exception_port_set = mach_exception_port_set();
3682
3683    kret = mach_port_move_member(task_self,
3684                                 thread_exception_port,
3685                                 exception_port_set);
3686  }
3687  return kret;
3688}
3689
3690void
3691darwin_exception_init(TCR *tcr)
3692{
3693  void tcr_monitor_exception_handling(TCR*, Boolean);
3694  kern_return_t kret;
3695  MACH_foreign_exception_state *fxs = 
3696    calloc(1, sizeof(MACH_foreign_exception_state));
3697 
3698  tcr->native_thread_info = (void *) fxs;
3699
3700  if ((kret = setup_mach_exception_handling(tcr))
3701      != KERN_SUCCESS) {
3702    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
3703    terminate_lisp();
3704  }
3705}
3706
3707/*
3708  The tcr is the "name" of the corresponding thread's exception port.
3709  Destroying the port should remove it from all port sets of which it's
3710  a member (notably, the exception port set.)
3711*/
3712void
3713darwin_exception_cleanup(TCR *tcr)
3714{
3715  mach_port_t exception_port;
3716  void *fxs = tcr->native_thread_info;
3717
3718  if (fxs) {
3719    tcr->native_thread_info = NULL;
3720    free(fxs);
3721  }
3722
3723  exception_port = TCR_TO_EXCEPTION_PORT(tcr);
3724  disassociate_tcr_from_exception_port(exception_port);
3725  mach_port_deallocate(mach_task_self(), exception_port);
3726  /* Theoretically not needed, I guess... */
3727  mach_port_destroy(mach_task_self(), exception_port);
3728}
3729
3730
3731
3732
3733void
3734fatal_mach_error(char *format, ...)
3735{
3736  va_list args;
3737  char s[512];
3738 
3739
3740  va_start(args, format);
3741  vsnprintf(s, sizeof(s),format, args);
3742  va_end(args);
3743
3744  Fatal("Mach error", s);
3745}
3746
3747
3748
3749
3750#endif
3751
3752/* watchpoint stuff */
3753
3754area *
3755new_watched_area(natural size)
3756{
3757  char *p;
3758
3759  p = MapMemory(NULL, size, MEMPROTECT_RWX);
3760  if ((signed_natural)p == -1) {
3761    allocation_failure(true, size);
3762  }
3763  return new_area(p, p + size, AREA_WATCHED);
3764}
3765
3766void
3767delete_watched_area(area *a, TCR *tcr)
3768{
3769  natural nbytes = a->high - a->low;
3770  char *base = a->low;
3771
3772  condemn_area_holding_area_lock(a);
3773
3774  if (nbytes) {
3775    int err;
3776
3777    err = UnMapMemory(base, nbytes);
3778    if (err != 0)
3779      Fatal("munmap in delete_watched_area", "");
3780  }
3781}
3782
3783natural
3784uvector_total_size_in_bytes(LispObj *u)
3785{
3786  LispObj header = header_of(u);
3787  natural header_tag = fulltag_of(header);
3788  natural subtag = header_subtag(header);
3789  natural element_count = header_element_count(header);
3790  natural nbytes = 0;
3791
3792#ifdef X8632
3793  if ((nodeheader_tag_p(header_tag)) ||
3794      (subtag <= max_32_bit_ivector_subtag)) {
3795    nbytes = element_count << 2;
3796  } else if (subtag <= max_8_bit_ivector_subtag) {
3797    nbytes = element_count;
3798  } else if (subtag <= max_16_bit_ivector_subtag) {
3799    nbytes = element_count << 1;
3800  } else if (subtag == subtag_double_float_vector) {
3801    nbytes = element_count << 3;
3802  } else {
3803    nbytes = (element_count + 7) >> 3;
3804  }
3805  /* add 4 byte header and round up to multiple of 8 bytes */
3806  return ~7 & (4 + nbytes + 7);
3807#endif
3808#ifdef X8664
3809  if ((nodeheader_tag_p(header_tag)) || (header_tag == ivector_class_64_bit)) {
3810    nbytes = element_count << 3;
3811  } else if (header_tag == ivector_class_32_bit) {
3812    nbytes = element_count << 2;
3813  } else {
3814    /* ivector_class_other_bit covers 8-bit ivectors, 16-bit ivectors, and bit-vectors */
3815    if (subtag == subtag_bit_vector) {
3816      nbytes = (element_count + 7) >> 3;
3817    } else if (subtag >= min_8_bit_ivector_subtag) {
3818      nbytes = element_count;
3819    } else {
3820      nbytes = element_count << 1;
3821    }
3822  }
3823  /* add 8 byte header and round up to multiple of 16 bytes */
3824  return ~15 & (8 + nbytes + 15);
3825#endif
3826}
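
/*
  Worked example of the rounding above, for x8664: a 10-element 8-bit
  ivector has nbytes = 10, so the total is ~15 & (8 + 10 + 15) = ~15 & 33
  = 32 bytes -- an 8-byte header, 10 data bytes, and padding up to the
  next 16-byte boundary.  The x8632 case works the same way with a 4-byte
  header and 8-byte granularity.
*/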
3827
3828extern void wp_update_references(TCR *, LispObj, LispObj);
3829
3830/*
3831 * Other threads are suspended and pc-lusered.
3832 *
3833 * param contains a tagged pointer to a uvector or a cons cell
3834 */
3835signed_natural
3836watch_object(TCR *tcr, signed_natural param)
3837{
3838  LispObj object = (LispObj)param;
3839  unsigned tag = fulltag_of(object);
3840  LispObj *noderef = (LispObj *)untag(object);
3841  area *object_area = area_containing((BytePtr)noderef);
3842  natural size;
3843
3844  if (tag == fulltag_cons)
3845    size = 2 * node_size;
3846  else
3847    size = uvector_total_size_in_bytes(noderef);
3848
3849  if (object_area && object_area->code == AREA_DYNAMIC) {
3850    area *a = new_watched_area(size);
3851    LispObj old = object;
3852    LispObj new = (LispObj)((natural)a->low + tag);
3853
3854    add_area_holding_area_lock(a);
3855
3856    /* move object to watched area */
3857    memcpy(a->low, noderef, size);
3858    ProtectMemory(a->low, size);
3859    memset(noderef, 0, size);
3860    wp_update_references(tcr, old, new);
3861    check_all_areas(tcr);
3862    return 1;
3863  }
3864  return 0;
3865}
3866
3867/*
3868 * We expect the watched object in arg_y, and the new uninitialized
3869 * object (which is just zeroed) in arg_z.
3870 */
3871signed_natural
3872unwatch_object(TCR *tcr, signed_natural param)
3873{
3874  ExceptionInformation *xp = tcr->xframe->curr;
3875  LispObj old = xpGPR(xp, Iarg_y);
3876  unsigned tag = fulltag_of(old);
3877  LispObj new = xpGPR(xp, Iarg_z);
3878  LispObj *oldnode = (LispObj *)untag(old);
3879  LispObj *newnode = (LispObj *)untag(new);
3880  area *a = area_containing((BytePtr)old);
3881  extern void update_managed_refs(area *, BytePtr, natural);
3882
3883  if (a && a->code == AREA_WATCHED) {
3884    natural size;
3885
3886    if (tag == fulltag_cons)
3887      size = 2 * node_size;
3888    else
3889      size = uvector_total_size_in_bytes(oldnode);
3890
3891    memcpy(newnode, oldnode, size);
3892    delete_watched_area(a, tcr);
3893    wp_update_references(tcr, old, new);
3894    /* because wp_update_references doesn't update refbits */
3895    tenure_to_area(tenured_area);
3896    /* Unwatching can (re-)introduce managed_static->dynamic references */
3897    zero_bits(managed_static_area->refbits,managed_static_area->ndnodes);
3898    update_managed_refs(managed_static_area, low_markable_address, area_dnode(active_dynamic_area->active, low_markable_address));
3899    check_all_areas(tcr);
3900    xpGPR(xp, Iarg_z) = new;
3901  }
3902  return 0;
3903}
3904
3905Boolean
3906handle_watch_trap(ExceptionInformation *xp, TCR *tcr)
3907{
3908  LispObj selector = xpGPR(xp,Iimm0);
3909  LispObj object = xpGPR(xp, Iarg_z);
3910  signed_natural result;
3911 
3912  switch (selector) {
3913    case WATCH_TRAP_FUNCTION_WATCH:
3914      result = gc_like_from_xp(xp, watch_object, object);
3915      if (result == 0)
3916        xpGPR(xp,Iarg_z) = lisp_nil;
3917      break;
3918    case WATCH_TRAP_FUNCTION_UNWATCH:
3919      gc_like_from_xp(xp, unwatch_object, 0);
3920      break;
3921    default:
3922      break;
3923  }
3924  return true;
3925}
3926