source: trunk/source/lisp-kernel/x86-exceptions.c @ 16365

Last change on this file since 16365 was 16365, checked in by gb, 5 years ago

Try again.

1/*
2   Copyright (C) 2005-2009 Clozure Associates
3   This file is part of Clozure CL. 
4
5   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with Clozure CL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with Clozure CL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   Clozure CL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include "x86-utils.h"
21#include "threads.h"
22#include <ctype.h>
23#include <stdio.h>
24#include <stddef.h>
25#include <string.h>
26#include <stdarg.h>
27#include <errno.h>
29#ifdef LINUX
30#include <strings.h>
31#include <sys/mman.h>
32#include <fpu_control.h>
33#include <linux/prctl.h>
34#endif
35#ifdef DARWIN
36#include <sysexits.h>
37#include <dlfcn.h>
38#include <mach/machine/vm_types.h>
39#endif
40#ifndef WINDOWS
41#include <sys/syslog.h>
42#endif
43#ifdef WINDOWS
44#include <windows.h>
45#ifdef WIN_64
46#include <winternl.h>
47#include <ntstatus.h>
48#endif
49#ifndef EXCEPTION_WRITE_FAULT
50#define EXCEPTION_WRITE_FAULT 1
51#endif
52#endif
53
54void
55normalize_tcr(ExceptionInformation *, TCR *, Boolean);
56
57/*
58  We do all kinds of funky things to avoid handling a signal on the lisp
59  stack.  One of those funky things involves using the  __builtin_return_address()
60  intrinsic so that the real handler returns to the right place when it exits,
61  even if it returns on a different stack.  Code at "the right place" is presumed
62  to just do a sigreturn, however the OS does that.
63
64  Sadly, some JVMs (at least) do what they call "signal chaining": they install
65  their own handlers for signals that we expect to handle, and call our handler
66  when they realize that they don't know how to handle what we raise.  They
67  don't observe sigaltstack, and they don't necessarily call our handler
68  tail-recursively (so our stack-switching code would cause our handler to
69  return to the JVM's handler, running on the wrong stack).
70
71  Try to work around this by setting up an "early" signal handler (before any
72  of this JVM nonsense can take effect) and noting the address it'd return to.
73*/
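/* For context: a minimal POSIX sketch (illustrative only, excluded from the
   build) of installing a three-argument SA_SIGINFO handler on an alternate
   signal stack.  The names "sketch_altstack", "sketch_install", and
   "sketch_handler" are hypothetical; the lisp kernel's real handler
   installation lives elsewhere.  A "chaining" library can later replace such
   an installation, which is why the early-handler hack below records the
   genuine sigreturn address. */
#if 0
static char sketch_altstack[32*1024];

static void
sketch_install(int signum, void (*sketch_handler)(int, siginfo_t *, void *))
{
  stack_t ss;
  struct sigaction sa;

  ss.ss_sp = sketch_altstack;           /* dedicated stack for the handler */
  ss.ss_size = sizeof(sketch_altstack);
  ss.ss_flags = 0;
  sigaltstack(&ss, NULL);

  sa.sa_sigaction = sketch_handler;     /* siginfo-style, 3-argument handler */
  sigfillset(&sa.sa_mask);              /* block other signals while handling */
  sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
  sigaction(signum, &sa, NULL);
}
#endif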
74
75pc
76real_sigreturn = (pc)0;
77
78#define SIGRETURN_ADDRESS() (real_sigreturn ? real_sigreturn : __builtin_return_address(0))
79
80#ifndef WINDOWS
81#ifndef DARWIN
82void
83early_intn_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
84{
85  real_sigreturn = (pc) __builtin_return_address(0);
86  xpPC(xp) += 2;
87}
88
89#endif
90#endif
91
92void
93do_intn()
94{
95  __asm volatile("int $0xcd");
96}
97
98void
99x86_early_exception_init()
100{
101#ifndef WINDOWS
102#ifndef DARWIN
103  struct sigaction action, oaction;
104
105  action.sa_sigaction = (void *) early_intn_handler;
106  sigfillset(&action.sa_mask);
107  action.sa_flags = SA_SIGINFO;
108  sigaction(SIGNUM_FOR_INTN_TRAP,&action,&oaction);
109 
110  do_intn();
111  sigaction(SIGNUM_FOR_INTN_TRAP,&oaction, NULL);
112#endif
113#endif
114}
115
116int
117page_size = 4096;
118
119int
120log2_page_size = 12;
121
122Boolean
123did_gc_notification_since_last_full_gc = false;
124
125Boolean
126allocation_enabled = true;
127
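/* Account for memory consed since the last time we looked.  The per-thread
   allocation pointer moves downward through its segment (see the "-="
   adjustments in allocate_object and allocate_list below), so the bytes
   consumed since the last sample are (last - current); nothing is credited
   unless a real segment is active (save_allocbase isn't the VOID_ALLOCPTR
   sentinel). */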
128void
129update_bytes_allocated(TCR* tcr, void *cur_allocptr)
130{
131  char *last = tcr->last_allocptr;
132  char *current = cur_allocptr;
133  uint64_t *bytes_allocated = (uint64_t *)&TCR_AUX(tcr)->bytes_allocated;
134
135  if (last && (tcr->save_allocbase != ((void *)VOID_ALLOCPTR))) {
136    *bytes_allocated += last - current;
137  }
138  tcr->last_allocptr = 0;
139}
140
141
142
143//  This doesn't GC; it returns true if it made enough room, false
144//  otherwise.
145//  If "extend" is true, it can try to extend the dynamic area to
146//  satisfy the request.
147
148
149
150void
151platform_new_heap_segment(ExceptionInformation *xp, TCR *tcr, BytePtr low, BytePtr high)
152{
153  tcr->last_allocptr = (void *)high;
154  tcr->save_allocptr = (void *)high;
155  xpGPR(xp,Iallocptr) = (LispObj) high;
156  tcr->save_allocbase = (void *) low;
157}
158
159Boolean
160allocate_object(ExceptionInformation *xp,
161                natural bytes_needed, 
162                signed_natural disp_from_allocptr,
163                TCR *tcr,
164                Boolean *crossed_threshold)
165{
166  area *a = active_dynamic_area;
167
168  /* Maybe do an EGC */
169  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
170    if (((a->active)-(a->low)) >= a->threshold) {
171      gc_from_xp(xp, 0L);
172    }
173  }
174
175  /* Life is pretty simple if we can simply grab a segment
176     without extending the heap.
177  */
178  if (new_heap_segment(xp, bytes_needed, false, tcr, crossed_threshold)) {
179    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
180    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
181    return true;
182  }
183 
184  /* It doesn't make sense to try a full GC if the object
185     we're trying to allocate is larger than everything
186     allocated so far.
187  */
188  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
189    untenure_from_area(tenured_area); /* force a full GC */
190    gc_from_xp(xp, 0L);
191    did_gc_notification_since_last_full_gc = false;
192  }
193 
194  /* Try again, growing the heap if necessary */
195  if (new_heap_segment(xp, bytes_needed, true, tcr, NULL)) {
196    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
197    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
198    return true;
199  }
200 
201  return false;
202}
203
204natural gc_deferred = 0, full_gc_deferred = 0;
205
206signed_natural
207flash_freeze(TCR *tcr, signed_natural param)
208{
209  return 0;
210}
211
212
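/* GC trap protocol, as implemented below: the selector arrives in %imm0 and
   a single raw argument in %imm1 (x8664) or an MMX register (x8632).
   Depending on the selector, the result goes back to Lisp either boxed as
   NIL/T in %arg_z or as a raw value in %imm0. */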
213Boolean
214handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
215{
216  LispObj selector = xpGPR(xp,Iimm0);
217#ifdef X8664
218  LispObj arg = xpGPR(xp,Iimm1);
219#else
220  LispObj arg = xpMMXreg(xp,Imm0);
221#endif
222  area *a = active_dynamic_area;
223  Boolean egc_was_enabled = (a->older != NULL);
224 
225  natural gc_previously_deferred = gc_deferred;
226
227  switch (selector) {
228  case GC_TRAP_FUNCTION_EGC_CONTROL:
229    egc_control(arg != 0, a->active);
230    xpGPR(xp,Iarg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
231    break;
232
233  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
234#ifdef X8664
235    a->threshold = unbox_fixnum(xpGPR(xp, Iarg_x));
236#else
237    a->threshold = unbox_fixnum(xpGPR(xp, Itemp0));
238#endif
239    g1_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_y));
240    g2_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_z));
241    xpGPR(xp,Iarg_z) = lisp_nil+t_offset;
242    break;
243
244  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
245    if (((signed_natural) arg) > 0) {
246      lisp_heap_gc_threshold = 
247        align_to_power_of_2((arg-1) +
248                            (heap_segment_size - 1),
249                            log2_heap_segment_size);
250    }
251    /* fall through */
252  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
253    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
254    break;
255
256  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
257    /*  Try to put the current threshold in effect.  This may
258        need to disable/reenable the EGC. */
259    untenure_from_area(tenured_area);
260    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
261    if (egc_was_enabled) {
262      if ((a->high - a->active) >= a->threshold) {
263        tenure_to_area(tenured_area);
264      }
265    }
266    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
267    break;
268
269  case GC_TRAP_FUNCTION_SET_GC_NOTIFICATION_THRESHOLD:
270    if ((signed_natural)arg >= 0) {
271      lisp_heap_notify_threshold = arg;
272      did_gc_notification_since_last_full_gc = false;
273    }
274    /* fall through */
275
276  case GC_TRAP_FUNCTION_GET_GC_NOTIFICATION_THRESHOLD:
277    xpGPR(xp, Iimm0) = lisp_heap_notify_threshold;
278    break;
279
280  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
281    ensure_static_conses(xp, tcr, 32768);
282    break;
283
284  case GC_TRAP_FUNCTION_FLASH_FREEZE: /* Like freeze below, but no GC */
285    untenure_from_area(tenured_area);
286    gc_like_from_xp(xp,flash_freeze,0);
287    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
288    tenured_area->static_dnodes = area_dnode(a->active, a->low);
289    if (egc_was_enabled) {
290      tenure_to_area(tenured_area);
291    }
292    xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
293    break;
294
295  case GC_TRAP_FUNCTION_ALLOCATION_CONTROL:
296    switch(arg) {
297    case 0: /* disable if allocation enabled */
298      xpGPR(xp, Iarg_z) = lisp_nil;
299      if (allocation_enabled) {
300        TCR *other_tcr;
301        ExceptionInformation *other_context;
302        suspend_other_threads(true);
303        normalize_tcr(xp,tcr,false);
304        for (other_tcr=TCR_AUX(tcr)->next; other_tcr != tcr; other_tcr = TCR_AUX(other_tcr)->next) {
305          other_context = other_tcr->pending_exception_context;
306          if (other_context == NULL) {
307            other_context = TCR_AUX(other_tcr)->suspend_context;
308          }
309          normalize_tcr(other_context, other_tcr, true);
310        }
311        allocation_enabled = false;
312        xpGPR(xp, Iarg_z) = t_value;
313        resume_other_threads(true);
314      }
315      break;
316
317    case 1:                     /* enable if disabled */
318      xpGPR(xp, Iarg_z) = lisp_nil;
319      if (!allocation_enabled) {
320        allocation_enabled = true;
321        xpGPR(xp, Iarg_z) = t_value;
322      }
323      break;
324
325    default:
326      xpGPR(xp, Iarg_z) = lisp_nil;
327      if (allocation_enabled) {
328        xpGPR(xp, Iarg_z) = t_value;
329      }
330      break;
331    }
332    break;
333
334  default:
335    update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
336
337    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
338      if (!full_gc_deferred) {
339        gc_from_xp(xp, 0L);
340        did_gc_notification_since_last_full_gc = false;
341        break;
342      }
343      /* Tried to do a full GC when gc was disabled.  That failed,
344         so try full GC now */
345      selector = GC_TRAP_FUNCTION_GC;
346    }
347   
348    if (egc_was_enabled) {
349      egc_control(false, (BytePtr) a->active);
350    }
351    gc_from_xp(xp, 0L);
352    did_gc_notification_since_last_full_gc = false;
353    if (gc_deferred > gc_previously_deferred) {
354      full_gc_deferred = 1;
355    } else {
356      full_gc_deferred = 0;
357    }
358    if (selector > GC_TRAP_FUNCTION_GC) {
359      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
360        impurify_from_xp(xp, 0L);
361        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
362        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
363        gc_from_xp(xp, 0L);
364      }
365      if (selector & GC_TRAP_FUNCTION_PURIFY) {
366        purify_from_xp(xp, 1);
367        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
368        gc_from_xp(xp, 0L);
369      }
370      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
371        OSErr err;
372        extern OSErr save_application(int, Boolean);
373        area *vsarea = tcr->vs_area;
374
375#ifdef WINDOWS 
376        arg = _open_osfhandle(arg,0);
377#endif
378        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
379        err = save_application((int)arg, egc_was_enabled);
380        if (err == noErr) {
381          _exit(0);
382        }
383        fatal_oserr(": save_application", err);
384      }
385      switch (selector) {
386      case GC_TRAP_FUNCTION_FREEZE:
387        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
388        tenured_area->static_dnodes = area_dnode(a->active, a->low);
389        xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
390        break;
391      default:
392        break;
393      }
394    }
395    if (egc_was_enabled) {
396      egc_control(true, NULL);
397    }
398    break;
399  }
400  return true;
401}
402
403 
404
405
406
407void
408push_on_lisp_stack(ExceptionInformation *xp, LispObj value)
409{
410  LispObj *vsp = (LispObj *)xpGPR(xp,Isp);
411  *--vsp = value;
412  xpGPR(xp,Isp) = (LispObj)vsp;
413}
414
415
416/* Hard to know whether this is necessary in general.  For now,
417   do it when we get a "wrong number of arguments" trap.
418*/
419void
420finish_function_entry(ExceptionInformation *xp)
421{
422  natural nargs = xpGPR(xp,Inargs)>>fixnumshift;
423  signed_natural disp = nargs - nargregs;
424  LispObj *vsp =  (LispObj *) xpGPR(xp,Isp), ra = *vsp++;
425   
426  xpGPR(xp,Isp) = (LispObj) vsp;
427
428  if (disp > 0) {               /* implies that nargs > nargregs */
429    vsp[disp] = xpGPR(xp,Ifp);
430    vsp[disp+1] = ra;
431    xpGPR(xp,Ifp) = (LispObj)(vsp+disp);
432#ifdef X8664
433    push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
434#endif
435    push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
436    push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
437  } else {
438    push_on_lisp_stack(xp,ra);
439    push_on_lisp_stack(xp,xpGPR(xp,Ifp));
440    xpGPR(xp,Ifp) = xpGPR(xp,Isp);
441#ifdef X8664
442    if (nargs == 3) {
443      push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
444    }
445#endif
446    if (nargs >= 2) {
447      push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
448    }
449    if (nargs >= 1) {
450      push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
451    }
452  }
453}
454
455Boolean
456object_contains_pc(LispObj container, LispObj addr)
457{
458  if (fulltag_of(container) >= fulltag_misc) {
459    natural elements = header_element_count(header_of(container));
460    if ((addr >= container) &&
461        (addr < ((LispObj)&(deref(container,1+elements))))) {
462      return true;
463    }
464  }
465  return false;
466}
467
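/* Build an "exception callback frame" (xcf) on the Lisp value stack
   describing where the exception occurred.  The pushes below record, in
   order: the absolute PC split into two fixnums, the previous xframe link,
   the foreign SP, the return address found on the stack, the raw context
   pointer, the object containing the PC and the PC relative to it (or NIL),
   the nominal function, a filler slot, and the saved %fp; %fp is then
   pointed at the new frame, whose address is returned. */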
468LispObj
469create_exception_callback_frame(ExceptionInformation *xp, TCR *tcr)
470{
471  LispObj containing_uvector = 0, 
472    relative_pc = lisp_nil,
473    nominal_function = lisp_nil, 
474    f, tra, tra_f = 0, abs_pc;
475  LispObj pc_low, pc_high;
476
477  f = xpGPR(xp,Ifn);
478  tra = *(LispObj*)(xpGPR(xp,Isp));
479  if (tra_p(tra)) {
480    char *p = (char *)tra;
481    extern char *spentry_start, *spentry_end;
482
483    if (ptr_in_area(p, tcr->ts_area) ||
484        (p > spentry_start && p < spentry_end) ||
485        in_any_consing_area(tra))
486      tra_f = tra_function(tra);
487    else
488      Bug(xp, "martian tra %p\n", tra);
489  }
490  abs_pc = (LispObj)xpPC(xp);
491#if WORD_SIZE == 64
492  pc_high = ((abs_pc >> 32) & 0xffffffff) << fixnumshift;
493  pc_low = (abs_pc & 0xffffffff) << fixnumshift;
494#else
495  pc_high = ((abs_pc >> 16) & 0xffff) << fixnumshift;
496  pc_low = (abs_pc & 0xffff) << fixnumshift;
497#endif
498
499
500  if (functionp(f))
501    nominal_function = f;
502  else if (tra_f)
503    nominal_function = tra_f;
504 
505  f = xpGPR(xp,Ifn);
506  if (object_contains_pc(f, abs_pc)) {
507    containing_uvector = untag(f)+fulltag_misc;
508  } else {
509    f = xpGPR(xp,Ixfn);
510    if (object_contains_pc(f, abs_pc)) {
511      containing_uvector = untag(f)+fulltag_misc;
512    } else {
513      if (tra_f) {
514        f = tra_f;
515        if (object_contains_pc(f, abs_pc)) {
516          containing_uvector = untag(f)+fulltag_misc;
517          relative_pc = (abs_pc - f) << fixnumshift;
518        }
519      }
520    }
521  }
522  if (containing_uvector) {
523    relative_pc = (abs_pc - (LispObj)&(deref(containing_uvector,1))) << fixnumshift;
524  } else {
525    containing_uvector = lisp_nil;
526  }
527  push_on_lisp_stack(xp, pc_high);
528  push_on_lisp_stack(xp, pc_low);
529  push_on_lisp_stack(xp,(LispObj)(tcr->xframe->prev));
530  push_on_lisp_stack(xp,(LispObj)(tcr->foreign_sp));
531  push_on_lisp_stack(xp,tra);
532  push_on_lisp_stack(xp,(LispObj)xp);
533  push_on_lisp_stack(xp,containing_uvector); 
534  push_on_lisp_stack(xp,relative_pc);
535  push_on_lisp_stack(xp,nominal_function);
536  push_on_lisp_stack(xp,0);
537  push_on_lisp_stack(xp,xpGPR(xp,Ifp));
538  xpGPR(xp,Ifp) = xpGPR(xp,Isp);
539  return xpGPR(xp,Isp);
540}
541
542#ifndef XMEMFULL
543#define XMEMFULL (76)
544#endif
545
546void
547lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed )
548{
549  LispObj xcf = create_exception_callback_frame(xp, tcr),
550    cmain = nrs_CMAIN.vcell;
551  int skip;
552   
553  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
554  xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
555
556  skip = callback_to_lisp(tcr, cmain, xp, xcf, -1, XMEMFULL, 0, 0);
557  xpPC(xp) += skip;
558}
559
560#ifndef SIGTRAP
561#define SIGTRAP 5
562#endif
563
564void
565callback_for_gc_notification(ExceptionInformation *xp, TCR *tcr)
566{
567  LispObj cmain = nrs_CMAIN.vcell;
568  if ((fulltag_of(cmain) == fulltag_misc) &&
569      (header_subtag(header_of(cmain)) == subtag_macptr)) {
570    LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
571      word_beyond_vsp = save_vsp[-1],
572      save_fp = xpGPR(xp,Ifp),
573      xcf = create_exception_callback_frame(xp, tcr);
574
575    callback_to_lisp(tcr, cmain, xp, xcf, SIGTRAP, 0, 0, 0);
576    did_gc_notification_since_last_full_gc = true;
577    xpGPR(xp,Ifp) = save_fp;
578    xpGPR(xp,Isp) = (LispObj)save_vsp;
579    save_vsp[-1] = word_beyond_vsp;
580  }
581}
582
583
584/*
585  Allocate a large list, where "large" means "large enough to
586  possibly trigger the EGC several times if this was done
587  by individually allocating each CONS."  The number of
588  conses in question is in arg_z; on successful return,
589  the list will be in arg_z
590*/
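/* Note on the initialization loop below: PREV starts out as NIL and each
   freshly initialized cons is linked to the cons initialized just before it,
   with every element taken from %arg_y, so the finished chain is a proper
   list and its head is left in %arg_z. */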
591
592Boolean
593allocate_list(ExceptionInformation *xp, TCR *tcr)
594{
595  natural
596    nconses = (unbox_fixnum(xpGPR(xp,Iarg_z))),
597    bytes_needed = (nconses << dnode_shift);
598  LispObj
599    prev = lisp_nil,
600    current,
601    initial = xpGPR(xp,Iarg_y);
602  Boolean notify_pending_gc = false;
603
604  if (nconses == 0) {
605    /* Silly case */
606    xpGPR(xp,Iarg_z) = lisp_nil;
607    return true;
608  }
609  update_bytes_allocated(tcr, (void *)tcr->save_allocptr);
610  if (allocate_object(xp,bytes_needed,bytes_needed-fulltag_cons,tcr, &notify_pending_gc)) {
611    tcr->save_allocptr -= fulltag_cons;
612    for (current = xpGPR(xp,Iallocptr);
613         nconses;
614         prev = current, current+= dnode_size, nconses--) {
615      deref(current,0) = prev;
616      deref(current,1) = initial;
617    }
618    xpGPR(xp,Iarg_z) = prev;
619    if (notify_pending_gc && !did_gc_notification_since_last_full_gc) {
620      callback_for_gc_notification(xp,tcr);
621    }
622  } else {
623    lisp_allocation_failure(xp,tcr,bytes_needed);
624  }
625  return true;
626}
627
628Boolean
629handle_alloc_trap(ExceptionInformation *xp, TCR *tcr, Boolean *notify)
630{
631  natural cur_allocptr, bytes_needed;
632  unsigned allocptr_tag;
633  signed_natural disp;
634
635  if (!allocation_enabled) {
636    LispObj xcf,
637      save_sp = xpGPR(xp,Isp),
638      save_fp = xpGPR(xp,Ifp),
639      cmain = nrs_CMAIN.vcell;
640    xcf = create_exception_callback_frame(xp, tcr);
641    pc_luser_xp(xp,tcr,NULL);
642    allocation_enabled = true;
643    tcr->save_allocbase = (void *)VOID_ALLOCPTR;
644    callback_to_lisp(tcr, cmain, xp, xcf, -1, 0, 0, error_allocation_disabled);
645    xpGPR(xp,Isp) = save_sp;
646    xpGPR(xp,Ifp) = save_fp;
647    return true;
648  }
649  cur_allocptr = xpGPR(xp,Iallocptr);
650  allocptr_tag = fulltag_of(cur_allocptr);
651  if (allocptr_tag == fulltag_misc) {
652#ifdef X8664
653    disp = xpGPR(xp,Iimm1);
654#else
655    disp = xpGPR(xp,Iimm0);
656#endif
657  } else {
658    disp = dnode_size-fulltag_cons;
659  }
660  bytes_needed = disp+allocptr_tag;
661
662  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr+disp)));
663  if (allocate_object(xp, bytes_needed, disp, tcr, notify)) {
664    if (notify && *notify) {
665      xpPC(xp)+=2;
666      /* Finish the allocation: add a header if necessary,
667         clear the tag bits in tcr.save_allocptr. */
668      pc_luser_xp(xp,tcr,NULL);
669      callback_for_gc_notification(xp,tcr);
670    }
671    return true;
672  }
673 
674  lisp_allocation_failure(xp,tcr,bytes_needed);
675
676  return true;
677}
678
679 
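/* Call back into the Lisp code designated by the macptr CALLBACK_MACPTR.
   The MXCSR is forced to its default value (0x1f80: all SSE exceptions
   masked, round-to-nearest) for the duration of the callback and restored
   afterwards, and the exception lock is released while the Lisp code runs.
   On x8632 the dedicated save registers are spilled to the value stack
   around the call. */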
680int
681callback_to_lisp (TCR * tcr, LispObj callback_macptr, ExceptionInformation *xp,
682                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
683{
684  natural  callback_ptr;
685  int delta;
686  unsigned old_mxcsr = get_mxcsr();
687#ifdef X8632
688  natural saved_node_regs_mask = tcr->node_regs_mask;
689  natural saved_unboxed0 = tcr->unboxed0;
690  natural saved_unboxed1 = tcr->unboxed1;
691  LispObj *vsp = (LispObj *)xpGPR(xp, Isp);
692#endif
693  set_mxcsr(0x1f80);
694
695  /* Put the active stack pointers where .SPcallback expects them */
696#ifdef X8632
697  tcr->node_regs_mask = X8632_DEFAULT_NODE_REGS_MASK;
698
699  *--vsp = tcr->save0;
700  *--vsp = tcr->save1;
701  *--vsp = tcr->save2;
702  *--vsp = tcr->save3;
703  *--vsp = tcr->next_method_context;
704  xpGPR(xp, Isp) = (LispObj)vsp;
705#endif
706  tcr->save_vsp = (LispObj *)xpGPR(xp, Isp);
707  tcr->save_fp = (LispObj *)xpGPR(xp, Ifp);
708
709  /* Call back.  The caller of this function may have modified stack/frame
710     pointers (and at least should have called prepare_for_callback()).
711  */
712  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
713  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
714  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
715  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
716
717#ifdef X8632
718  tcr->next_method_context = *vsp++;
719  tcr->save3 = *vsp++;
720  tcr->save2 = *vsp++;
721  tcr->save1 = *vsp++;
722  tcr->save0 = *vsp++;
723  xpGPR(xp, Isp) = (LispObj)vsp;
724
725  tcr->node_regs_mask = saved_node_regs_mask;
726  tcr->unboxed0 = saved_unboxed0;
727  tcr->unboxed1 = saved_unboxed1;
728#endif
729  set_mxcsr(old_mxcsr);
730  return delta;
731}
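/* For reference, a sketch (illustrative only, excluded from the build) of
   how an MXCSR accessor pair like get_mxcsr()/set_mxcsr() can be written
   with the SSE control/status instructions; the kernel's actual definitions
   are provided elsewhere, and the sketch_* names here are hypothetical. */
#if 0
static unsigned
sketch_get_mxcsr(void)
{
  unsigned v;
  __asm__ volatile("stmxcsr %0" : "=m" (v));  /* store MXCSR to memory */
  return v;
}

static void
sketch_set_mxcsr(unsigned v)
{
  __asm__ volatile("ldmxcsr %0" : : "m" (v)); /* load MXCSR from memory */
}
#endif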
732
733void
734callback_for_interrupt(TCR *tcr, ExceptionInformation *xp)
735{
736  LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
737    word_beyond_vsp = save_vsp[-1],
738    save_fp = xpGPR(xp,Ifp),
739    xcf = create_exception_callback_frame(xp, tcr);
740  int save_errno = errno;
741
742  callback_to_lisp(tcr, nrs_CMAIN.vcell,xp, xcf, 0, 0, 0, 0);
743  xpGPR(xp,Ifp) = save_fp;
744  xpGPR(xp,Isp) = (LispObj)save_vsp;
745  save_vsp[-1] = word_beyond_vsp;
746  errno = save_errno;
747}
748
749Boolean
750handle_error(TCR *tcr, ExceptionInformation *xp)
751{
752  pc program_counter = (pc)xpPC(xp);
753  unsigned char op0 = program_counter[0], op1 = program_counter[1];
754  LispObj rpc, errdisp = nrs_ERRDISP.vcell,
755    save_vsp = xpGPR(xp,Isp), xcf0,
756    save_fp = xpGPR(xp,Ifp);
757  int skip;
758
759  if ((fulltag_of(errdisp) == fulltag_misc) &&
760      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
761
762    if ((op0 == 0xcd) && (op1 >= 0xc0) && (op1 <= 0xc2)) {
763      finish_function_entry(xp);
764    }
765    xcf0 = create_exception_callback_frame(xp, tcr);
766    skip = callback_to_lisp(tcr, errdisp, xp, xcf0, 0, 0, 0, 0);
767    if (skip == -1) {
768      xcf *xcf1 = (xcf *)xcf0;
769      LispObj container = xcf1->containing_uvector;
770     
771      rpc = xcf1->relative_pc >> fixnumshift;
772      if (container == lisp_nil) {
773        xpPC(xp) = rpc;
774      } else {
775        xpPC(xp) = (LispObj)(&(deref(container,
776#ifdef X8664
777                                     1
778#else
779                                     0
780#endif
781)))+rpc;
782      }
783       
784      skip = 0;
785    }
786    xpGPR(xp,Ifp) = save_fp;
787    xpGPR(xp,Isp) = save_vsp;
788    if ((op0 == 0xcd) && (op1 == 0xc7)) {
789      /* Continue after an undefined function call. The function
790         that had been undefined has already been called (in the
791         break loop), and a list of the values that it returned
792         is in the xp's %arg_z.  A function that returns those
793         values is in the xp's %fn; we just have to adjust the
794         stack (keeping the return address in the right place
795         and discarding any stack args/reserved stack frame),
796         then set nargs and the PC so that that function's
797         called when we resume.
798      */
799      LispObj *vsp =(LispObj *)save_vsp, ra = *vsp;
800      int nargs = xpGPR(xp, Inargs)>>fixnumshift;
801
802#ifdef X8664
803      if (nargs > 3) {
804        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 3)));
805        push_on_lisp_stack(xp,ra);
806      }
807#else
808      if (nargs > 2) {
809        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 2)));
810        push_on_lisp_stack(xp,ra);
811      }
812#endif
813      xpPC(xp) = xpGPR(xp,Ifn);
814      xpGPR(xp,Inargs) = 1<<fixnumshift;
815    } else {
816      xpPC(xp) += skip;
817    }
818    return true;
819  } else {
820    return false;
821  }
822}
823
824
825protection_handler
826* protection_handlers[] = {
827  do_spurious_wp_fault,
828  do_soft_stack_overflow,
829  do_soft_stack_overflow,
830  do_soft_stack_overflow,
831  do_hard_stack_overflow,   
832  do_hard_stack_overflow,
833  do_hard_stack_overflow,
834};
835
836
837/* Maybe this'll work someday.  We may have to do something to
838   make the thread look like it's not handling an exception */
839void
840reset_lisp_process(ExceptionInformation *xp)
841{
842}
843
844Boolean
845do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
846{
847  /*  reset_lisp_process(xp); */
848  Bug(xp, "Unrecoverable stack overflow.");
849  return false;
850}
851
852
853Boolean
854do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
855{
856
857  return false;
858}
859
860Boolean
861do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
862{
863  /* Trying to write into a guard page on the vstack or tstack.
864     Unprotect the soft guard area and call back to Lisp to signal an
865     error_stack_overflow condition.
866  */
867  lisp_protection_kind which = prot_area->why;
868  Boolean on_TSP = (which == kTSPsoftguard);
869  LispObj save_fp = xpGPR(xp,Ifp);
870  LispObj save_vsp = xpGPR(xp,Isp), 
871    xcf,
872    cmain = nrs_CMAIN.vcell;
873  area *a;
874  protected_area_ptr soft;
875  TCR *tcr = get_tcr(false);
876  int skip;
877
878  if ((fulltag_of(cmain) == fulltag_misc) &&
879      (header_subtag(header_of(cmain)) == subtag_macptr)) {
880    if (on_TSP) {
881      a = tcr->ts_area;
882    } else {
883      a = tcr->vs_area;
884    }
885    soft = a->softprot;
886    unprotect_area(soft);
887    xcf = create_exception_callback_frame(xp, tcr);
888    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, on_TSP, 0, 0);
889    xpGPR(xp,Ifp) = save_fp;
890    xpGPR(xp,Isp) = save_vsp;
891    xpPC(xp) += skip;
892    return true;
893  }
894  return false;
895}
896
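/* The x86 page-fault error code sets bit 1 for write accesses; each platform
   case below just extracts that bit from wherever its context/siginfo keeps
   it (Windows reports the access type separately in the exception record). */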
897Boolean
898is_write_fault(ExceptionInformation *xp, siginfo_t *info)
899{
900#ifdef DARWIN
901  return (UC_MCONTEXT(xp)->__es.__err & 0x2) != 0;
902#endif
903#if defined(LINUX) || defined(SOLARIS)
904  return (xpGPR(xp,REG_ERR) & 0x2) != 0;
905#endif
906#ifdef FREEBSD
907  return (xp->uc_mcontext.mc_err & 0x2) != 0;
908#endif
909#ifdef WINDOWS
910  return (info->ExceptionFlags == EXCEPTION_WRITE_FAULT);
911#endif
912}
913
914Boolean
915handle_fault(TCR *tcr, ExceptionInformation *xp, siginfo_t *info, int old_valence)
916{
917#ifdef FREEBSD
918#ifdef X8664
919  BytePtr addr = (BytePtr) xp->uc_mcontext.mc_addr;
920#else
921  BytePtr addr = (BytePtr) info->si_addr;
922#endif
923#else
924#ifdef WINDOWS
925  BytePtr addr = (BytePtr) info->ExceptionInformation[1];
926#else
927  BytePtr addr = (BytePtr) info->si_addr;
928#endif
929#endif
930  Boolean valid = IS_PAGE_FAULT(info,xp);
931
932  if (TCR_AUX(tcr)->safe_ref_address != NULL) {
933    xpGPR(xp,Iimm0) = 0;
934    xpPC(xp) = xpGPR(xp,Ira0);
935    TCR_AUX(tcr)->safe_ref_address = NULL;
936    return true;
937  }
938
939  if (valid) {
940    {
941      protected_area *a = find_protected_area(addr);
942      protection_handler *handler;
943     
944      if (a) {
945        handler = protection_handlers[a->why];
946        return handler(xp, a, addr);
947      }
948    }
949
950    if ((addr >= readonly_area->low) &&
951        (addr < readonly_area->active)) {
952      UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
953                      page_size);
954      return true;
955    }
956
957    {
958      area *a = area_containing(addr);
959
960      if (a && a->code == AREA_WATCHED && addr < a->high) {
961        /* caught a write to a watched object */
962        LispObj *p = (LispObj *)a->low;
963        LispObj node = *p;
964        unsigned tag_n = fulltag_of(node);
965        LispObj cmain = nrs_CMAIN.vcell;
966        LispObj obj;
967
968        if (immheader_tag_p(tag_n) || nodeheader_tag_p(tag_n))
969          obj = (LispObj)p + fulltag_misc;
970        else
971          obj = (LispObj)p + fulltag_cons;
972
973        if ((fulltag_of(cmain) == fulltag_misc) &&
974            (header_subtag(header_of(cmain)) == subtag_macptr)) {
975          LispObj save_vsp = xpGPR(xp, Isp);
976          LispObj save_fp = xpGPR(xp, Ifp);
977          LispObj xcf;
978          natural offset = (LispObj)addr - obj;
979          int skip;
980
981          push_on_lisp_stack(xp, obj);
982          xcf = create_exception_callback_frame(xp, tcr);
983
984          /* The magic 2 means this was a write to a watched object */
985          skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, 2,
986                                  (natural)addr, offset);
987          xpPC(xp) += skip;
988          xpGPR(xp, Ifp) = save_fp;
989          xpGPR(xp, Isp) = save_vsp;
990          return true;
991        }
992      }
993    }
994  }
995
996  if (old_valence == TCR_STATE_LISP) {
997    LispObj cmain = nrs_CMAIN.vcell,
998      xcf;
999    if ((fulltag_of(cmain) == fulltag_misc) &&
1000      (header_subtag(header_of(cmain)) == subtag_macptr)) {
1001      xcf = create_exception_callback_frame(xp, tcr);
1002      callback_to_lisp(tcr, cmain, xp, xcf, SIGBUS, valid ? is_write_fault(xp,info) : (natural)-1, valid ? (natural)addr : 0, 0);
1003    }
1004  }
1005  return false;
1006}
1007
1008Boolean
1009handle_foreign_fpe(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
1010{
1011#ifdef X8632
1012  return false;
1013#else
1014  int code;
1015
1016#ifdef WINDOWS
1017  if (info->ExceptionCode == EXCEPTION_INT_DIVIDE_BY_ZERO)
1018    return false;
1019#else
1020  if (info->si_code == FPE_INTDIV)
1021    return false;
1022#endif
1023
1024  /*
1025   * Cooperate with .SPffcall to avoid saving and restoring the MXCSR
1026   * around every foreign call.
1027   */
1028  if (! (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN_FPE))) {
1029    tcr->flags |= (1<<TCR_FLAG_BIT_FOREIGN_FPE);
1030    tcr->lisp_mxcsr = xpMXCSR(xp) & ~MXCSR_STATUS_MASK;
1031  }
1032  xpMXCSR(xp) &= ~MXCSR_STATUS_MASK;
1033  xpMXCSR(xp) |= MXCSR_CONTROL_MASK;
1034  return true;
1035#endif
1036}
1037
1038Boolean
1039handle_floating_point_exception(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
1040{
1041  int code,skip;
1042  LispObj  xcf, cmain = nrs_CMAIN.vcell,
1043    save_vsp = xpGPR(xp,Isp),
1044    save_fp = xpGPR(xp,Ifp);
1045#ifdef DARWIN /* bug in <= 10.5 */
1046  void decode_vector_fp_exception(siginfo_t *, uint32_t);
1047
1048  decode_vector_fp_exception(info, (uint32_t)(UC_MCONTEXT(xp)->__fs.__fpu_mxcsr));
1049#endif
1050
1051#ifdef WINDOWS
1052  code = info->ExceptionCode;
1053#else
1054  code = info->si_code;
1055#endif 
1056
1057  if ((fulltag_of(cmain) == fulltag_misc) &&
1058      (header_subtag(header_of(cmain)) == subtag_macptr)) {
1059    xcf = create_exception_callback_frame(xp, tcr);
1060    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGFPE, code, 0, 0);
1061    xpPC(xp) += skip;
1062    xpGPR(xp,Ifp) = save_fp;
1063    xpGPR(xp,Isp) = save_vsp;
1064    return true;
1065  } else {
1066    return false;
1067  }
1068}
1069
1070
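/* Grow the thread's TLB (the thread-local binding vector): the required
   index is popped off the Lisp stack, the new limit is rounded up to the
   next 4K boundary, and every newly added slot is filled with
   no_thread_local_binding_marker. */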
1071Boolean
1072extend_tcr_tlb(TCR *tcr, ExceptionInformation *xp)
1073{
1074  LispObj index, old_limit = tcr->tlb_limit, new_limit, new_bytes;
1075  LispObj *old_tlb = tcr->tlb_pointer, *new_tlb, *work, *tos;
1076
1077  tos = (LispObj*)(xpGPR(xp,Isp));
1078  index = *tos++;
1079  (xpGPR(xp,Isp))=(LispObj)tos;
1080 
1081  new_limit = align_to_power_of_2(index+1,12);
1082  new_bytes = new_limit-old_limit;
1083  new_tlb = realloc(old_tlb, new_limit);
1084
1085  if (new_tlb == NULL) {
1086    return false;
1087  }
1088  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1089
1090  while (new_bytes) {
1091    *work++ = no_thread_local_binding_marker;
1092    new_bytes -= sizeof(LispObj);
1093  }
1094  tcr->tlb_pointer = new_tlb;
1095  tcr->tlb_limit = new_limit;
1096  return true;
1097}
1098
1099
1100#if defined(FREEBSD) || defined(DARWIN)
1101static
1102char mxcsr_bit_to_fpe_code[] = {
1103  FPE_FLTINV,                   /* ie */
1104  0,                            /* de */
1105  FPE_FLTDIV,                   /* ze */
1106  FPE_FLTOVF,                   /* oe */
1107  FPE_FLTUND,                   /* ue */
1108  FPE_FLTRES                    /* pe */
1109};
1110
1111void
1112decode_vector_fp_exception(siginfo_t *info, uint32_t mxcsr)
1113{
1114  /* If the exception appears to be an XMM FP exception, try to
1115     determine what it was by looking at bits in the mxcsr.
1116  */
1117  int xbit, maskbit;
1118 
1119  for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
1120    if ((mxcsr & (1 << xbit)) &&
1121        !(mxcsr & (1 << maskbit))) {
1122      info->si_code = mxcsr_bit_to_fpe_code[xbit];
1123      return;
1124    }
1125  }
1126  /* Nothing enabled and set in the mxcsr, assume integer /0 */
1127  info->si_code = FPE_INTDIV;
1128}
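/* Example: if the mxcsr has the divide-by-zero flag (bit 2) set and the
   corresponding mask bit (MXCSR_IM_BIT + 2) clear, the loop above reports
   FPE_FLTDIV. */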
1129
1130#ifdef FREEBSD
1131void
1132freebsd_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
1133{
1134  if (info->si_code == 0) {
1135#ifdef X8664
1136    struct savefpu *fpu = (struct savefpu *) &(xp->uc_mcontext.mc_fpstate);
1137#else
1138    struct savexmm *fpu = (struct savexmm *) &(xp->uc_mcontext.mc_fpstate);
1139#endif
1140    uint32_t mxcsr = fpu->sv_env.en_mxcsr;
1141
1142    decode_vector_fp_exception(info, mxcsr);
1143  }
1144}
1145#endif
1146
1147#ifdef DARWIN
1148void
1149darwin_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
1150{
1151  if (info->si_code == EXC_I386_SSEEXTERR) {
1152    uint32_t mxcsr = UC_MCONTEXT(xp)->__fs.__fpu_mxcsr;
1153
1154    decode_vector_fp_exception(info, mxcsr);
1155  }
1156}
1157
1158#endif
1159
1160#endif
1161
1162void
1163get_lisp_string(LispObj lisp_string, char *c_string, natural max)
1164{
1165  lisp_char_code *src = (lisp_char_code *)  (ptr_from_lispobj(lisp_string + misc_data_offset));
1166  natural i, n = header_element_count(header_of(lisp_string));
1167
1168  if (n > max) {
1169    n = max;
1170  }
1171
1172  for (i = 0; i < n; i++) {
1173    c_string[i] = 0xff & (src[i]);
1174  }
1175  c_string[n] = 0;
1176}
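/* Note: only the low 8 bits of each character code are copied, and the
   result is always NUL-terminated, so C_STRING must have room for MAX + 1
   bytes (the callers below pass sizeof(buffer) - 1). */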
1177
1178Boolean handle_watch_trap(ExceptionInformation *xp, TCR *tcr);
1179
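/* Main exception dispatcher.  Lisp UUOs arrive as two-byte "int n"
   instructions: the INTN opcode byte followed by an immediate that selects
   the trap (UUO_ALLOC_TRAP, UUO_GC_TRAP, ...), which is why the handled
   cases below resume by advancing the PC by 2.  The XUUOs that arrive as
   SIGILL are three bytes long and advance the PC by 3. */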
1180Boolean
1181handle_exception(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1182{
1183  pc program_counter = (pc)xpPC(context);
1184
1185  if (old_valence != TCR_STATE_LISP) {
1186    if (old_valence == TCR_STATE_FOREIGN && signum == SIGFPE) {
1187      return handle_foreign_fpe(tcr, context, info);
1188    } else {
1189      return false;
1190    }
1191  }
1192
1193  switch (signum) {
1194  case SIGNUM_FOR_INTN_TRAP:
1195    if (IS_MAYBE_INT_TRAP(info,context)) {
1196      /* Something mapped to SIGSEGV/SIGBUS that has nothing to do with
1197         a memory fault.  On x86, an "int n" instruction that's
1198         not otherwise implemented causes a "protection fault".  Of
1199         course that has nothing to do with accessing protected
1200         memory, but most Unices act as if it did. */
1201      if ((program_counter != NULL) &&
1202          (*program_counter == INTN_OPCODE)) {
1203        program_counter++;
1204        switch (*program_counter) {
1205        case UUO_ALLOC_TRAP:
1206          {
1207            Boolean did_notify = false,
1208              allocation_disabled = !allocation_enabled,
1209              *notify_ptr = &did_notify;
1210            if (did_gc_notification_since_last_full_gc) {
1211              notify_ptr = NULL;
1212            }
1213            if (handle_alloc_trap(context, tcr, notify_ptr)) {
1214              if (! did_notify) {
1215                if (! allocation_disabled) {
1216                  xpPC(context) += 2;   /* we might have GCed. */
1217                }
1218              }
1219              return true;
1220            }
1221          }
1222          break;
1223        case UUO_GC_TRAP:
1224          if (handle_gc_trap(context, tcr)) {
1225            xpPC(context) += 2;
1226            return true;
1227          }
1228          break;
1229        case UUO_WATCH_TRAP:
1230          /* add or remove watched object */
1231          if (handle_watch_trap(context, tcr)) {
1232            xpPC(context) += 2;
1233            return true;
1234          }
1235          break;
1236        case UUO_DEBUG_TRAP:
1237          xpPC(context) = (natural) (program_counter+1);
1238          lisp_Debugger(context, info, debug_entry_dbg, false, "Lisp Breakpoint");
1239          return true;
1240           
1241        case UUO_DEBUG_TRAP_WITH_STRING:
1242          xpPC(context) = (natural) (program_counter+1);
1243          {
1244            char msg[512];
1245
1246            get_lisp_string(xpGPR(context,Iarg_z),msg, sizeof(msg)-1);
1247            lisp_Debugger(context, info, debug_entry_dbg, false, msg);
1248          }
1249          return true;
1250         
1251        default:
1252          return handle_error(tcr, context);
1253        }
1254      } else {
1255        return false;
1256      }
1257
1258    } else {
1259      return handle_fault(tcr, context, info, old_valence);
1260    }
1261    break;
1262
1263  case SIGNAL_FOR_PROCESS_INTERRUPT:
1264    tcr->interrupt_pending = 0;
1265    callback_for_interrupt(tcr, context);
1266    return true;
1267    break;
1268
1269
1270  case SIGILL:
1271    if ((program_counter[0] == XUUO_OPCODE_0) &&
1272        (program_counter[1] == XUUO_OPCODE_1)) {
1273      TCR *target = (TCR *)xpGPR(context, Iarg_z);
1274
1275      switch (program_counter[2]) {
1276      case XUUO_TLB_TOO_SMALL:
1277        if (extend_tcr_tlb(tcr,context)) {
1278          xpPC(context)+=3;
1279          return true;
1280        }
1281        break;
1282       
1283      case XUUO_INTERRUPT_NOW:
1284        callback_for_interrupt(tcr,context);
1285        xpPC(context)+=3;
1286        return true;
1287
1288      case XUUO_SUSPEND_NOW:
1289        xpPC(context)+=3;
1290        return true;
1291
1292      case XUUO_INTERRUPT:
1293        raise_thread_interrupt(target);
1294        xpPC(context)+=3;
1295        return true;
1296
1297      case XUUO_SUSPEND:
1298        xpGPR(context,Iimm0) = (LispObj) lisp_suspend_tcr(target);
1299        xpPC(context)+=3;
1300        return true;
1301
1302      case XUUO_SUSPEND_ALL:
1303        lisp_suspend_other_threads();
1304        xpPC(context)+=3;
1305        return true;
1306
1307
1308      case XUUO_RESUME:
1309        xpGPR(context,Iimm0) = (LispObj) lisp_resume_tcr(target);
1310        xpPC(context)+=3;
1311        return true;
1312       
1313      case XUUO_RESUME_ALL:
1314        lisp_resume_other_threads();
1315        xpPC(context)+=3;
1316        return true;
1317       
1318      case XUUO_KILL:
1319        xpGPR(context,Iimm0) = (LispObj)kill_tcr(target);
1320        xpPC(context)+=3;
1321        return true;
1322
1323      case XUUO_ALLOCATE_LIST:
1324        allocate_list(context,tcr);
1325        xpPC(context)+=3;
1326        return true;
1327
1328      default:
1329        return false;
1330      }
1331    } else {
1332      return false;
1333    }
1334    break;
1335   
1336  case SIGFPE:
1337#ifdef FREEBSD
1338    /* As of 6.1, FreeBSD/AMD64 doesn't seem real comfortable
1339       with this newfangled XMM business (and therefore info->si_code
1340       is often 0 on an XMM FP exception).
1341       Try to figure out what really happened by decoding mxcsr
1342       bits.
1343    */
1344    freebsd_decode_vector_fp_exception(info,context);
1345#endif
1346#ifdef DARWIN
1347    /* Same general problem with Darwin as of 8.7.2 */
1348    darwin_decode_vector_fp_exception(info,context);
1349#endif
1350
1351    return handle_floating_point_exception(tcr, context, info);
1352
1353#if SIGBUS != SIGNUM_FOR_INTN_TRAP
1354  case SIGBUS:
1355    return handle_fault(tcr, context, info, old_valence);
1356#endif
1357   
1358#if SIGSEGV != SIGNUM_FOR_INTN_TRAP
1359  case SIGSEGV:
1360    return handle_fault(tcr, context, info, old_valence);
1361#endif   
1362   
1363  default:
1364    return false;
1365  }
1366  return false;
1367}
1368
1369
1370/*
1371   Current thread has all signals masked.  Before unmasking them,
1372   make it appear that the current thread has been suspended.
1373   (This is to handle the case where another thread is trying
1374   to GC before this thread is able to seize the exception lock.)
1375*/
1376int
1377prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1378{
1379  int old_valence = tcr->valence;
1380
1381  tcr->pending_exception_context = context;
1382  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1383
1384#ifdef WINDOWS
1385  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1386    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1387    SEM_RAISE(TCR_AUX(tcr)->suspend);
1388    SEM_WAIT_FOREVER(TCR_AUX(tcr)->resume);
1389  }
1390#else
1391  ALLOW_EXCEPTIONS(context);
1392#endif
1393  return old_valence;
1394} 
1395
1396void
1397wait_for_exception_lock_in_handler(TCR *tcr, 
1398                                   ExceptionInformation *context,
1399                                   xframe_list *xf)
1400{
1401
1402  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1403#if 0
1404  fprintf(dbgout, "0x" LISP " has exception lock\n", tcr);
1405#endif
1406  xf->curr = context;
1407#ifdef X8632
1408  xf->node_regs_mask = tcr->node_regs_mask;
1409#endif
1410  xf->prev = tcr->xframe;
1411  tcr->xframe =  xf;
1412  tcr->pending_exception_context = NULL;
1413  tcr->valence = TCR_STATE_FOREIGN; 
1414}
1415
1416void
1417unlock_exception_lock_in_handler(TCR *tcr)
1418{
1419  tcr->pending_exception_context = tcr->xframe->curr;
1420#ifdef X8632
1421  tcr->node_regs_mask = tcr->xframe->node_regs_mask;
1422#endif
1423  tcr->xframe = tcr->xframe->prev;
1424  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1425  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1426#if 0
1427  fprintf(dbgout, "0x" LISP " released exception lock\n", tcr);
1428#endif
1429}
1430
1431/*
1432   If an interrupt is pending on exception exit, try to ensure
1433   that the thread sees it as soon as it's able to run.
1434*/
1435#ifdef WINDOWS
1436void
1437raise_pending_interrupt(TCR *tcr)
1438{
1439}
1440void
1441exit_signal_handler(TCR *tcr, int old_valence)
1442{
1443}
1444void
1445signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1446{
1447}
1448#else
1449void
1450raise_pending_interrupt(TCR *tcr)
1451{
1452  if ((TCR_INTERRUPT_LEVEL(tcr) >= 0) &&
1453      (tcr->interrupt_pending)) {
1454    pthread_kill((pthread_t)(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1455  }
1456}
1457
1458void
1459exit_signal_handler(TCR *tcr, int old_valence)
1460{
1461  sigset_t mask;
1462  sigfillset(&mask);
1463#ifdef FREEBSD
1464  sigdelset(&mask,SIGTRAP);
1465#endif
1466 
1467  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1468  tcr->valence = old_valence;
1469  tcr->pending_exception_context = NULL;
1470}
1471
1472void
1473signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context
1474#ifdef DARWIN
1475               , TCR *tcr, int old_valence
1476#endif
1477)
1478{
1479  xframe_list xframe_link;
1480#ifndef DARWIN
1481  TCR *tcr = get_tcr(false);
1482
1483  int old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1484#endif
1485  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1486    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1487    pthread_kill(pthread_self(), thread_suspend_signal);
1488  }
1489  wait_for_exception_lock_in_handler(tcr,context, &xframe_link);
1490
1491
1492  if (! handle_exception(signum, info, context, tcr, old_valence)) {
1493    char msg[512];
1494    Boolean foreign = (old_valence != TCR_STATE_LISP);
1495
1496    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x" LISP ", context->regs at #x" LISP "", signum, xpPC(context), (natural)xpGPRvector(context));
1497   
1498    if (lisp_Debugger(context, info, signum,  foreign, msg)) {
1499      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1500    }
1501  }
1502  unlock_exception_lock_in_handler(tcr);
1503#ifndef DARWIN_USE_PSEUDO_SIGRETURN
1504  exit_signal_handler(tcr, old_valence);
1505#endif
1506  /* raise_pending_interrupt(tcr); */
1507#ifndef DARWIN_USE_PSEUDO_SIGRETURN
1508  SIGRETURN(context);
1509#endif
1510}
1511#endif
1512
1513
1514
1515
1516#ifdef LINUX
1517
1518/* Some versions of <bits/sigcontext.h> are too old to
1519define some magic numbers that we need.  Define those things here, prefixed
1520with "magic__" to avoid name conflicts.  What a waste of time!
1521*/
1522#define magic__FP_XSTATE_MAGIC1 0x46505853U
1523struct magic__fpx_sw_bytes
1524{
1525  __uint32_t magic1;
1526  __uint32_t extended_size;
1527  __uint64_t xstate_bv;
1528  __uint32_t xstate_size;
1529  __uint32_t padding[7];
1530};
1531
1532
1533
1534typedef struct _fpstate *FPREGS;
1535#define FPREGSsize_in_bytes(f) linux_fpstate_size_in_bytes(f)
1536
1537size_t
1538linux_fpstate_size_in_bytes(FPREGS state)
1539{
1540  if (WORD_SIZE == 64){
1541    /* see <asm/sigcontext.h> It would be way too useful if we could
1542       include that file without conflicting with <bits/sigcontext.h> */
1543    /* I didn't make this stuff up */
1544    struct magic__fpx_sw_bytes * sw = (struct magic__fpx_sw_bytes *) (((char *)state)+464);
1545    if (sw->magic1 == magic__FP_XSTATE_MAGIC1) {
1546      return sw->extended_size;
1547    }
1548
1549  }
1550  return (sizeof (*state));
1551}
1552
1553LispObj *
1554copy_fpregs(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
1555{
1556  FPREGS src = (FPREGS)(xp->uc_mcontext.fpregs), dest;
1557  size_t nbytes = FPREGSsize_in_bytes(src);
1558  if (src) {
1559    BytePtr bc = (BytePtr)current - nbytes;
1560    dest = (FPREGS) (truncate_to_power_of_2(bc,6));
1561    memcpy(dest,src,nbytes);
1562    *destptr = dest;
1563    current = (LispObj *)dest;
1564  }
1565  return current;
1566}
1567
1568#endif
1569
1570
1571#ifdef FREEBSD
1572typedef void *FPREGS;
1573
1574
1575LispObj *
1576copy_avx(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
1577{
1578  natural sp;
1579
1580  *destptr = (FPREGS)AVX_CONTEXT_PTR(xp);
1581
1582  if (AVX_CONTEXT_PRESENT(xp)) {
1583    sp = (natural)current;
1584    sp -= AVX_CONTEXT_SIZE(xp);
1585    sp = truncate_to_power_of_2(sp,6);
1586    memcpy((void *)sp,(void *)AVX_CONTEXT_PTR(xp),AVX_CONTEXT_SIZE(xp));
1587    current = (LispObj *)sp;
1588    *destptr = (FPREGS)current;
1589  }
1590  return current;
1591}
1592#endif
1593
1594#ifdef DARWIN
1595LispObj *
1596copy_darwin_mcontext(MCONTEXT_T context, 
1597                     LispObj *current, 
1598                     MCONTEXT_T *out)
1599{
1600  MCONTEXT_T dest = ((MCONTEXT_T)current)-1;
1601  dest = (MCONTEXT_T) (((LispObj)dest) & ~15);
1602
1603  *dest = *context;
1604  *out = dest;
1605  return (LispObj *)dest;
1606}
1607#endif
1608
1609LispObj *
1610copy_siginfo(siginfo_t *info, LispObj *current)
1611{
1612  siginfo_t *dest = ((siginfo_t *)current) - 1;
1613#if !defined(LINUX) || !defined(X8632)
1614  dest = (siginfo_t *) (((LispObj)dest)&~15);
1615#endif
1616  *dest = *info;
1617  return (LispObj *)dest;
1618}
1619
1620#ifdef LINUX
1621typedef FPREGS copy_ucontext_last_arg_t;
1622#else
1623typedef void * copy_ucontext_last_arg_t;
1624#endif
1625
1626#ifndef WINDOWS
1627LispObj *
1628copy_ucontext(ExceptionInformation *context, LispObj *current, copy_ucontext_last_arg_t fp)
1629{
1630  ExceptionInformation *dest = ((ExceptionInformation *)current)-1;
1631#if !defined(LINUX) || !defined(X8632)
1632  dest = (ExceptionInformation *) (((LispObj)dest) & ~15);
1633#endif
1634
1635  *dest = *context;
1636  /* Fix it up a little; where's the signal mask allocated, if indeed
1637     it is "allocated" ? */
1638#ifdef LINUX
1639  dest->uc_mcontext.fpregs = (fpregset_t)fp;
1640#endif
1641#ifdef FREEBSD
1642  if (AVX_CONTEXT_PRESENT(context)) {
1643    AVX_CONTEXT_PTR(context) = (natural)fp;
1644  }
1645#endif
1646  dest->uc_stack.ss_sp = 0;
1647  dest->uc_stack.ss_size = 0;
1648  dest->uc_stack.ss_flags = 0;
1649  dest->uc_link = NULL;
1650  return (LispObj *)dest;
1651}
1652#endif
1653
1654
1655LispObj *
1656tcr_frame_ptr(TCR *tcr)
1657{
1658  ExceptionInformation *xp;
1659  LispObj *fp;
1660
1661  if (tcr->pending_exception_context)
1662    xp = tcr->pending_exception_context;
1663  else if (tcr->valence == TCR_STATE_LISP) {
1664    xp = TCR_AUX(tcr)->suspend_context;
1665  } else {
1666    xp = NULL;
1667  }
1668  if (xp) {
1669    fp = (LispObj *)xpGPR(xp, Ifp);
1670  } else {
1671    fp = tcr->save_fp;
1672  }
1673  return fp;
1674}
1675
1676
1677LispObj *
1678find_foreign_rsp(LispObj rsp, area *foreign_area, TCR *tcr)
1679{
1680
1681  if (((BytePtr)rsp < foreign_area->low) ||
1682      ((BytePtr)rsp > foreign_area->high)) {
1683    rsp = (LispObj)(tcr->foreign_sp);
1684  }
1685  return (LispObj *) (((rsp-128) & ~15));
1686}
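/* The 128-byte drop above presumably keeps the new foreign stack pointer
   clear of the x86-64 ABI's "red zone" below the interrupted %rsp (harmless
   extra slack on x8632), and the mask keeps it 16-byte aligned. */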
1687
1688#ifdef X8632
1689#ifdef LINUX
1690/* This is here for debugging.  On entry to a signal handler that
1691   receives info and context arguments, the stack should look exactly
1692   like this.  The "pretcode field" of the structure is the address
1693   of code that does an rt_sigreturn syscall, and rt_sigreturn expects
1694   %esp at the time of that syscall to be pointing just past the
1695   pretcode field.
1696   handle_signal_on_foreign_stack() and helpers have to be very
1697   careful to duplicate this "structure" exactly.
1698   Note that on x8664 Linux, rt_sigreturn expects a ucontext to
1699   be on top of the stack (with a siginfo_t underneath it.)
1700   It sort of half-works to do sigreturn via setcontext() on
1701   x8632 Linux, but (a) it may not be available on some distributions
1702   and (b) even a relatively modern version of it uses "fldenv" to
1703   restore FP context, and "fldenv" isn't nearly good enough.
1704*/
1705
1706struct rt_sigframe {
1707        char *pretcode;
1708        int sig;
1709        siginfo_t  *pinfo;
1710        void  *puc;
1711        siginfo_t info;
1712        struct ucontext uc;
1713        struct _fpstate fpstate;
1714        char retcode[8];
1715};
1716struct rt_sigframe *rtsf = 0;
1717
1718#endif
1719#endif
1720
1721
1722#ifndef WINDOWS
1723/* x8632 Linux requires that the stack-allocated siginfo is nearer
1724   the top of stack than the stack-allocated ucontext.  If other
1725   platforms care, they expect the ucontext to be nearer the top
1726   of stack.
1727*/
1728
1729#if defined(LINUX) && defined(X8632)
1730#define UCONTEXT_ON_TOP_OF_STACK 0
1731#else
1732#define UCONTEXT_ON_TOP_OF_STACK 1
1733#endif
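/* Copy everything a signal handler (and the eventual sigreturn) needs onto
   the foreign stack: first the FP/AVX or mcontext state, then the siginfo
   and ucontext in the platform-dependent order chosen above, then the
   return address; finally switch stacks and run HANDLER there. */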
1734void
1735handle_signal_on_foreign_stack(TCR *tcr,
1736                               void *handler, 
1737                               int signum, 
1738                               siginfo_t *info, 
1739                               ExceptionInformation *context,
1740                               LispObj return_address
1741                               )
1742{
1743#ifdef LINUX
1744  FPREGS fpregs = NULL;
1745#else
1746  void *fpregs = NULL;
1747#endif
1748#ifdef DARWIN
1749  MCONTEXT_T mcontextp = NULL;
1750#endif
1751  siginfo_t *info_copy = NULL;
1752  ExceptionInformation *xp = NULL;
1753  LispObj *foreign_rsp = find_foreign_rsp(xpGPR(context,Isp), tcr->cs_area, tcr);
1754
1755#ifdef LINUX
1756  foreign_rsp = copy_fpregs(context, foreign_rsp, &fpregs);
1757#endif
1758#ifdef FREEBSD
1759  foreign_rsp = copy_avx(context, foreign_rsp, &fpregs);
1760#endif
1761#ifdef DARWIN
1762  foreign_rsp = copy_darwin_mcontext(UC_MCONTEXT(context), foreign_rsp, &mcontextp);
1763#endif
1764#if UCONTEXT_ON_TOP_OF_STACK
1765  /* copy info first */
1766  foreign_rsp = copy_siginfo(info, foreign_rsp);
1767  info_copy = (siginfo_t *)foreign_rsp;
1768  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
1769  xp = (ExceptionInformation *)foreign_rsp;
1770#else
1771  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
1772  xp = (ExceptionInformation *)foreign_rsp;
1773  foreign_rsp = copy_siginfo(info, foreign_rsp);
1774  info_copy = (siginfo_t *)foreign_rsp;
1775#endif
1776#ifdef DARWIN
1777  UC_MCONTEXT(xp) = mcontextp;
1778#endif
1779  *--foreign_rsp = return_address;
1780  switch_to_foreign_stack(foreign_rsp,handler,signum,info_copy,xp);
1781}
1782#endif
1783
1784
1785#ifndef WINDOWS
1786#ifndef USE_SIGALTSTACK
1787void
1788arbstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation *context)
1789{
1790  TCR *tcr = get_interrupt_tcr(false);
1791#if 1
1792  if (tcr->valence != TCR_STATE_LISP) {
1793    lisp_Debugger(context, info, signum, true, "exception in foreign context");
1794  }
1795#endif
1796  {
1797    area *vs = tcr->vs_area;
1798    BytePtr current_sp = (BytePtr) current_stack_pointer();
1799
1800
1801    if ((current_sp >= vs->low) &&
1802        (current_sp < vs->high)) {
1803      handle_signal_on_foreign_stack(tcr,
1804                                     signal_handler,
1805                                     signum,
1806                                     info,
1807                                     context,
1808                                     (LispObj)__builtin_return_address(0)
1809
1810                                     );
1811    } else {
1812      signal_handler(signum, info, context, tcr, 0);
1813    }
1814  }
1815}
1816
1817#else
1818void
1819altstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
1820{
1821  TCR* tcr = get_tcr(true);
1822  Boolean do_stack_switch = false;
1823  stack_t ss;
1824
1825  if ((tcr->valence != TCR_STATE_LISP) &&
1826      (tcr->safe_ref_address) &&
1827      ((signum == SIGBUS) || (signum == SIGSEGV))) {
1828    extern opcode ffcall_return;
1829    xpPC(context) = (natural)&ffcall_return;
1830    xpGPR(context,Iimm0) = 0;
1831    xpGPR(context,Isp) = (natural)(tcr->foreign_sp);
1832    return;
1833  }
1834
1835
1836
1837
1838
1839#if WORD_SIZE==64
1840  if ((signum == SIGFPE) && (tcr->valence != TCR_STATE_LISP)) {
1841    if (handle_foreign_fpe(tcr,context,info)) {
1842      return;
1843    }
1844  }
1845#endif
1846     
1847  /* Because of signal chaining - and the possibility that libraries
1848     that use it ignore sigaltstack-related issues - we have to check
1849     to see if we're actually on the altstack.
1850
1851     When OpenJDK VMs overwrite preinstalled signal handlers (that're
1852     there for a reason ...), they're also casual about SA_RESTART.
1853     We care about SA_RESTART (mostly) in the PROCESS-INTERRUPT case,
1854     and whether a JVM steals the signal used for PROCESS-INTERRUPT
1855     is platform-dependent.  On those platforms where the same signal
1856     is used, we should strongly consider trying to use another one.
1857  */
1858  sigaltstack(NULL, &ss);
1859  if (ss.ss_flags == SS_ONSTACK) {
1860    do_stack_switch = true;
1861  } else {
1862    area *vs = tcr->vs_area;
1863    BytePtr current_sp = (BytePtr) current_stack_pointer();
1864
1865    if ((current_sp >= vs->low) &&
1866        (current_sp < vs->high)) {
1867      do_stack_switch = true;
1868    }
1869  }
1870  if (do_stack_switch) {
1871    handle_signal_on_foreign_stack(tcr,signal_handler,signum,info,context,(LispObj)SIGRETURN_ADDRESS());
1872  } else {
1873    signal_handler(signum,info,context);
1874  }
1875}
1876#endif
1877#endif
1878
1879Boolean
1880stack_pointer_on_vstack_p(LispObj stack_pointer, TCR *tcr)
1881{
1882  area *a = tcr->vs_area;
1883 
1884  return (((BytePtr)stack_pointer <= a->high) &&
1885          ((BytePtr)stack_pointer > a->low));
1886}
1887
1888
1889#ifdef WINDOWS
1890extern DWORD restore_windows_context(ExceptionInformation *, TCR *, int);
1891#endif
1892
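/* Handle a PROCESS-INTERRUPT signal.  If the thread can't take an
   interrupt right now (interrupts disabled, not running lisp code,
   unwinding, or with its stack/frame pointers off the value stack),
   just record a pending interrupt in tcr->interrupt_pending.
   Otherwise - provided the nrs_CMAIN callback is installed - nudge the
   thread out of any partial allocation (pc_luser_xp), take the
   exception lock, and dispatch through handle_exception(), restoring
   the temp-stack and allocation state afterwards. */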
1893void
1894interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1895{
1896  TCR *tcr = get_interrupt_tcr(false);
1897  int old_valence = tcr->valence;
1898
1899  if (tcr) {
1900    if ((TCR_INTERRUPT_LEVEL(tcr) < 0) ||
1901        (tcr->valence != TCR_STATE_LISP) ||
1902        (tcr->unwinding != 0) ||
1903        ! stack_pointer_on_vstack_p(xpGPR(context,Isp), tcr) ||
1904        ! stack_pointer_on_vstack_p(xpGPR(context,Ifp), tcr)) {
1905      tcr->interrupt_pending = (((natural) 1)<< (nbits_in_word - ((natural)1)));
1906    } else {
1907      LispObj cmain = nrs_CMAIN.vcell;
1908     
1909      if ((fulltag_of(cmain) == fulltag_misc) &&
1910          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1911        /*
1912           This thread can (allegedly) take an interrupt now.
1913        */
1914
1915        xframe_list xframe_link;
1916        signed_natural alloc_displacement = 0;
1917        LispObj
1918          *next_tsp = tcr->next_tsp,
1919          *save_tsp = tcr->save_tsp,
1920          *p,
1921          q;
1922        natural old_foreign_exception = tcr->flags & (1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
1923
1924        tcr->flags &= ~(1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
1925           
1926        if (next_tsp != save_tsp) {
1927          tcr->next_tsp = save_tsp;
1928        } else {
1929          next_tsp = NULL;
1930        }
1931        /* have to do this before allowing interrupts */
1932        pc_luser_xp(context, tcr, &alloc_displacement);
1933        old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1934        wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1935        handle_exception(signum, info, context, tcr, old_valence);
1936        if (alloc_displacement) {
1937          tcr->save_allocptr -= alloc_displacement;
1938        }
1939        if (next_tsp) {
1940          tcr->next_tsp = next_tsp;
1941          p = next_tsp;
1942          while (p != save_tsp) {
1943            *p++ = 0;
1944          }
1945          q = (LispObj)save_tsp;
1946          *next_tsp = q;
1947        }
1948        tcr->flags |= old_foreign_exception;
1949        unlock_exception_lock_in_handler(tcr);
1950#ifndef WINDOWS
1951        exit_signal_handler(tcr, old_valence);
1952#endif
1953      }
1954    }
1955  }
1956#ifdef WINDOWS
1957  restore_windows_context(context,tcr,old_valence);
1958#else
1959  SIGRETURN(context);
1960#endif
1961}
1962
1963
1964#ifndef WINDOWS
1965#ifndef USE_SIGALTSTACK
1966void
1967arbstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1968{
1969  TCR *tcr = get_interrupt_tcr(false);
1970  area *vs = tcr->vs_area;
1971  BytePtr current_sp = (BytePtr) current_stack_pointer();
1972
1973  if ((current_sp >= vs->low) &&
1974      (current_sp < vs->high)) {
1975    handle_signal_on_foreign_stack(tcr,
1976                                   interrupt_handler,
1977                                   signum,
1978                                   info,
1979                                   context,
1980                                   (LispObj)__builtin_return_address(0)
1981                                   );
1982  } else {
1983    /* If we're not on the value stack, we pretty much have to be on
1984       the C stack.  Just run the handler. */
1985    interrupt_handler(signum, info, context);
1986  }
1987}
1988
1989#else /* altstack works */
1990
1991/*
1992   There aren't likely to be any JVM-related signal-chaining issues here, since
1993   on platforms where that could be an issue we use either an RT signal
1994   or an unused synchronous hardware signal to raise interrupts.
1995*/
1996void
1997altstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1998{
1999  TCR *tcr = get_interrupt_tcr(false);
2000  handle_signal_on_foreign_stack(tcr,interrupt_handler,signum,info,context,(LispObj)__builtin_return_address(0)
2001                                 );
2002}
2003
2004#endif
2005#endif
2006
2007#ifndef WINDOWS
2008void
2009install_signal_handler(int signo, void *handler, unsigned flags)
2010{
2011  struct sigaction sa;
2012  int err;
2013 
2014  sa.sa_sigaction = (void *)handler;
2015  sigfillset(&sa.sa_mask);
2016#ifdef FREEBSD
2017  /* Strange FreeBSD behavior wrt synchronous signals */
2018  sigdelset(&sa.sa_mask,SIGTRAP);  /* let GDB work */
2019#endif
2020  sa.sa_flags = SA_SIGINFO;
2021
2022#ifdef USE_SIGALTSTACK
2023  if (flags & ON_ALTSTACK)
2024    sa.sa_flags |= SA_ONSTACK;
2025#endif
2026  if (flags & RESTART_SYSCALLS)
2027    sa.sa_flags |= SA_RESTART;
2028  if (flags & RESERVE_FOR_LISP) {
2029    extern sigset_t user_signals_reserved;
2030    sigaddset(&user_signals_reserved, signo);
2031  }
2032
2033  err = sigaction(signo, &sa, NULL);
2034  if (err) {
2035    perror("sigaction");
2036    exit(1);
2037  }
2038}
2039#endif
2040
2041#ifdef WINDOWS
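/* Map console control events onto lisp_global(INTFLAG): Ctrl-C stores
   fixnum 1, Ctrl-Break stores fixnum 2. */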
2042BOOL
2043CALLBACK ControlEventHandler(DWORD event)
2044{
2045  switch(event) {
2046  case CTRL_C_EVENT:
2047    lisp_global(INTFLAG) = (1 << fixnumshift);
2048    return TRUE;
2049    break;
2050  case CTRL_BREAK_EVENT:
2051    lisp_global(INTFLAG) = (2 << fixnumshift);
2052    return TRUE;
2053    break;
2054  default:
2055    return FALSE;
2056  }
2057}
2058
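/* MXCSR keeps the SSE exception status flags in bits 0..5 (IE, DE, ZE,
   OE, UE, PE) and the corresponding mask bits in bits 7..12; the table
   below maps each status bit to the Windows exception code reported
   when that exception is raised and unmasked. */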
2059static
2060DWORD mxcsr_bit_to_fpe_code[] = {
2061  EXCEPTION_FLT_INVALID_OPERATION, /* ie */
2062  0,                            /* de */
2063  EXCEPTION_FLT_DIVIDE_BY_ZERO, /* ze */
2064  EXCEPTION_FLT_OVERFLOW,       /* oe */
2065  EXCEPTION_FLT_UNDERFLOW,      /* ue */
2066  EXCEPTION_FLT_INEXACT_RESULT  /* pe */
2067};
2068
2069#ifndef STATUS_FLOAT_MULTIPLE_FAULTS
2070#define STATUS_FLOAT_MULTIPLE_FAULTS 0xc00002b4
2071#endif
2072
2073#ifndef STATUS_FLOAT_MULTIPLE_TRAPS
2074#define  STATUS_FLOAT_MULTIPLE_TRAPS 0xc00002b5
2075#endif
2076
2077int
2078map_windows_exception_code_to_posix_signal(DWORD code, siginfo_t *info, ExceptionInformation *context)
2079{
2080  switch (code) {
2081#ifdef WIN_32
2082  case STATUS_FLOAT_MULTIPLE_FAULTS:
2083  case STATUS_FLOAT_MULTIPLE_TRAPS:
2084    {
2085      int xbit, maskbit;
2086      DWORD mxcsr = *(xpMXCSRptr(context));
2087
2088      for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
2089        if ((mxcsr & (1 << xbit)) &&
2090            !(mxcsr & (1 << maskbit))) {
2091          info->ExceptionCode = mxcsr_bit_to_fpe_code[xbit];
2092          break;
2093        }
2094      }
2095    }
2096    return SIGFPE;
2097#endif
2098     
2099  case EXCEPTION_ACCESS_VIOLATION:
2100    return SIGSEGV;
2101  case EXCEPTION_FLT_DENORMAL_OPERAND:
2102  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2103  case EXCEPTION_FLT_INEXACT_RESULT:
2104  case EXCEPTION_FLT_INVALID_OPERATION:
2105  case EXCEPTION_FLT_OVERFLOW:
2106  case EXCEPTION_FLT_STACK_CHECK:
2107  case EXCEPTION_FLT_UNDERFLOW:
2108  case EXCEPTION_INT_DIVIDE_BY_ZERO:
2109  case EXCEPTION_INT_OVERFLOW:
2110    return SIGFPE;
2111  case EXCEPTION_PRIV_INSTRUCTION:
2112  case EXCEPTION_ILLEGAL_INSTRUCTION:
2113    return SIGILL;
2114  case EXCEPTION_IN_PAGE_ERROR:
2115  case STATUS_GUARD_PAGE_VIOLATION:
2116    return SIGBUS;
2117  default:
2118    return -1;
2119  }
2120}
2121
2122
2123LONG
2124windows_exception_handler(EXCEPTION_POINTERS *exception_pointers, TCR *tcr, int signal_number)
2125{
2126  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
2127  int old_valence;
2128  ExceptionInformation *context = exception_pointers->ContextRecord;
2129  siginfo_t *info = exception_pointers->ExceptionRecord;
2130  xframe_list xframes;
2131
2132  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2133  wait_for_exception_lock_in_handler(tcr, context, &xframes);
2134
2135
2136 
2137  if (!handle_exception(signal_number, info, context, tcr, old_valence)) {
2138    char msg[512];
2139    Boolean foreign = (old_valence != TCR_STATE_LISP);
2140
2141    snprintf(msg, sizeof(msg), "Unhandled exception %d (windows code 0x%x) at 0x%Ix, context->regs at 0x%Ix", signal_number, code, xpPC(context), (natural)xpGPRvector(context));
2142   
2143    if (lisp_Debugger(context, info, signal_number,  foreign, msg)) {
2144      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2145    }
2146  }
2147  unlock_exception_lock_in_handler(tcr);
2148  return restore_windows_context(context, tcr, old_valence);
2149}
2150
2151void
2152setup_exception_handler_call(CONTEXT *context,
2153                             LispObj new_sp,
2154                             void *handler,
2155                             EXCEPTION_POINTERS *new_ep,
2156                             TCR *tcr,
2157                             int signal_number)
2158{
2159  extern void windows_halt(void);
2160  LispObj *p = (LispObj *)new_sp;
2161#ifdef WIN_64
2162  p-=4;                         /* win64 abi argsave nonsense */
2163  *(--p) = (LispObj)windows_halt;
2164  context->Rsp = (DWORD64)p;
2165  context->Rip = (DWORD64)handler;
2166  context->Rcx = (DWORD64)new_ep;
2167  context->Rdx = (DWORD64)tcr;
2168  context->R8 = (DWORD64)signal_number;
2169#else
2170  p-=4;                          /* args on stack, stack aligned */
2171  p[0] = (LispObj)new_ep;
2172  p[1] = (LispObj)tcr;
2173  p[2] = signal_number;
2174  *(--p) = (LispObj)windows_halt;
2175  context->Esp = (DWORD)p;
2176  context->Eip = (DWORD)handler;
2177#endif
2178  context->EFlags &= ~0x400;  /* clear direction flag */
2179}
2180
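/* Carve 16-byte-aligned copies of the faulting thread's CONTEXT and
   EXCEPTION_RECORD (plus a fresh EXCEPTION_POINTERS referring to those
   copies) out of the thread's foreign stack, leaving a 128-byte
   cushion below tcr->foreign_sp, then redirect the thread so that it
   calls 'handler' on that stack via setup_exception_handler_call(). */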
2181void
2182prepare_to_handle_windows_exception_on_foreign_stack(TCR *tcr,
2184                                                     CONTEXT *context,
2185                                                     void *handler,
2186                                                     EXCEPTION_POINTERS *original_ep, 
2187                                                     int signal_number)
2188{
2189  LispObj foreign_rsp = 
2190    (LispObj) (tcr->foreign_sp - 128) & ~15;
2191  CONTEXT *new_context;
2192  siginfo_t *new_info;
2193  EXCEPTION_POINTERS *new_ep;
2194
2195  new_context = ((CONTEXT *)(foreign_rsp&~15))-1;
2196  *new_context = *context;
2197  foreign_rsp = (LispObj)new_context;
2198  new_info = ((siginfo_t *)(foreign_rsp&~15))-1;
2199  *new_info = *original_ep->ExceptionRecord;
2200  foreign_rsp = (LispObj)new_info;
2201  new_ep = ((EXCEPTION_POINTERS *)(foreign_rsp&~15))-1;
2202  foreign_rsp = (LispObj)new_ep & ~15;
2203  new_ep->ContextRecord = new_context;
2204  new_ep->ExceptionRecord = new_info;
2205  setup_exception_handler_call(context,foreign_rsp,handler,new_ep, tcr, signal_number);
2206}
2207
2208LONG CALLBACK
2209windows_arbstack_exception_handler(EXCEPTION_POINTERS *exception_pointers)
2210{
2211  extern void ensure_safe_for_string_operations(void);
2212  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
2213  siginfo_t *info = exception_pointers->ExceptionRecord;
2214  ExceptionInformation *context = exception_pointers->ContextRecord;
2215  int signal_number = map_windows_exception_code_to_posix_signal(code, info, context);
2216
2217
2218  if (signal_number <= 0) {
2219    return EXCEPTION_CONTINUE_SEARCH;
2220  } else {
2221    TCR *tcr = get_interrupt_tcr(false);
2222    area *cs = TCR_AUX(tcr)->cs_area;
2223    BytePtr current_sp = (BytePtr) current_stack_pointer();
2224   
2225    ensure_safe_for_string_operations();
2226
2227    if ((current_sp >= cs->low) &&
2228        (current_sp < cs->high)) {
2229      debug_show_registers(context, exception_pointers->ExceptionRecord, 0);
2230      FBug(context, "Exception on foreign stack\n");
2231      return EXCEPTION_CONTINUE_EXECUTION;
2232    }
2233
2234    prepare_to_handle_windows_exception_on_foreign_stack(tcr,
2235                                                         context,
2236                                                         windows_exception_handler,
2237                                                         exception_pointers,
2238                                                         signal_number);
2239    return EXCEPTION_CONTINUE_EXECUTION;
2240  }
2241}
2242
2243
2244void
2245install_pmcl_exception_handlers()
2246{
2247  AddVectoredExceptionHandler(1,windows_arbstack_exception_handler);
2248}
2249#else
2250void
2251install_pmcl_exception_handlers()
2252{
2253  void *handler, *interrupt_handler;
2254
2255#ifdef USE_SIGALTSTACK
2256  handler = (void *)altstack_signal_handler;
2257  interrupt_handler = (void *)altstack_interrupt_handler;
2258#else
2259  handler = (void *)arbstack_signal_handler;
2260  interrupt_handler = (void *)arbstack_interrupt_handler;
2261#endif
2262
2263#ifndef DARWIN
2264  install_signal_handler(SIGILL, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2265  install_signal_handler(SIGBUS, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2266  install_signal_handler(SIGSEGV, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2267  install_signal_handler(SIGFPE, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2268#endif
2269 
2270  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT, interrupt_handler,
2271                         RESERVE_FOR_LISP|ON_ALTSTACK);
2272  signal(SIGPIPE, SIG_IGN);
2273}
2274#endif
2275
2276
2277
2278/* This should only be called when the tcr_area_lock is held */
2279void
2280empty_tcr_stacks(TCR *tcr)
2281{
2282  if (tcr) {
2283    area *a;
2284
2285    tcr->valence = TCR_STATE_FOREIGN;
2286    a = tcr->vs_area;
2287    if (a) {
2288      a->active = a->high;
2289    }
2290    a = tcr->ts_area;
2291    if (a) {
2292      a->active = a->high;
2293    }
2294    a = TCR_AUX(tcr)->cs_area;
2295    if (a) {
2296      a->active = a->high;
2297    }
2298  }
2299}
2300
2301#ifdef WINDOWS
2302void
2303thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2304{
2305}
2306#else
2307void
2308thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2309{
2310  TCR *tcr = get_tcr(false);
2311  sigset_t mask;
2312
2313  sigemptyset(&mask);
2314
2315  empty_tcr_stacks(tcr);
2316
2317  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2318  pthread_exit(NULL);
2319}
2320#endif
2321
2322#ifndef WINDOWS
2323#ifndef USE_SIGALTSTACK
2324void
2325arbstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
2326{
2327  TCR *tcr = get_interrupt_tcr(false);
2328  area *vs = tcr->vs_area;
2329  BytePtr current_sp = (BytePtr) current_stack_pointer();
2330
2331  if ((current_sp >= vs->low) &&
2332      (current_sp < vs->high)) {
2333    handle_signal_on_foreign_stack(tcr,
2334                                   thread_kill_handler,
2335                                   signum,
2336                                   info,
2337                                   context,
2338                                   (LispObj)__builtin_return_address(0)
2339                                   );
2340  } else {
2341    /* If we're not on the value stack, we pretty much have to be on
2342       the C stack.  Just run the handler. */
2343    thread_kill_handler(signum, info, context);
2344  }
2345}
2346
2347
2348#else
2349void
2350altstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
2351{
2352  TCR* tcr = get_tcr(true);
2353  handle_signal_on_foreign_stack(tcr,
2354                                 thread_kill_handler,
2355                                 signum,
2356                                 info,
2357                                 context,
2358                                 (LispObj)__builtin_return_address(0)
2359                                 );
2360}
2361#endif
2362#endif
2363
2364#ifdef USE_SIGALTSTACK
2365#define THREAD_KILL_HANDLER altstack_thread_kill_handler
2366#else
2367#define THREAD_KILL_HANDLER arbstack_thread_kill_handler
2368#endif
2369
2370#ifdef WINDOWS
2371void
2372thread_signal_setup()
2373{
2374}
2375#else
2376void
2377thread_signal_setup()
2378{
2379  thread_suspend_signal = SIG_SUSPEND_THREAD;
2380  thread_kill_signal = SIG_KILL_THREAD;
2381
2382  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
2383                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
2384  install_signal_handler(thread_kill_signal, (void *)THREAD_KILL_HANDLER,
2385                         RESERVE_FOR_LISP|ON_ALTSTACK);
2386}
2387#endif
2388
2389void
2390enable_fp_exceptions()
2391{
2392}
2393
2394void
2395exception_init()
2396{
2397  x86_early_exception_init();
2398  install_pmcl_exception_handlers();
2399}
2400
2401void
2402adjust_exception_pc(ExceptionInformation *xp, int delta)
2403{
2404  xpPC(xp) += delta;
2405}
2406
2407/*
2408  Lower (move toward 0) the "end" of the soft protected area associated
2409  with the area 'a' by one page, if we can.
2410*/
2411
2412void
2413adjust_soft_protection_limit(area *a)
2414{
2415  char *proposed_new_soft_limit = a->softlimit - 4096;
2416  protected_area_ptr p = a->softprot;
2417 
2418  if (proposed_new_soft_limit >= (p->start+16384)) {
2419    p->end = proposed_new_soft_limit;
2420    p->protsize = p->end-p->start;
2421    a->softlimit = proposed_new_soft_limit;
2422  }
2423  protect_area(p);
2424}
2425
2426void
2427restore_soft_stack_limit(unsigned restore_tsp)
2428{
2429  TCR *tcr = get_tcr(false);
2430  area *a;
2431 
2432  if (restore_tsp) {
2433    a = tcr->ts_area;
2434  } else {
2435    a = tcr->vs_area;
2436  }
2437  adjust_soft_protection_limit(a);
2438}
2439
2440
2441#ifdef USE_SIGALTSTACK
2442void
2443setup_sigaltstack(area *a)
2444{
2445  stack_t stack;
2446
2447  stack.ss_size = SIGSTKSZ*8;
2448  stack.ss_flags = 0;
2449  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
2450#ifdef LINUX
2451  /* The ucontext pushed on the altstack may not contain the (largish)
2452     __fpregs_mem field; copy_ucontext() wants to copy what it thinks
2453     is a pointer to a full ucontext.  That'll touch a page beyond the
2454     bottom of the altstack, and when this happens on the initial
2455     thread's stack on a recent (2.6.32+?) kernel, we'll SIGBUS instead
2456     of mapping that page.
2457     It's easier to just reserve that page here than it would be to
2458     change copy_ucontext().
2459  */
2460  stack.ss_size -= sizeof(struct ucontext);
2461#endif
2462  if (sigaltstack(&stack, NULL) != 0) {
2463    perror("sigaltstack");
2464    exit(-1);
2465  }
2466}
2467#endif
2468
2469extern opcode egc_write_barrier_start, egc_write_barrier_end,
2470  egc_set_hash_key_conditional, egc_set_hash_key_conditional_success_test,
2471  egc_set_hash_key_conditional_retry,
2472  egc_store_node_conditional_success_end, egc_store_node_conditional_retry,
2473  egc_store_node_conditional_success_test,egc_store_node_conditional,
2474  egc_set_hash_key, egc_gvset, egc_rplacd, egc_rplaca;
2475
2476/* We use (extremely) rigidly defined instruction sequences for consing,
2477   mostly so that 'pc_luser_xp()' knows what to do if a thread is interrupted
2478   while consing.
2479
2480   Note that we can usually identify which of these instructions is about
2481   to be executed by a stopped thread without comparing all of the bytes
2482   to those at the stopped program counter, but we generally need to
2483   know the sizes of each of these instructions.
2484*/
2485
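/* For reference: in the %gs-relative (non-TCR_IN_GPR) x86-64 case, the
   byte sequences below disassemble approximately as

       movq  %gs:0xd8, %rbx        ; load allocptr (%rbx) from tcr.save_allocptr
       cmpq  %gs:0xe0, %rbx        ; compare against tcr.save_allocbase
       ja    1f                    ; branch around the alloc trap
       int   $0xc5                 ; alloc trap
   1:  movq  %rax, -13(%rbx)       ; set the new uvector's header (uvectors only)
       andb  $0xf0, %gs:0xd8       ; clear the tag bits in tcr.save_allocptr

   where 0xd8 and 0xe0 are the save_allocptr and save_allocbase slots
   in the TCR.  This sketch is just an aid for reading pc_luser_xp()
   below. */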
2486#ifdef X8664
2487opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
2488#ifdef TCR_IN_GPR
2489  {0x49,0x8b,0x9b,0xd8,0x00,0x00,0x00}
2490#else
2491  {0x65,0x48,0x8b,0x1c,0x25,0xd8,0x00,0x00,0x00}
2492#endif
2493;
2494opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
2495#ifdef TCR_IN_GPR
2496  {0x49,0x3b,0x9b,0xe0,0x00,0x00,0x00}
2497#else
2498  {0x65,0x48,0x3b,0x1c,0x25,0xe0,0x00,0x00,0x00}
2499#endif
2500
2501;
2502opcode branch_around_alloc_trap_instruction[] =
2503  {0x77,0x02};
2504opcode alloc_trap_instruction[] =
2505  {0xcd,0xc5};
2506opcode clear_tcr_save_allocptr_tag_instruction[] =
2507#ifdef TCR_IN_GPR
2508  {0x41,0x80,0xa3,0xd8,0x00,0x00,0x00,0xf0}
2509#else
2510  {0x65,0x80,0x24,0x25,0xd8,0x00,0x00,0x00,0xf0}
2511#endif
2512;
2513opcode set_allocptr_header_instruction[] =
2514  {0x48,0x89,0x43,0xf3};
2515
2516
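/* Illustrative only (not part of the build): pc_luser_xp() adjusts a
   stopped thread's PC by the sizes of the encodings above.  For
   example, backing out of the alloc trap means rewinding over the
   load, the compare and the branch: */
#if 0
static int
alloc_sequence_backout_bytes(void)
{
  return (sizeof(branch_around_alloc_trap_instruction) +
          sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
          sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
}
#endif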
2517alloc_instruction_id
2518recognize_alloc_instruction(pc program_counter)
2519{
2520  switch(program_counter[0]) {
2521  case 0xcd: return ID_alloc_trap_instruction;
2522  /* 0x7f is jg, which we used to use here instead of ja */
2523  case 0x7f:
2524  case 0x77: return ID_branch_around_alloc_trap_instruction;
2525  case 0x48: return ID_set_allocptr_header_instruction;
2526#ifdef TCR_IN_GPR
2527  case 0x41: return ID_clear_tcr_save_allocptr_tag_instruction;
2528  case 0x49:
2529    switch(program_counter[1]) {
2530    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2531    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2532    }
2533#else
2534  case 0x65: 
2535    switch(program_counter[1]) {
2536    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
2537    case 0x48:
2538      switch(program_counter[2]) {
2539      case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2540      case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2541      }
2542    }
2543#endif
2544  default: break;
2545  }
2546  return ID_unrecognized_alloc_instruction;
2547}
2548#endif
2549#ifdef X8632
2550#define TCR_SEG_PREFIX 0x64
2551
2552#ifdef WIN_32
2553#define SAVE_ALLOCPTR 0x9c,0x0e,0x0,0x0
2554#define SAVE_ALLOCBASE 0x98,0x0e,0x0,0x0
2555#else
2556#define SAVE_ALLOCPTR 0x84,0x0,0x0,0x0
2557#define SAVE_ALLOCBASE 0x88,0x0,0x0,0x0
2558#endif
2559
2560opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
2561  {TCR_SEG_PREFIX,0x8b,0x0d,SAVE_ALLOCPTR};
2562opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
2563  {TCR_SEG_PREFIX,0x3b,0x0d,SAVE_ALLOCBASE};
2564opcode branch_around_alloc_trap_instruction[] =
2565  {0x77,0x02};
2566opcode alloc_trap_instruction[] =
2567  {0xcd,0xc5};
2568opcode clear_tcr_save_allocptr_tag_instruction[] =
2569  {TCR_SEG_PREFIX,0x80,0x25,SAVE_ALLOCPTR,0xf8};
2570opcode set_allocptr_header_instruction[] =
2571  {0x0f,0x7e,0x41,0xfa};
2572
2573alloc_instruction_id
2574recognize_alloc_instruction(pc program_counter)
2575{
2576  switch(program_counter[0]) {
2577  case 0xcd: return ID_alloc_trap_instruction;
2578  case 0x77: return ID_branch_around_alloc_trap_instruction;
2579  case 0x0f: return ID_set_allocptr_header_instruction;
2580  case 0x64: 
2581    switch(program_counter[1]) {
2582    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
2583    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2584    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2585    }
2586  }
2587  return ID_unrecognized_alloc_instruction;
2588}
2589#endif     
2590
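/* pc_luser_xp(): called with a thread stopped at an arbitrary point
   (another thread during GC, or this one from the interrupt handler).
   If the thread was interrupted in the middle of the consing sequence
   above (indicated by a tagged tcr->save_allocptr), either finish the
   allocation or back it out so that the thread and its TCR are left in
   a consistent state; when 'interrupt_displacement' is non-NULL the
   backed-out size is returned there so the caller can resume the
   allocation later.  If the thread was interrupted inside the egc
   write-barrier subprims, complete the store's memoization (setting
   the refbits) and simulate the subprim's return. */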
2591void
2592pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *interrupt_displacement)
2593{
2594  pc program_counter = (pc)xpPC(xp);
2595  int allocptr_tag = fulltag_of((LispObj)(tcr->save_allocptr));
2596
2597  if (allocptr_tag != 0) {
2598    alloc_instruction_id state = recognize_alloc_instruction(program_counter);
2599    signed_natural
2600      disp = (allocptr_tag == fulltag_cons) ?
2601      sizeof(cons) - fulltag_cons :
2602#ifdef X8664
2603      xpGPR(xp,Iimm1)
2604#else
2605      xpGPR(xp,Iimm0)
2606#endif
2607      ;
2608    LispObj new_vector;
2609
2610    if ((state == ID_unrecognized_alloc_instruction) ||
2611        ((state == ID_set_allocptr_header_instruction) &&
2612         (allocptr_tag != fulltag_misc))) {
2613      Bug(xp, "Can't determine state of thread 0x" LISP ", interrupted during memory allocation", tcr);
2614    }
2615    switch(state) {
2616    case ID_set_allocptr_header_instruction:
2617      /* We were consing a vector and we won.  Set the header of the
2618         new vector (in the allocptr register) to the header in %rax
2619         (%mm0 on ia32) and skip over this instruction, then fall into
2620         the next case. */
2621      new_vector = xpGPR(xp,Iallocptr);
2622      deref(new_vector,0) = 
2623#ifdef X8664
2624        xpGPR(xp,Iimm0)
2625#else
2626        xpMMXreg(xp,Imm0)
2627#endif
2628        ;
2629     
2630      xpPC(xp) += sizeof(set_allocptr_header_instruction);
2631
2632      /* Fall thru */
2633    case ID_clear_tcr_save_allocptr_tag_instruction:
2634      tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
2635      xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
2636
2637      break;
2638    case ID_alloc_trap_instruction:
2639      /* If we're looking at another thread, we're pretty much committed to
2640         taking the trap.  We don't want the allocptr register to be pointing
2641         into the heap, so make it point to (- VOID_ALLOCPTR disp), where 'disp'
2642         was determined above.
2643      */
2644      if (interrupt_displacement == NULL) {
2645        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR - disp;
2646        tcr->save_allocptr = (void *)(VOID_ALLOCPTR - disp);
2647      } else {
2648        /* Back out, and tell the caller how to resume the allocation attempt */
2649        *interrupt_displacement = disp;
2650        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2651        tcr->save_allocptr += disp;
2652        xpPC(xp) -= (sizeof(branch_around_alloc_trap_instruction)+
2653                     sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
2654                     sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
2655      }
2656      break;
2657    case ID_branch_around_alloc_trap_instruction:
2658      /* If we'd take the branch - which is a "ja" - around the alloc trap,
2659         we might as well finish the allocation.  Otherwise, back out of the
2660         attempt. */
2661      {
2662        int flags = (int)eflags_register(xp);
2663       
2664        if ((!(flags & (1 << X86_ZERO_FLAG_BIT))) &&
2665            (!(flags & (1 << X86_CARRY_FLAG_BIT)))) {
2666          /* The branch (ja) would have been taken.  Emulate taking it. */
2667          xpPC(xp) += (sizeof(branch_around_alloc_trap_instruction)+
2668                       sizeof(alloc_trap_instruction));
2669          if (allocptr_tag == fulltag_misc) {
2670            /* Slap the header on the new uvector */
2671            new_vector = xpGPR(xp,Iallocptr);
2672#ifdef X8664
2673            deref(new_vector,0) = xpGPR(xp,Iimm0);
2674#else
2675            deref(new_vector,0) = xpMMXreg(xp,Imm0);
2676#endif
2677            xpPC(xp) += sizeof(set_allocptr_header_instruction);
2678          }
2679          tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
2680          xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
2681        } else {
2682          /* Back up */
2683          xpPC(xp) -= (sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
2684                       sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
2685          xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2686          if (interrupt_displacement) {
2687            *interrupt_displacement = disp;
2688            tcr->save_allocptr += disp;
2689          } else {
2690            tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
2691          }
2692        }
2693      }
2694      break;
2695    case ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction:
2696      xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2697      xpPC(xp) -= sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction);
2698      /* Fall through */
2699    case ID_load_allocptr_reg_from_tcr_save_allocptr_instruction:
2700      if (interrupt_displacement) {
2701        tcr->save_allocptr += disp;
2702        *interrupt_displacement = disp;
2703      } else {
2704        tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
2705      }
2706      break;
2707    default: 
2708      break;
2709    }
2710    return;
2711  }
2712  if ((program_counter >= &egc_write_barrier_start) &&
2713      (program_counter < &egc_write_barrier_end)) {
2714    LispObj *ea = 0, val, root = 0;
2715    bitvector refbits = (bitvector)(lisp_global(REFBITS));
2716    Boolean  need_check_memo = true, need_memoize_root = false;
2717
2718    if (program_counter >= &egc_set_hash_key_conditional) {
2719      if (program_counter <= &egc_set_hash_key_conditional_retry) {
2720        return;
2721      }
2722      if (program_counter < &egc_set_hash_key_conditional_success_test) {
2723        /* Back up the PC, try again.  This is necessary since a pending
2724           GC may cause the value in %eax/%rax to move and backing up
2725           will reload %eax/%rax from a node register before trying the
2726           cmpxchg.
2727        */
2728        xpPC(xp) = (LispObj) &egc_set_hash_key_conditional_retry;
2729        return;
2730      }
2731      if ((program_counter == &egc_set_hash_key_conditional_success_test) &&
2732          !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT))) {
2733        /* Conditional store failed.  Return NIL. */
2734        LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2735        xpGPR(xp,Iarg_z) = lisp_nil;
2736        xpPC(xp) = ra;
2737        xpGPR(xp,Isp)=(LispObj)sp;
2738        return;
2739      }
2740      /* The conditional store succeeded.  Set the refbit, return to ra0 */
2741      val = xpGPR(xp,Iarg_z);
2742#ifdef X8664
2743      root = xpGPR(xp,Iarg_x);
2744      ea = (LispObj*)(root + (unbox_fixnum((signed_natural) xpGPR(xp,Itemp0))));
2745#else
2746      root = xpGPR(xp,Itemp1);
2747      ea = (LispObj *)(root + misc_data_offset + xpGPR(xp,Itemp0));
2748#endif
2749      need_memoize_root = true;
2750      xpGPR(xp,Iarg_z) = t_value;
2751    } else if (program_counter >= &egc_store_node_conditional) {
2752      if (program_counter <= &egc_store_node_conditional_retry) {
2753        return;
2754      }
2755      if (program_counter < &egc_store_node_conditional_success_test) {
2756        /* Back up the PC, try again.  Again, this is necessary because
2757           we're possibly keeping a node in %eax/%rax and haven't completed
2758           the cmpxchg yet. */
2759        xpPC(xp) = (LispObj) &egc_store_node_conditional_retry;
2760        return;
2761      }
2762      if ((program_counter == &egc_store_node_conditional_success_test) &&
2763           !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT))) {
2764        /* cmpxchg failed.  Return NIL. */
2765        LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2766        xpGPR(xp,Iarg_z) = lisp_nil;
2767        xpPC(xp) = ra;
2768        xpGPR(xp,Isp)=(LispObj)sp;
2769        return;
2770      }
2771
2772      if (program_counter >= &egc_store_node_conditional_success_end) {
2773        return;
2774      }
2775
2776      /* The conditional store succeeded.  Set the refbit, return to ra0 */
2777      val = xpGPR(xp,Iarg_z);
2778#ifdef X8664
2779      ea = (LispObj*)(xpGPR(xp,Iarg_x) + (unbox_fixnum((signed_natural)
2780                                                       xpGPR(xp,Itemp0))));
2781#else
2782      ea = (LispObj *)(misc_data_offset + xpGPR(xp,Itemp1) + xpGPR(xp,Itemp0));
2783#endif
2784      xpGPR(xp,Iarg_z) = t_value;
2785    } else if (program_counter >= &egc_set_hash_key) {
2786      if (program_counter == &egc_set_hash_key) {
2787        return;
2788      }
2789#ifdef X8664
2790      root = xpGPR(xp,Iarg_x);
2791#else
2792      root = xpGPR(xp,Itemp0);
2793#endif
2794      ea = (LispObj *) (root+xpGPR(xp,Iarg_y)+misc_data_offset);
2795      val = xpGPR(xp,Iarg_z);
2796      need_memoize_root = true;
2797    } else if (program_counter >= &egc_gvset) {
2798      /* This assumes that the store is the first instruction at _SPgvset.
2799         As of late February 2013 - just before the 1.9 release - that's
2800         a relatively recent change.
2801         If the store has already completed, don't do it again.
2802         See ticket:1058 for an example showing why this matters.
2803      */
2804      if (program_counter == &egc_gvset) {
2805        return;
2806      }
2807#ifdef X8664
2808      ea = (LispObj *) (xpGPR(xp,Iarg_x)+xpGPR(xp,Iarg_y)+misc_data_offset);
2809#else
2810      ea = (LispObj *) (xpGPR(xp,Itemp0)+xpGPR(xp,Iarg_y)+misc_data_offset);
2811#endif
2812      val = xpGPR(xp,Iarg_z);
2813    } else if (program_counter >= &egc_rplacd) {
2814      if (program_counter == &egc_rplacd) {
2815        return;
2816      }
2817      ea = (LispObj *) untag(xpGPR(xp,Iarg_y));
2818      val = xpGPR(xp,Iarg_z);
2819    } else {                      /* egc_rplaca */
2820      if (program_counter == &egc_rplaca) {
2821        return;
2822      }
2823      ea =  ((LispObj *) untag(xpGPR(xp,Iarg_y)))+1;
2824      val = xpGPR(xp,Iarg_z);
2825    }
2826    if (need_check_memo) {
2827      if ((LispObj)ea < val) {
2828        natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE)),
2829          rootbitnumber = area_dnode(root, lisp_global(REF_BASE));
2830        if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT))) {
2831          atomic_set_bit(refbits, bitnumber);
2832          atomic_set_bit(global_refidx,bitnumber>>8);
2833          if (need_memoize_root) {
2834            atomic_set_bit(refbits, rootbitnumber);
2835            atomic_set_bit(global_refidx,rootbitnumber>>8);
2836          }
2837        }
2838        if (bitnumber < lisp_global(MANAGED_STATIC_DNODES)) {
2839          atomic_set_bit(managed_static_refbits,bitnumber);
2840          atomic_set_bit(managed_static_refidx,bitnumber>>8);
2841          if (need_memoize_root) {
2842            atomic_set_bit(managed_static_refbits, rootbitnumber);
2843            atomic_set_bit(managed_static_refidx,rootbitnumber>>8);
2844          }
2845        }
2846      }
2847    }
2848    {
2849      /* These subprimitives are called via CALL/RET; need
2850         to pop the return address off the stack and set
2851         the PC there. */
2852      LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2853      xpPC(xp) = ra;
2854      xpGPR(xp,Isp)=(LispObj)sp;
2855    }
2856    return;
2857  }
2858}
2859
2860
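/* Make 'tcr' presentable to the GC: complete or back out any
   allocation in progress (for other threads), set the active pointers
   of its value and temp stack areas from the context and/or the
   vsp/tsp values saved in the TCR, account for bytes already
   allocated, and reset save_allocbase - and, if it's untagged,
   save_allocptr - to VOID_ALLOCPTR. */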
2861void
2862normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
2863{
2864  void *cur_allocptr = (void *)(tcr->save_allocptr);
2865  LispObj lisprsp;
2866  area *a;
2867
2868  if (xp) {
2869    if (is_other_tcr) {
2870      pc_luser_xp(xp, tcr, NULL);
2871    }
2872    a = tcr->vs_area;
2873    lisprsp = xpGPR(xp, Isp);
2874    if (((BytePtr)lisprsp >= a->low) &&
2875        ((BytePtr)lisprsp < a->high)) {
2876      a->active = (BytePtr)lisprsp;
2877    } else {
2878      a->active = (BytePtr) tcr->save_vsp;
2879    }
2880    a = tcr->ts_area;
2881    a->active = (BytePtr) tcr->save_tsp;
2882  } else {
2883    /* In ff-call; get area active pointers from tcr */
2884    tcr->vs_area->active = (BytePtr) tcr->save_vsp;
2885    tcr->ts_area->active = (BytePtr) tcr->save_tsp;
2886  }
2887  if (cur_allocptr) {
2888    update_bytes_allocated(tcr, cur_allocptr);
2889  }
2890  tcr->save_allocbase = (void *)VOID_ALLOCPTR;
2891  if (fulltag_of((LispObj)(tcr->save_allocptr)) == 0) {
2892    tcr->save_allocptr = (void *)VOID_ALLOCPTR;
2893  }
2894}
2895
2896
2897/* Suspend and "normalize" other tcrs, then call a gc-like function
2898   in that context.  Resume the other tcrs, then return what the
2899   function returned */
2900
2901TCR *gc_tcr = NULL;
2902
2903
2904signed_natural
2905gc_like_from_xp(ExceptionInformation *xp, 
2906                signed_natural(*fun)(TCR *, signed_natural), 
2907                signed_natural param)
2908{
2909  TCR *tcr = get_tcr(false), *other_tcr;
2910  int result;
2911  signed_natural inhibit, barrier = 0;
2912
2913  atomic_incf(&barrier);
2914  suspend_other_threads(true);
2915  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
2916  if (inhibit != 0) {
2917    if (inhibit > 0) {
2918      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
2919    }
2920    atomic_decf(&barrier);
2921    resume_other_threads(true);
2922    gc_deferred++;
2923    return 0;
2924  }
2925  gc_deferred = 0;
2926
2927  gc_tcr = tcr;
2928
2929  /* This is generally necessary if the current thread invoked the GC
2930     via an alloc trap, and harmless if the GC was invoked via a GC
2931     trap.  (It's necessary in the first case because the "allocptr"
2932     register - %rbx - may be pointing into the middle of something
2933     below tcr->save_allocbase, and we wouldn't want the GC to see
2934     that bogus pointer.) */
2935  xpGPR(xp, Iallocptr) = VOID_ALLOCPTR; 
2936
2937  normalize_tcr(xp, tcr, false);
2938
2939
2940  for (other_tcr = TCR_AUX(tcr)->next; other_tcr != tcr;
2941       other_tcr = TCR_AUX(other_tcr)->next) {
2942    if (other_tcr->pending_exception_context) {
2943      TCR_AUX(other_tcr)->gc_context = other_tcr->pending_exception_context;
2944    } else if (other_tcr->valence == TCR_STATE_LISP) {
2945      TCR_AUX(other_tcr)->gc_context = TCR_AUX(other_tcr)->suspend_context;
2946    } else {
2947      /* no pending exception, didn't suspend in lisp state:
2948         must have executed a synchronous ff-call.
2949      */
2950      TCR_AUX(other_tcr)->gc_context = NULL;
2951    }
2952    normalize_tcr(TCR_AUX(other_tcr)->gc_context, other_tcr, true);
2953  }
2954   
2955
2956
2957  result = fun(tcr, param);
2958
2959  other_tcr = tcr;
2960  do {
2961    TCR_AUX(other_tcr)->gc_context = NULL;
2962    other_tcr = TCR_AUX(other_tcr)->next;
2963  } while (other_tcr != tcr);
2964
2965  gc_tcr = NULL;
2966
2967  atomic_decf(&barrier);
2968  resume_other_threads(true);
2969
2970  return result;
2971
2972}
2973
2974signed_natural
2975purify_from_xp(ExceptionInformation *xp, signed_natural param)
2976{
2977  return gc_like_from_xp(xp, purify, param);
2978}
2979
2980signed_natural
2981impurify_from_xp(ExceptionInformation *xp, signed_natural param)
2982{
2983  return gc_like_from_xp(xp, impurify, param);
2984}
2985
2986/* Returns #bytes freed by invoking GC */
2987
2988signed_natural
2989gc_from_tcr(TCR *tcr, signed_natural param)
2990{
2991  area *a;
2992  BytePtr oldfree, newfree;
2993  BytePtr oldend, newend;
2994
2995#if 0
2996  fprintf(dbgout, "Start GC  in 0x" LISP "\n", tcr);
2997#endif
2998  a = active_dynamic_area;
2999  oldend = a->high;
3000  oldfree = a->active;
3001  gc(tcr, param);
3002  newfree = a->active;
3003  newend = a->high;
3004#if 0
3005  fprintf(dbgout, "End GC  in 0x" LISP "\n", tcr);
3006#endif
3007  return ((oldfree-newfree)+(newend-oldend));
3008}
3009
3010signed_natural
3011gc_from_xp(ExceptionInformation *xp, signed_natural param)
3012{
3013  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
3014
3015  freeGCptrs();
3016  return status;
3017}
3018
3019#ifdef DARWIN
3020
3021#define TCR_FROM_EXCEPTION_PORT(p) find_tcr_from_exception_port(p)
3022#define TCR_TO_EXCEPTION_PORT(t) (mach_port_name_t)((natural) (((TCR *)t)->io_datum))
3023
3024
3025extern void pseudo_sigreturn(void);
3026
3027
3028
3029#define LISP_EXCEPTIONS_HANDLED_MASK \
3030 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
3031
3032/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
3033#define NUM_LISP_EXCEPTIONS_HANDLED 4
3034
3035typedef struct {
3036  int foreign_exception_port_count;
3037  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
3038  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
3039  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
3040  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
3041} MACH_foreign_exception_state;
3042
3043
3044
3045
3046/*
3047  Mach's exception mechanism works a little better than its signal
3048  mechanism (and, not incidentally, it gets along with GDB a lot
3049  better).
3050
3051  Initially, we install an exception handler to handle each native
3052  thread's exceptions.  This process involves creating a distinguished
3053  thread which listens for kernel exception messages on a set of
3054  0 or more thread exception ports.  As threads are created, they're
3055  added to that port set; a thread's exception port is destroyed
3056  (and therefore removed from the port set) when the thread exits.
3057
3058  A few exceptions can be handled directly in the handler thread;
3059  others require that we resume the user thread (and that the
3060  exception thread resumes listening for exceptions.)  The user
3061  thread might eventually want to return to the original context
3062  (possibly modified somewhat.)
3063
3064  As it turns out, the simplest way to force the faulting user
3065  thread to handle its own exceptions is to do pretty much what
3066  signal() does: the exception handling thread sets up a sigcontext
3067  on the user thread's stack and forces the user thread to resume
3068  execution as if a signal handler had been called with that
3069  context as an argument.  We can use a distinguished UUO at a
3070  distinguished address to do something like sigreturn(); that'll
3071  have the effect of resuming the user thread's execution in
3072  the (pseudo-) signal context.
3073
3074  Since:
3075    a) we have miles of code in C and in Lisp that knows how to
3076    deal with Linux sigcontexts
3077    b) Linux sigcontexts contain a little more useful information
3078    (the DAR, DSISR, etc.) than their Darwin counterparts
3079    c) we have to create a sigcontext ourselves when calling out
3080    to the user thread: we aren't really generating a signal, just
3081    leveraging existing signal-handling code.
3082
3083  we create a Linux sigcontext struct.
3084
3085  Simple ?  Hopefully from the outside it is ...
3086
3087  We want the process of passing a thread's own context to it to
3088  appear to be atomic: in particular, we don't want the GC to suspend
3089  a thread that's had an exception but has not yet had its user-level
3090  exception handler called, and we don't want the thread's exception
3091  context to be modified by a GC while the Mach handler thread is
3092  copying it around.  On Linux (and on Jaguar), we avoid this issue
3093  because (a) the kernel sets up the user-level signal handler and
3094  (b) the signal handler blocks signals (including the signal used
3095  by the GC to suspend threads) until tcr->xframe is set up.
3096
3097  The GC and the Mach server thread therefore contend for the lock
3098  "mach_exception_lock".  The Mach server thread holds the lock
3099  when copying exception information between the kernel and the
3100  user thread; the GC holds this lock during most of its execution
3101  (delaying exception processing until it can be done without
3102  GC interference.)
3103
3104*/
3105
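/* Concretely, the flow implemented below is roughly:
   catch_mach_exception_raise_state() runs on the exception thread,
   maps the Mach exception to a signal number and calls
   setup_signal_frame(), which uses create_thread_context_frame() to
   build a pseudo ucontext and siginfo on a stack chosen by
   find_foreign_rsp(), then points the faulting thread at
   DARWIN_EXCEPTION_HANDLER (signal_handler) with a fake return address
   of pseudo_sigreturn().  When the handler "returns", the resulting
   fault at the distinguished pseudo_sigreturn address is recognized
   and do_pseudo_sigreturn()/restore_mach_thread_state() put the
   thread's original register state back. */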
3106#ifdef PPC64
3107#define C_REDZONE_LEN           320
3108#define C_STK_ALIGN             32
3109#else
3110#define C_REDZONE_LEN           224
3111#define C_STK_ALIGN             16
3112#endif
3113#define C_PARAMSAVE_LEN         64
3114#define C_LINKAGE_LEN           48
3115
3116#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
3117
3118void
3119fatal_mach_error(char *format, ...);
3120
3121#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
3122
3123
3124void
3125restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext, native_thread_state_t *ts)
3126{
3127  kern_return_t kret;
3128#if WORD_SIZE == 64
3129  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
3130#else
3131  mcontext_t mc = UC_MCONTEXT(pseudosigcontext);
3132#endif
3133
3134  /* Set the thread's FP state from the pseudosigcontext */
3135  kret = thread_set_state(thread,
3136                          NATIVE_FLOAT_STATE_FLAVOR,
3137                          (thread_state_t)&(mc->__fs),
3138                          NATIVE_FLOAT_STATE_COUNT);
3139  MACH_CHECK_ERROR("setting thread FP state", kret);
3140  *ts = mc->__ss;
3141} 
3142
3143kern_return_t
3144do_pseudo_sigreturn(mach_port_t thread, TCR *tcr, native_thread_state_t *out)
3145{
3146  ExceptionInformation *xp;
3147
3148#ifdef DEBUG_MACH_EXCEPTIONS
3149  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
3150#endif
3151  xp = tcr->pending_exception_context;
3152  if (xp) {
3153    tcr->pending_exception_context = NULL;
3154    tcr->valence = TCR_STATE_LISP;
3155    restore_mach_thread_state(thread, xp, out);
3156    raise_pending_interrupt(tcr);
3157  } else {
3158    Bug(NULL, "no xp here!\n");
3159  }
3160#ifdef DEBUG_MACH_EXCEPTIONS
3161  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
3162#endif
3163  return KERN_SUCCESS;
3164} 
3165
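/* Build the "pseudo signal" context for a stopped thread: pick a spot
   on an appropriate C stack (via find_foreign_rsp), then lay out -
   each 16-byte aligned, growing downward - a siginfo_t, an
   ExceptionInformation (ucontext) and an mcontext holding the thread's
   current register, float and exception state.  Returns the ucontext;
   *new_stack_top is set to the bottom of what was carved out. */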
3166ExceptionInformation *
3167create_thread_context_frame(mach_port_t thread, 
3168                            natural *new_stack_top,
3169                            siginfo_t **info_ptr,
3170                            TCR *tcr,
3171                            native_thread_state_t *ts
3172                            )
3173{
3174  mach_msg_type_number_t thread_state_count;
3175  ExceptionInformation *pseudosigcontext;
3176#ifdef X8664
3177  MCONTEXT_T mc;
3178#else
3179  mcontext_t mc;
3180#endif
3181  natural stackp;
3182
3183#ifdef X8664 
3184  stackp = (LispObj) find_foreign_rsp(ts->__rsp,tcr->cs_area,tcr);
3185  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
3186#else
3187  stackp = (LispObj) find_foreign_rsp(ts->__esp, tcr->cs_area, tcr);
3188#endif
3189  stackp = TRUNC_DOWN(stackp, sizeof(siginfo_t), C_STK_ALIGN);
3190  if (info_ptr) {
3191    *info_ptr = (siginfo_t *)stackp;
3192  }
3193  stackp = TRUNC_DOWN(stackp,sizeof(*pseudosigcontext), C_STK_ALIGN);
3194  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
3195
3196  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
3197  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
3198 
3199  memmove(&(mc->__ss),ts,sizeof(*ts));
3200
3201  thread_state_count = NATIVE_FLOAT_STATE_COUNT;
3202  thread_get_state(thread,
3203                   NATIVE_FLOAT_STATE_FLAVOR,
3204                   (thread_state_t)&(mc->__fs),
3205                   &thread_state_count);
3206
3207  thread_state_count = NATIVE_EXCEPTION_STATE_COUNT;
3208  thread_get_state(thread,
3209                   NATIVE_EXCEPTION_STATE_FLAVOR,
3210                   (thread_state_t)&(mc->__es),
3211                   &thread_state_count);
3212
3213
3214  UC_MCONTEXT(pseudosigcontext) = mc;
3215  if (new_stack_top) {
3216    *new_stack_top = stackp;
3217  }
3218  return pseudosigcontext;
3219}
3220
3221/*
3222  This code sets up the user thread so that it executes a "pseudo-signal
3223  handler" function when it resumes.  Create a fake ucontext struct
3224  on the thread's stack and pass it as an argument to the pseudo-signal
3225  handler.
3226
3227  Things are set up so that the handler "returns to" pseudo_sigreturn(),
3228  which will restore the thread's context.
3229
3230  If the handler invokes code that throws (or otherwise never sigreturn()'s
3231  to the context), that's fine.
3232
3233  Actually, check that: throw (and variants) may need to be careful and
3234  pop the tcr's xframe list until it's younger than any frame being
3235  entered.
3236*/
3237
3238int
3239setup_signal_frame(mach_port_t thread,
3240                   void *handler_address,
3241                   int signum,
3242                   int code,
3243                   TCR *tcr,
3244                   native_thread_state_t *ts,
3245                   native_thread_state_t *new_ts
3246                   )
3247{
3248  ExceptionInformation *pseudosigcontext;
3249  int  old_valence = tcr->valence;
3250  natural stackp, *stackpp;
3251  siginfo_t *info;
3252
3253#ifdef DEBUG_MACH_EXCEPTIONS
3254  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
3255#endif
3256  pseudosigcontext = create_thread_context_frame(thread, &stackp, &info, tcr,  ts);
3257  bzero(info, sizeof(*info));
3258  info->si_code = code;
3259  info->si_addr = (void *)(UC_MCONTEXT(pseudosigcontext)->__es.__faultvaddr);
3260  info->si_signo = signum;
3261  pseudosigcontext->uc_onstack = 0;
3262  pseudosigcontext->uc_sigmask = (sigset_t) 0;
3263  pseudosigcontext->uc_stack.ss_sp = 0;
3264  pseudosigcontext->uc_stack.ss_size = 0;
3265  pseudosigcontext->uc_stack.ss_flags = 0;
3266  pseudosigcontext->uc_link = NULL;
3267  pseudosigcontext->uc_mcsize = sizeof(*UC_MCONTEXT(pseudosigcontext));
3268  tcr->pending_exception_context = pseudosigcontext;
3269  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
3270 
3271
3272  /*
3273     We've now created a sigcontext on the thread's
3274     stack.  Set things up so that we call the handler (with appropriate
3275     args) when the thread's resumed.
3276  */
3277
3278#ifdef X8664
3279  new_ts->__rip = (natural) handler_address;
3280  stackpp = (natural *)stackp;
3281  *--stackpp = (natural)pseudo_sigreturn;
3282  stackp = (natural)stackpp;
3283  new_ts->__rdi = signum;
3284  new_ts->__rsi = (natural)info;
3285  new_ts->__rdx = (natural)pseudosigcontext;
3286  new_ts->__rcx = (natural)tcr;
3287  new_ts->__r8 = (natural)old_valence;
3288  new_ts->__rsp = stackp;
3289  new_ts->__rflags = ts->__rflags;
3290#else
3291  bzero(new_ts, sizeof(*new_ts));
3292  new_ts->__cs = ts->__cs;
3293  new_ts->__ss = ts->__ss;
3294  new_ts->__ds = ts->__ds;
3295  new_ts->__es = ts->__es;
3296  new_ts->__fs = ts->__fs;
3297  new_ts->__gs = ts->__gs;
3298
3299  new_ts->__eip = (natural)handler_address;
3300  stackpp = (natural *)stackp;
3301  *--stackpp = 0;               /* alignment */
3302  *--stackpp = 0;
3303  *--stackpp = 0;
3304  *--stackpp = (natural)old_valence;
3305  *--stackpp = (natural)tcr;
3306  *--stackpp = (natural)pseudosigcontext;
3307  *--stackpp = (natural)info;
3308  *--stackpp = (natural)signum;
3309  *--stackpp = (natural)pseudo_sigreturn;
3310  stackp = (natural)stackpp;
3311  new_ts->__esp = stackp;
3312  new_ts->__eflags = ts->__eflags;
3313#endif
3314#ifdef DEBUG_MACH_EXCEPTIONS
3315  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
3316#endif
3317  return 0;
3318}
3319
3320
3321
3322
3323
3324
3325/*
3326  This function runs in the exception handling thread.  It's
3327  called (by this precise name) from the function "mach_exc_server()"
3328  when the thread's exception ports are set up.  (mach_exc_server() is called
3329  via mach_msg_server(), which is a function that waits for and dispatches
3330  on exception messages from the Mach kernel.)
3331
3332  This checks to see if the exception was caused by a pseudo_sigreturn()
3333  UUO; if so, it arranges for the thread to have its state restored
3334  from the specified context.
3335
3336  Otherwise, it tries to map the exception to a signal number and
3337  arranges that the thread run a "pseudo signal handler" to handle
3338  the exception.
3339
3340  Some exceptions could and should be handled here directly.
3341*/
3342
3343#define DARWIN_EXCEPTION_HANDLER signal_handler
3344
3345TCR *
3346find_tcr_from_exception_port(mach_port_t port)
3347{
3348    mach_vm_address_t context = 0;
3349    kern_return_t kret;
3350
3351    kret = mach_port_get_context(mach_task_self(), port, &context);
3352    MACH_CHECK_ERROR("finding TCR from exception port", kret);
3353    return (TCR *)context;
3354}
3355
3356void
3357associate_tcr_with_exception_port(mach_port_t port, TCR *tcr)
3358{
3359    kern_return_t kret;
3360   
3361    kret = mach_port_set_context(mach_task_self(),
3362                                 port, (mach_vm_address_t)tcr);
3363    MACH_CHECK_ERROR("associating TCR with exception port", kret);
3364}
3365
3366void
3367disassociate_tcr_from_exception_port(mach_port_t port)
3368{
3369  kern_return_t kret;
3370
3371  kret = mach_port_set_context(mach_task_self(), port, 0);
3372  MACH_CHECK_ERROR("disassociating TCR with exception port", kret);
3373}
3374
3375kern_return_t
3376catch_mach_exception_raise(mach_port_t exception_port,
3377                           mach_port_t thread,
3378                           mach_port_t task,
3379                           exception_type_t exception,
3380                           mach_exception_data_t code,
3381                           mach_msg_type_number_t code_count)
3382{
3383  abort();
3384  return KERN_FAILURE;
3385}
3386
3387kern_return_t
3388catch_mach_exception_raise_state(mach_port_t exception_port,
3389                                 exception_type_t exception,
3390                                 mach_exception_data_t code,
3391                                 mach_msg_type_number_t code_count,
3392                                 int *flavor,
3393                                 thread_state_t in_state,
3394                                 mach_msg_type_number_t in_state_count,
3395                                 thread_state_t out_state,
3396                                 mach_msg_type_number_t *out_state_count)
3397{
3398  int64_t code0 = code[0];
3399  int signum = 0;
3400  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
3401  mach_port_t thread = (mach_port_t)((natural)tcr->native_thread_id);
3402  kern_return_t kret, call_kret;
3403
3404  native_thread_state_t
3405    *ts = (native_thread_state_t *)in_state,
3406    *out_ts = (native_thread_state_t*)out_state;
3407  mach_msg_type_number_t thread_state_count;
3408
3409  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
3410    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
3411  } 
3412  if ((code0 == EXC_I386_GPFLT) &&
3413      ((natural)(ts_pc(ts)) == (natural)pseudo_sigreturn)) {
3414    kret = do_pseudo_sigreturn(thread, tcr, out_ts);
3415#if 0
3416    fprintf(dbgout, "Exception return in 0x%x\n",tcr);
3417#endif
3418  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
3419    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
3420    kret = 17;
3421  } else {
3422    switch (exception) {
3423    case EXC_BAD_ACCESS:
3424      if (code0 == EXC_I386_GPFLT) {
3425        signum = SIGSEGV;
3426      } else {
3427        signum = SIGBUS;
3428      }
3429      break;
3430     
3431    case EXC_BAD_INSTRUCTION:
3432      if (code0 == EXC_I386_GPFLT) {
3433        signum = SIGSEGV;
3434      } else {
3435        signum = SIGILL;
3436      }
3437      break;
3438     
3439    case EXC_SOFTWARE:
3440      signum = SIGILL;
3441      break;
3442     
3443    case EXC_ARITHMETIC:
3444      signum = SIGFPE;
3445      if (code0 == EXC_I386_DIV)
3446        code0 = FPE_INTDIV;
3447      break;
3448     
3449    default:
3450      break;
3451    }
3452#if WORD_SIZE==64
3453    if ((signum==SIGFPE) && 
3454        (code0 != FPE_INTDIV) && 
3455        (tcr->valence != TCR_STATE_LISP)) {
3456      mach_msg_type_number_t thread_state_count = x86_FLOAT_STATE64_COUNT;
3457      x86_float_state64_t fs;
3458     
3459      thread_get_state(thread,
3460                       x86_FLOAT_STATE64,
3461                       (thread_state_t)&fs,
3462                       &thread_state_count);
3463     
3464      if (! (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN_FPE))) {
3465        tcr->flags |= (1<<TCR_FLAG_BIT_FOREIGN_FPE);
3466        tcr->lisp_mxcsr = (fs.__fpu_mxcsr & ~MXCSR_STATUS_MASK);
3467      }
3468      fs.__fpu_mxcsr &= ~MXCSR_STATUS_MASK;
3469      fs.__fpu_mxcsr |= MXCSR_CONTROL_MASK;
3470      thread_set_state(thread,
3471                       x86_FLOAT_STATE64,
3472                       (thread_state_t)&fs,
3473                       x86_FLOAT_STATE64_COUNT);
3474      *out_state_count = NATIVE_THREAD_STATE_COUNT;
3475      *out_ts = *ts;
3476      return KERN_SUCCESS;
3477    }
3478#endif
3479    if (signum) {
3480      kret = setup_signal_frame(thread,
3481                                (void *)DARWIN_EXCEPTION_HANDLER,
3482                                signum,
3483                                code0,
3484                                tcr, 
3485                                ts,
3486                                out_ts);
3487     
3488    } else {
3489      kret = 17;
3490    }
3491  }
3492 
3493  if (kret) {
3494    *out_state_count = 0;
3495    *flavor = 0;
3496  } else {
3497    *out_state_count = NATIVE_THREAD_STATE_COUNT;
3498  }
3499  return kret;
3500}
3501
3502kern_return_t
3503catch_mach_exception_raise_state_identity(mach_port_t exception_port,
3504                                          mach_port_t thread,
3505                                          mach_port_t task,
3506                                          exception_type_t exception,
3507                                          mach_exception_data_t code,
3508                                          mach_msg_type_number_t code_count,
3509                                          int *flavor,
3510                                          thread_state_t old_state,
3511                                          mach_msg_type_number_t old_count,
3512                                          thread_state_t new_state,
3513                                          mach_msg_type_number_t *new_count)
3514{
3515  abort();
3516  return KERN_FAILURE;
3517}
3518
3519
3520
3521
3522
3523static mach_port_t mach_exception_thread = (mach_port_t)0;
3524
3525
3526/*
3527  The initial function for an exception-handling thread.
3528*/
3529
3530void *
3531exception_handler_proc(void *arg)
3532{
3533  extern boolean_t mach_exc_server();
3534  mach_port_t p = (mach_port_t)((natural)arg);
3535
3536  mach_exception_thread = pthread_mach_thread_np(pthread_self());
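  /* mach_msg_server() loops forever, receiving messages on the port set
     and handing each one to the MIG-generated mach_exc_server()
     demultiplexer; 256 is the maximum request-message size, in bytes. */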
3537  mach_msg_server(mach_exc_server, 256, p, 0);
3538  /* Should never return. */
3539  abort();
3540}
3541
3542
3543
3544void
3545mach_exception_thread_shutdown()
3546{
3547  kern_return_t kret;
3548
3549  fprintf(dbgout, "terminating Mach exception thread, 'cause exit can't\n");
3550  kret = thread_terminate(mach_exception_thread);
3551  if (kret != KERN_SUCCESS) {
3552    fprintf(dbgout, "Couldn't terminate exception thread, kret = %d\n",kret);
3553  }
3554}
3555
3556
3557mach_port_t
3558mach_exception_port_set()
3559{
3560  static mach_port_t __exception_port_set = MACH_PORT_NULL;
3561  kern_return_t kret; 
3562  if (__exception_port_set == MACH_PORT_NULL) {
3563
3564    kret = mach_port_allocate(mach_task_self(),
3565                              MACH_PORT_RIGHT_PORT_SET,
3566                              &__exception_port_set);
3567    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
3568    create_system_thread(0,
3569                         NULL,
3570                         exception_handler_proc, 
3571                         (void *)((natural)__exception_port_set));
3572  }
3573  return __exception_port_set;
3574}
3575
3576/*
3577  Set up a new thread to handle those exceptions specified by
3578  the mask "which".  This involves creating a special Mach
3579  message port, telling the Mach kernel to send exception
3580  messages for the calling thread to that port, and setting
3581  up a handler thread which listens for and responds to
3582  those messages.
3583
3584*/
3585
3586/*
3587  Establish the lisp thread's TCR as its exception port, and determine
3588  whether any other ports have been established by foreign code for
3589  exceptions that lisp cares about.
3590
3591  If this happens at all, it should happen on return from foreign
3592  code and on entry to lisp code via a callback.
3593
3594  This is a lot of trouble (and overhead) to support Java, or other
3595  embeddable systems that clobber their caller's thread exception ports.
3596 
3597*/
3598kern_return_t
3599tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
3600{
3601  kern_return_t kret;
3602  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
3603  int i;
3604  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
3605  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
3606  exception_mask_t mask = 0;
3607
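  /* thread_swap_exception_ports() installs lisp_port for every exception
     type in LISP_EXCEPTIONS_HANDLED_MASK and returns whatever handlers were
     previously registered for those exceptions (their masks, ports,
     behaviors, and flavors) in the fxs arrays, so that
     restore_foreign_exception_ports() can reinstate them later. */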
3608  kret = thread_swap_exception_ports(thread,
3609                                     LISP_EXCEPTIONS_HANDLED_MASK,
3610                                     lisp_port,
3611                                     MACH_EXCEPTION_CODES | EXCEPTION_STATE,
3612#if WORD_SIZE==64
3613                                     x86_THREAD_STATE64,
3614#else
3615                                     x86_THREAD_STATE32,
3616#endif
3617                                     fxs->masks,
3618                                     &n,
3619                                     fxs->ports,
3620                                     fxs->behaviors,
3621                                     fxs->flavors);
3622  if (kret == KERN_SUCCESS) {
3623    fxs->foreign_exception_port_count = n;
3624    for (i = 0; i < n; i ++) {
3625      foreign_port = fxs->ports[i];
3626
3627      if ((foreign_port != lisp_port) &&
3628          (foreign_port != MACH_PORT_NULL)) {
3629        mask |= fxs->masks[i];
3630      }
3631    }
3632    tcr->foreign_exception_status = (int) mask;
3633  }
3634  return kret;
3635}
3636
3637kern_return_t
3638tcr_establish_lisp_exception_port(TCR *tcr)
3639{
3640  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
3641}
3642
3643/*
3644  Do this when calling out to or returning from foreign code, if
3645  any conflicting foreign exception ports were established when we
3646  last entered lisp code.
3647*/
3648kern_return_t
3649restore_foreign_exception_ports(TCR *tcr)
3650{
3651  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
3652  kern_return_t kret;
3653
3654  if (m) {
3655    MACH_foreign_exception_state *fxs  = 
3656      (MACH_foreign_exception_state *) tcr->native_thread_info;
3657    int i, n = fxs->foreign_exception_port_count;
3658    exception_mask_t tm;
3659
3660    for (i = 0; i < n; i++) {
3661      if ((tm = fxs->masks[i]) & m) {
3662        kret = thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
3663                                   tm,
3664                                   fxs->ports[i],
3665                                   fxs->behaviors[i],
3666                                   fxs->flavors[i]);
3667        MACH_CHECK_ERROR("restoring thread exception ports", kret);
3668      }
3669    }
3670  }
3671  return KERN_SUCCESS;
3672}
3673                                   
3674
3675/*
3676  This assumes that a Mach port (to be used as the thread's exception port) whose
3677  "name" matches the TCR's 32-bit address has already been allocated.
3678*/
3679
3680kern_return_t
3681setup_mach_exception_handling(TCR *tcr)
3682{
3683  mach_port_t
3684    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
3685    task_self = mach_task_self();
3686  kern_return_t kret;
3687
3688  kret = mach_port_insert_right(task_self,
3689                                thread_exception_port,
3690                                thread_exception_port,
3691                                MACH_MSG_TYPE_MAKE_SEND);
3692  MACH_CHECK_ERROR("adding send right to exception_port",kret);
3693
3694  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
3695  if (kret == KERN_SUCCESS) {
3696    mach_port_t exception_port_set = mach_exception_port_set();
3697
3698    kret = mach_port_move_member(task_self,
3699                                 thread_exception_port,
3700                                 exception_port_set);
3701  }
3702  return kret;
3703}
3704
3705void
3706darwin_exception_init(TCR *tcr)
3707{
3708  void tcr_monitor_exception_handling(TCR*, Boolean);
3709  kern_return_t kret;
3710  MACH_foreign_exception_state *fxs = 
3711    calloc(1, sizeof(MACH_foreign_exception_state));
3712 
3713  tcr->native_thread_info = (void *) fxs;
3714
3715  if ((kret = setup_mach_exception_handling(tcr))
3716      != KERN_SUCCESS) {
3717    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
3718    terminate_lisp();
3719  }
3720}
3721
3722/*
3723  The tcr is the "name" of the corresponding thread's exception port.
3724  Destroying the port should remove it from all port sets of which it's
3725  a member (notably, the exception port set.)
3726*/
3727void
3728darwin_exception_cleanup(TCR *tcr)
3729{
3730  mach_port_t exception_port;
3731  void *fxs = tcr->native_thread_info;
3732
3733  if (fxs) {
3734    tcr->native_thread_info = NULL;
3735    free(fxs);
3736  }
3737
3738  exception_port = TCR_TO_EXCEPTION_PORT(tcr);
3739  disassociate_tcr_from_exception_port(exception_port);
3740  mach_port_deallocate(mach_task_self(), exception_port);
3741  /* Theoretically not needed, I guess... */
3742  mach_port_destroy(mach_task_self(), exception_port);
3743}
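/*
  A rough, non-compiled sketch (the enclosing function name below is purely
  illustrative) of how these two entry points appear to be intended to be
  used by the thread startup/shutdown code elsewhere in the lisp kernel.
*/
#if 0
static void
illustrative_darwin_thread_lifecycle(TCR *tcr)
{
  /* At thread startup, once the TCR (and hence its exception-port name)
     exists: route this thread's Mach exceptions to the handler thread. */
  darwin_exception_init(tcr);

  /* ... the thread runs; exceptions it raises arrive as messages on the
     port set serviced by exception_handler_proc() ... */

  /* At thread shutdown: free the per-thread state and tear down the port. */
  darwin_exception_cleanup(tcr);
}
#endif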
3744
3745
3746
3747
3748void
3749fatal_mach_error(char *format, ...)
3750{
3751  va_list args;
3752  char s[512];
3753 
3754
3755  va_start(args, format);
3756  vsnprintf(s, sizeof(s),format, args);
3757  va_end(args);
3758
3759  Fatal("Mach error", s);
3760}
3761
3762
3763
3764
3765#endif
3766
3767/* watchpoint stuff */
3768
3769area *
3770new_watched_area(natural size)
3771{
3772  char *p;
3773
3774  p = MapMemory(NULL, size, MEMPROTECT_RWX);
3775  if ((signed_natural)p == -1) {
3776    allocation_failure(true, size);
3777  }
3778  return new_area(p, p + size, AREA_WATCHED);
3779}
3780
3781void
3782delete_watched_area(area *a, TCR *tcr)
3783{
3784  natural nbytes = a->high - a->low;
3785  char *base = a->low;
3786
3787  condemn_area_holding_area_lock(a);
3788
3789  if (nbytes) {
3790    int err;
3791
3792    err = UnMapMemory(base, nbytes);
3793    if (err != 0)
3794      Fatal("munmap in delete_watched_area", "");
3795  }
3796}
3797
3798natural
3799uvector_total_size_in_bytes(LispObj *u)
3800{
3801  LispObj header = header_of(u);
3802  natural header_tag = fulltag_of(header);
3803  natural subtag = header_subtag(header);
3804  natural element_count = header_element_count(header);
3805  natural nbytes = 0;
3806
3807#ifdef X8632
3808  if ((nodeheader_tag_p(header_tag)) ||
3809      (subtag <= max_32_bit_ivector_subtag)) {
3810    nbytes = element_count << 2;
3811  } else if (subtag <= max_8_bit_ivector_subtag) {
3812    nbytes = element_count;
3813  } else if (subtag <= max_16_bit_ivector_subtag) {
3814    nbytes = element_count << 1;
3815  } else if (subtag == subtag_double_float_vector) {
3816    nbytes = element_count << 3;
3817  } else {
3818    nbytes = (element_count + 7) >> 3;
3819  }
3820  /* add 4 byte header and round up to multiple of 8 bytes */
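  /* e.g. nbytes = 5  =>  (4 + 5 + 7) & ~7 = 16 total bytes */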
3821  return ~7 & (4 + nbytes + 7);
3822#endif
3823#ifdef X8664
3824  if ((nodeheader_tag_p(header_tag)) || (header_tag == ivector_class_64_bit)) {
3825    nbytes = element_count << 3;
3826  } else if (header_tag == ivector_class_32_bit) {
3827    nbytes = element_count << 2;
3828  } else {
3829    /* ivector_class_other_bit contains 8-bit and 16-bit ivectors and bit vectors */
3830    if (subtag == subtag_bit_vector) {
3831      nbytes = (element_count + 7) >> 3;
3832    } else if (subtag >= min_8_bit_ivector_subtag) {
3833      nbytes = element_count;
3834    } else {
3835      nbytes = element_count << 1;
3836    }
3837  }
3838  /* add 8 byte header and round up to multiple of 16 bytes */
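  /* e.g. nbytes = 5  =>  (8 + 5 + 15) & ~15 = 16 total bytes */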
3839  return ~15 & (8 + nbytes + 15);
3840#endif
3841}
3842
3843extern void wp_update_references(TCR *, LispObj, LispObj);
3844
3845/*
3846 * Other threads are suspended and pc-lusered.
3847 *
3848 * param contains a tagged pointer to a uvector or a cons cell
3849 */
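/*
 * The object is copied into a freshly mapped area which is then
 * write-protected, so later writes to it fault; the fault-handling code
 * elsewhere in this file is presumably what recognizes such faults as
 * watched-object hits.
 */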
3850signed_natural
3851watch_object(TCR *tcr, signed_natural param)
3852{
3853  LispObj object = (LispObj)param;
3854  unsigned tag = fulltag_of(object);
3855  LispObj *noderef = (LispObj *)untag(object);
3856  area *object_area = area_containing((BytePtr)noderef);
3857  natural size;
3858
3859  if (tag == fulltag_cons)
3860    size = 2 * node_size;
3861  else
3862    size = uvector_total_size_in_bytes(noderef);
3863
3864  if (object_area && object_area->code == AREA_DYNAMIC) {
3865    area *a = new_watched_area(size);
3866    LispObj old = object;
3867    LispObj new = (LispObj)((natural)a->low + tag);
3868
3869    add_area_holding_area_lock(a);
3870
3871    /* move object to watched area */
3872    memcpy(a->low, noderef, size);
3873    ProtectMemory(a->low, size);
3874    memset(noderef, 0, size);
3875    wp_update_references(tcr, old, new);
3876    check_all_areas(tcr);
3877    return 1;
3878  }
3879  return 0;
3880}
3881
3882/*
3883 * We expect the watched object in arg_y, and the new, not-yet-initialized
3884 * object (its contents are all zeros) in arg_z.
3885 */
3886signed_natural
3887unwatch_object(TCR *tcr, signed_natural param)
3888{
3889  ExceptionInformation *xp = tcr->xframe->curr;
3890  LispObj old = xpGPR(xp, Iarg_y);
3891  unsigned tag = fulltag_of(old);
3892  LispObj new = xpGPR(xp, Iarg_z);
3893  LispObj *oldnode = (LispObj *)untag(old);
3894  LispObj *newnode = (LispObj *)untag(new);
3895  area *a = area_containing((BytePtr)old);
3896  extern void update_managed_refs(area *, BytePtr, natural);
3897
3898  if (a && a->code == AREA_WATCHED) {
3899    natural size;
3900
3901    if (tag == fulltag_cons)
3902      size = 2 * node_size;
3903    else
3904      size = uvector_total_size_in_bytes(oldnode);
3905
3906    memcpy(newnode, oldnode, size);
3907    delete_watched_area(a, tcr);
3908    wp_update_references(tcr, old, new);
3909    /* because wp_update_references doesn't update refbits */
3910    tenure_to_area(tenured_area);
3911    /* Unwatching can (re-)introduce managed_static->dynamic references */
3912    zero_bits(managed_static_area->refbits,managed_static_area->ndnodes);
3913    update_managed_refs(managed_static_area, low_markable_address, area_dnode(active_dynamic_area->active, low_markable_address));
3914    check_all_areas(tcr);
3915    xpGPR(xp, Iarg_z) = new;
3916  }
3917  return 0;
3918}
3919
3920Boolean
3921handle_watch_trap(ExceptionInformation *xp, TCR *tcr)
3922{
3923  LispObj selector = xpGPR(xp,Iimm0);
3924  LispObj object = xpGPR(xp, Iarg_z);
3925  signed_natural result;
3926 
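  /* Both operations run via gc_like_from_xp(), which (like the GC) suspends
     the other threads before calling the helper; see the comment above
     watch_object(). */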
3927  switch (selector) {
3928    case WATCH_TRAP_FUNCTION_WATCH:
3929      result = gc_like_from_xp(xp, watch_object, object);
3930      if (result == 0)
3931        xpGPR(xp,Iarg_z) = lisp_nil;
3932      break;
3933    case WATCH_TRAP_FUNCTION_UNWATCH:
3934      gc_like_from_xp(xp, unwatch_object, 0);
3935      break;
3936    default:
3937      break;
3938  }
3939  return true;
3940}
3941