source: trunk/source/lisp-kernel/x86-exceptions.c

Last change on this file was 16685, checked in by rme, 4 years ago

Update copyright/license headers in files.

1/*
2 * Copyright 2005-2009 Clozure Associates
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include "x86-utils.h"
21#include "threads.h"
22#include <ctype.h>
23#include <stdio.h>
24#include <stddef.h>
25#include <string.h>
26#include <stdarg.h>
27#include <errno.h>
28#include <stdio.h>
29#ifdef LINUX
30#include <strings.h>
31#include <sys/mman.h>
32#include <fpu_control.h>
33#include <linux/prctl.h>
34#endif
35#ifdef DARWIN
36#include <sysexits.h>
37#include <dlfcn.h>
38#include <mach/machine/vm_types.h>
39#endif
40#ifndef WINDOWS
41#include <sys/syslog.h>
42#endif
43#ifdef WINDOWS
44#include <windows.h>
45#ifdef WIN_64
46#include <winternl.h>
47#include <ntstatus.h>
48#endif
49#ifndef EXCEPTION_WRITE_FAULT
50#define EXCEPTION_WRITE_FAULT 1
51#endif
52#endif
53
54void
55normalize_tcr(ExceptionInformation *, TCR *, Boolean);
56
57/*
58  We do all kinds of funky things to avoid handling a signal on the lisp
59  stack.  One of those funky things involves using the  __builtin_return_address()
60  intrinsic so that the real handler returns to the right place when it exits,
61  even if it returns on a different stack.  Code at "the right place" is presumed
62  to just do a sigreturn, however the OS does that.
63
64  Sadly, some JVMs (at least) do what they call "signal chaining": they install
65  their own handlers for signals that we expect to handle, and call our handler
66  when they realize that they don't know how to handle what we raise.  They
67  don't observe sigaltstack, and they don't necessarily call our handler
68  tail-recursively (so our stack-switching code would cause our handler to
69  return to the JVM's, running on the wrong stack).
70
71  Try to work around this by setting up an "early" signal handler (before any
72  of this JVM nonsense can take effect) and noting the address it'd return to.
73*/
74
75pc
76real_sigreturn = (pc)0;
77
78#define SIGRETURN_ADDRESS() (real_sigreturn ? real_sigreturn : __builtin_return_address(0))
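/* SIGRETURN_ADDRESS() is what gets pushed as the "return address" when a
   handler has to run on a different stack (see handle_signal_on_foreign_stack()
   and altstack_signal_handler() below): if the early handler managed to capture
   the kernel's sigreturn trampoline, use that; otherwise fall back on
   __builtin_return_address(0). */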
79
80#ifndef WINDOWS
81#ifndef DARWIN
82void
83early_intn_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
84{
85  real_sigreturn = (pc) __builtin_return_address(0);
86  xpPC(xp) += 2;
87}
88
89#endif
90#endif
91
92void
93do_intn()
94{
95  __asm volatile("int $0xcd");
96}
97
98void
99x86_early_exception_init()
100{
101#ifndef WINDOWS
102#ifndef DARWIN
103  struct sigaction action, oaction;
104
105  action.sa_sigaction = (void *) early_intn_handler;
106  sigfillset(&action.sa_mask);
107  action.sa_flags = SA_SIGINFO;
108  sigaction(SIGNUM_FOR_INTN_TRAP,&action,&oaction);
109 
110  do_intn();
111  sigaction(SIGNUM_FOR_INTN_TRAP,&oaction, NULL);
112#endif
113#endif
114}
115
116int
117page_size = 4096;
118
119int
120log2_page_size = 12;
121
122Boolean
123did_gc_notification_since_last_full_gc = false;
124
125Boolean
126allocation_enabled = true;
127
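/* Credit the bytes consumed since the last segment grab to the TCR's
   running total.  The allocation pointer moves downward through the
   current segment, so the delta is (last - current). */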
128void
129update_bytes_allocated(TCR* tcr, void *cur_allocptr)
130{
131  char *last = tcr->last_allocptr;
132  char *current = cur_allocptr;
133  uint64_t *bytes_allocated = (uint64_t *)&TCR_AUX(tcr)->bytes_allocated;
134
135  if (last && (tcr->save_allocbase != ((void *)VOID_ALLOCPTR))) {
136    *bytes_allocated += last - current;
137  }
138  tcr->last_allocptr = 0;
139}
140
141
142
143//  This doesn't GC; it returns true if it made enough room, false
144//  otherwise.
145//  If "extend" is true, it can try to extend the dynamic area to
146//  satisfy the request.
147
148
149
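/* Record the bounds of a freshly obtained heap segment in the TCR and in
   the exception context, so that lisp resumes allocating downward from
   "high" with "low" as the limit. */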
150void
151platform_new_heap_segment(ExceptionInformation *xp, TCR *tcr, BytePtr low, BytePtr high)
152{
153  tcr->last_allocptr = (void *)high;
154  tcr->save_allocptr = (void *)high;
155  xpGPR(xp,Iallocptr) = (LispObj) high;
156  tcr->save_allocbase = (void *) low;
157}
158
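/* Try to make room for an object of bytes_needed bytes: possibly do an
   EGC first, then try to grab a segment from the existing heap; if that
   fails, maybe do a full GC (when the request is smaller than the whole
   heap) and retry, this time allowing the heap to be extended.  On
   success, Iallocptr and tcr->save_allocptr point at the new object. */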
159Boolean
160allocate_object(ExceptionInformation *xp,
161                natural bytes_needed, 
162                signed_natural disp_from_allocptr,
163                TCR *tcr,
164                Boolean *crossed_threshold)
165{
166  area *a = active_dynamic_area;
167
168  /* Maybe do an EGC */
169  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
170    if (((a->active)-(a->low)) >= a->threshold) {
171      gc_from_xp(xp, 0L);
172    }
173  }
174
175  /* Life is pretty simple if we can simply grab a segment
176     without extending the heap.
177  */
178  if (new_heap_segment(xp, bytes_needed, false, tcr, crossed_threshold)) {
179    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
180    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
181    return true;
182  }
183 
184  /* It doesn't make sense to try a full GC if the object
185     we're trying to allocate is larger than everything
186     allocated so far.
187  */
188  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
189    untenure_from_area(tenured_area); /* force a full GC */
190    gc_from_xp(xp, 0L);
191    did_gc_notification_since_last_full_gc = false;
192  }
193 
194  /* Try again, growing the heap if necessary */
195  if (new_heap_segment(xp, bytes_needed, true, tcr, NULL)) {
196    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
197    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
198    return true;
199  }
200 
201  return false;
202}
203
204natural gc_deferred = 0, full_gc_deferred = 0;
205
206signed_natural
207flash_freeze(TCR *tcr, signed_natural param)
208{
209  return 0;
210}
211
212
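/* handle_gc_trap: the selector is in imm0, an optional argument in imm1
   (or MM0 on x8632).  Small selectors adjust EGC/GC parameters or
   notification thresholds; the default case performs a full GC,
   optionally followed by impurify/purify, freeze, or save_application,
   with the EGC temporarily disabled.  Always returns true; the caller
   then skips over the two-byte UUO. */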
213Boolean
214handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
215{
216  LispObj selector = xpGPR(xp,Iimm0);
217#ifdef X8664
218  LispObj arg = xpGPR(xp,Iimm1);
219#else
220  LispObj arg = xpMMXreg(xp,Imm0);
221#endif
222  area *a = active_dynamic_area;
223  Boolean egc_was_enabled = (a->older != NULL);
224 
225  natural gc_previously_deferred = gc_deferred;
226
227  switch (selector) {
228  case GC_TRAP_FUNCTION_EGC_CONTROL:
229    egc_control(arg != 0, a->active);
230    xpGPR(xp,Iarg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
231    break;
232
233  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
234#ifdef X8664
235    a->threshold = unbox_fixnum(xpGPR(xp, Iarg_x));
236#else
237    a->threshold = unbox_fixnum(xpGPR(xp, Itemp0));
238#endif
239    g1_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_y));
240    g2_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_z));
241    xpGPR(xp,Iarg_z) = lisp_nil+t_offset;
242    break;
243
244  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
245    if (((signed_natural) arg) > 0) {
246      lisp_heap_gc_threshold = 
247        align_to_power_of_2((arg-1) +
248                            (heap_segment_size - 1),
249                            log2_heap_segment_size);
250    }
251    /* fall through */
252  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
253    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
254    break;
255
256  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
257    /*  Try to put the current threshold in effect.  This may
258        need to disable/reenable the EGC. */
259    untenure_from_area(tenured_area);
260    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
261    if (egc_was_enabled) {
262      if ((a->high - a->active) >= a->threshold) {
263        tenure_to_area(tenured_area);
264      }
265    }
266    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
267    break;
268
269  case GC_TRAP_FUNCTION_SET_GC_NOTIFICATION_THRESHOLD:
270    if ((signed_natural)arg >= 0) {
271      lisp_heap_notify_threshold = arg;
272      did_gc_notification_since_last_full_gc = false;
273    }
274    /* fall through */
275
276  case GC_TRAP_FUNCTION_GET_GC_NOTIFICATION_THRESHOLD:
277    xpGPR(xp, Iimm0) = lisp_heap_notify_threshold;
278    break;
279
280  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
281    ensure_static_conses(xp, tcr, 32768);
282    break;
283
284  case GC_TRAP_FUNCTION_FLASH_FREEZE: /* Like freeze below, but no GC */
285    untenure_from_area(tenured_area);
286    gc_like_from_xp(xp,flash_freeze,0);
287    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
288    tenured_area->static_dnodes = area_dnode(a->active, a->low);
289    if (egc_was_enabled) {
290      tenure_to_area(tenured_area);
291    }
292    xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
293    break;
294
295  case GC_TRAP_FUNCTION_ALLOCATION_CONTROL:
296    switch(arg) {
297    case 0: /* disable if allocation enabled */
298      xpGPR(xp, Iarg_z) = lisp_nil;
299      if (allocation_enabled) {
300        TCR *other_tcr;
301        ExceptionInformation *other_context;
302        suspend_other_threads(true);
303        normalize_tcr(xp,tcr,false);
304        for (other_tcr=TCR_AUX(tcr)->next; other_tcr != tcr; other_tcr = TCR_AUX(other_tcr)->next) {
305          other_context = other_tcr->pending_exception_context;
306          if (other_context == NULL) {
307            other_context = TCR_AUX(other_tcr)->suspend_context;
308          }
309          normalize_tcr(other_context, other_tcr, true);
310        }
311        allocation_enabled = false;
312        xpGPR(xp, Iarg_z) = t_value;
313        resume_other_threads(true);
314      }
315      break;
316
317    case 1:                     /* enable if disabled */
318      xpGPR(xp, Iarg_z) = lisp_nil;
319      if (!allocation_enabled) {
320        allocation_enabled = true;
321        xpGPR(xp, Iarg_z) = t_value;
322      }
323      break;
324
325    default:
326      xpGPR(xp, Iarg_z) = lisp_nil;
327      if (allocation_enabled) {
328        xpGPR(xp, Iarg_z) = t_value;
329      }
330      break;
331    }
332    break;
333
334  default:
335    update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
336
337    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
338      if (!full_gc_deferred) {
339        gc_from_xp(xp, 0L);
340        did_gc_notification_since_last_full_gc = false;
341        break;
342      }
343      /* Tried to do a full GC when gc was disabled.  That failed,
344         so try full GC now */
345      selector = GC_TRAP_FUNCTION_GC;
346    }
347   
348    if (egc_was_enabled) {
349      egc_control(false, (BytePtr) a->active);
350    }
351    gc_from_xp(xp, 0L);
352    did_gc_notification_since_last_full_gc = false;
353    if (gc_deferred > gc_previously_deferred) {
354      full_gc_deferred = 1;
355    } else {
356      full_gc_deferred = 0;
357    }
358    if (selector > GC_TRAP_FUNCTION_GC) {
359      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
360        impurify_from_xp(xp, 0L);
361        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
362        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
363        gc_from_xp(xp, 0L);
364      }
365      if (selector & GC_TRAP_FUNCTION_PURIFY) {
366        purify_from_xp(xp, 1);
367        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
368        gc_from_xp(xp, 0L);
369      }
370      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
371        OSErr err;
372        extern OSErr save_application(int, Boolean);
373        area *vsarea = tcr->vs_area;
374
375#ifdef WINDOWS 
376        arg = _open_osfhandle(arg,0);
377#endif
378        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
379        err = save_application((int)arg, egc_was_enabled);
380        if (err == noErr) {
381          _exit(0);
382        }
383        fatal_oserr(": save_application", err);
384      }
385      switch (selector) {
386      case GC_TRAP_FUNCTION_FREEZE:
387        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
388        tenured_area->static_dnodes = area_dnode(a->active, a->low);
389        xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
390        break;
391      default:
392        break;
393      }
394    }
395    if (egc_was_enabled) {
396      egc_control(true, NULL);
397    }
398    break;
399  }
400  return true;
401}
402
403 
404
405
406
407void
408push_on_lisp_stack(ExceptionInformation *xp, LispObj value)
409{
410  LispObj *vsp = (LispObj *)xpGPR(xp,Isp);
411  *--vsp = value;
412  xpGPR(xp,Isp) = (LispObj)vsp;
413}
414
415
416/* Hard to know whether this is necessary in general.  For now,
417   do it when we get a "wrong number of arguments" trap.
418*/
419void
420finish_function_entry(ExceptionInformation *xp)
421{
422  natural nargs = xpGPR(xp,Inargs)>>fixnumshift;
423  signed_natural disp = nargs - nargregs;
424  LispObj *vsp =  (LispObj *) xpGPR(xp,Isp), ra = *vsp++;
425   
426  xpGPR(xp,Isp) = (LispObj) vsp;
427
428  if (disp > 0) {               /* implies that nargs > nargregs */
429    vsp[disp] = xpGPR(xp,Ifp);
430    vsp[disp+1] = ra;
431    xpGPR(xp,Ifp) = (LispObj)(vsp+disp);
432#ifdef X8664
433    push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
434#endif
435    push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
436    push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
437  } else {
438    push_on_lisp_stack(xp,ra);
439    push_on_lisp_stack(xp,xpGPR(xp,Ifp));
440    xpGPR(xp,Ifp) = xpGPR(xp,Isp);
441#ifdef X8664
442    if (nargs == 3) {
443      push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
444    }
445#endif
446    if (nargs >= 2) {
447      push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
448    }
449    if (nargs >= 1) {
450      push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
451    }
452  }
453}
454
455Boolean
456object_contains_pc(LispObj container, LispObj addr)
457{
458  if (fulltag_of(container) >= fulltag_misc) {
459    natural elements = header_element_count(header_of(container));
460    if ((addr >= container) &&
461        (addr < ((LispObj)&(deref(container,1+elements))))) {
462      return true;
463    }
464  }
465  return false;
466}
467
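/* Build an "exception callback frame" (xcf) on the lisp stack: the
   nominal function, the code object (if any) containing the PC and the
   PC's offset within it (so the GC can relocate it), the interrupted PC
   as two fixnums, the raw context pointer, and the saved
   xframe/foreign_sp/tra.  Returns the new frame pointer. */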
468LispObj
469create_exception_callback_frame(ExceptionInformation *xp, TCR *tcr)
470{
471  LispObj containing_uvector = 0, 
472    relative_pc = lisp_nil,
473    nominal_function = lisp_nil, 
474    f, tra, tra_f = 0, abs_pc;
475  LispObj pc_low, pc_high;
476
477  f = xpGPR(xp,Ifn);
478  tra = *(LispObj*)(xpGPR(xp,Isp));
479  if (tra_p(tra)) {
480    char *p = (char *)tra;
481    extern char *spentry_start, *spentry_end;
482
483    if (ptr_in_area(p, tcr->ts_area) ||
484        (p > spentry_start && p < spentry_end) ||
485        in_any_consing_area(tra))
486      tra_f = tra_function(tra);
487    else
488      Bug(xp, "martian tra %p\n", tra);
489  }
490  abs_pc = (LispObj)xpPC(xp);
491#if WORD_SIZE == 64
492  pc_high = ((abs_pc >> 32) & 0xffffffff) << fixnumshift;
493  pc_low = (abs_pc & 0xffffffff) << fixnumshift;
494#else
495  pc_high = ((abs_pc >> 16) & 0xffff) << fixnumshift;
496  pc_low = (abs_pc & 0xffff) << fixnumshift;
497#endif
498
499
500  if (functionp(f))
501    nominal_function = f;
502  else if (tra_f)
503    nominal_function = tra_f;
504 
505  f = xpGPR(xp,Ifn);
506  if (object_contains_pc(f, abs_pc)) {
507    containing_uvector = untag(f)+fulltag_misc;
508  } else {
509    f = xpGPR(xp,Ixfn);
510    if (object_contains_pc(f, abs_pc)) {
511      containing_uvector = untag(f)+fulltag_misc;
512    } else {
513      if (tra_f) {
514        f = tra_f;
515        if (object_contains_pc(f, abs_pc)) {
516          containing_uvector = untag(f)+fulltag_misc;
517          relative_pc = (abs_pc - f) << fixnumshift;
518        }
519      }
520    }
521  }
522  if (containing_uvector) {
523    relative_pc = (abs_pc - (LispObj)&(deref(containing_uvector,1))) << fixnumshift;
524  } else {
525    containing_uvector = lisp_nil;
526  }
527  push_on_lisp_stack(xp, pc_high);
528  push_on_lisp_stack(xp, pc_low);
529  push_on_lisp_stack(xp,(LispObj)(tcr->xframe->prev));
530  push_on_lisp_stack(xp,(LispObj)(tcr->foreign_sp));
531  push_on_lisp_stack(xp,tra);
532  push_on_lisp_stack(xp,(LispObj)xp);
533  push_on_lisp_stack(xp,containing_uvector); 
534  push_on_lisp_stack(xp,relative_pc);
535  push_on_lisp_stack(xp,nominal_function);
536  push_on_lisp_stack(xp,0);
537  push_on_lisp_stack(xp,xpGPR(xp,Ifp));
538  xpGPR(xp,Ifp) = xpGPR(xp,Isp);
539  return xpGPR(xp,Isp);
540}
541
542#ifndef XMEMFULL
543#define XMEMFULL (76)
544#endif
545
546void
547lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed )
548{
549  LispObj xcf = create_exception_callback_frame(xp, tcr),
550    cmain = nrs_CMAIN.vcell;
551  int skip;
552   
553  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
554  xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
555
556  skip = callback_to_lisp(tcr, cmain, xp, xcf, -1, XMEMFULL, 0, 0);
557  xpPC(xp) += skip;
558}
559
560#ifndef SIGTRAP
561#define SIGTRAP 5
562#endif
563
564void
565callback_for_gc_notification(ExceptionInformation *xp, TCR *tcr)
566{
567  LispObj cmain = nrs_CMAIN.vcell;
568  if ((fulltag_of(cmain) == fulltag_misc) &&
569      (header_subtag(header_of(cmain)) == subtag_macptr)) {
570    LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
571      word_beyond_vsp = save_vsp[-1],
572      save_fp = xpGPR(xp,Ifp),
573      xcf = create_exception_callback_frame(xp, tcr);
574
575    callback_to_lisp(tcr, cmain, xp, xcf, SIGTRAP, 0, 0, 0);
576    did_gc_notification_since_last_full_gc = true;
577    xpGPR(xp,Ifp) = save_fp;
578    xpGPR(xp,Isp) = (LispObj)save_vsp;
579    save_vsp[-1] = word_beyond_vsp;
580  }
581}
582
583
584/*
585  Allocate a large list, where "large" means "large enough to
586  possibly trigger the EGC several times if this was done
587  by individually allocating each CONS."  The number of
588  conses in question is in arg_z; on successful return,
589  the list will be in arg_z.
590*/
591
592Boolean
593allocate_list(ExceptionInformation *xp, TCR *tcr)
594{
595  natural
596    nconses = (unbox_fixnum(xpGPR(xp,Iarg_z))),
597    bytes_needed = (nconses << dnode_shift);
598  LispObj
599    prev = lisp_nil,
600    current,
601    initial = xpGPR(xp,Iarg_y);
602  Boolean notify_pending_gc = false;
603
604  if (nconses == 0) {
605    /* Silly case */
606    xpGPR(xp,Iarg_z) = lisp_nil;
607    return true;
608  }
609  update_bytes_allocated(tcr, (void *)tcr->save_allocptr);
610  if (allocate_object(xp,bytes_needed,bytes_needed-fulltag_cons,tcr, &notify_pending_gc)) {
611    tcr->save_allocptr -= fulltag_cons;
612    for (current = xpGPR(xp,Iallocptr);
613         nconses;
614         prev = current, current+= dnode_size, nconses--) {
615      deref(current,0) = prev;
616      deref(current,1) = initial;
617    }
618    xpGPR(xp,Iarg_z) = prev;
619    if (notify_pending_gc && !did_gc_notification_since_last_full_gc) {
620      callback_for_gc_notification(xp,tcr);
621    }
622  } else {
623    lisp_allocation_failure(xp,tcr,bytes_needed);
624  }
625  return true;
626}
627
628Boolean
629handle_alloc_trap(ExceptionInformation *xp, TCR *tcr, Boolean *notify)
630{
631  natural cur_allocptr, bytes_needed;
632  unsigned allocptr_tag;
633  signed_natural disp;
634
635  if (!allocation_enabled) {
636    LispObj xcf,
637      save_sp = xpGPR(xp,Isp),
638      save_fp = xpGPR(xp,Ifp),
639      cmain = nrs_CMAIN.vcell;
640    xcf = create_exception_callback_frame(xp, tcr);
641    pc_luser_xp(xp,tcr,NULL);
642    allocation_enabled = true;
643    tcr->save_allocbase = (void *)VOID_ALLOCPTR;
644    callback_to_lisp(tcr, cmain, xp, xcf, -1, 0, 0, error_allocation_disabled);
645    xpGPR(xp,Isp) = save_sp;
646    xpGPR(xp,Ifp) = save_fp;
647    return true;
648  }
649  cur_allocptr = xpGPR(xp,Iallocptr);
650  allocptr_tag = fulltag_of(cur_allocptr);
651  if (allocptr_tag == fulltag_misc) {
652#ifdef X8664
653    disp = xpGPR(xp,Iimm1);
654#else
655    disp = xpGPR(xp,Iimm0);
656#endif
657  } else {
658    disp = dnode_size-fulltag_cons;
659  }
660  bytes_needed = disp+allocptr_tag;
661
662  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr+disp)));
663  if (allocate_object(xp, bytes_needed, disp, tcr, notify)) {
664    if (notify && *notify) {
665      xpPC(xp)+=2;
666      /* Finish the allocation: add a header if necessary,
667         clear the tag bits in tcr.save_allocptr. */
668      pc_luser_xp(xp,tcr,NULL);
669      callback_for_gc_notification(xp,tcr);
670    }
671    return true;
672  }
673 
674  lisp_allocation_failure(xp,tcr,bytes_needed);
675
676  return true;
677}
678
679 
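/* Call back into lisp through the kernel-callback macptr (normally
   nrs_CMAIN or nrs_ERRDISP), with the MXCSR forced to a known state
   and, on x8632, the node registers spilled where .SPcallback expects
   them.  The value returned is (usually) the number of bytes by which
   the caller should advance the interrupted PC.  Typical use, following
   the pattern in handle_error() and do_soft_stack_overflow() below:

     xcf  = create_exception_callback_frame(xp, tcr);
     skip = callback_to_lisp(tcr, nrs_CMAIN.vcell, xp, xcf, signum, 0, 0, 0);
     xpPC(xp) += skip;
*/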
680int
681callback_to_lisp (TCR * tcr, LispObj callback_macptr, ExceptionInformation *xp,
682                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
683{
684  natural  callback_ptr;
685  int delta;
686  unsigned old_mxcsr = get_mxcsr();
687#ifdef X8632
688  natural saved_node_regs_mask = tcr->node_regs_mask;
689  natural saved_unboxed0 = tcr->unboxed0;
690  natural saved_unboxed1 = tcr->unboxed1;
691  LispObj *vsp = (LispObj *)xpGPR(xp, Isp);
692#endif
693  set_mxcsr(0x1f80);
694
695  /* Put the active stack pointers where .SPcallback expects them */
696#ifdef X8632
697  tcr->node_regs_mask = X8632_DEFAULT_NODE_REGS_MASK;
698
699  *--vsp = tcr->save0;
700  *--vsp = tcr->save1;
701  *--vsp = tcr->save2;
702  *--vsp = tcr->save3;
703  *--vsp = tcr->next_method_context;
704  xpGPR(xp, Isp) = (LispObj)vsp;
705#endif
706  tcr->save_vsp = (LispObj *)xpGPR(xp, Isp);
707  tcr->save_fp = (LispObj *)xpGPR(xp, Ifp);
708
709  /* Call back.  The caller of this function may have modified stack/frame
710     pointers (and at least should have called prepare_for_callback()).
711  */
712  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
713  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
714  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
715  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
716
717#ifdef X8632
718  tcr->next_method_context = *vsp++;
719  tcr->save3 = *vsp++;
720  tcr->save2 = *vsp++;
721  tcr->save1 = *vsp++;
722  tcr->save0 = *vsp++;
723  xpGPR(xp, Isp) = (LispObj)vsp;
724
725  tcr->node_regs_mask = saved_node_regs_mask;
726  tcr->unboxed0 = saved_unboxed0;
727  tcr->unboxed1 = saved_unboxed1;
728#endif
729  set_mxcsr(old_mxcsr);
730  return delta;
731}
732
733void
734callback_for_interrupt(TCR *tcr, ExceptionInformation *xp)
735{
736  LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
737    word_beyond_vsp = save_vsp[-1],
738    save_fp = xpGPR(xp,Ifp),
739    xcf = create_exception_callback_frame(xp, tcr);
740  int save_errno = errno;
741
742  callback_to_lisp(tcr, nrs_CMAIN.vcell,xp, xcf, 0, 0, 0, 0);
743  xpGPR(xp,Ifp) = save_fp;
744  xpGPR(xp,Isp) = (LispObj)save_vsp;
745  save_vsp[-1] = word_beyond_vsp;
746  errno = save_errno;
747}
748
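/* handle_error: reached via an "int 0xcd" UUO that encodes a lisp error.
   Build an xcf and call the errdisp callback (nrs_ERRDISP); a returned
   skip count of -1 means "resume at the relative PC recorded in the
   xcf", and the 0xcd/0xc7 case continues after a call to a
   formerly-undefined function. */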
749Boolean
750handle_error(TCR *tcr, ExceptionInformation *xp)
751{
752  pc program_counter = (pc)xpPC(xp);
753  unsigned char op0 = program_counter[0], op1 = program_counter[1];
754  LispObj rpc, errdisp = nrs_ERRDISP.vcell,
755    save_vsp = xpGPR(xp,Isp), xcf0,
756    save_fp = xpGPR(xp,Ifp);
757  int skip;
758
759  if ((fulltag_of(errdisp) == fulltag_misc) &&
760      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
761
762    if ((op0 == 0xcd) && (op1 >= 0xc0) && (op1 <= 0xc2)) {
763      finish_function_entry(xp);
764    }
765    xcf0 = create_exception_callback_frame(xp, tcr);
766    skip = callback_to_lisp(tcr, errdisp, xp, xcf0, 0, 0, 0, 0);
767    if (skip == -1) {
768      xcf *xcf1 = (xcf *)xcf0;
769      LispObj container = xcf1->containing_uvector;
770     
771      rpc = xcf1->relative_pc >> fixnumshift;
772      if (container == lisp_nil) {
773        xpPC(xp) = rpc;
774      } else {
775        xpPC(xp) = (LispObj)(&(deref(container,
776#ifdef X8664
777                                     1
778#else
779                                     0
780#endif
781)))+rpc;
782      }
783       
784      skip = 0;
785    }
786    xpGPR(xp,Ifp) = save_fp;
787    xpGPR(xp,Isp) = save_vsp;
788    if ((op0 == 0xcd) && (op1 == 0xc7)) {
789      /* Continue after an undefined function call. The function
790         that had been undefined has already been called (in the
791         break loop), and a list of the values that it returned
792         is in the xp's %arg_z.  A function that returns those
793         values is in the xp's %fn; we just have to adjust the
794         stack (keeping the return address in the right place
795         and discarding any stack args/reserved stack frame),
796         then set nargs and the PC so that that function's
797         called when we resume.
798      */
799      LispObj *vsp =(LispObj *)save_vsp, ra = *vsp;
800      int nargs = xpGPR(xp, Inargs)>>fixnumshift;
801
802#ifdef X8664
803      if (nargs > 3) {
804        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 3)));
805        push_on_lisp_stack(xp,ra);
806      }
807#else
808      if (nargs > 2) {
809        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 2)));
810        push_on_lisp_stack(xp,ra);
811      }
812#endif
813      xpPC(xp) = xpGPR(xp,Ifn);
814      xpGPR(xp,Inargs) = 1<<fixnumshift;
815    } else {
816      xpPC(xp) += skip;
817    }
818    return true;
819  } else {
820    return false;
821  }
822}
823
824
825protection_handler
826* protection_handlers[] = {
827  do_spurious_wp_fault,
828  do_soft_stack_overflow,
829  do_soft_stack_overflow,
830  do_soft_stack_overflow,
831  do_hard_stack_overflow,   
832  do_hard_stack_overflow,
833  do_hard_stack_overflow,
834};
835
836
837/* Maybe this'll work someday.  We may have to do something to
838   make the thread look like it's not handling an exception */
839void
840reset_lisp_process(ExceptionInformation *xp)
841{
842}
843
844Boolean
845do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
846{
847  /*  reset_lisp_process(xp); */
848  Bug(xp, "Unrecoverable stack overflow.");
849  return false;
850}
851
852
853Boolean
854do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
855{
856
857  return false;
858}
859
860Boolean
861do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
862{
863  /* Trying to write into a guard page on the vstack or tstack.
864     Unprotect the soft guard area and
865     signal an error_stack_overflow condition.
866      */
867  lisp_protection_kind which = prot_area->why;
868  Boolean on_TSP = (which == kTSPsoftguard);
869  LispObj save_fp = xpGPR(xp,Ifp);
870  LispObj save_vsp = xpGPR(xp,Isp), 
871    xcf,
872    cmain = nrs_CMAIN.vcell;
873  area *a;
874  protected_area_ptr soft;
875  TCR *tcr = get_tcr(false);
876  int skip;
877
878  if ((fulltag_of(cmain) == fulltag_misc) &&
879      (header_subtag(header_of(cmain)) == subtag_macptr)) {
880    if (on_TSP) {
881      a = tcr->ts_area;
882    } else {
883      a = tcr->vs_area;
884    }
885    soft = a->softprot;
886    unprotect_area(soft);
887    xcf = create_exception_callback_frame(xp, tcr);
888    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, on_TSP, 0, 0);
889    xpGPR(xp,Ifp) = save_fp;
890    xpGPR(xp,Isp) = save_vsp;
891    xpPC(xp) += skip;
892    return true;
893  }
894  return false;
895}
896
897Boolean
898is_write_fault(ExceptionInformation *xp, siginfo_t *info)
899{
900#ifdef DARWIN
901  return (UC_MCONTEXT(xp)->__es.__err & 0x2) != 0;
902#endif
903#if defined(LINUX) || defined(SOLARIS)
904  return (xpGPR(xp,REG_ERR) & 0x2) != 0;
905#endif
906#ifdef FREEBSD
907  return (xp->uc_mcontext.mc_err & 0x2) != 0;
908#endif
909#ifdef WINDOWS
910  return (info->ExceptionFlags == EXCEPTION_WRITE_FAULT);
911#endif
912}
913
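/* handle_fault: memory faults taken while in lisp (or via a "safe"
   foreign reference).  In order: pending safe_ref_address probes,
   guard-page/protected-area handlers, lazy unprotection of the
   read-only area, writes to watched objects, and finally a SIGBUS
   callback to lisp if nothing else claims the fault. */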
914Boolean
915handle_fault(TCR *tcr, ExceptionInformation *xp, siginfo_t *info, int old_valence)
916{
917#ifdef FREEBSD
918#ifdef X8664
919  BytePtr addr = (BytePtr) xp->uc_mcontext.mc_addr;
920#else
921  BytePtr addr = (BytePtr) info->si_addr;
922#endif
923#else
924#ifdef WINDOWS
925  BytePtr addr = (BytePtr) info->ExceptionInformation[1];
926#else
927  BytePtr addr = (BytePtr) info->si_addr;
928#endif
929#endif
930  Boolean valid = IS_PAGE_FAULT(info,xp);
931
932  if (TCR_AUX(tcr)->safe_ref_address != NULL) {
933    xpGPR(xp,Iimm0) = 0;
934    xpPC(xp) = xpGPR(xp,Ira0);
935    TCR_AUX(tcr)->safe_ref_address = NULL;
936    return true;
937  }
938
939  if (valid) {
940    {
941      protected_area *a = find_protected_area(addr);
942      protection_handler *handler;
943     
944      if (a) {
945        handler = protection_handlers[a->why];
946        return handler(xp, a, addr);
947      }
948    }
949
950    if ((addr >= readonly_area->low) &&
951        (addr < readonly_area->active)) {
952      UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
953                      page_size);
954      return true;
955    }
956
957    {
958      area *a = area_containing(addr);
959
960      if (a && a->code == AREA_WATCHED && addr < a->high) {
961        /* caught a write to a watched object */
962        LispObj *p = (LispObj *)a->low;
963        LispObj node = *p;
964        unsigned tag_n = fulltag_of(node);
965        LispObj cmain = nrs_CMAIN.vcell;
966        LispObj obj;
967
968        if (immheader_tag_p(tag_n) || nodeheader_tag_p(tag_n))
969          obj = (LispObj)p + fulltag_misc;
970        else
971          obj = (LispObj)p + fulltag_cons;
972
973        if ((fulltag_of(cmain) == fulltag_misc) &&
974            (header_subtag(header_of(cmain)) == subtag_macptr)) {
975          LispObj save_vsp = xpGPR(xp, Isp);
976          LispObj save_fp = xpGPR(xp, Ifp);
977          LispObj xcf;
978          natural offset = (LispObj)addr - obj;
979          int skip;
980
981          push_on_lisp_stack(xp, obj);
982          xcf = create_exception_callback_frame(xp, tcr);
983
984          /* The magic 2 means this was a write to a watched object */
985          skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, 2,
986                                  (natural)addr, offset);
987          xpPC(xp) += skip;
988          xpGPR(xp, Ifp) = save_fp;
989          xpGPR(xp, Isp) = save_vsp;
990          return true;
991        }
992      }
993    }
994  }
995
996  if (old_valence == TCR_STATE_LISP) {
997    LispObj cmain = nrs_CMAIN.vcell,
998      xcf;
999    if ((fulltag_of(cmain) == fulltag_misc) &&
1000      (header_subtag(header_of(cmain)) == subtag_macptr)) {
1001      xcf = create_exception_callback_frame(xp, tcr);
1002      callback_to_lisp(tcr, cmain, xp, xcf, SIGBUS, valid ? is_write_fault(xp,info) : (natural)-1, valid ? (natural)addr : 0, 0);
1003    }
1004  }
1005  return false;
1006}
1007
1008Boolean
1009handle_foreign_fpe(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
1010{
1011#ifdef X8632
1012  return false;
1013#else
1014  int code;
1015
1016#ifdef WINDOWS
1017  if (info->ExceptionCode == EXCEPTION_INT_DIVIDE_BY_ZERO)
1018    return false;
1019#else
1020  if (info->si_code == FPE_INTDIV)
1021    return false;
1022#endif
1023
1024  /*
1025   * Cooperate with .SPffcall to avoid saving and restoring the MXCSR
1026   * around every foreign call.
1027   */
1028    if (! (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN_FPE))) {
1029      tcr->flags |= (1<<TCR_FLAG_BIT_FOREIGN_FPE);
1030      tcr->lisp_mxcsr = xpMXCSR(xp) & ~MXCSR_STATUS_MASK;
1031    }
1032    xpMXCSR(xp) &= ~MXCSR_STATUS_MASK;
1033    xpMXCSR(xp) |= MXCSR_CONTROL_MASK;
1034    return true;
1035#endif
1036}
1037
1038Boolean
1039handle_floating_point_exception(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
1040{
1041  int code,skip;
1042  LispObj  xcf, cmain = nrs_CMAIN.vcell,
1043    save_vsp = xpGPR(xp,Isp),
1044    save_fp = xpGPR(xp,Ifp);
1045#ifdef DARWIN /* bug in <= 10.5 */
1046  void decode_vector_fp_exception(siginfo_t *, uint32_t);
1047
1048  decode_vector_fp_exception(info, (uint32_t)(UC_MCONTEXT(xp)->__fs.__fpu_mxcsr));
1049#endif
1050
1051#ifdef WINDOWS
1052  code = info->ExceptionCode;
1053#else
1054  code = info->si_code;
1055#endif 
1056
1057  if ((fulltag_of(cmain) == fulltag_misc) &&
1058      (header_subtag(header_of(cmain)) == subtag_macptr)) {
1059    xcf = create_exception_callback_frame(xp, tcr);
1060    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGFPE, code, 0, 0);
1061    xpPC(xp) += skip;
1062    xpGPR(xp,Ifp) = save_fp;
1063    xpGPR(xp,Isp) = save_vsp;
1064    return true;
1065  } else {
1066    return false;
1067  }
1068}
1069
1070
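/* Grow the thread's special-binding table (the "tlb") so that the
   symbol index popped from the lisp stack fits; new entries are
   initialized to no_thread_local_binding_marker. */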
1071Boolean
1072extend_tcr_tlb(TCR *tcr, ExceptionInformation *xp)
1073{
1074  LispObj index, old_limit = tcr->tlb_limit, new_limit, new_bytes;
1075  LispObj *old_tlb = tcr->tlb_pointer, *new_tlb, *work, *tos;
1076
1077  tos = (LispObj*)(xpGPR(xp,Isp));
1078  index = *tos++;
1079  (xpGPR(xp,Isp))=(LispObj)tos;
1080 
1081  new_limit = align_to_power_of_2(index+1,12);
1082  new_bytes = new_limit-old_limit;
1083  new_tlb = realloc(old_tlb, new_limit);
1084
1085  if (new_tlb == NULL) {
1086    return false;
1087  }
1088  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
1089
1090  while (new_bytes) {
1091    *work++ = no_thread_local_binding_marker;
1092    new_bytes -= sizeof(LispObj);
1093  }
1094  tcr->tlb_pointer = new_tlb;
1095  tcr->tlb_limit = new_limit;
1096  return true;
1097}
1098
1099
1100#if defined(FREEBSD) || defined(DARWIN)
1101static
1102char mxcsr_bit_to_fpe_code[] = {
1103  FPE_FLTINV,                   /* ie */
1104  0,                            /* de */
1105  FPE_FLTDIV,                   /* ze */
1106  FPE_FLTOVF,                   /* oe */
1107  FPE_FLTUND,                   /* ue */
1108  FPE_FLTRES                    /* pe */
1109};
1110
1111void
1112decode_vector_fp_exception(siginfo_t *info, uint32_t mxcsr)
1113{
1114  /* If the exception appears to be an XMM FP exception, try to
1115     determine what it was by looking at bits in the mxcsr.
1116  */
1117  int xbit, maskbit;
1118 
1119  for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
1120    if ((mxcsr & (1 << xbit)) &&
1121        !(mxcsr & (1 << maskbit))) {
1122      info->si_code = mxcsr_bit_to_fpe_code[xbit];
1123      return;
1124    }
1125  }
1126  /* Nothing enabled and set in the mxcsr, assume integer /0 */
1127  info->si_code = FPE_INTDIV;
1128}
1129
1130#ifdef FREEBSD
1131void
1132freebsd_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
1133{
1134  if (info->si_code == 0) {
1135#ifdef X8664
1136    struct savefpu *fpu = (struct savefpu *) &(xp->uc_mcontext.mc_fpstate);
1137#else
1138    struct savexmm *fpu = (struct savexmm *) &(xp->uc_mcontext.mc_fpstate);
1139#endif
1140    uint32_t mxcsr = fpu->sv_env.en_mxcsr;
1141
1142    decode_vector_fp_exception(info, mxcsr);
1143  }
1144}
1145#endif
1146
1147#ifdef DARWIN
1148void
1149darwin_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
1150{
1151  if (info->si_code == EXC_I386_SSEEXTERR) {
1152    uint32_t mxcsr = UC_MCONTEXT(xp)->__fs.__fpu_mxcsr;
1153
1154    decode_vector_fp_exception(info, mxcsr);
1155  }
1156}
1157
1158#endif
1159
1160#endif
1161
1162void
1163get_lisp_string(LispObj lisp_string, char *c_string, natural max)
1164{
1165  lisp_char_code *src = (lisp_char_code *)  (ptr_from_lispobj(lisp_string + misc_data_offset));
1166  natural i, n = header_element_count(header_of(lisp_string));
1167
1168  if (n > max) {
1169    n = max;
1170  }
1171
1172  for (i = 0; i < n; i++) {
1173    c_string[i] = 0xff & (src[i]);
1174  }
1175  c_string[n] = 0;
1176}
1177
1178Boolean handle_watch_trap(ExceptionInformation *xp, TCR *tcr);
1179
1180Boolean
1181handle_exception(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1182{
1183  pc program_counter = (pc)xpPC(context);
1184
1185  if (old_valence != TCR_STATE_LISP) {
1186    if (old_valence == TCR_STATE_FOREIGN && signum == SIGFPE) {
1187      return handle_foreign_fpe(tcr, context, info);
1188    } else {
1189      return false;
1190    }
1191  }
1192
1193  switch (signum) {
1194  case SIGNUM_FOR_INTN_TRAP:
1195    if (IS_MAYBE_INT_TRAP(info,context)) {
1196      /* Something mapped to SIGSEGV/SIGBUS that has nothing to do with
1197         a memory fault.  On x86, an "int n" instruction that's
1198         not otherwise implemented causes a "protection fault".  That
1199         has nothing to do with accessing protected memory, but most
1200         Unices act as if it did. */
1201      if ((program_counter != NULL) &&
1202          (*program_counter == INTN_OPCODE)) {
1203        program_counter++;
1204        switch (*program_counter) {
1205        case UUO_ALLOC_TRAP:
1206          {
1207            Boolean did_notify = false,
1208              allocation_disabled = !allocation_enabled,
1209              *notify_ptr = &did_notify;
1210            if (did_gc_notification_since_last_full_gc) {
1211              notify_ptr = NULL;
1212            }
1213            if (handle_alloc_trap(context, tcr, notify_ptr)) {
1214              if (! did_notify) {
1215                if (! allocation_disabled) {
1216                  xpPC(context) += 2;   /* we might have GCed. */
1217                }
1218              }
1219              return true;
1220            }
1221          }
1222          break;
1223        case UUO_GC_TRAP:
1224          if (handle_gc_trap(context, tcr)) {
1225            xpPC(context) += 2;
1226            return true;
1227          }
1228          break;
1229        case UUO_WATCH_TRAP:
1230          /* add or remove watched object */
1231          if (handle_watch_trap(context, tcr)) {
1232            xpPC(context) += 2;
1233            return true;
1234          }
1235          break;
1236        case UUO_DEBUG_TRAP:
1237          xpPC(context) = (natural) (program_counter+1);
1238          lisp_Debugger(context, info, debug_entry_dbg, false, "Lisp Breakpoint");
1239          return true;
1240           
1241        case UUO_DEBUG_TRAP_WITH_STRING:
1242          xpPC(context) = (natural) (program_counter+1);
1243          {
1244            char msg[512];
1245
1246            get_lisp_string(xpGPR(context,Iarg_z),msg, sizeof(msg)-1);
1247            lisp_Debugger(context, info, debug_entry_dbg, false, msg);
1248          }
1249          return true;
1250         
1251        default:
1252          return handle_error(tcr, context);
1253        }
1254      } else {
1255        return false;
1256      }
1257
1258    } else {
1259      return handle_fault(tcr, context, info, old_valence);
1260    }
1261    break;
1262
1263  case SIGNAL_FOR_PROCESS_INTERRUPT:
1264    tcr->interrupt_pending = 0;
1265    callback_for_interrupt(tcr, context);
1266    return true;
1267    break;
1268
1269
1270  case SIGILL:
1271    if ((program_counter[0] == XUUO_OPCODE_0) &&
1272        (program_counter[1] == XUUO_OPCODE_1)) {
1273      TCR *target = (TCR *)xpGPR(context, Iarg_z);
1274
1275      switch (program_counter[2]) {
1276      case XUUO_TLB_TOO_SMALL:
1277        if (extend_tcr_tlb(tcr,context)) {
1278          xpPC(context)+=3;
1279          return true;
1280        }
1281        break;
1282       
1283      case XUUO_INTERRUPT_NOW:
1284        callback_for_interrupt(tcr,context);
1285        xpPC(context)+=3;
1286        return true;
1287
1288      case XUUO_SUSPEND_NOW:
1289        xpPC(context)+=3;
1290        return true;
1291
1292      case XUUO_INTERRUPT:
1293        raise_thread_interrupt(target);
1294        xpPC(context)+=3;
1295        return true;
1296
1297      case XUUO_SUSPEND:
1298        xpGPR(context,Iimm0) = (LispObj) lisp_suspend_tcr(target);
1299        xpPC(context)+=3;
1300        return true;
1301
1302      case XUUO_SUSPEND_ALL:
1303        lisp_suspend_other_threads();
1304        xpPC(context)+=3;
1305        return true;
1306
1307
1308      case XUUO_RESUME:
1309        xpGPR(context,Iimm0) = (LispObj) lisp_resume_tcr(target);
1310        xpPC(context)+=3;
1311        return true;
1312       
1313      case XUUO_RESUME_ALL:
1314        lisp_resume_other_threads();
1315        xpPC(context)+=3;
1316        return true;
1317       
1318      case XUUO_KILL:
1319        xpGPR(context,Iimm0) = (LispObj)kill_tcr(target);
1320        xpPC(context)+=3;
1321        return true;
1322
1323      case XUUO_ALLOCATE_LIST:
1324        allocate_list(context,tcr);
1325        xpPC(context)+=3;
1326        return true;
1327
1328      default:
1329        return false;
1330      }
1331    } else {
1332      return false;
1333    }
1334    break;
1335   
1336  case SIGFPE:
1337#ifdef FREEBSD
1338    /* As of 6.1, FreeBSD/AMD64 doesn't seem real comfortable
1339       with this newfangled XMM business (and therefore info->si_code
1340       is often 0 on an XMM FP exception).
1341       Try to figure out what really happened by decoding mxcsr
1342       bits.
1343    */
1344    freebsd_decode_vector_fp_exception(info,context);
1345#endif
1346#ifdef DARWIN
1347    /* Same general problem with Darwin as of 8.7.2 */
1348    darwin_decode_vector_fp_exception(info,context);
1349#endif
1350
1351    return handle_floating_point_exception(tcr, context, info);
1352
1353#if SIGBUS != SIGNUM_FOR_INTN_TRAP
1354  case SIGBUS:
1355    return handle_fault(tcr, context, info, old_valence);
1356#endif
1357   
1358#if SIGSEGV != SIGNUM_FOR_INTN_TRAP
1359  case SIGSEGV:
1360    return handle_fault(tcr, context, info, old_valence);
1361#endif   
1362   
1363  default:
1364    return false;
1365  }
1366  return false;
1367}
1368
1369
1370/*
1371   Current thread has all signals masked.  Before unmasking them,
1372   make it appear that the current thread has been suspended.
1373   (This is to handle the case where another thread is trying
1374   to GC before this thread is able to seize the exception lock.)
1375*/
1376int
1377prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
1378{
1379  int old_valence = tcr->valence;
1380
1381  tcr->pending_exception_context = context;
1382  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
1383
1384#ifdef WINDOWS
1385  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1386    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1387    SEM_RAISE(TCR_AUX(tcr)->suspend);
1388    SEM_WAIT_FOREVER(TCR_AUX(tcr)->resume);
1389  }
1390#else
1391  ALLOW_EXCEPTIONS(context);
1392#endif
1393  return old_valence;
1394} 
1395
1396void
1397wait_for_exception_lock_in_handler(TCR *tcr, 
1398                                   ExceptionInformation *context,
1399                                   xframe_list *xf)
1400{
1401
1402  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
1403#if 0
1404  fprintf(dbgout, "0x" LISP " has exception lock\n", tcr);
1405#endif
1406  xf->curr = context;
1407#ifdef X8632
1408  xf->node_regs_mask = tcr->node_regs_mask;
1409#endif
1410  xf->prev = tcr->xframe;
1411  tcr->xframe =  xf;
1412  tcr->pending_exception_context = NULL;
1413  tcr->valence = TCR_STATE_FOREIGN; 
1414}
1415
1416void
1417unlock_exception_lock_in_handler(TCR *tcr)
1418{
1419  tcr->pending_exception_context = tcr->xframe->curr;
1420#ifdef X8632
1421  tcr->node_regs_mask = tcr->xframe->node_regs_mask;
1422#endif
1423  tcr->xframe = tcr->xframe->prev;
1424  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
1425  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
1426#if 0
1427  fprintf(dbgout, "0x" LISP " released exception lock\n", tcr);
1428#endif
1429}
1430
1431/*
1432   If an interrupt is pending on exception exit, try to ensure
1433   that the thread sees it as soon as it's able to run.
1434*/
1435#ifdef WINDOWS
1436void
1437raise_pending_interrupt(TCR *tcr)
1438{
1439}
1440void
1441exit_signal_handler(TCR *tcr, int old_valence)
1442{
1443}
1444void
1445signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
1446{
1447}
1448#else
1449void
1450raise_pending_interrupt(TCR *tcr)
1451{
1452  if ((TCR_INTERRUPT_LEVEL(tcr) >= 0) &&
1453      (tcr->interrupt_pending)) {
1454    pthread_kill((pthread_t)(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
1455  }
1456}
1457
1458void
1459exit_signal_handler(TCR *tcr, int old_valence)
1460{
1461  sigset_t mask;
1462  sigfillset(&mask);
1463#ifdef FREEBSD
1464  sigdelset(&mask,SIGTRAP);
1465#endif
1466 
1467  pthread_sigmask(SIG_SETMASK,&mask, NULL);
1468  tcr->valence = old_valence;
1469  tcr->pending_exception_context = NULL;
1470}
1471
1472void
1473signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context
1474#ifdef DARWIN
1475               , TCR *tcr, int old_valence
1476#endif
1477)
1478{
1479  xframe_list xframe_link;
1480#ifndef DARWIN
1481  TCR *tcr = get_tcr(false);
1482
1483  int old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1484#endif
1485  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
1486    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
1487    pthread_kill(pthread_self(), thread_suspend_signal);
1488  }
1489  wait_for_exception_lock_in_handler(tcr,context, &xframe_link);
1490
1491
1492  if (! handle_exception(signum, info, context, tcr, old_valence)) {
1493    char msg[512];
1494    Boolean foreign = (old_valence != TCR_STATE_LISP);
1495
1496    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x" LISP ", context->regs at #x" LISP "", signum, xpPC(context), (natural)xpGPRvector(context));
1497   
1498    if (lisp_Debugger(context, info, signum,  foreign, msg)) {
1499      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
1500    }
1501  }
1502  unlock_exception_lock_in_handler(tcr);
1503#ifndef DARWIN_USE_PSEUDO_SIGRETURN
1504  exit_signal_handler(tcr, old_valence);
1505#endif
1506  /* raise_pending_interrupt(tcr); */
1507#ifndef DARWIN_USE_PSEUDO_SIGRETURN
1508  SIGRETURN(context);
1509#endif
1510}
1511#endif
1512
1513
1514
1515
1516#ifdef LINUX
1517/* type of pointer to saved fp state */
1518#ifdef X8664
1519typedef fpregset_t FPREGS;
1520#else
1521typedef struct _fpstate *FPREGS;
1522#endif
1523LispObj *
1524copy_fpregs(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
1525{
1526  FPREGS src = (FPREGS)(xp->uc_mcontext.fpregs), dest;
1527 
1528  if (src) {
1529    dest = ((FPREGS)current)-1;
1530    *dest = *src;
1531    *destptr = dest;
1532    current = (LispObj *) dest;
1533  }
1534  return current;
1535}
1536#endif
1537
1538
1539#ifdef FREEBSD
1540typedef void *FPREGS;
1541
1542
1543LispObj *
1544copy_avx(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
1545{
1546  natural sp;
1547
1548  *destptr = (FPREGS)AVX_CONTEXT_PTR(xp);
1549
1550  if (AVX_CONTEXT_PRESENT(xp)) {
1551    sp = (natural)current;
1552    sp -= AVX_CONTEXT_SIZE(xp);
1553    sp = truncate_to_power_of_2(sp,6);
1554    memcpy((void *)sp,(void *)AVX_CONTEXT_PTR(xp),AVX_CONTEXT_SIZE(xp));
1555    current = (LispObj *)sp;
1556    *destptr = (FPREGS)current;
1557  }
1558  return current;
1559}
1560#endif
1561
1562#ifdef DARWIN
1563LispObj *
1564copy_darwin_mcontext(MCONTEXT_T context, 
1565                     LispObj *current, 
1566                     MCONTEXT_T *out)
1567{
1568  MCONTEXT_T dest = ((MCONTEXT_T)current)-1;
1569  dest = (MCONTEXT_T) (((LispObj)dest) & ~15);
1570
1571  *dest = *context;
1572  *out = dest;
1573  return (LispObj *)dest;
1574}
1575#endif
1576
1577LispObj *
1578copy_siginfo(siginfo_t *info, LispObj *current)
1579{
1580  siginfo_t *dest = ((siginfo_t *)current) - 1;
1581#if !defined(LINUX) || !defined(X8632)
1582  dest = (siginfo_t *) (((LispObj)dest)&~15);
1583#endif
1584  *dest = *info;
1585  return (LispObj *)dest;
1586}
1587
1588#ifdef LINUX
1589typedef FPREGS copy_ucontext_last_arg_t;
1590#else
1591typedef void * copy_ucontext_last_arg_t;
1592#endif
1593
1594#ifndef WINDOWS
1595LispObj *
1596copy_ucontext(ExceptionInformation *context, LispObj *current, copy_ucontext_last_arg_t fp)
1597{
1598  ExceptionInformation *dest = ((ExceptionInformation *)current)-1;
1599#if !defined(LINUX) || !defined(X8632)
1600  dest = (ExceptionInformation *) (((LispObj)dest) & ~15);
1601#endif
1602
1603  *dest = *context;
1604  /* Fix it up a little; where's the signal mask allocated, if indeed
1605     it is "allocated" ? */
1606#ifdef LINUX
1607  dest->uc_mcontext.fpregs = (fpregset_t)fp;
1608#endif
1609#ifdef FREEBSD
1610  if (AVX_CONTEXT_PRESENT(context)) {
1611    AVX_CONTEXT_PTR(context) = (natural)fp;
1612  }
1613#endif
1614  dest->uc_stack.ss_sp = 0;
1615  dest->uc_stack.ss_size = 0;
1616  dest->uc_stack.ss_flags = 0;
1617  dest->uc_link = NULL;
1618  return (LispObj *)dest;
1619}
1620#endif
1621
1622
1623LispObj *
1624tcr_frame_ptr(TCR *tcr)
1625{
1626  ExceptionInformation *xp;
1627  LispObj *fp;
1628
1629  if (tcr->pending_exception_context)
1630    xp = tcr->pending_exception_context;
1631  else if (tcr->valence == TCR_STATE_LISP) {
1632    xp = TCR_AUX(tcr)->suspend_context;
1633  } else {
1634    xp = NULL;
1635  }
1636  if (xp) {
1637    fp = (LispObj *)xpGPR(xp, Ifp);
1638  } else {
1639    fp = tcr->save_fp;
1640  }
1641  return fp;
1642}
1643
1644
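/* Pick a usable spot on the foreign (C) stack: use rsp if it already
   lies in the foreign area, otherwise the TCR's saved foreign_sp, then
   skip 128 bytes (the x8664 red zone) and align to 16 bytes. */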
1645LispObj *
1646find_foreign_rsp(LispObj rsp, area *foreign_area, TCR *tcr)
1647{
1648
1649  if (((BytePtr)rsp < foreign_area->low) ||
1650      ((BytePtr)rsp > foreign_area->high)) {
1651    rsp = (LispObj)(tcr->foreign_sp);
1652  }
1653  return (LispObj *) (((rsp-128) & ~15));
1654}
1655
1656#ifdef X8632
1657#ifdef LINUX
1658/* This is here for debugging.  On entry to a signal handler that
1659   receives info and context arguments, the stack should look exactly
1660   like this.  The "pretcode field" of the structure is the address
1661   of code that does an rt_sigreturn syscall, and rt_sigreturn expects
1662   %esp at the time of that syscall to be pointing just past the
1663   pretcode field.
1664   handle_signal_on_foreign_stack() and helpers have to be very
1665   careful to duplicate this "structure" exactly.
1666   Note that on x8664 Linux, rt_sigreturn expects a ucontext to
1667   be on top of the stack (with a siginfo_t underneath it.)
1668   It sort of half-works to do sigreturn via setcontext() on
1669   x8632 Linux, but (a) it may not be available on some distributions
1670   and (b) even a relatively modern version of it uses "fldenv" to
1671   restore FP context, and "fldenv" isn't nearly good enough.
1672*/
1673
1674struct rt_sigframe {
1675        char *pretcode;
1676        int sig;
1677        siginfo_t  *pinfo;
1678        void  *puc;
1679        siginfo_t info;
1680        struct ucontext uc;
1681        struct _fpstate fpstate;
1682        char retcode[8];
1683};
1684struct rt_sigframe *rtsf = 0;
1685
1686#endif
1687#endif
1688
1689
1690#ifndef WINDOWS
1691/* x8632 Linux requires that the stack-allocated siginfo is nearer
1692   the top of stack than the stack-allocated ucontext.  If other
1693   platforms care, they expect the ucontext to be nearer the top
1694   of stack.
1695*/
1696
1697#if defined(LINUX) && defined(X8632)
1698#define UCONTEXT_ON_TOP_OF_STACK 0
1699#else
1700#define UCONTEXT_ON_TOP_OF_STACK 1
1701#endif
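/* Copy the siginfo, the ucontext, and any FP/AVX state onto the foreign
   stack (in the order the platform's sigreturn expects), push
   return_address, and switch stacks before running the real handler.
   This keeps handler state off the lisp stacks and lets the handler
   return (and sigreturn) from the copied context. */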
1702void
1703handle_signal_on_foreign_stack(TCR *tcr,
1704                               void *handler, 
1705                               int signum, 
1706                               siginfo_t *info, 
1707                               ExceptionInformation *context,
1708                               LispObj return_address
1709                               )
1710{
1711#ifdef LINUX
1712  FPREGS fpregs = NULL;
1713#else
1714  void *fpregs = NULL;
1715#endif
1716#ifdef DARWIN
1717  MCONTEXT_T mcontextp = NULL;
1718#endif
1719  siginfo_t *info_copy = NULL;
1720  ExceptionInformation *xp = NULL;
1721  LispObj *foreign_rsp = find_foreign_rsp(xpGPR(context,Isp), tcr->cs_area, tcr);
1722
1723#ifdef LINUX
1724  foreign_rsp = copy_fpregs(context, foreign_rsp, &fpregs);
1725#endif
1726#ifdef FREEBSD
1727  foreign_rsp = copy_avx(context, foreign_rsp, &fpregs);
1728#endif
1729#ifdef DARWIN
1730  foreign_rsp = copy_darwin_mcontext(UC_MCONTEXT(context), foreign_rsp, &mcontextp);
1731#endif
1732#if UCONTEXT_ON_TOP_OF_STACK
1733  /* copy info first */
1734  foreign_rsp = copy_siginfo(info, foreign_rsp);
1735  info_copy = (siginfo_t *)foreign_rsp;
1736  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
1737  xp = (ExceptionInformation *)foreign_rsp;
1738#else
1739  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
1740  xp = (ExceptionInformation *)foreign_rsp;
1741  foreign_rsp = copy_siginfo(info, foreign_rsp);
1742  info_copy = (siginfo_t *)foreign_rsp;
1743#endif
1744#ifdef DARWIN
1745  UC_MCONTEXT(xp) = mcontextp;
1746#endif
1747  *--foreign_rsp = return_address;
1748  switch_to_foreign_stack(foreign_rsp,handler,signum,info_copy,xp);
1749}
1750#endif
1751
1752
1753#ifndef WINDOWS
1754#ifndef USE_SIGALTSTACK
1755void
1756arbstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation *context)
1757{
1758  TCR *tcr = get_interrupt_tcr(false);
1759#if 1
1760  if (tcr->valence != TCR_STATE_LISP) {
1761    lisp_Debugger(context, info, signum, true, "exception in foreign context");
1762  }
1763#endif
1764  {
1765    area *vs = tcr->vs_area;
1766    BytePtr current_sp = (BytePtr) current_stack_pointer();
1767
1768
1769    if ((current_sp >= vs->low) &&
1770        (current_sp < vs->high)) {
1771      handle_signal_on_foreign_stack(tcr,
1772                                     signal_handler,
1773                                     signum,
1774                                     info,
1775                                     context,
1776                                     (LispObj)__builtin_return_address(0)
1777
1778                                     );
1779    } else {
1780      signal_handler(signum, info, context, tcr, 0);
1781    }
1782  }
1783}
1784
1785#else
1786void
1787altstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
1788{
1789  TCR* tcr = get_tcr(true);
1790  Boolean do_stack_switch = false;
1791  stack_t ss;
1792
1793  if ((tcr->valence != TCR_STATE_LISP) &&
1794      (tcr->safe_ref_address) &&
1795      ((signum == SIGBUS) || (signum == SIGSEGV))) {
1796    extern opcode ffcall_return;
1797    xpPC(context) = (natural)&ffcall_return;
1798    xpGPR(context,Iimm0) = 0;
1799    xpGPR(context,Isp) = (natural)(tcr->foreign_sp);
1800    return;
1801  }
1802
1803
1804
1805
1806
1807#if WORD_SIZE==64
1808  if ((signum == SIGFPE) && (tcr->valence != TCR_STATE_LISP)) {
1809    if (handle_foreign_fpe(tcr,context,info)) {
1810      return;
1811    }
1812  }
1813#endif
1814     
1815  /* Because of signal chaining - and the possibility that libraries
1816     that use it ignore sigaltstack-related issues - we have to check
1817     to see if we're actually on the altstack.
1818
1819     When OpenJDK VMs overwrite preinstalled signal handlers (that're
1820     there for a reason ...), they're also casual about SA_RESTART.
1821     We care about SA_RESTART (mostly) in the PROCESS-INTERRUPT case,
1822     and whether a JVM steals the signal used for PROCESS-INTERRUPT
1823     is platform-dependent.  On those platforms where the same signal
1824     is used, we should strongly consider trying to use another one.
1825  */
1826  sigaltstack(NULL, &ss);
1827  if (ss.ss_flags == SS_ONSTACK) {
1828    do_stack_switch = true;
1829  } else {
1830    area *vs = tcr->vs_area;
1831    BytePtr current_sp = (BytePtr) current_stack_pointer();
1832
1833    if ((current_sp >= vs->low) &&
1834        (current_sp < vs->high)) {
1835      do_stack_switch = true;
1836    }
1837  }
1838  if (do_stack_switch) {
1839    handle_signal_on_foreign_stack(tcr,signal_handler,signum,info,context,(LispObj)SIGRETURN_ADDRESS());
1840  } else {
1841    signal_handler(signum,info,context);
1842  }
1843}
1844#endif
1845#endif
1846
1847Boolean
1848stack_pointer_on_vstack_p(LispObj stack_pointer, TCR *tcr)
1849{
1850  area *a = tcr->vs_area;
1851 
1852  return (((BytePtr)stack_pointer <= a->high) &&
1853          ((BytePtr)stack_pointer > a->low));
1854}
1855
1856
1857#ifdef WINDOWS
1858extern DWORD restore_windows_context(ExceptionInformation *, TCR *, int);
1859#endif
1860
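/* interrupt_handler: runs for SIGNAL_FOR_PROCESS_INTERRUPT.  If the
   thread can't take an interrupt right now (interrupts disabled, not in
   lisp, unwinding, or its stack pointers aren't on the vstack), just
   record it as pending; otherwise clean up any partially completed
   allocation (pc_luser_xp) and run the normal exception machinery with
   the cmain callback. */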
1861void
1862interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1863{
1864  TCR *tcr = get_interrupt_tcr(false);
1865  int old_valence = tcr->valence;
1866
1867  if (tcr) {
1868    if ((TCR_INTERRUPT_LEVEL(tcr) < 0) ||
1869        (tcr->valence != TCR_STATE_LISP) ||
1870        (tcr->unwinding != 0) ||
1871        ! stack_pointer_on_vstack_p(xpGPR(context,Isp), tcr) ||
1872        ! stack_pointer_on_vstack_p(xpGPR(context,Ifp), tcr)) {
1873      tcr->interrupt_pending = (((natural) 1)<< (nbits_in_word - ((natural)1)));
1874    } else {
1875      LispObj cmain = nrs_CMAIN.vcell;
1876     
1877      if ((fulltag_of(cmain) == fulltag_misc) &&
1878          (header_subtag(header_of(cmain)) == subtag_macptr)) {
1879        /*
1880           This thread can (allegedly) take an interrupt now.
1881        */
1882
1883        xframe_list xframe_link;
1884        signed_natural alloc_displacement = 0;
1885        LispObj
1886          *next_tsp = tcr->next_tsp,
1887          *save_tsp = tcr->save_tsp,
1888          *p,
1889          q;
1890        natural old_foreign_exception = tcr->flags & (1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
1891
1892        tcr->flags &= ~(1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
1893           
1894        if (next_tsp != save_tsp) {
1895          tcr->next_tsp = save_tsp;
1896        } else {
1897          next_tsp = NULL;
1898        }
1899        /* have to do this before allowing interrupts */
1900        pc_luser_xp(context, tcr, &alloc_displacement);
1901        old_valence = prepare_to_wait_for_exception_lock(tcr, context);
1902        wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
1903        handle_exception(signum, info, context, tcr, old_valence);
1904        if (alloc_displacement) {
1905          tcr->save_allocptr -= alloc_displacement;
1906        }
1907        if (next_tsp) {
1908          tcr->next_tsp = next_tsp;
1909          p = next_tsp;
1910          while (p != save_tsp) {
1911            *p++ = 0;
1912          }
1913          q = (LispObj)save_tsp;
1914          *next_tsp = q;
1915        }
1916        tcr->flags |= old_foreign_exception;
1917        unlock_exception_lock_in_handler(tcr);
1918#ifndef WINDOWS
1919        exit_signal_handler(tcr, old_valence);
1920#endif
1921      }
1922    }
1923  }
1924#ifdef WINDOWS
1925  restore_windows_context(context,tcr,old_valence);
1926#else
1927  SIGRETURN(context);
1928#endif
1929}
1930
1931
1932#ifndef WINDOWS
1933#ifndef USE_SIGALTSTACK
1934void
1935arbstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1936{
1937  TCR *tcr = get_interrupt_tcr(false);
1938  area *vs = tcr->vs_area;
1939  BytePtr current_sp = (BytePtr) current_stack_pointer();
1940
1941  if ((current_sp >= vs->low) &&
1942      (current_sp < vs->high)) {
1943    handle_signal_on_foreign_stack(tcr,
1944                                   interrupt_handler,
1945                                   signum,
1946                                   info,
1947                                   context,
1948                                   (LispObj)__builtin_return_address(0)
1949                                   );
1950  } else {
1951    /* If we're not on the value stack, we pretty much have to be on
1952       the C stack.  Just run the handler. */
1953    interrupt_handler(signum, info, context);
1954  }
1955}
1956
1957#else /* altstack works */
1958
1959/*
1960   There are unlikely to be any JVM-related signal-chaining issues here,
1961   since on platforms where that could be a problem we use either an RT
1962   signal or an otherwise unused synchronous hardware signal to raise interrupts.
1963*/
1964void
1965altstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
1966{
1967  TCR *tcr = get_interrupt_tcr(false);
1968  handle_signal_on_foreign_stack(tcr,interrupt_handler,signum,info,context,(LispObj)__builtin_return_address(0)
1969                                 );
1970}
1971
1972#endif
1973#endif
1974
1975#ifndef WINDOWS
1976void
1977install_signal_handler(int signo, void *handler, unsigned flags)
1978{
1979  struct sigaction sa;
1980  int err;
1981 
1982  sa.sa_sigaction = (void *)handler;
1983  sigfillset(&sa.sa_mask);
1984#ifdef FREEBSD
1985  /* Strange FreeBSD behavior wrt synchronous signals */
1986  sigdelset(&sa.sa_mask,SIGTRAP);  /* let GDB work */
1987#endif
1988  sa.sa_flags = SA_SIGINFO;
1989
1990#ifdef USE_SIGALTSTACK
1991  if (flags & ON_ALTSTACK)
1992    sa.sa_flags |= SA_ONSTACK;
1993#endif
1994  if (flags & RESTART_SYSCALLS)
1995    sa.sa_flags |= SA_RESTART;
1996  if (flags & RESERVE_FOR_LISP) {
1997    extern sigset_t user_signals_reserved;
1998    sigaddset(&user_signals_reserved, signo);
1999  }
2000
2001  err = sigaction(signo, &sa, NULL);
2002  if (err) {
2003    perror("sigaction");
2004    exit(1);
2005  }
2006}
2007#endif
2008
2009#ifdef WINDOWS
2010BOOL
2011CALLBACK ControlEventHandler(DWORD event)
2012{
2013  switch(event) {
2014  case CTRL_C_EVENT:
2015    lisp_global(INTFLAG) = (1 << fixnumshift);
2016    return TRUE;
2017    break;
2018  case CTRL_BREAK_EVENT:
2019    lisp_global(INTFLAG) = (2 << fixnumshift);
2020    return TRUE;
2021    break;
2022  default:
2023    return FALSE;
2024  }
2025}
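
/* A console control handler with this signature is registered with Win32
   via SetConsoleCtrlHandler(); the registration itself is presumably done
   elsewhere in the runtime, along the lines of:

     SetConsoleCtrlHandler((PHANDLER_ROUTINE)ControlEventHandler, TRUE);
*/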
2026
2027static
2028DWORD mxcsr_bit_to_fpe_code[] = {
2029  EXCEPTION_FLT_INVALID_OPERATION, /* ie */
2030  0,                            /* de */
2031  EXCEPTION_FLT_DIVIDE_BY_ZERO, /* ze */
2032  EXCEPTION_FLT_OVERFLOW,       /* oe */
2033  EXCEPTION_FLT_UNDERFLOW,      /* ue */
2034  EXCEPTION_FLT_INEXACT_RESULT  /* pe */
2035};
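
/* MXCSR keeps the SSE exception status flags in bits 0-5 (IE, DE, ZE,
   OE, UE, PE) and the corresponding mask bits seven positions higher
   (bits 7-12, starting with IM).  The loop below walks a status bit and
   a mask bit in parallel: a flag that is set while its mask bit is clear
   identifies the exception that actually trapped. */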
2036
2037#ifndef STATUS_FLOAT_MULTIPLE_FAULTS
2038#define STATUS_FLOAT_MULTIPLE_FAULTS 0xc00002b4
2039#endif
2040
2041#ifndef STATUS_FLOAT_MULTIPLE_TRAPS
2042#define  STATUS_FLOAT_MULTIPLE_TRAPS 0xc00002b5
2043#endif
2044
2045int
2046map_windows_exception_code_to_posix_signal(DWORD code, siginfo_t *info, ExceptionInformation *context)
2047{
2048  switch (code) {
2049#ifdef WIN_32
2050  case STATUS_FLOAT_MULTIPLE_FAULTS:
2051  case STATUS_FLOAT_MULTIPLE_TRAPS:
2052    {
2053      int xbit, maskbit;
2054      DWORD mxcsr = *(xpMXCSRptr(context));
2055
2056      for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
2057        if ((mxcsr & (1 << xbit)) &&
2058            !(mxcsr & (1 << maskbit))) {
2059          info->ExceptionCode = mxcsr_bit_to_fpe_code[xbit];
2060          break;
2061        }
2062      }
2063    }
2064    return SIGFPE;
2065#endif
2066     
2067  case EXCEPTION_ACCESS_VIOLATION:
2068    return SIGSEGV;
2069  case EXCEPTION_FLT_DENORMAL_OPERAND:
2070  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2071  case EXCEPTION_FLT_INEXACT_RESULT:
2072  case EXCEPTION_FLT_INVALID_OPERATION:
2073  case EXCEPTION_FLT_OVERFLOW:
2074  case EXCEPTION_FLT_STACK_CHECK:
2075  case EXCEPTION_FLT_UNDERFLOW:
2076  case EXCEPTION_INT_DIVIDE_BY_ZERO:
2077  case EXCEPTION_INT_OVERFLOW:
2078    return SIGFPE;
2079  case EXCEPTION_PRIV_INSTRUCTION:
2080  case EXCEPTION_ILLEGAL_INSTRUCTION:
2081    return SIGILL;
2082  case EXCEPTION_IN_PAGE_ERROR:
2083  case STATUS_GUARD_PAGE_VIOLATION:
2084    return SIGBUS;
2085  default:
2086    return -1;
2087  }
2088}
2089
2090
2091LONG
2092windows_exception_handler(EXCEPTION_POINTERS *exception_pointers, TCR *tcr, int signal_number)
2093{
2094  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
2095  int old_valence;
2096  ExceptionInformation *context = exception_pointers->ContextRecord;
2097  siginfo_t *info = exception_pointers->ExceptionRecord;
2098  xframe_list xframes;
2099
2100  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
2101  wait_for_exception_lock_in_handler(tcr, context, &xframes);
2102
2103
2104 
2105  if (!handle_exception(signal_number, info, context, tcr, old_valence)) {
2106    char msg[512];
2107    Boolean foreign = (old_valence != TCR_STATE_LISP);
2108
2109    snprintf(msg, sizeof(msg), "Unhandled exception %d (windows code 0x%x) at 0x%Ix, context->regs at 0x%Ix", signal_number, code, xpPC(context), (natural)xpGPRvector(context));
2110   
2111    if (lisp_Debugger(context, info, signal_number,  foreign, msg)) {
2112      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
2113    }
2114  }
2115  unlock_exception_lock_in_handler(tcr);
2116  return restore_windows_context(context, tcr, old_valence);
2117}
2118
2119void
2120setup_exception_handler_call(CONTEXT *context,
2121                             LispObj new_sp,
2122                             void *handler,
2123                             EXCEPTION_POINTERS *new_ep,
2124                             TCR *tcr,
2125                             int signal_number)
2126{
2127  extern void windows_halt(void);
2128  LispObj *p = (LispObj *)new_sp;
2129#ifdef WIN_64
2130  p-=4;                         /* win64 abi argsave nonsense */
2131  *(--p) = (LispObj)windows_halt;
2132  context->Rsp = (DWORD64)p;
2133  context->Rip = (DWORD64)handler;
2134  context->Rcx = (DWORD64)new_ep;
2135  context->Rdx = (DWORD64)tcr;
2136  context->R8 = (DWORD64)signal_number;
2137#else
2138  p-=4;                          /* args on stack, stack aligned */
2139  p[0] = (LispObj)new_ep;
2140  p[1] = (LispObj)tcr;
2141  p[2] = signal_number;
2142  *(--p) = (LispObj)windows_halt;
2143  context->Esp = (DWORD)p;
2144  context->Eip = (DWORD)handler;
2145#endif
2146  context->EFlags &= ~0x400;  /* clear direction flag */
2147}
2148
2149void
2150prepare_to_handle_windows_exception_on_foreign_stack(TCR *tcr,
2151
2152                                                     CONTEXT *context,
2153                                                     void *handler,
2154                                                     EXCEPTION_POINTERS *original_ep, 
2155                                                     int signal_number)
2156{
2157  LispObj foreign_rsp = 
2158    (LispObj) (tcr->foreign_sp - 128) & ~15;
2159  CONTEXT *new_context;
2160  siginfo_t *new_info;
2161  EXCEPTION_POINTERS *new_ep;
2162
2163  new_context = ((CONTEXT *)(foreign_rsp&~15))-1;
2164  *new_context = *context;
2165  foreign_rsp = (LispObj)new_context;
2166  new_info = ((siginfo_t *)(foreign_rsp&~15))-1;
2167  *new_info = *original_ep->ExceptionRecord;
2168  foreign_rsp = (LispObj)new_info;
2169  new_ep = ((EXCEPTION_POINTERS *)(foreign_rsp&~15))-1;
2170  foreign_rsp = (LispObj)new_ep & ~15;
2171  new_ep->ContextRecord = new_context;
2172  new_ep->ExceptionRecord = new_info;
2173  setup_exception_handler_call(context,foreign_rsp,handler,new_ep, tcr, signal_number);
2174}
2175
2176LONG CALLBACK
2177windows_arbstack_exception_handler(EXCEPTION_POINTERS *exception_pointers)
2178{
2179  extern void ensure_safe_for_string_operations(void);
2180  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
2181  siginfo_t *info = exception_pointers->ExceptionRecord;
2182  ExceptionInformation *context = exception_pointers->ContextRecord;
2183  int signal_number = map_windows_exception_code_to_posix_signal(code, info, context);
2184
2185
2186  if (signal_number <= 0) {
2187    return EXCEPTION_CONTINUE_SEARCH;
2188  } else {
2189    TCR *tcr = get_interrupt_tcr(false);
2190    area *cs = TCR_AUX(tcr)->cs_area;
2191    BytePtr current_sp = (BytePtr) current_stack_pointer();
2192   
2193    ensure_safe_for_string_operations();
2194
2195    if ((current_sp >= cs->low) &&
2196        (current_sp < cs->high)) {
2197      debug_show_registers(context, exception_pointers->ExceptionRecord, 0);
2198      FBug(context, "Exception on foreign stack\n");
2199      return EXCEPTION_CONTINUE_EXECUTION;
2200    }
2201
2202    prepare_to_handle_windows_exception_on_foreign_stack(tcr,
2203                                                         context,
2204                                                         windows_exception_handler,
2205                                                         exception_pointers,
2206                                                         signal_number);
2207    return EXCEPTION_CONTINUE_EXECUTION;
2208  }
2209}
2210
2211
2212void
2213install_pmcl_exception_handlers()
2214{
2215  AddVectoredExceptionHandler(1,windows_arbstack_exception_handler);
2216}
2217#else
2218void
2219install_pmcl_exception_handlers()
2220{
2221  void *handler, *interrupt_handler;
2222
2223#ifdef USE_SIGALTSTACK
2224  handler = (void *)altstack_signal_handler;
2225  interrupt_handler = (void *)altstack_interrupt_handler;
2226#else
2227  handler = (void *)arbstack_signal_handler;
2228  interrupt_handler = (void *)arbstack_interrupt_handler;
2229#endif
2230
2231#ifndef DARWIN
2232  install_signal_handler(SIGILL, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2233  install_signal_handler(SIGBUS, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2234  install_signal_handler(SIGSEGV, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2235  install_signal_handler(SIGFPE, handler, RESERVE_FOR_LISP|ON_ALTSTACK);
2236#endif
2237 
2238  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT, interrupt_handler,
2239                         RESERVE_FOR_LISP|ON_ALTSTACK);
2240  signal(SIGPIPE, SIG_IGN);
2241}
2242#endif
2243
2244
2245
2246/* This should only be called when the tcr_area_lock is held */
2247void
2248empty_tcr_stacks(TCR *tcr)
2249{
2250  if (tcr) {
2251    area *a;
2252
2253    tcr->valence = TCR_STATE_FOREIGN;
2254    a = tcr->vs_area;
2255    if (a) {
2256      a->active = a->high;
2257    }
2258    a = tcr->ts_area;
2259    if (a) {
2260      a->active = a->high;
2261    }
2262    a = TCR_AUX(tcr)->cs_area;
2263    if (a) {
2264      a->active = a->high;
2265    }
2266  }
2267}
2268
2269#ifdef WINDOWS
2270void
2271thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2272{
2273}
2274#else
2275void
2276thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
2277{
2278  TCR *tcr = get_tcr(false);
2279  sigset_t mask;
2280
2281  sigemptyset(&mask);
2282
2283  empty_tcr_stacks(tcr);
2284
2285  pthread_sigmask(SIG_SETMASK,&mask,NULL);
2286  pthread_exit(NULL);
2287}
2288#endif
2289
2290#ifndef WINDOWS
2291#ifndef USE_SIGALTSTACK
2292void
2293arbstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
2294{
2295  TCR *tcr = get_interrupt_tcr(false);
2296  area *vs = tcr->vs_area;
2297  BytePtr current_sp = (BytePtr) current_stack_pointer();
2298
2299  if ((current_sp >= vs->low) &&
2300      (current_sp < vs->high)) {
2301    handle_signal_on_foreign_stack(tcr,
2302                                   thread_kill_handler,
2303                                   signum,
2304                                   info,
2305                                   context,
2306                                   (LispObj)__builtin_return_address(0)
2307                                   );
2308  } else {
2309    /* If we're not on the value stack, we pretty much have to be on
2310       the C stack.  Just run the handler. */
2311    thread_kill_handler(signum, info, context);
2312  }
2313}
2314
2315
2316#else
2317void
2318altstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
2319{
2320  TCR* tcr = get_tcr(true);
2321  handle_signal_on_foreign_stack(tcr,
2322                                 thread_kill_handler,
2323                                 signum,
2324                                 info,
2325                                 context,
2326                                 (LispObj)__builtin_return_address(0)
2327                                 );
2328}
2329#endif
2330#endif
2331
2332#ifdef USE_SIGALTSTACK
2333#define THREAD_KILL_HANDLER altstack_thread_kill_handler
2334#else
2335#define THREAD_KILL_HANDLER arbstack_thread_kill_handler
2336#endif
2337
2338#ifdef WINDOWS
2339void
2340thread_signal_setup()
2341{
2342}
2343#else
2344void
2345thread_signal_setup()
2346{
2347  thread_suspend_signal = SIG_SUSPEND_THREAD;
2348  thread_kill_signal = SIG_KILL_THREAD;
2349
2350  install_signal_handler(thread_suspend_signal, (void *)suspend_resume_handler,
2351                         RESERVE_FOR_LISP|RESTART_SYSCALLS);
2352  install_signal_handler(thread_kill_signal, (void *)THREAD_KILL_HANDLER,
2353                         RESERVE_FOR_LISP|ON_ALTSTACK);
2354}
2355#endif
2356
2357void
2358enable_fp_exceptions()
2359{
2360}
2361
2362void
2363exception_init()
2364{
2365  x86_early_exception_init();
2366  install_pmcl_exception_handlers();
2367}
2368
2369void
2370adjust_exception_pc(ExceptionInformation *xp, int delta)
2371{
2372  xpPC(xp) += delta;
2373}
2374
2375/*
2376  Lower (move toward 0) the "end" of the soft protected area associated
2377  with the area 'a' by one page, if we can.
2378*/
2379
2380void
2381adjust_soft_protection_limit(area *a)
2382{
2383  char *proposed_new_soft_limit = a->softlimit - 4096;
2384  protected_area_ptr p = a->softprot;
2385 
2386  if (proposed_new_soft_limit >= (p->start+16384)) {
2387    p->end = proposed_new_soft_limit;
2388    p->protsize = p->end-p->start;
2389    a->softlimit = proposed_new_soft_limit;
2390  }
2391  protect_area(p);
2392}
2393
2394void
2395restore_soft_stack_limit(unsigned restore_tsp)
2396{
2397  TCR *tcr = get_tcr(false);
2398  area *a;
2399 
2400  if (restore_tsp) {
2401    a = tcr->ts_area;
2402  } else {
2403    a = tcr->vs_area;
2404  }
2405  adjust_soft_protection_limit(a);
2406}
2407
2408
2409#ifdef USE_SIGALTSTACK
2410void
2411setup_sigaltstack(area *a)
2412{
2413  stack_t stack;
2414
2415  stack.ss_size = SIGSTKSZ*8;
2416  stack.ss_flags = 0;
2417  stack.ss_sp = mmap(NULL,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_ANON|MAP_PRIVATE,-1,0);
2418#ifdef LINUX
2419  /* The ucontext pushed on the altstack may not contain the (largish)
2420     __fpregs_mem field; copy_ucontext() wants to copy what it thinks
2421     is a pointer to a full ucontext.  That'll touch a page beyond the
2422     bottom of the altstack, and when this happens on the initial
2423     thread's stack on a recent (2.6.32+?) kernel, we'll SIGBUS instead
2424     of mapping that page.
2425     It's easier to just reserve that page here than it would be to
2426     change copy_ucontext().
2427  */
2428  stack.ss_size -= sizeof(ucontext_t);
2429#endif
2430  if (sigaltstack(&stack, NULL) != 0) {
2431    perror("sigaltstack");
2432    exit(-1);
2433  }
2434}
2435#endif
2436
2437extern opcode egc_write_barrier_start, egc_write_barrier_end,
2438  egc_set_hash_key_conditional, egc_set_hash_key_conditional_success_test,
2439  egc_set_hash_key_conditional_retry,
2440  egc_store_node_conditional_success_end, egc_store_node_conditional_retry,
2441  egc_store_node_conditional_success_test,egc_store_node_conditional,
2442  egc_set_hash_key, egc_gvset, egc_rplacd, egc_rplaca;
2443
2444/* We use (extremely) rigidly defined instruction sequences for consing,
2445   mostly so that 'pc_luser_xp()' knows what to do if a thread is interrupted
2446   while consing.
2447
2448   Note that we can usually identify which of these instructions is about
2449   to be executed by a stopped thread without comparing all of the bytes
2450   to those at the stopped program counter, but we generally need to
2451   know the sizes of each of these instructions.
2452*/
2453
2454#ifdef X8664
2455opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
2456#ifdef TCR_IN_GPR
2457  {0x49,0x8b,0x9b,0xd8,0x00,0x00,0x00}
2458#else
2459  {0x65,0x48,0x8b,0x1c,0x25,0xd8,0x00,0x00,0x00}
2460#endif
2461;
2462opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
2463#ifdef TCR_IN_GPR
2464  {0x49,0x3b,0x9b,0xe0,0x00,0x00,0x00}
2465#else
2466  {0x65,0x48,0x3b,0x1c,0x25,0xe0,0x00,0x00,0x00}
2467#endif
2468
2469;
2470opcode branch_around_alloc_trap_instruction[] =
2471  {0x77,0x02};
2472opcode alloc_trap_instruction[] =
2473  {0xcd,0xc5};
2474opcode clear_tcr_save_allocptr_tag_instruction[] =
2475#ifdef TCR_IN_GPR
2476  {0x41,0x80,0xa3,0xd8,0x00,0x00,0x00,0xf0}
2477#else
2478  {0x65,0x80,0x24,0x25,0xd8,0x00,0x00,0x00,0xf0}
2479#endif
2480;
2481opcode set_allocptr_header_instruction[] =
2482  {0x48,0x89,0x43,0xf3};
2483
2484
2485alloc_instruction_id
2486recognize_alloc_instruction(pc program_counter)
2487{
2488  switch(program_counter[0]) {
2489  case 0xcd: return ID_alloc_trap_instruction;
2490  /* 0x7f is jg, which we used to use here instead of ja */
2491  case 0x7f:
2492  case 0x77: return ID_branch_around_alloc_trap_instruction;
2493  case 0x48: return ID_set_allocptr_header_instruction;
2494#ifdef TCR_IN_GPR
2495  case 0x41: return ID_clear_tcr_save_allocptr_tag_instruction;
2496  case 0x49:
2497    switch(program_counter[1]) {
2498    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2499    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2500    }
2501#else
2502  case 0x65: 
2503    switch(program_counter[1]) {
2504    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
2505    case 0x48:
2506      switch(program_counter[2]) {
2507      case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2508      case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2509      }
2510    }
2511#endif
2512  default: break;
2513  }
2514  return ID_unrecognized_alloc_instruction;
2515}
2516#endif
2517#ifdef X8632
2518#define TCR_SEG_PREFIX 0x64
2519
2520#ifdef WIN_32
2521#define SAVE_ALLOCPTR 0x9c,0x0e,0x0,0x0
2522#define SAVE_ALLOCBASE 0x98,0x0e,0x0,0x0
2523#else
2524#define SAVE_ALLOCPTR 0x84,0x0,0x0,0x0
2525#define SAVE_ALLOCBASE 0x88,0x0,0x0,0x0
2526#endif
2527
2528opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
2529  {TCR_SEG_PREFIX,0x8b,0x0d,SAVE_ALLOCPTR};
2530opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
2531  {TCR_SEG_PREFIX,0x3b,0x0d,SAVE_ALLOCBASE};
2532opcode branch_around_alloc_trap_instruction[] =
2533  {0x77,0x02};
2534opcode alloc_trap_instruction[] =
2535  {0xcd,0xc5};
2536opcode clear_tcr_save_allocptr_tag_instruction[] =
2537  {TCR_SEG_PREFIX,0x80,0x25,SAVE_ALLOCPTR,0xf8};
2538opcode set_allocptr_header_instruction[] =
2539  {0x0f,0x7e,0x41,0xfa};
2540
2541alloc_instruction_id
2542recognize_alloc_instruction(pc program_counter)
2543{
2544  switch(program_counter[0]) {
2545  case 0xcd: return ID_alloc_trap_instruction;
2546  case 0x77: return ID_branch_around_alloc_trap_instruction;
2547  case 0x0f: return ID_set_allocptr_header_instruction;
2548  case 0x64: 
2549    switch(program_counter[1]) {
2550    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
2551    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
2552    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
2553    }
2554  }
2555  return ID_unrecognized_alloc_instruction;
2556}
2557#endif     
2558
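/* Illustrative sketch only (nothing in the runtime calls this):
   pc_luser_xp() consumes the tables above essentially as a mapping from
   an instruction id to that instruction's length in bytes, so that a
   stopped thread's PC can be advanced or backed up over whole
   instructions.  A hypothetical helper expressing that mapping directly
   might look like: */
static natural
alloc_instruction_length(alloc_instruction_id id)
{
  switch (id) {
  case ID_load_allocptr_reg_from_tcr_save_allocptr_instruction:
    return sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction);
  case ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction:
    return sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction);
  case ID_branch_around_alloc_trap_instruction:
    return sizeof(branch_around_alloc_trap_instruction);
  case ID_alloc_trap_instruction:
    return sizeof(alloc_trap_instruction);
  case ID_clear_tcr_save_allocptr_tag_instruction:
    return sizeof(clear_tcr_save_allocptr_tag_instruction);
  case ID_set_allocptr_header_instruction:
    return sizeof(set_allocptr_header_instruction);
  default:                      /* ID_unrecognized_alloc_instruction */
    return 0;
  }
}
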
2559void
2560pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *interrupt_displacement)
2561{
2562  pc program_counter = (pc)xpPC(xp);
2563  int allocptr_tag = fulltag_of((LispObj)(tcr->save_allocptr));
2564
2565  if (allocptr_tag != 0) {
2566    alloc_instruction_id state = recognize_alloc_instruction(program_counter);
2567    signed_natural
2568      disp = (allocptr_tag == fulltag_cons) ?
2569      sizeof(cons) - fulltag_cons :
2570#ifdef X8664
2571      xpGPR(xp,Iimm1)
2572#else
2573      xpGPR(xp,Iimm0)
2574#endif
2575      ;
2576    LispObj new_vector;
2577
2578    if ((state == ID_unrecognized_alloc_instruction) ||
2579        ((state == ID_set_allocptr_header_instruction) &&
2580         (allocptr_tag != fulltag_misc))) {
2581      Bug(xp, "Can't determine state of thread 0x" LISP ", interrupted during memory allocation", tcr);
2582    }
2583    switch(state) {
2584    case ID_set_allocptr_header_instruction:
2585      /* We were consing a vector and we won.  Set the header of the
2586         new vector (in the allocptr register) to the header in %rax
2587         (%mm0 on ia32) and skip over this instruction, then fall into
2588         the next case. */
2589      new_vector = xpGPR(xp,Iallocptr);
2590      deref(new_vector,0) = 
2591#ifdef X8664
2592        xpGPR(xp,Iimm0)
2593#else
2594        xpMMXreg(xp,Imm0)
2595#endif
2596        ;
2597     
2598      xpPC(xp) += sizeof(set_allocptr_header_instruction);
2599
2600      /* Fall thru */
2601    case ID_clear_tcr_save_allocptr_tag_instruction:
2602      tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
2603      xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
2604
2605      break;
2606    case ID_alloc_trap_instruction:
2607      /* If we're looking at another thread, we're pretty much committed to
2608         taking the trap.  We don't want the allocptr register to be pointing
2609         into the heap, so make it point to (- VOID_ALLOCPTR disp), where 'disp'
2610         was determined above.
2611      */
2612      if (interrupt_displacement == NULL) {
2613        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR - disp;
2614        tcr->save_allocptr = (void *)(VOID_ALLOCPTR - disp);
2615      } else {
2616        /* Back out, and tell the caller how to resume the allocation attempt */
2617        *interrupt_displacement = disp;
2618        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2619        tcr->save_allocptr += disp;
2620        xpPC(xp) -= (sizeof(branch_around_alloc_trap_instruction)+
2621                     sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
2622                     sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
2623      }
2624      break;
2625    case ID_branch_around_alloc_trap_instruction:
2626      /* If we'd take the branch - which is a "ja" - around the alloc trap,
2627         we might as well finish the allocation.  Otherwise, back out of the
2628         attempt. */
2629      {
2630        int flags = (int)eflags_register(xp);
2631       
2632        if ((!(flags & (1 << X86_ZERO_FLAG_BIT))) &&
2633            (!(flags & (1 << X86_CARRY_FLAG_BIT)))) {
2634          /* The branch (ja) would have been taken.  Emulate taking it. */
2635          xpPC(xp) += (sizeof(branch_around_alloc_trap_instruction)+
2636                       sizeof(alloc_trap_instruction));
2637          if (allocptr_tag == fulltag_misc) {
2638            /* Slap the header on the new uvector */
2639            new_vector = xpGPR(xp,Iallocptr);
2640#ifdef X8664
2641            deref(new_vector,0) = xpGPR(xp,Iimm0);
2642#else
2643            deref(new_vector,0) = xpMMXreg(xp,Imm0);
2644#endif
2645            xpPC(xp) += sizeof(set_allocptr_header_instruction);
2646          }
2647          tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
2648          xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
2649        } else {
2650          /* Back up */
2651          xpPC(xp) -= (sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
2652                       sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
2653          xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2654          if (interrupt_displacement) {
2655            *interrupt_displacement = disp;
2656            tcr->save_allocptr += disp;
2657          } else {
2658            tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
2659          }
2660        }
2661      }
2662      break;
2663    case ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction:
2664      xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
2665      xpPC(xp) -= sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction);
2666      /* Fall through */
2667    case ID_load_allocptr_reg_from_tcr_save_allocptr_instruction:
2668      if (interrupt_displacement) {
2669        tcr->save_allocptr += disp;
2670        *interrupt_displacement = disp;
2671      } else {
2672        tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
2673      }
2674      break;
2675    default: 
2676      break;
2677    }
2678    return;
2679  }
2680  if ((program_counter >= &egc_write_barrier_start) &&
2681      (program_counter < &egc_write_barrier_end)) {
2682    LispObj *ea = 0, val, root = 0;
2683    bitvector refbits = (bitvector)(lisp_global(REFBITS));
2684    Boolean  need_check_memo = true, need_memoize_root = false;
2685
2686    if (program_counter >= &egc_set_hash_key_conditional) {
2687      if (program_counter <= &egc_set_hash_key_conditional_retry) {
2688        return;
2689      }
2690      if (program_counter < &egc_set_hash_key_conditional_success_test) {
2691        /* Back up the PC, try again.  This is necessary since a pending
2692           GC may cause the value in %eax/%rax to move and backing up
2693           will reload %eax/%rax from a node register before trying the
2694           cmpxchg.
2695        */
2696        xpPC(xp) = (LispObj) &egc_set_hash_key_conditional_retry;
2697        return;
2698      }
2699      if ((program_counter == &egc_set_hash_key_conditional_success_test) &&
2700          !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT))) {
2701        /* Conditional store failed.  Return NIL. */
2702        LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2703        xpGPR(xp,Iarg_z) = lisp_nil;
2704        xpPC(xp) = ra;
2705        xpGPR(xp,Isp)=(LispObj)sp;
2706        return;
2707      }
2708      /* The conditional store succeeded.  Set the refbit, return to ra0 */
2709      val = xpGPR(xp,Iarg_z);
2710#ifdef X8664
2711      root = xpGPR(xp,Iarg_x);
2712      ea = (LispObj*)(root + (unbox_fixnum((signed_natural) xpGPR(xp,Itemp0))));
2713#else
2714      root = xpGPR(xp,Itemp1);
2715      ea = (LispObj *)(root + misc_data_offset + xpGPR(xp,Itemp0));
2716#endif
2717      need_memoize_root = true;
2718      xpGPR(xp,Iarg_z) = t_value;
2719    } else if (program_counter >= &egc_store_node_conditional) {
2720      if (program_counter <= &egc_store_node_conditional_retry) {
2721        return;
2722      }
2723      if (program_counter < &egc_store_node_conditional_success_test) {
2724        /* Back up the PC, try again.  Again, this is necessary because
2725           we're possibly keeping a node in %eax/%rax and haven't completed
2726           the cmpxchg yet. */
2727        xpPC(xp) = (LispObj) &egc_store_node_conditional_retry;
2728        return;
2729      }
2730      if ((program_counter == &egc_store_node_conditional_success_test) &&
2731           !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT))) {
2732        /* cmpxchg failed.  Return NIL. */
2733        LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2734        xpGPR(xp,Iarg_z) = lisp_nil;
2735        xpPC(xp) = ra;
2736        xpGPR(xp,Isp)=(LispObj)sp;
2737        return;
2738      }
2739
2740      if (program_counter >= &egc_store_node_conditional_success_end) {
2741        return;
2742      }
2743
2744      /* The conditional store succeeded.  Set the refbit, return to ra0 */
2745      val = xpGPR(xp,Iarg_z);
2746#ifdef X8664
2747      ea = (LispObj*)(xpGPR(xp,Iarg_x) + (unbox_fixnum((signed_natural)
2748                                                       xpGPR(xp,Itemp0))));
2749#else
2750      ea = (LispObj *)(misc_data_offset + xpGPR(xp,Itemp1) + xpGPR(xp,Itemp0));
2751#endif
2752      xpGPR(xp,Iarg_z) = t_value;
2753    } else if (program_counter >= &egc_set_hash_key) {
2754      if (program_counter == &egc_set_hash_key) {
2755        return;
2756      }
2757#ifdef X8664
2758      root = xpGPR(xp,Iarg_x);
2759#else
2760      root = xpGPR(xp,Itemp0);
2761#endif
2762      ea = (LispObj *) (root+xpGPR(xp,Iarg_y)+misc_data_offset);
2763      val = xpGPR(xp,Iarg_z);
2764      need_memoize_root = true;
2765    } else if (program_counter >= &egc_gvset) {
2766      /* This assumes that the store is the first instruction at _SPgvset.
2767         As of late February 2013 - just before the 1.9 release - that's
2768         a relatively recent change.
2769         If the store has already completed, don't do it again.
2770         See ticket:1058 for an example showing why this matters.
2771      */
2772      if (program_counter == &egc_gvset) {
2773        return;
2774      }
2775#ifdef X8664
2776      ea = (LispObj *) (xpGPR(xp,Iarg_x)+xpGPR(xp,Iarg_y)+misc_data_offset);
2777#else
2778      ea = (LispObj *) (xpGPR(xp,Itemp0)+xpGPR(xp,Iarg_y)+misc_data_offset);
2779#endif
2780      val = xpGPR(xp,Iarg_z);
2781    } else if (program_counter >= &egc_rplacd) {
2782      if (program_counter == &egc_rplacd) {
2783        return;
2784      }
2785      ea = (LispObj *) untag(xpGPR(xp,Iarg_y));
2786      val = xpGPR(xp,Iarg_z);
2787    } else {                      /* egc_rplaca */
2788      if (program_counter == &egc_rplaca) {
2789        return;
2790      }
2791      ea =  ((LispObj *) untag(xpGPR(xp,Iarg_y)))+1;
2792      val = xpGPR(xp,Iarg_z);
2793    }
2794    if (need_check_memo) {
2795      if ((LispObj)ea < val) {
2796        natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE)),
2797          rootbitnumber = area_dnode(root, lisp_global(REF_BASE));
2798        if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT))) {
2799          atomic_set_bit(refbits, bitnumber);
2800          atomic_set_bit(global_refidx,bitnumber>>8);
2801          if (need_memoize_root) {
2802            atomic_set_bit(refbits, rootbitnumber);
2803            atomic_set_bit(global_refidx,rootbitnumber>>8);
2804          }
2805        }
2806        if (bitnumber < lisp_global(MANAGED_STATIC_DNODES)) {
2807          atomic_set_bit(managed_static_refbits,bitnumber);
2808          atomic_set_bit(managed_static_refidx,bitnumber>>8);
2809          if (need_memoize_root) {
2810            atomic_set_bit(managed_static_refbits, rootbitnumber);
2811            atomic_set_bit(managed_static_refidx,rootbitnumber>>8);
2812          }
2813        }
2814      }
2815    }
2816    {
2817      /* These subprimitives are called via CALL/RET; need
2818         to pop the return address off the stack and set
2819         the PC there. */
2820      LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
2821      xpPC(xp) = ra;
2822      xpGPR(xp,Isp)=(LispObj)sp;
2823    }
2824    return;
2825  }
2826}
2827
2828
2829void
2830normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
2831{
2832  void *cur_allocptr = (void *)(tcr->save_allocptr);
2833  LispObj lisprsp;
2834  area *a;
2835
2836  if (xp) {
2837    if (is_other_tcr) {
2838      pc_luser_xp(xp, tcr, NULL);
2839    }
2840    a = tcr->vs_area;
2841    lisprsp = xpGPR(xp, Isp);
2842    if (((BytePtr)lisprsp >= a->low) &&
2843        ((BytePtr)lisprsp < a->high)) {
2844      a->active = (BytePtr)lisprsp;
2845    } else {
2846      a->active = (BytePtr) tcr->save_vsp;
2847    }
2848    a = tcr->ts_area;
2849    a->active = (BytePtr) tcr->save_tsp;
2850  } else {
2851    /* In ff-call; get area active pointers from tcr */
2852    tcr->vs_area->active = (BytePtr) tcr->save_vsp;
2853    tcr->ts_area->active = (BytePtr) tcr->save_tsp;
2854  }
2855  if (cur_allocptr) {
2856    update_bytes_allocated(tcr, cur_allocptr);
2857  }
2858  tcr->save_allocbase = (void *)VOID_ALLOCPTR;
2859  if (fulltag_of((LispObj)(tcr->save_allocptr)) == 0) {
2860    tcr->save_allocptr = (void *)VOID_ALLOCPTR;
2861  }
2862}
2863
2864
2865/* Suspend and "normalize" other tcrs, then call a gc-like function
2866   in that context.  Resume the other tcrs, then return what the
2867   function returned */
2868
2869TCR *gc_tcr = NULL;
2870
2871
2872signed_natural
2873gc_like_from_xp(ExceptionInformation *xp, 
2874                signed_natural(*fun)(TCR *, signed_natural), 
2875                signed_natural param)
2876{
2877  TCR *tcr = get_tcr(false), *other_tcr;
2878  signed_natural result;
2879  signed_natural inhibit, barrier = 0;
2880
2881  atomic_incf(&barrier);
2882  suspend_other_threads(true);
2883  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
2884  if (inhibit != 0) {
2885    if (inhibit > 0) {
2886      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
2887    }
2888    atomic_decf(&barrier);
2889    resume_other_threads(true);
2890    gc_deferred++;
2891    return 0;
2892  }
2893  gc_deferred = 0;
2894
2895  gc_tcr = tcr;
2896
2897  /* This is generally necessary if the current thread invoked the GC
2898     via an alloc trap, and harmless if the GC was invoked via a GC
2899     trap.  (It's necessary in the first case because the "allocptr"
2900     register - %rbx - may be pointing into the middle of something
2901     below tcr->save_allocbase, and we wouldn't want the GC to see
2902     that bogus pointer.) */
2903  xpGPR(xp, Iallocptr) = VOID_ALLOCPTR; 
2904
2905  normalize_tcr(xp, tcr, false);
2906
2907
2908  for (other_tcr = TCR_AUX(tcr)->next; other_tcr != tcr;
2909       other_tcr = TCR_AUX(other_tcr)->next) {
2910    if (other_tcr->pending_exception_context) {
2911      TCR_AUX(other_tcr)->gc_context = other_tcr->pending_exception_context;
2912    } else if (other_tcr->valence == TCR_STATE_LISP) {
2913      TCR_AUX(other_tcr)->gc_context = TCR_AUX(other_tcr)->suspend_context;
2914    } else {
2915      /* no pending exception, didn't suspend in lisp state:
2916         must have executed a synchronous ff-call.
2917      */
2918      TCR_AUX(other_tcr)->gc_context = NULL;
2919    }
2920    normalize_tcr(TCR_AUX(other_tcr)->gc_context, other_tcr, true);
2921  }
2922   
2923
2924
2925  result = fun(tcr, param);
2926
2927  other_tcr = tcr;
2928  do {
2929    TCR_AUX(other_tcr)->gc_context = NULL;
2930    other_tcr = TCR_AUX(other_tcr)->next;
2931  } while (other_tcr != tcr);
2932
2933  gc_tcr = NULL;
2934
2935  atomic_decf(&barrier);
2936  resume_other_threads(true);
2937
2938  return result;
2939
2940}
2941
2942signed_natural
2943purify_from_xp(ExceptionInformation *xp, signed_natural param)
2944{
2945  return gc_like_from_xp(xp, purify, param);
2946}
2947
2948signed_natural
2949impurify_from_xp(ExceptionInformation *xp, signed_natural param)
2950{
2951  return gc_like_from_xp(xp, impurify, param);
2952}
2953
2954/* Returns #bytes freed by invoking GC */
2955
2956signed_natural
2957gc_from_tcr(TCR *tcr, signed_natural param)
2958{
2959  area *a;
2960  BytePtr oldfree, newfree;
2961  BytePtr oldend, newend;
2962
2963#if 0
2964  fprintf(dbgout, "Start GC  in 0x" LISP "\n", tcr);
2965#endif
2966  a = active_dynamic_area;
2967  oldend = a->high;
2968  oldfree = a->active;
2969  gc(tcr, param);
2970  newfree = a->active;
2971  newend = a->high;
2972#if 0
2973  fprintf(dbgout, "End GC  in 0x" LISP "\n", tcr);
2974#endif
2975  return ((oldfree-newfree)+(newend-oldend));
2976}
2977
2978signed_natural
2979gc_from_xp(ExceptionInformation *xp, signed_natural param)
2980{
2981  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
2982
2983  freeGCptrs();
2984  return status;
2985}
2986
2987#ifdef DARWIN
2988
2989#define TCR_FROM_EXCEPTION_PORT(p) find_tcr_from_exception_port(p)
2990#define TCR_TO_EXCEPTION_PORT(t) (mach_port_name_t)((natural) (((TCR *)t)->io_datum))
2991
2992
2993extern void pseudo_sigreturn(void);
2994
2995
2996
2997#define LISP_EXCEPTIONS_HANDLED_MASK \
2998 (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
2999
3000/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
3001#define NUM_LISP_EXCEPTIONS_HANDLED 4
3002
3003typedef struct {
3004  int foreign_exception_port_count;
3005  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
3006  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
3007  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
3008  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
3009} MACH_foreign_exception_state;
3010
3011
3012
3013
3014/*
3015  Mach's exception mechanism works a little better than its signal
3016  mechanism (and, not incidentally, it gets along with GDB a lot
3017  better.)
3018
3019  Initially, we install an exception handler to handle each native
3020  thread's exceptions.  This process involves creating a distinguished
3021  thread which listens for kernel exception messages on a set of
3022  0 or more thread exception ports.  As threads are created, they're
3023  added to that port set; a thread's exception port is destroyed
3024  (and therefore removed from the port set) when the thread exits.
3025
3026  A few exceptions can be handled directly in the handler thread;
3027  others require that we resume the user thread (and that the
3028  exception thread resumes listening for exceptions.)  The user
3029  thread might eventually want to return to the original context
3030  (possibly modified somewhat.)
3031
3032  As it turns out, the simplest way to force the faulting user
3033  thread to handle its own exceptions is to do pretty much what
3034  signal() does: the exception handling thread sets up a sigcontext
3035  on the user thread's stack and forces the user thread to resume
3036  execution as if a signal handler had been called with that
3037  context as an argument.  We can use a distinguished UUO at a
3038  distinguished address to do something like sigreturn(); that'll
3039  have the effect of resuming the user thread's execution in
3040  the (pseudo-) signal context.
3041
3042  Since:
3043    a) we have miles of code in C and in Lisp that knows how to
3044    deal with Linux sigcontexts
3045    b) Linux sigcontexts contain a little more useful information
3046    (the DAR, DSISR, etc.) than their Darwin counterparts
3047    c) we have to create a sigcontext ourselves when calling out
3048    to the user thread: we aren't really generating a signal, just
3049    leveraging existing signal-handling code.
3050
3051  we create a Linux sigcontext struct.
3052
3053  Simple?  Hopefully from the outside it is ...
3054
3055  We want the process of passing a thread's own context to it to
3056  appear to be atomic: in particular, we don't want the GC to suspend
3057  a thread that's had an exception but has not yet had its user-level
3058  exception handler called, and we don't want the thread's exception
3059  context to be modified by a GC while the Mach handler thread is
3060  copying it around.  On Linux (and on Jaguar), we avoid this issue
3061  because (a) the kernel sets up the user-level signal handler and
3062  (b) the signal handler blocks signals (including the signal used
3063  by the GC to suspend threads) until tcr->xframe is set up.
3064
3065  The GC and the Mach server thread therefore contend for the lock
3066  "mach_exception_lock".  The Mach server thread holds the lock
3067  when copying exception information between the kernel and the
3068  user thread; the GC holds this lock during most of its execution
3069  (delaying exception processing until it can be done without
3070  GC interference.)
3071
3072*/
3073
3074#ifdef PPC64
3075#define C_REDZONE_LEN           320
3076#define C_STK_ALIGN             32
3077#else
3078#define C_REDZONE_LEN           224
3079#define C_STK_ALIGN             16
3080#endif
3081#define C_PARAMSAVE_LEN         64
3082#define C_LINKAGE_LEN           48
3083
3084#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
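
/* For example, with the x86 values above (C_REDZONE_LEN = 224,
   C_STK_ALIGN = 16) and a hypothetical stack pointer of 0x7fff5fbff0a8:

     TRUNC_DOWN(0x7fff5fbff0a8, 224, 16)
       = ((0x7fff5fbff0a8 - 0xe0) / 16) * 16
       = 0x7fff5fbfefc0

   i.e. the pointer is dropped below the red zone and then rounded down to
   a 16-byte boundary. */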
3085
3086void
3087fatal_mach_error(char *format, ...);
3088
3089#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
3090
3091
3092void
3093restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext, native_thread_state_t *ts)
3094{
3095  kern_return_t kret;
3096#if WORD_SIZE == 64
3097  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
3098#else
3099  mcontext_t mc = UC_MCONTEXT(pseudosigcontext);
3100#endif
3101
3102  /* Set the thread's FP state from the pseudosigcontext */
3103  kret = thread_set_state(thread,
3104                          NATIVE_FLOAT_STATE_FLAVOR,
3105                          (thread_state_t)&(mc->__fs),
3106                          NATIVE_FLOAT_STATE_COUNT);
3107  MACH_CHECK_ERROR("setting thread FP state", kret);
3108  *ts = mc->__ss;
3109} 
3110
3111kern_return_t
3112do_pseudo_sigreturn(mach_port_t thread, TCR *tcr, native_thread_state_t *out)
3113{
3114  ExceptionInformation *xp;
3115
3116#ifdef DEBUG_MACH_EXCEPTIONS
3117  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
3118#endif
3119  xp = tcr->pending_exception_context;
3120  if (xp) {
3121    tcr->pending_exception_context = NULL;
3122    tcr->valence = TCR_STATE_LISP;
3123    restore_mach_thread_state(thread, xp, out);
3124    raise_pending_interrupt(tcr);
3125  } else {
3126    Bug(NULL, "no xp here!\n");
3127  }
3128#ifdef DEBUG_MACH_EXCEPTIONS
3129  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
3130#endif
3131  return KERN_SUCCESS;
3132} 
3133
3134ExceptionInformation *
3135create_thread_context_frame(mach_port_t thread, 
3136                            natural *new_stack_top,
3137                            siginfo_t **info_ptr,
3138                            TCR *tcr,
3139                            native_thread_state_t *ts
3140                            )
3141{
3142  mach_msg_type_number_t thread_state_count;
3143  ExceptionInformation *pseudosigcontext;
3144#ifdef X8664
3145  MCONTEXT_T mc;
3146#else
3147  mcontext_t mc;
3148#endif
3149  natural stackp;
3150
3151#ifdef X8664 
3152  stackp = (LispObj) find_foreign_rsp(ts->__rsp,tcr->cs_area,tcr);
3153  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
3154#else
3155  stackp = (LispObj) find_foreign_rsp(ts->__esp, tcr->cs_area, tcr);
3156#endif
3157  stackp = TRUNC_DOWN(stackp, sizeof(siginfo_t), C_STK_ALIGN);
3158  if (info_ptr) {
3159    *info_ptr = (siginfo_t *)stackp;
3160  }
3161  stackp = TRUNC_DOWN(stackp,sizeof(*pseudosigcontext), C_STK_ALIGN);
3162  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
3163
3164  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
3165  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
3166 
3167  memmove(&(mc->__ss),ts,sizeof(*ts));
3168
3169  thread_state_count = NATIVE_FLOAT_STATE_COUNT;
3170  thread_get_state(thread,
3171                   NATIVE_FLOAT_STATE_FLAVOR,
3172                   (thread_state_t)&(mc->__fs),
3173                   &thread_state_count);
3174
3175  thread_state_count = NATIVE_EXCEPTION_STATE_COUNT;
3176  thread_get_state(thread,
3177                   NATIVE_EXCEPTION_STATE_FLAVOR,
3178                   (thread_state_t)&(mc->__es),
3179                   &thread_state_count);
3180
3181
3182  UC_MCONTEXT(pseudosigcontext) = mc;
3183  if (new_stack_top) {
3184    *new_stack_top = stackp;
3185  }
3186  return pseudosigcontext;
3187}
3188
3189/*
3190  This code sets up the user thread so that it executes a "pseudo-signal
3191  handler" function when it resumes.  Create a fake ucontext struct
3192  on the thread's stack and pass it as an argument to the pseudo-signal
3193  handler.
3194
3195  Things are set up so that the handler "returns to" pseudo_sigreturn(),
3196  which will restore the thread's context.
3197
3198  If the handler invokes code that throws (or otherwise never sigreturn()'s
3199  to the context), that's fine.
3200
3201  Actually, check that: throw (and variants) may need to be careful and
3202  pop the tcr's xframe list until it's younger than any frame being
3203  entered.
3204*/
3205
3206int
3207setup_signal_frame(mach_port_t thread,
3208                   void *handler_address,
3209                   int signum,
3210                   int code,
3211                   TCR *tcr,
3212                   native_thread_state_t *ts,
3213                   native_thread_state_t *new_ts
3214                   )
3215{
3216  ExceptionInformation *pseudosigcontext;
3217  int  old_valence = tcr->valence;
3218  natural stackp, *stackpp;
3219  siginfo_t *info;
3220
3221#ifdef DEBUG_MACH_EXCEPTIONS
3222  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
3223#endif
3224  pseudosigcontext = create_thread_context_frame(thread, &stackp, &info, tcr,  ts);
3225  bzero(info, sizeof(*info));
3226  info->si_code = code;
3227  info->si_addr = (void *)(UC_MCONTEXT(pseudosigcontext)->__es.__faultvaddr);
3228  info->si_signo = signum;
3229  pseudosigcontext->uc_onstack = 0;
3230  pseudosigcontext->uc_sigmask = (sigset_t) 0;
3231  pseudosigcontext->uc_stack.ss_sp = 0;
3232  pseudosigcontext->uc_stack.ss_size = 0;
3233  pseudosigcontext->uc_stack.ss_flags = 0;
3234  pseudosigcontext->uc_link = NULL;
3235  pseudosigcontext->uc_mcsize = sizeof(*UC_MCONTEXT(pseudosigcontext));
3236  tcr->pending_exception_context = pseudosigcontext;
3237  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
3238 
3239
3240  /*
3241     At this point we've created a sigcontext on the thread's
3242     stack.  Set things up so that we call the handler (with appropriate
3243     args) when the thread's resumed.
3244  */
3245
3246#ifdef X8664
3247  new_ts->__rip = (natural) handler_address;
3248  stackpp = (natural *)stackp;
3249  *--stackpp = (natural)pseudo_sigreturn;
3250  stackp = (natural)stackpp;
3251  new_ts->__rdi = signum;
3252  new_ts->__rsi = (natural)info;
3253  new_ts->__rdx = (natural)pseudosigcontext;
3254  new_ts->__rcx = (natural)tcr;
3255  new_ts->__r8 = (natural)old_valence;
3256  new_ts->__rsp = stackp;
3257  new_ts->__rflags = ts->__rflags;
3258#else
3259  bzero(new_ts, sizeof(*new_ts));
3260  new_ts->__cs = ts->__cs;
3261  new_ts->__ss = ts->__ss;
3262  new_ts->__ds = ts->__ds;
3263  new_ts->__es = ts->__es;
3264  new_ts->__fs = ts->__fs;
3265  new_ts->__gs = ts->__gs;
3266
3267  new_ts->__eip = (natural)handler_address;
3268  stackpp = (natural *)stackp;
3269  *--stackpp = 0;               /* alignment */
3270  *--stackpp = 0;
3271  *--stackpp = 0;
3272  *--stackpp = (natural)old_valence;
3273  *--stackpp = (natural)tcr;
3274  *--stackpp = (natural)pseudosigcontext;
3275  *--stackpp = (natural)info;
3276  *--stackpp = (natural)signum;
3277  *--stackpp = (natural)pseudo_sigreturn;
3278  stackp = (natural)stackpp;
3279  new_ts->__esp = stackp;
3280  new_ts->__eflags = ts->__eflags;
3281#endif
3282#ifdef DEBUG_MACH_EXCEPTIONS
3283  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
3284#endif
3285  return 0;
3286}
3287
3288
3289
3290
3291
3292
3293/*
3294  The function catch_mach_exception_raise_state(), below, runs in the
3295  exception handling thread.  It's called (by this precise name) from
3296  "mach_exc_server()" when the thread's exception ports are set up.
3297  (mach_exc_server() is called via mach_msg_server(), which waits for
3298  and dispatches exception messages from the Mach kernel.)
3299
3300  It checks whether the exception was caused by a pseudo_sigreturn()
3301  UUO; if so, it arranges for the thread to have its state restored
3302  from the specified context.
3303
3304  Otherwise, it tries to map the exception to a signal number and
3305  arranges that the thread run a "pseudo signal handler" to handle
3306  the exception.
3307
3308  Some exceptions could and should be handled here directly.
3309*/
3310
3311#define DARWIN_EXCEPTION_HANDLER signal_handler
3312
3313TCR *
3314find_tcr_from_exception_port(mach_port_t port)
3315{
3316    mach_vm_address_t context = 0;
3317    kern_return_t kret;
3318
3319    kret = mach_port_get_context(mach_task_self(), port, &context);
3320    MACH_CHECK_ERROR("finding TCR from exception port", kret);
3321    return (TCR *)context;
3322}
3323
3324void
3325associate_tcr_with_exception_port(mach_port_t port, TCR *tcr)
3326{
3327    kern_return_t kret;
3328   
3329    kret = mach_port_set_context(mach_task_self(),
3330                                 port, (mach_vm_address_t)tcr);
3331    MACH_CHECK_ERROR("associating TCR with exception port", kret);
3332}
3333
3334void
3335disassociate_tcr_from_exception_port(mach_port_t port)
3336{
3337  kern_return_t kret;
3338
3339  kret = mach_port_set_context(mach_task_self(), port, 0);
3340  MACH_CHECK_ERROR("disassociating TCR from exception port", kret);
3341}
3342
3343kern_return_t
3344catch_mach_exception_raise(mach_port_t exception_port,
3345                           mach_port_t thread,
3346                           mach_port_t task,
3347                           exception_type_t exception,
3348                           mach_exception_data_t code,
3349                           mach_msg_type_number_t code_count)
3350{
3351  abort();
3352  return KERN_FAILURE;
3353}
3354
3355kern_return_t
3356catch_mach_exception_raise_state(mach_port_t exception_port,
3357                                 exception_type_t exception,
3358                                 mach_exception_data_t code,
3359                                 mach_msg_type_number_t code_count,
3360                                 int *flavor,
3361                                 thread_state_t in_state,
3362                                 mach_msg_type_number_t in_state_count,
3363                                 thread_state_t out_state,
3364                                 mach_msg_type_number_t *out_state_count)
3365{
3366  int64_t code0 = code[0];
3367  int signum = 0;
3368  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
3369  mach_port_t thread = (mach_port_t)((natural)tcr->native_thread_id);
3370  kern_return_t kret, call_kret;
3371
3372  native_thread_state_t
3373    *ts = (native_thread_state_t *)in_state,
3374    *out_ts = (native_thread_state_t*)out_state;
3375  mach_msg_type_number_t thread_state_count;
3376
3377  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
3378    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
3379  } 
3380  if ((code0 == EXC_I386_GPFLT) &&
3381      ((natural)(ts_pc(ts)) == (natural)pseudo_sigreturn)) {
3382    kret = do_pseudo_sigreturn(thread, tcr, out_ts);
3383#if 0
3384    fprintf(dbgout, "Exception return in 0x%x\n",tcr);
3385#endif
3386  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
3387    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
3388    kret = 17;
3389  } else {
3390    switch (exception) {
3391    case EXC_BAD_ACCESS:
3392      if (code0 == EXC_I386_GPFLT) {
3393        signum = SIGSEGV;
3394      } else {
3395        signum = SIGBUS;
3396      }
3397      break;
3398     
3399    case EXC_BAD_INSTRUCTION:
3400      if (code0 == EXC_I386_GPFLT) {
3401        signum = SIGSEGV;
3402      } else {
3403        signum = SIGILL;
3404      }
3405      break;
3406     
3407    case EXC_SOFTWARE:
3408      signum = SIGILL;
3409      break;
3410     
3411    case EXC_ARITHMETIC:
3412      signum = SIGFPE;
3413      if (code0 == EXC_I386_DIV)
3414        code0 = FPE_INTDIV;
3415      break;
3416     
3417    default:
3418      break;
3419    }
3420#if WORD_SIZE==64
3421    if ((signum==SIGFPE) && 
3422        (code0 != FPE_INTDIV) && 
3423        (tcr->valence != TCR_STATE_LISP)) {
3424      mach_msg_type_number_t thread_state_count = x86_FLOAT_STATE64_COUNT;
3425      x86_float_state64_t fs;
3426     
3427      thread_get_state(thread,
3428                       x86_FLOAT_STATE64,
3429                       (thread_state_t)&fs,
3430                       &thread_state_count);
3431     
3432      if (! (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN_FPE))) {
3433        tcr->flags |= (1<<TCR_FLAG_BIT_FOREIGN_FPE);
3434        tcr->lisp_mxcsr = (fs.__fpu_mxcsr & ~MXCSR_STATUS_MASK);
3435      }
3436      fs.__fpu_mxcsr &= ~MXCSR_STATUS_MASK;
3437      fs.__fpu_mxcsr |= MXCSR_CONTROL_MASK;
3438      thread_set_state(thread,
3439                       x86_FLOAT_STATE64,
3440                       (thread_state_t)&fs,
3441                       x86_FLOAT_STATE64_COUNT);
3442      *out_state_count = NATIVE_THREAD_STATE_COUNT;
3443      *out_ts = *ts;
3444      return KERN_SUCCESS;
3445    }
3446#endif
3447    if (signum) {
3448      kret = setup_signal_frame(thread,
3449                                (void *)DARWIN_EXCEPTION_HANDLER,
3450                                signum,
3451                                code0,
3452                                tcr, 
3453                                ts,
3454                                out_ts);
3455     
3456    } else {
3457      kret = 17;
3458    }
3459  }
3460 
3461  if (kret) {
3462    *out_state_count = 0;
3463    *flavor = 0;
3464  } else {
3465    *out_state_count = NATIVE_THREAD_STATE_COUNT;
3466  }
3467  return kret;
3468}
3469
3470kern_return_t
3471catch_mach_exception_raise_state_identity(mach_port_t exception_port,
3472                                          mach_port_t thread,
3473                                          mach_port_t task,
3474                                          exception_type_t exception,
3475                                          mach_exception_data_t code,
3476                                          mach_msg_type_number_t code_count,
3477                                          int *flavor,
3478                                          thread_state_t old_state,
3479                                          mach_msg_type_number_t old_count,
3480                                          thread_state_t new_state,
3481                                          mach_msg_type_number_t *new_count)
3482{
3483  abort();
3484  return KERN_FAILURE;
3485}
3486
3487
3488
3489
3490
3491static mach_port_t mach_exception_thread = (mach_port_t)0;
3492
3493
3494/*
3495  The initial function for an exception-handling thread.
3496*/
3497
3498void *
3499exception_handler_proc(void *arg)
3500{
3501  extern boolean_t mach_exc_server();
3502  mach_port_t p = (mach_port_t)((natural)arg);
3503
3504  mach_exception_thread = pthread_mach_thread_np(pthread_self());
  mach_msg_server(mach_exc_server, 256, p, 0);
  /* Should never return. */
  abort();
}



void
mach_exception_thread_shutdown()
{
  kern_return_t kret;

  fprintf(dbgout, "terminating Mach exception thread, 'cause exit can't\n");
  kret = thread_terminate(mach_exception_thread);
  if (kret != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't terminate exception thread, kret = %d\n",kret);
  }
}


mach_port_t
mach_exception_port_set()
{
  static mach_port_t __exception_port_set = MACH_PORT_NULL;
  kern_return_t kret;
  if (__exception_port_set == MACH_PORT_NULL) {

    kret = mach_port_allocate(mach_task_self(),
                              MACH_PORT_RIGHT_PORT_SET,
                              &__exception_port_set);
    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
    create_system_thread(0,
                         NULL,
                         exception_handler_proc,
                         (void *)((natural)__exception_port_set));
  }
  return __exception_port_set;
}

/*
  Set up a new thread to handle those exceptions specified by
  LISP_EXCEPTIONS_HANDLED_MASK.  This involves creating a special
  Mach message port, telling the Mach kernel to send exception
  messages for the calling thread to that port, and setting
  up a handler thread which listens for and responds to
  those messages.

*/
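
/*
 * A condensed, illustrative sketch of that pattern (disabled; the real
 * code below goes through tcr_establish_exception_port() and a shared
 * port set rather than one listener per thread, and uses
 * create_system_thread() instead of a raw pthread_create()).
 */
#if 0
static void
example_per_thread_exception_setup(void)
{
  mach_port_t port;
  pthread_t listener;
  kern_return_t kret;

  /* Create a receive right and give ourselves a send right to it. */
  kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
  MACH_CHECK_ERROR("allocating exception port", kret);
  kret = mach_port_insert_right(mach_task_self(), port, port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("inserting send right", kret);

  /* Ask the kernel to send this thread's exception messages to the port. */
  kret = thread_set_exception_ports(mach_thread_self(),
                                    LISP_EXCEPTIONS_HANDLED_MASK,
                                    port,
                                    MACH_EXCEPTION_CODES | EXCEPTION_STATE,
#if WORD_SIZE==64
                                    x86_THREAD_STATE64
#else
                                    x86_THREAD_STATE32
#endif
                                    );
  MACH_CHECK_ERROR("setting thread exception ports", kret);

  /* A dedicated thread listens for and replies to those messages. */
  pthread_create(&listener, NULL, exception_handler_proc,
                 (void *)((natural)port));
}
#endif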

/*
  Establish the lisp thread's TCR as its exception port, and determine
  whether any other ports have been established by foreign code for
  exceptions that lisp cares about.

  If this happens at all, it should happen on return from foreign
  code and on entry to lisp code via a callback.

  This is a lot of trouble (and overhead) to support Java, or other
  embeddable systems that clobber their caller's thread exception ports.

*/
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

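  /*
   * Install lisp_port for everything in LISP_EXCEPTIONS_HANDLED_MASK,
   * collecting whatever ports, behaviors, and flavors foreign code had
   * previously installed into the fxs arrays so that they can be
   * restored later (see restore_foreign_exception_ports() below).
   */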
  kret = thread_swap_exception_ports(thread,
                                     LISP_EXCEPTIONS_HANDLED_MASK,
                                     lisp_port,
                                     MACH_EXCEPTION_CODES | EXCEPTION_STATE,
#if WORD_SIZE==64
                                     x86_THREAD_STATE64,
#else
                                     x86_THREAD_STATE32,
#endif
                                     fxs->masks,
                                     &n,
                                     fxs->ports,
                                     fxs->behaviors,
                                     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      if ((foreign_port != lisp_port) &&
          (foreign_port != MACH_PORT_NULL)) {
        mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}

kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}

/*
  Do this when calling out to or returning from foreign code, if
  any conflicting foreign exception ports were established when we
  last entered lisp code.
*/
kern_return_t
restore_foreign_exception_ports(TCR *tcr)
{
  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
  kern_return_t kret;

  if (m) {
    MACH_foreign_exception_state *fxs =
      (MACH_foreign_exception_state *) tcr->native_thread_info;
    int i, n = fxs->foreign_exception_port_count;
    exception_mask_t tm;

    for (i = 0; i < n; i++) {
      if ((tm = fxs->masks[i]) & m) {
        kret = thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
                                   tm,
                                   fxs->ports[i],
                                   fxs->behaviors[i],
                                   fxs->flavors[i]);
        MACH_CHECK_ERROR("restoring thread exception ports", kret);
      }
    }
  }
  return KERN_SUCCESS;
}


/*
  This assumes that a Mach port (to be used as the thread's exception port) whose
  "name" matches the TCR's 32-bit address has already been allocated.
*/
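/*
 * (Conceptually, that amounts to something like
 *
 *   mach_port_allocate_name(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
 *                           (mach_port_name_t)((natural)tcr));
 *
 * somewhere in the thread-setup code, so that TCR_TO_EXCEPTION_PORT()
 * can map between the TCR and the port name; illustrative only.)
 */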

kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  kret = mach_port_insert_right(task_self,
                                thread_exception_port,
                                thread_exception_port,
                                MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
                                 thread_exception_port,
                                 exception_port_set);
  }
  return kret;
}

void
darwin_exception_init(TCR *tcr)
{
  void tcr_monitor_exception_handling(TCR*, Boolean);
  kern_return_t kret;
  MACH_foreign_exception_state *fxs =
    calloc(1, sizeof(MACH_foreign_exception_state));

  tcr->native_thread_info = (void *) fxs;

  if ((kret = setup_mach_exception_handling(tcr))
      != KERN_SUCCESS) {
    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
    terminate_lisp();
  }
}

/*
  The tcr is the "name" of the corresponding thread's exception port.
  Destroying the port should remove it from all port sets of which it's
  a member (notably, the exception port set.)
*/
void
darwin_exception_cleanup(TCR *tcr)
{
  mach_port_t exception_port;
  void *fxs = tcr->native_thread_info;

  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }

  exception_port = TCR_TO_EXCEPTION_PORT(tcr);
  disassociate_tcr_from_exception_port(exception_port);
  mach_port_deallocate(mach_task_self(), exception_port);
  /* Theoretically not needed, I guess... */
  mach_port_destroy(mach_task_self(), exception_port);
}




void
fatal_mach_error(char *format, ...)
{
  va_list args;
  char s[512];


  va_start(args, format);
  vsnprintf(s, sizeof(s), format, args);
  va_end(args);

  Fatal("Mach error", s);
}




#endif

/* watchpoint stuff */
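
/*
 * Overview: a watched object is copied into its own page-protected
 * "watched" area so that any write to it faults.  handle_watch_trap(),
 * at the end of this section, dispatches the lisp-side watch/unwatch
 * requests to watch_object()/unwatch_object() below.
 */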

area *
new_watched_area(natural size)
{
  char *p;

  p = MapMemory(NULL, size, MEMPROTECT_RWX);
  if ((signed_natural)p == -1) {
    allocation_failure(true, size);
  }
  return new_area(p, p + size, AREA_WATCHED);
}

void
delete_watched_area(area *a, TCR *tcr)
{
  natural nbytes = a->high - a->low;
  char *base = a->low;

  condemn_area_holding_area_lock(a);

  if (nbytes) {
    int err;

    err = UnMapMemory(base, nbytes);
    if (err != 0)
      Fatal("munmap in delete_watched_area", "");
  }
}

natural
uvector_total_size_in_bytes(LispObj *u)
{
  LispObj header = header_of(u);
  natural header_tag = fulltag_of(header);
  natural subtag = header_subtag(header);
  natural element_count = header_element_count(header);
  natural nbytes = 0;

#ifdef X8632
  if ((nodeheader_tag_p(header_tag)) ||
      (subtag <= max_32_bit_ivector_subtag)) {
    nbytes = element_count << 2;
  } else if (subtag <= max_8_bit_ivector_subtag) {
    nbytes = element_count;
  } else if (subtag <= max_16_bit_ivector_subtag) {
    nbytes = element_count << 1;
  } else if (subtag == subtag_double_float_vector) {
    nbytes = element_count << 3;
  } else {
    nbytes = (element_count + 7) >> 3;
  }
  /* add 4 byte header and round up to multiple of 8 bytes */
  return ~7 & (4 + nbytes + 7);
#endif
#ifdef X8664
  if ((nodeheader_tag_p(header_tag)) || (header_tag == ivector_class_64_bit)) {
    nbytes = element_count << 3;
  } else if (header_tag == ivector_class_32_bit) {
    nbytes = element_count << 2;
  } else {
    /* ivector_class_other_bit contains 8-bit and 16-bit vectors and bit vectors */
    if (subtag == subtag_bit_vector) {
      nbytes = (element_count + 7) >> 3;
    } else if (subtag >= min_8_bit_ivector_subtag) {
      nbytes = element_count;
    } else {
      nbytes = element_count << 1;
    }
  }
  /* add 8 byte header and round up to multiple of 16 bytes */
  return ~15 & (8 + nbytes + 15);
#endif
}
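
/*
 * Worked examples (assuming the standard header layouts): on x8664, a
 * 5-element (unsigned-byte 8) vector has nbytes = 5, so the total is
 * ~15 & (8 + 5 + 15) = 16 bytes; on x8632, a 3-element simple-vector
 * (a node header) has nbytes = 12, so the total is ~7 & (4 + 12 + 7) = 16
 * bytes.
 */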

extern void wp_update_references(TCR *, LispObj, LispObj);

/*
 * Other threads are suspended and pc-lusered.
 *
 * param contains a tagged pointer to a uvector or a cons cell
 */
signed_natural
watch_object(TCR *tcr, signed_natural param)
{
  LispObj object = (LispObj)param;
  unsigned tag = fulltag_of(object);
  LispObj *noderef = (LispObj *)untag(object);
  area *object_area = area_containing((BytePtr)noderef);
  natural size;

  if (tag == fulltag_cons)
    size = 2 * node_size;
  else
    size = uvector_total_size_in_bytes(noderef);

  if (object_area && object_area->code == AREA_DYNAMIC) {
    area *a = new_watched_area(size);
    LispObj old = object;
    LispObj new = (LispObj)((natural)a->low + tag);

    add_area_holding_area_lock(a);

    /* move object to watched area */
    memcpy(a->low, noderef, size);
    ProtectMemory(a->low, size);
    memset(noderef, 0, size);
    wp_update_references(tcr, old, new);
    check_all_areas(tcr);
    return 1;
  }
  return 0;
}

/*
 * We expect the watched object in arg_y, and the new uninitialized
 * object (which is just zeroed) in arg_z.
 */
signed_natural
unwatch_object(TCR *tcr, signed_natural param)
{
  ExceptionInformation *xp = tcr->xframe->curr;
  LispObj old = xpGPR(xp, Iarg_y);
  unsigned tag = fulltag_of(old);
  LispObj new = xpGPR(xp, Iarg_z);
  LispObj *oldnode = (LispObj *)untag(old);
  LispObj *newnode = (LispObj *)untag(new);
  area *a = area_containing((BytePtr)old);
  extern void update_managed_refs(area *, BytePtr, natural);

  if (a && a->code == AREA_WATCHED) {
    natural size;

    if (tag == fulltag_cons)
      size = 2 * node_size;
    else
      size = uvector_total_size_in_bytes(oldnode);

    memcpy(newnode, oldnode, size);
    delete_watched_area(a, tcr);
    wp_update_references(tcr, old, new);
    /* because wp_update_references doesn't update refbits */
    tenure_to_area(tenured_area);
    /* Unwatching can (re-)introduce managed_static->dynamic references */
    zero_bits(managed_static_area->refbits, managed_static_area->ndnodes);
    update_managed_refs(managed_static_area, low_markable_address, area_dnode(active_dynamic_area->active, low_markable_address));
    check_all_areas(tcr);
    xpGPR(xp, Iarg_z) = new;
  }
  return 0;
}

Boolean
handle_watch_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj selector = xpGPR(xp, Iimm0);
  LispObj object = xpGPR(xp, Iarg_z);
  signed_natural result;

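  /*
   * Iimm0 holds the watch/unwatch selector and Iarg_z the object.
   * gc_like_from_xp() runs the helper in a GC-like context (other
   * threads suspended, per the comment above watch_object()).
   */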
  switch (selector) {
    case WATCH_TRAP_FUNCTION_WATCH:
      result = gc_like_from_xp(xp, watch_object, object);
      if (result == 0)
        xpGPR(xp, Iarg_z) = lisp_nil;
      break;
    case WATCH_TRAP_FUNCTION_UNWATCH:
      gc_like_from_xp(xp, unwatch_object, 0);
      break;
    default:
      break;
  }
  return true;
}
