Index: /branches/arm/lisp-kernel/.cvsignore
===================================================================
--- /branches/arm/lisp-kernel/.cvsignore	(revision 13357)
+++ /branches/arm/lisp-kernel/.cvsignore	(revision 13357)
@@ -0,0 +1,1 @@
+*~.*
Index: /branches/arm/lisp-kernel/Threads.h
===================================================================
--- /branches/arm/lisp-kernel/Threads.h	(revision 13357)
+++ /branches/arm/lisp-kernel/Threads.h	(revision 13357)
@@ -0,0 +1,275 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include <stdlib.h>
+#ifndef WINDOWS
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+#undef __argv
+#include <stdio.h>
+#ifndef WINDOWS
+#include <pthread.h>
+#endif
+#ifdef WINDOWS
+#include <process.h>
+#endif
+#include <errno.h>
+#include <limits.h>
+
+#ifdef SOLARIS
+#include <sys/syscall.h>
+#include <sys/lwp.h>
+#endif
+
+#ifdef LINUX
+#include <sys/syscall.h>
+#endif
+
+#undef USE_MACH_SEMAPHORES
+#define USE_POSIX_SEMAPHORES
+#undef USE_WINDOWS_SEMAPHORES
+
+#ifdef DARWIN
+#define USE_MACH_SEMAPHORES 1
+#undef  USE_POSIX_SEMAPHORES
+#endif
+#ifdef WINDOWS
+#define USE_WINDOWS_SEMAPHORES 1
+#undef USE_POSIX_SEMAPHORES
+#ifdef WIN_32
+struct timespec {
+  int tv_sec;
+  int tv_nsec;
+};
+#endif
+#endif
+
+#ifdef USE_POSIX_SEMAPHORES
+#include <semaphore.h>
+#endif
+
+
+#ifdef USE_MACH_SEMAPHORES
+/* We have to use Mach semaphores, even if we're otherwise 
+   using POSIX signals, etc. */
+#include <mach/task.h>
+#include <mach/semaphore.h>
+#endif
+
+#include <limits.h>
+
+#ifdef FREEBSD
+#include <pthread_np.h>
+#endif
+
+#ifndef WINDOWS
+#include <sched.h>
+#endif
+
+#include "lisp.h"
+#include "lisp_globals.h"
+#include "gc.h"
+
+#ifdef USE_FUTEX
+#ifndef FUTEX_WAIT
+#define FUTEX_WAIT (0)
+#endif
+#ifndef FUTEX_WAKE
+#define FUTEX_WAKE (1)
+#endif
+#include <sys/syscall.h>
+#endif
+
+#ifndef WINDOWS
+#include <syslog.h>
+#endif
+
+Boolean extern threads_initialized;
+Boolean extern log_tcr_info;
+
+#define LOCK_SPINLOCK(x,tcr) get_spin_lock(&(x),tcr)
+#define RELEASE_SPINLOCK(x) (x)=0
+
+#define TCR_TO_TSD(tcr) ((void *)((natural)(tcr)+TCR_BIAS))
+#define TCR_FROM_TSD(tsd) ((TCR *)((natural)(tsd)-TCR_BIAS))
+
+#ifdef USE_WINDOWS_SEMAPHORES
+
+typedef void * SEMAPHORE;
+#define SEM_WAIT(s) WaitForSingleObject(s,INFINITE)
+#define SEM_RAISE(s) ReleaseSemaphore(s, 1L, NULL)
+#define SEM_BROADCAST(s, count) do {while(count) {SEM_RAISE(s);(count)--;}}while(0)
+#define SEM_TIMEDWAIT(s,t) WaitForSingleObject(s,t)
+
+#endif
+#ifdef USE_POSIX_SEMAPHORES
+typedef sem_t * SEMAPHORE;
+#define SEM_WAIT(s) sem_wait((SEMAPHORE)s)
+#define SEM_RAISE(s) sem_post((SEMAPHORE)s)
+#define SEM_BROADCAST(s, count) do {while(count) {SEM_RAISE(s);(count)--;}}while(0)
+#define SEM_TIMEDWAIT(s,t) sem_timedwait((SEMAPHORE)s,(struct timespec *)t)
+#endif
+
+#ifdef USE_MACH_SEMAPHORES
+typedef semaphore_t SEMAPHORE;
+#define SEM_WAIT(s) semaphore_wait((SEMAPHORE)(natural)s)
+#define SEM_RAISE(s) semaphore_signal((SEMAPHORE)(natural)s)
+#define SEM_BROADCAST(s,count)semaphore_signal_all((SEMAPHORE)(natural)s)
+#define SEM_TIMEDWAIT(s,t) semaphore_timedwait((SEMAPHORE)(natural)s,t)
+#endif
+
+void sem_wait_forever(SEMAPHORE s);
+
+#ifdef USE_POSIX_SEMAPHORES
+#define SEM_WAIT_FOREVER(s) sem_wait_forever((SEMAPHORE)s)
+#endif
+
+#ifdef USE_MACH_SEMAPHORES
+#define SEM_WAIT_FOREVER(s) sem_wait_forever((SEMAPHORE)(natural)s)
+#endif
+
+#ifdef USE_WINDOWS_SEMAPHORES
+#define SEM_WAIT_FOREVER(s) sem_wait_forever((SEMAPHORE)s)
+#endif
+
+typedef struct
+{
+  signed_natural avail;
+  TCR* owner;
+  signed_natural  count;
+  void* signal;
+  signed_natural waiting;
+  void *malloced_ptr;
+  signed_natural spinlock;
+} _recursive_lock, *RECURSIVE_LOCK;
+
+
+int lock_recursive_lock(RECURSIVE_LOCK, TCR *);
+int unlock_recursive_lock(RECURSIVE_LOCK, TCR *);
+RECURSIVE_LOCK new_recursive_lock(void);
+void destroy_recursive_lock(RECURSIVE_LOCK);
+int recursive_lock_trylock(RECURSIVE_LOCK, TCR *, int *);
+
+#define LOCK(m, t) lock_recursive_lock((RECURSIVE_LOCK)ptr_from_lispobj(m), (TCR *)t)
+#define UNLOCK(m, t) unlock_recursive_lock((RECURSIVE_LOCK)ptr_from_lispobj(m), (TCR *)t)
+
+/* Hmm.  This doesn't look like the MacOS Thread Manager ... */
+LispObj current_thread_osid(void);
+void *current_native_thread_id(void);
+void *new_semaphore(int);
+void destroy_semaphore(void**);
+void tsd_set(LispObj, void *);
+void *tsd_get(LispObj);
+TCR *new_tcr(natural, natural);
+TCR *initial_thread_tcr;
+
+#define DEFAULT_THREAD_STACK_SIZE ((size_t) -1)
+#define MINIMAL_THREAD_STACK_SIZE ((size_t) 0)
+
+
+LispObj create_system_thread(size_t stack_size, 
+			     void* stackaddr,
+#ifdef WINDOWS
+                             unsigned CALLBACK (*start_routine)(void *)
+#else
+			     void* (*start_routine)(void *)
+#endif
+                             ,
+			     void* param);
+
+TCR *get_tcr(Boolean);
+TCR *get_interrupt_tcr(Boolean);
+Boolean suspend_tcr(TCR *);
+Boolean resume_tcr(TCR *);
+
+typedef struct
+{
+  signed_natural spin; /* need spin lock to change fields */
+  signed_natural state; /* 0 = free, positive if writer, negative if readers; */
+  natural blocked_writers;
+  natural blocked_readers;
+  TCR  *writer;
+#ifdef USE_FUTEX
+  natural reader_signal;
+  natural writer_signal;
+#else
+  void * reader_signal;
+  void * writer_signal;
+#endif
+  void *malloced_ptr;
+} rwlock;
+
+
+rwlock * rwlock_new(void);
+void rwlock_destroy(rwlock *);
+int rwlock_rlock(rwlock *, TCR *, struct timespec *);
+int rwlock_wlock(rwlock *, TCR *, struct timespec *);
+int rwlock_try_wlock(rwlock *, TCR *);
+int rwlock_try_rlock(rwlock *, TCR *);
+int rwlock_unlock(rwlock *, TCR *);
+
+
+natural 
+atomic_and(natural*, natural);
+
+natural 
+atomic_ior(natural*, natural);
+
+#define SET_TCR_FLAG(t,bit) atomic_ior(&(t->flags),(1L<<bit))
+#define CLR_TCR_FLAG(t,bit) atomic_and(&(t->flags),~(1L<<bit))
+
+
+#if defined(SIGRTMIN) && !defined(SOLARIS)
+#define SIG_SUSPEND_THREAD (SIGRTMIN+6)
+#else
+#define SIG_SUSPEND_THREAD SIGUSR2
+#endif
+
+
+#ifdef DARWIN
+#define SIG_KILL_THREAD SIGEMT
+#endif
+
+#if defined(LINUX) && defined(SIGRTMIN)
+#define SIG_KILL_THREAD (SIGRTMIN+7)
+#endif
+
+#ifdef SOLARIS
+#define SIG_KILL_THREAD SIGRTMIN
+#endif
+
+#ifdef FREEBSD
+#define SIG_KILL_THREAD (SIGTHR+5)
+#endif
+
+
+extern int thread_suspend_signal, thread_kill_signal;
+
+void *
+allocate_stack(natural);
+
+void
+suspend_resume_handler(int, siginfo_t *, ExceptionInformation *);
+
+/* Maybe later
+Boolean
+rwlock_try_rlock(rwlock *);
+
+Boolean
+rwlock_try_wlock(rwlock *);
+*/
Index: /branches/arm/lisp-kernel/area.h
===================================================================
--- /branches/arm/lisp-kernel/area.h	(revision 13357)
+++ /branches/arm/lisp-kernel/area.h	(revision 13357)
@@ -0,0 +1,219 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __AREA_H__
+#define __AREA_H__ 1
+
+
+#include "bits.h"
+#include "memprotect.h"
+
+
+
+typedef enum {
+  AREA_VOID = 0,		/* Not really an area at all */
+  AREA_CSTACK = 1<<fixnumshift, /* A control stack */
+  AREA_VSTACK = 2<<fixnumshift, /* A value stack.  The GC sees it as being doubleword-aligned */
+  AREA_TSTACK = 3<<fixnumshift, /* A temp stack.  It -is- doubleword-aligned */
+  AREA_READONLY = 4<<fixnumshift, /* A (cfm) read-only section. */
+  AREA_WATCHED = 5<<fixnumshift, /* A static area containing a single object. */
+  AREA_STATIC_CONS = 6<<fixnumshift, /* static, conses only */
+  AREA_MANAGED_STATIC = 7<<fixnumshift, /* A resizable static area */
+  AREA_STATIC = 8<<fixnumshift, /* A  static section: contains
+                                 roots, but not GCed */
+  AREA_DYNAMIC = 9<<fixnumshift /* A heap. Only one such area is "the heap."*/
+} area_code;
+
+typedef struct area {
+  struct area* pred;            /* linked list predecessor */
+  struct area* succ;            /* linked list successor */
+  char* low;                    /* arithmetic lower limit on addresses
+                                   (inclusive) */
+  char* high;                   /* arithmetic upper limit on addresses
+                                   (exclusive) */
+  char* active;                 /* low bound (stack) or high bound
+                                   (heap) */
+  char* softlimit;		/* only makes sense for dynamic heaps
+                                   & stacks */
+  char* hardlimit;		/* only makes sense for dynamic heaps
+                                   & stacks */
+  natural code;
+  natural*  markbits;           /* markbits for active area */
+  natural ndnodes;		/* "active" size of dynamic area or
+                                   stack */
+  struct area* older;		/* if ephemeral, the next older ephemeral area
+				 or the dynamic area */
+  struct area* younger;         /* if ephemeral, the next "younger"
+                                  ephemeral area if there is one.  If
+                                  dynamic, the oldest ephemeral
+                                  area. */
+  char*  h;			/* The pointer allocated to contain
+				 this area, or NULL if the operating
+				 system allocated it for us. */
+  protected_area* softprot;     /* "soft" protected_area */
+  protected_area* hardprot;     /* "hard" protected_area */
+  TCR * owner;                  /* TCR that the area belongs to, if a stack */
+  natural*  refbits;            /* intergenerational references.  May
+                                               or may not be the same
+                                               as markbits */
+  natural threshold;            /* egc threshold (boxed "fullword
+                                   count") or 0 */
+  LispObj gccount;              /* boxed generation GC count. */
+  natural static_dnodes;        /* for hash consing, maybe other things. */
+  natural *static_used;         /* bitvector */
+} area;
+
+
+/*
+  Areas are kept in a doubly-linked list.
+  The list header is just a distinguished element of
+  that list; by convention, the "active" dynamic
+  area is described by that header's successor, and areas
+  that may have entries in their "markbits" vector (heaps)
+  precede (in the area_list->succ sense) those  that don't (stacks).
+  The list header's "area" pointer is an "AREA_VOID" area; the header
+  (once allocated during kernel initialization) never
+  moves or changes.  Lisp code can get its hands on
+  the list header via a nilreg global, and carefully,
+  atomically, traverse it to do ROOM, etc.
+*/
+
+
+area *new_area(BytePtr, BytePtr, area_code);
+void add_area(area *, TCR *);
+void add_area_holding_area_lock(area *);
+void condemn_area(area *, TCR *);
+void condemn_area_holding_area_lock(area *);
+area *area_containing(BytePtr);
+area *stack_area_containing(BytePtr);
+area *heap_area_containing(BytePtr);
+void tenure_to_area(area *);
+void untenure_from_area(area *);
+
+/* serialize add_area/remove_area, and also the tcr queue */
+void *tcr_area_lock;
+
+#define reserved_area ((area *)(all_areas))
+#define active_dynamic_area ((area *)(reserved_area->succ))
+
+typedef struct area_list {
+  area *the_area;
+  struct area_list *next;
+} area_list;
+
+/* The usable size of a tsp or vsp stack segment.
+  */
+/* #define STACK_SEGMENT_SIZE (64<<10) */
+#define MIN_CSTACK_SIZE (1<<17)
+#define CSTACK_HARDPROT (100<<10)
+#define CSTACK_SOFTPROT (100<<10)
+#define MIN_VSTACK_SIZE (1<<16)
+#define VSTACK_HARDPROT (1<<12)
+#define VSTACK_SOFTPROT (1<<16)
+#define MIN_TSTACK_SIZE (1<<18)
+#define TSTACK_HARDPROT 0
+#define TSTACK_SOFTPROT (1<<16)
+#ifdef PPC
+#define CS_OVERFLOW_FORCE_LIMIT ((natural)(-(sizeof(lisp_frame))))
+#endif
+
+#ifdef X86
+#define CS_OVERFLOW_FORCE_LIMIT ((natural)(-16))
+#endif
+
+
+#ifdef PPC
+#ifdef LINUX
+#ifdef PPC64
+#define IMAGE_BASE_ADDRESS 0x50000000000L
+#else
+#define IMAGE_BASE_ADDRESS 0x31000000
+#endif
+#endif
+#ifdef DARWIN
+#ifdef PPC64
+#define IMAGE_BASE_ADDRESS 0x300000000000L
+#else
+#define IMAGE_BASE_ADDRESS 0x04000000
+#endif
+#endif
+#endif
+
+#ifdef X86
+#ifdef LINUX
+#ifdef X8664
+#define IMAGE_BASE_ADDRESS 0x300000000000L
+#else
+#define IMAGE_BASE_ADDRESS 0x10000000
+#endif
+#endif
+#ifdef FREEBSD
+#ifdef X8664
+#define IMAGE_BASE_ADDRESS 0x300000000000L /* 0x100000000L */
+#else
+#define IMAGE_BASE_ADDRESS 0x30000000
+#endif
+#endif
+#ifdef SOLARIS
+#ifdef X8664
+#define IMAGE_BASE_ADDRESS 0x300000000000L
+#else
+#define IMAGE_BASE_ADDRESS 0x10000000
+#endif
+#endif
+#ifdef DARWIN
+#ifdef X8664
+#define IMAGE_BASE_ADDRESS 0x300000000000L
+#else
+#define IMAGE_BASE_ADDRESS 0x04000000
+#endif
+#endif
+#endif
+#ifdef WINDOWS
+#ifdef X8664
+#define IMAGE_BASE_ADDRESS 0x100000000LL
+#else
+#define IMAGE_BASE_ADDRESS 0x04000000
+#endif
+#endif
+
+#if (WORD_SIZE==64)
+#define PURESPACE_RESERVE 0x2000000000LL /* 128 GB */
+#define PURESPACE_SIZE (1LL<<30LL)
+#else
+#define PURESPACE_RESERVE (128<<20) /* MB */
+#define PURESPACE_SIZE (64<<20)
+#endif
+
+#define STATIC_RESERVE heap_segment_size
+
+#ifndef X86
+#define STATIC_BASE_ADDRESS (0x00002000+(LOWMEM_BIAS))
+#else
+#define STATIC_BASE_ADDRESS (0x00012000+(LOWMEM_BIAS))
+#endif
+
+#define SPJUMP_TARGET_ADDRESS (STATIC_BASE_ADDRESS+0x3000)
+
+extern LispObj image_base;
+extern BytePtr pure_space_start, pure_space_active, pure_space_limit;
+extern BytePtr static_space_start, static_space_active, static_space_limit;
+extern area *find_readonly_area(void);
+extern BytePtr low_relocatable_address, high_relocatable_address,
+  low_markable_address, high_markable_address;
+
+#endif /* __AREA_H__ */
Index: /branches/arm/lisp-kernel/bits.c
===================================================================
--- /branches/arm/lisp-kernel/bits.c	(revision 13357)
+++ /branches/arm/lisp-kernel/bits.c	(revision 13357)
@@ -0,0 +1,70 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+
+#include "lisp.h"
+#include "bits.h"
+#include "lisp-exceptions.h"
+
+
+/* This should be a lot faster than calling set_bit N times */
+
+void
+set_n_bits(bitvector bits, natural first, natural n)
+{
+  if (n) {
+    natural
+      lastbit = (first+n)-1,
+      leftbit = first & bitmap_shift_count_mask,
+      leftmask = ALL_ONES >> leftbit,
+      rightmask = ALL_ONES << ((nbits_in_word-1) - (lastbit & bitmap_shift_count_mask)),
+      *wstart = ((natural *) bits) + (first>>bitmap_shift),
+      *wend = ((natural *) bits) + (lastbit>>bitmap_shift);
+
+    if (wstart == wend) {
+      *wstart |= (leftmask & rightmask);
+    } else {
+      *wstart++ |= leftmask;
+      n -= (nbits_in_word - leftbit);
+      
+      while (n >= nbits_in_word) {
+        *wstart++ = ALL_ONES;
+        n-= nbits_in_word;
+      }
+      
+      if (n) {
+        *wstart |= rightmask;
+      }
+    }
+  }
+}
+
+/* Note that this zeros longwords */
+void
+zero_bits(bitvector bits, natural nbits)
+{
+  memset(bits, 0, ((sizeof(natural)*(((nbits+(nbits_in_word-1)))>>bitmap_shift))));
+}
+
+void
+ior_bits(bitvector dest, bitvector src, natural nbits)
+{
+  while (nbits > 0) {
+    *dest++ |= *src++;
+    nbits -= nbits_in_word;
+  }
+}
Index: /branches/arm/lisp-kernel/bits.h
===================================================================
--- /branches/arm/lisp-kernel/bits.h	(revision 13357)
+++ /branches/arm/lisp-kernel/bits.h	(revision 13357)
@@ -0,0 +1,183 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+
+
+#ifndef __bits_h__
+#define __bits_h__ 1
+
+#include <string.h>
+
+typedef natural *bitvector;
+
+#if WORD_SIZE == 64
+#define bitmap_shift 6
+#define BIT0_MASK 0x8000000000000000ULL
+#define ALL_ONES  0xffffffffffffffffULL
+#define NATURAL1 1ULL
+#else
+#define bitmap_shift 5
+#define BIT0_MASK 0x80000000U 
+#define ALL_ONES  0xFFFFFFFFU
+#define NATURAL1 1U
+#endif
+
+#define bitmap_shift_count_mask ((1<<bitmap_shift)-1)
+
+static inline int
+set_bit(bitvector bits,natural bitnum)  __attribute__((always_inline));
+
+static inline int
+set_bit(bitvector bits,natural bitnum)
+{
+  natural
+    windex = bitnum>>bitmap_shift, 
+    old = bits[windex],
+    new = old | (BIT0_MASK >> (bitnum & bitmap_shift_count_mask));
+  if (new == old) {
+    return 1;			/* Was set */
+  } else {
+    bits[windex] = new;
+    return 0;			/* Was clear */
+  }
+}
+
+static inline int 
+atomic_set_bit(bitvector bits ,natural bitnum)
+{
+  extern natural atomic_ior(bitvector, natural);
+  natural
+    windex = bitnum>>bitmap_shift,
+    mask = (BIT0_MASK >> (bitnum & bitmap_shift_count_mask));
+
+  return atomic_ior(bits + windex, mask);
+}
+
+void set_n_bits(bitvector,natural,natural);
+
+static inline int
+clr_bit(bitvector bits, natural bitnum)
+{
+  natural
+    windex = bitnum>>bitmap_shift, 
+    old = bits[windex],
+    new = old & ~(BIT0_MASK >> (bitnum & bitmap_shift_count_mask));
+  if (new == old) {
+    return 0;	/* Was clear */
+  } else {
+    bits[windex] = new;
+    return 1;	/* Was set */
+  }
+}
+
+
+static inline unsigned
+ref_bit(bitvector bits,natural bitnum) __attribute__((always_inline));
+
+static inline unsigned
+ref_bit(bitvector bits,natural bitnum)
+{
+  return ((bits[bitnum>>bitmap_shift] & (BIT0_MASK >> (bitnum & bitmap_shift_count_mask))) != 0);
+}
+
+void zero_bits(bitvector, natural);
+void ior_bits(bitvector,bitvector,natural);
+
+#define bits_word_index(bitnum) (((natural)(bitnum)) >> bitmap_shift)
+#define bits_bit_index(bitnum) (((natural)(bitnum)) & bitmap_shift_count_mask)
+#define bits_word_ptr(bits,bitnum) \
+  ((natural*) (((natural*) bits) + ((natural) (bits_word_index(bitnum)))))
+#define bits_word_mask(bitnum) ((BIT0_MASK) >> bits_bit_index(bitnum))
+#define bits_indexed_word(bitv,indexw) ((((natural*)(bitv))[indexw]))
+#define bits_word(bitv,bitnum) bits_indexed_word(bits,bits_word_index(bitnum))
+
+/* Evaluates some arguments twice */
+
+#define set_bits_vars(BITVvar,BITNUMvar,BITPvar,BITWvar,MASKvar) \
+{ BITPvar = bits_word_ptr(BITVvar,BITNUMvar); BITWvar = *BITPvar; MASKvar = bits_word_mask(BITNUMvar); }
+
+#define set_bitidx_vars(BITVvar,BITNUMvar,BITPvar,BITWvar,BITIDXvar) \
+{ BITPvar = bits_word_ptr(BITVvar,BITNUMvar); BITIDXvar = bits_bit_index(BITNUMvar); \
+    BITWvar = (*BITPvar << BITIDXvar) >> BITIDXvar; }
+
+#ifdef __GNUC__
+static __inline__ natural
+current_stack_pointer(void) __attribute__((always_inline));
+
+static __inline__ natural
+current_stack_pointer(void)
+{
+#ifdef PPC
+  register natural _sp __asm__("r1");
+#endif
+#ifdef X8664
+  register natural _sp __asm__("%rsp");
+#endif
+#ifdef X8632
+  register natural _sp __asm__("%esp");
+#endif
+  return _sp;
+}
+#else
+natural
+current_stack_pointer(void);
+#endif
+
+#ifdef __GNUC__
+static __inline__ unsigned
+count_leading_zeros(natural w) __attribute__((always_inline));
+
+
+/* Beware: on some platforms, __builtin_clz[ll](0) returns an undefined
+   result */
+
+static __inline__ unsigned
+count_leading_zeros(natural w)
+{
+#if __GNUC__ >= 4
+#if WORD_SIZE == 64
+  return __builtin_clzll(w);  
+#else
+  return __builtin_clz(w);  
+#endif
+#else /* __GNUC__ < 4 */
+  natural lz;
+#ifdef PPC
+#ifdef PPC64
+  __asm__ __volatile__("cntlzd %0,%1" : "=r" (lz) : "r" (w));
+#else
+  __asm__ __volatile__("cntlzw %0,%1" : "=r" (lz) : "r" (w));
+#endif
+#endif /* PPC */
+#ifdef X86
+#ifdef X8664
+  __asm__ __volatile__("bsr %1,%0" : "=r" (lz) : "r" (w));
+  __asm__ __volatile__("xor $63,%0" : "+r" (lz));
+#else
+  __asm__ __volatile__("bsr %1,%0" : "=r" (lz) : "r" (w));
+  __asm__ __volatile__("xor $31,%0" : "+r" (lz));
+#endif 
+#endif
+  return lz;
+#endif
+}
+#else /* not __GNUC__ */
+unsigned
+count_leading_zeros(natural);
+#endif
+                                        
+#endif /* __bits_h__ */
Index: /branches/arm/lisp-kernel/darwinppc/.cvsignore
===================================================================
--- /branches/arm/lisp-kernel/darwinppc/.cvsignore	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinppc/.cvsignore	(revision 13357)
@@ -0,0 +1,1 @@
+*~.*
Index: /branches/arm/lisp-kernel/darwinppc/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/darwinppc/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinppc/.gdbinit	(revision 13357)
@@ -0,0 +1,39 @@
+define pl
+call print_lisp_object($arg0)
+end
+
+define arg_x
+pl $r21
+end
+
+define arg_y
+pl $r22
+end
+
+define arg_z
+pl $r23
+end
+
+define lw
+pl $r16
+end
+
+define fname
+pl $r17
+end
+
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
+handle SIGEMT pass nostop noprint
+# Work around apparent Apple GDB bug
+handle SIGTTIN nopass nostop noprint
Index: /branches/arm/lisp-kernel/darwinppc/Makefile
===================================================================
--- /branches/arm/lisp-kernel/darwinppc/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinppc/Makefile	(revision 13357)
@@ -0,0 +1,128 @@
+#
+#   Copyright (C) 1994-2001 Digitool, Inc
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+# For versions of GCC prior to 3.3, the option "-traditional-cpp" meant
+# "don't use precompiled headers", which was good advice since they didn't
+# work too well.  Beginning with GCC 3.3, the "-traditional-cpp" means 
+# "use a broken preprocessor", which is (in a sense) the opposite of what
+# it used to mean.
+
+# Try to determine the version of GCC in use.  Invoke gcc with the
+# -v flag, and look for a line containing the phrase "specs from" in
+# the output.  Use sed to extract the full pathname of ths specs file
+# printed in that line, then strip off the trailing "/specs".
+gccdir = $(shell $(CC) -v 2>&1 | grep "specs from" | sed -e 's/.*from //' -e 's|/specs||')
+# $(gccdir) is set to the directory containing the specs file, without the
+# trailing slash.  The make intrinsic 'notdir' will strip a leading directory
+# prefix from that pathname, leaving us with a string that should match
+# the gcc version number
+ifneq ($(gccdir),)
+gccversion:=$(notdir $(gccdir))
+oldgcc:=$(shell expr $(gccversion) "<" "3.3")
+pregcc4:=$(shell expr $(gccversion) "<" "4.0")
+ifeq ($(oldgcc),1)
+BROKEN_PREPROCESSOR_WORKAROUND = -traditional-cpp
+endif
+endif
+
+MDYNAMIC_NO_PIC = $(shell ($(CC) --help -v 2>&1 | grep -q -e "-mdynamic-no-pic") && /bin/echo "-mdynamic-no-pic")
+
+OPENMCL_MAJOR_VERSION=0
+OPENMCL_MINOR_VERSION=14
+
+VPATH = ..
+RM = /bin/rm
+LD = ld
+LDFLAGS = -arch ppc -dynamic  -o $@ -e start -pagezero_size 0x1000 -seg1addr 0x00001000 -sectalign __TEXT __text 0x1000 
+AS = as
+M4 = gm4
+M4FLAGS = -DDARWIN -DPPC
+ASFLAGS = -arch ppc -force_cpusubtype_ALL
+CDEFINES = -DDARWIN -DPPC  $(BROKEN_PREPROCESSOR_WORKAROUND) #-DDEBUG -DGC_INTEGRITY_CHECKING
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS) $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c -arch ppc $< $(CDEFINES) $(CDEBUG) $(COPT) -Wno-deprecated-declarations $(WFORMAT) -mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk  $(MDYNAMIC_NO_PIC) -o $@
+
+SPOBJ = ppc-spjump.o ppc-spentry.o  ppc-subprims.o 
+ASMOBJ = ppc-asmutils.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o ppc-gc.o bits.o  ppc-exceptions.o \
+	thread_manager.o lisp-debug.o image.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o plbt.o ppc_print.o
+KERNELOBJ= imports.o $(COBJ) ppc-asmutils.o 
+
+SPINC =	lisp.s m4macros.m4 ppc-constants.s ppc-macros.s errors.s ppc-uuo.s ppc-constants32.s
+
+CHEADERS = area.h bits.h ppc-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h lisptypes.h ppc-constants32.h ppc-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ= $(SPOBJ)
+all:	../../dppccl
+
+
+# No:
+
+# KSPOBJ=
+
+OSEARLYLIBS = -lcrt1.o
+OSLATELIBS = -lSystem
+
+# gcc 4.0 and later want to use -lSystemStubs for many of the
+# runtime support functions that were in -lgcc in previous
+# versions.  'pregcc4' may have been set above.
+ifeq ($(pregcc4),1)
+OSMIDDLELIBS = -lgcc
+else
+OSMIDDLELIBS = -lSystemStubs
+endif
+
+OSLIBS = $(OSEARLYLIBS) $(OSMIDDLELIBS) $(OSLATELIBS)
+
+../../dppccl:	 $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ)
+	$(LD)  $(LDFLAGS) $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ)   $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+
+thread_manager.o: thread_manager.c 
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../dppccl 
+
+# Some earlier versions of this Makefile built "subprims_r.o".  
+# (That file is now defunct.)
+clean:	cclean
+	$(RM) -f $(SPOBJ) $(KSPOBJ) subprims_r.o
+
+strip:	../../dppccl
+	strip -s retain ../../dppccl
Index: /branches/arm/lisp-kernel/darwinppc/retain
===================================================================
--- /branches/arm/lisp-kernel/darwinppc/retain	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinppc/retain	(revision 13357)
@@ -0,0 +1,3 @@
+#symbols that must be retained in a lisp kernel image
+# % strip -s <this file> dppccl
+_catch_exception_raise
Index: /branches/arm/lisp-kernel/darwinppc64/.cvsignore
===================================================================
--- /branches/arm/lisp-kernel/darwinppc64/.cvsignore	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinppc64/.cvsignore	(revision 13357)
@@ -0,0 +1,1 @@
+*~.*
Index: /branches/arm/lisp-kernel/darwinppc64/Makefile
===================================================================
--- /branches/arm/lisp-kernel/darwinppc64/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinppc64/Makefile	(revision 13357)
@@ -0,0 +1,132 @@
+#
+#   Copyright (C) 2005 Clozure Associates and contributors.
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+# For versions of GCC prior to 3.3, the option "-traditional-cpp" meant
+# "don't use precompiled headers", which was good advice since they didn't
+# work too well.  Beginning with GCC 3.3, the "-traditional-cpp" means 
+# "use a broken preprocessor", which is (in a sense) the opposite of what
+# it used to mean.
+
+# Try to determine the version of GCC in use.  Invoke gcc with the
+# -v flag, and look for a line containing the phrase "specs from" in
+# the output.  Use sed to extract the full pathname of the specs file
+# printed in that line, then strip off the trailing "/specs".
+gccdir = $(shell $(CC) -v 2>&1 | grep "specs from" | sed -e 's/.*from //' -e 's|/specs||')
+# $(gccdir) is set to the directory containing the specs file, without the
+# trailing slash.  The make intrinsic 'notdir' will strip a leading directory
+# prefix from that pathname, leaving us with a string that should match
+# the gcc version number
+#gccversion:=$(notdir $(gccdir))
+#oldgcc:=$(shell expr $(gccversion) "<" "3.3")
+#ifeq ($(oldgcc),1)
+#BROKEN_PREPROCESSOR_WORKAROUND = -traditional-cpp
+#endif
+
+MDYNAMIC_NO_PIC = $(shell ($(CC) --help -v 2>&1 | grep -q -e "-mdynamic-no-pic") && /bin/echo "-mdynamic-no-pic")
+
+OPENMCL_MAJOR_VERSION=0
+OPENMCL_MINOR_VERSION=14
+
+VPATH = ..
+RM = /bin/rm
+LD = ld64
+
+### The -pagezero_size/-seg1addr args are an attempt to work around a
+### bug (#4057702) in ld64.
+
+### The -seg1addr and -pagezero_size arguments below are nonsense;
+### early versions of ld64 were/are broken.
+LDFLAGS = -macosx_version_min 10.4 -M -arch ppc64 -dynamic  -o $@ -e start -pagezero_size 0x1000 -seg1addr 0x1000 -sectalign __TEXT __text 0x1000
+AS = as
+M4 = gm4
+M4FLAGS = -DDARWIN -DPPC -DPPC64
+ASFLAGS = -arch ppc64
+CDEFINES = -DDARWIN -DPPC -DPPC64 $(BROKEN_PREPROCESSOR_WORKAROUND)
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS) $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< -arch ppc64 -m64 $(CDEFINES) $(CDEBUG) $(COPT) $(MDYNAMIC_NO_PIC) $(WFORMAT) -mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk -o $@
+
+SPOBJ = ppc-spjump.o ppc-spentry.o ppc-subprims.o 
+ASMOBJ = ppc-asmutils.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o ppc-gc.o bits.o  ppc-exceptions.o \
+	thread_manager.o lisp-debug.o image.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o plbt.o ppc_print.o
+KERNELOBJ= imports.o $(COBJ) ppc-asmutils.o 
+
+SPINC =	lisp.s m4macros.m4 ppc-constants.s ppc-macros.s errors.s ppc-uuo.s \
+	ppc-constants64.s
+
+CHEADERS = area.h bits.h ppc-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h lisptypes.h ppc-constants64.h ppc-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ= $(SPOBJ)
+all:	../../dppccl64
+
+
+# No:
+
+# KSPOBJ=
+
+OSEARLYLIBS = -lcrt1.o
+OSLATELIBS = -lSystem -lmx
+
+# If the linker can find an absolute path to -lSystemStubs, use
+# -lSystemStubs; otherwise, just use libgcc.a
+SYSTEMSTUBSPATH = $(shell $(CC) --print-file-name=libSystemStubs.a)
+SYSTEMSTUBSABSOLUTE = $(shell expr $(SYSTEMSTUBSPATH) : "^/*")
+ifeq ($(SYSTEMSTUBSABSOLUTE),1)
+OSMIDDLELIBS = -lSystemStubs
+else
+OSMIDDLELIBS = -lgcc
+endif
+
+OSLIBS = $(OSEARLYLIBS) $(OSMIDDLELIBS) $(OSLATELIBS)
+
+../../dppccl64:	 $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile
+	$(LD)  $(LDFLAGS) $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ)   $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+thread_manager.o: thread_manager.c 
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../dppccl64 
+
+# Some earlier versions of this Makefile built "subprims_r.o".  
+# (That file is now defunct.)
+clean:	cclean
+	$(RM) -f $(SPOBJ) $(KSPOBJ) subprims_r.o
+
+strip:	../../dppccl64
+	strip -s retain ../../dppccl64
Index: /branches/arm/lisp-kernel/darwinx8632/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/darwinx8632/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinx8632/.gdbinit	(revision 13357)
@@ -0,0 +1,48 @@
+define pl
+  call print_lisp_object($arg0)
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x3001
+   set $car = *((LispObj *)($l+3))
+   set $l =  *((LispObj *)($l-1))
+   pl $car
+  end
+end
+
+
+define fn
+  pl $edi
+end
+
+define arg_y
+ pl $esi
+end
+
+define arg_z
+ pl $ebx
+end
+
+define offset
+ p (int)$pc-$edi
+end
+
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
+handle SIGEMT pass nostop noprint
+# Work around apparent Apple GDB bug
+handle SIGTTIN nopass nostop noprint
+# Work around Leopard bug du jour
+handle SIGSYS pass nostop noprint
+
Index: /branches/arm/lisp-kernel/darwinx8632/Makefile
===================================================================
--- /branches/arm/lisp-kernel/darwinx8632/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinx8632/Makefile	(revision 13357)
@@ -0,0 +1,110 @@
+#
+#   Copyright (C) 2005 Clozure Associates and contributors.
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+MDYNAMIC_NO_PIC = $(shell ($(CC) --help -v 2>&1 | grep -q -e "-mdynamic-no-pic") && /bin/echo "-mdynamic-no-pic")
+
+VPATH = ..
+RM = /bin/rm
+LD = ld
+LDFLAGS =  -macosx_version_min 10.4 -arch i386 -dynamic  -o $@ -e start -pagezero_size 0x11000 -seg1addr 0x00011000 -sectalign __TEXT __text 0x1000 
+AS = as
+M4 = gm4
+M4FLAGS = -DDARWIN -DX86 -DX8632
+ASFLAGS = -arch i386 -g
+CDEFINES = -DDARWIN -DX86 -DX8632 #-DGC_INTEGRITY_CHECKING -DFORCE_DWS_MARK -DDISABLE_EGC -DDEBUG_MACH_EXCEPTIONS
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+CC=gcc-4.0
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS) $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c -arch i386 $< $(CDEFINES) $(CDEBUG) $(COPT) $(MDYNAMIC_NO_PIC) $(WFORMAT) -mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk -o $@
+
+SPOBJ = x86-spjump32.o x86-spentry32.o x86-subprims32.o
+ASMOBJ = x86-asmutils32.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o bits.o  \
+	thread_manager.o lisp-debug.o image.o memory.o x86-gc.o \
+	x86-exceptions.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o x86_print.o xlbt.o
+KERNELOBJ= imports.o $(COBJ) x86-asmutils32.o 
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s x86-constants32.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h lisptypes.h x86-constants32.h x86-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ= $(SPOBJ)
+all:	../../dx86cl
+
+
+# No:
+
+# KSPOBJ=
+
+OSEARLYLIBS = -lcrt1.o
+OSLATELIBS = -lSystem
+
+# is this needed?
+#OSMIDDLELIBS = -lSystemStubs
+
+OSLIBS = $(OSEARLYLIBS) $(OSMIDDLELIBS) $(OSLATELIBS)
+
+$(KSPOBJ): tiger-sdk-check
+$(KERNELOBJ) : tiger-sdk-check
+$(DEBUGOBJ) : tiger-sdk-check
+Makefile : tiger-sdk-check
+
+../../dx86cl:	 tiger-sdk-check $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile
+	$(LD)  $(LDFLAGS) $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ)   $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+
+thread_manager.o: thread_manager.c 
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../dx86cl 
+
+# Some earlier versions of this Makefile built "subprims_r.o".  
+# (That file is now defunct.)
+clean:	cclean
+	$(RM) -f $(SPOBJ) $(KSPOBJ) subprims_r.o
+
+# retain file not here at the moment
+strip:	../../dx86cl
+	strip -s retain ../../dx86cl
+
+.PHONY: tiger-sdk-check
+tiger-sdk-check:
+	@test -d /Developer/SDKs/MacOSX10.4u.sdk || \
+		 (echo "*** Install Xcode 10.4 support"; exit 1)
+
+
Index: /branches/arm/lisp-kernel/darwinx8664/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/darwinx8664/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinx8664/.gdbinit	(revision 13357)
@@ -0,0 +1,78 @@
+define x86_lisp_string
+x/s $arg0-5
+end
+
+define x86pname
+set $temp=*((long *)((long)($arg0-6)))
+x86_lisp_string $temp
+end
+
+define gtra
+br *$r10
+cont
+end
+
+
+define pname
+ x86pname $arg0
+end
+
+define pl
+ call print_lisp_object($arg0)
+end
+
+define lw
+ pl $r13
+end
+
+define clobber_breakpoint
+  set *(short *)($pc-2)=0x9090
+end
+
+define arg_z
+ pl $rsi
+end
+
+define arg_y
+ pl $rdi
+end
+
+define arg_x
+ pl $r8
+end
+
+define bx
+ pl $rbx
+end
+
+
+define lbt
+ call plbt_sp($rbp)
+end
+
+define ada
+ p/x *(all_areas->succ)
+end
+
+define lregs
+ call debug_lisp_registers($arg0,0,0)
+end
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
+handle SIGEMT pass nostop noprint
+# Work around apparent Apple GDB bug
+handle SIGTTIN nopass nostop noprint
+# Work around Leopard bug du jour
+handle SIGSYS pass nostop noprint
+handle SIGQUIT pass nostop noprint
+
Index: /branches/arm/lisp-kernel/darwinx8664/Makefile
===================================================================
--- /branches/arm/lisp-kernel/darwinx8664/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/darwinx8664/Makefile	(revision 13357)
@@ -0,0 +1,130 @@
+#
+#   Copyright (C) 2005 Clozure Associates and contributors.
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+
+
+MDYNAMIC_NO_PIC = $(shell ($(CC) --help -v 2>&1 | grep -q -e "-mdynamic-no-pic") && /bin/echo "-mdynamic-no-pic")
+
+
+VPATH = ..
+RM = /bin/rm
+LD = ld
+CC=gcc-4.0
+
+### Current ld64 bugs include the claim that 0x1000 isn't a power of 2.
+### Gosh.  I always thought that it was.  Go know, right ?
+LDFLAGS = -macosx_version_min 10.4 -arch x86_64 -dynamic  -o $@ -e start -pagezero_size 0x11000 -seg1addr 0x00011000
+
+
+AS = as
+M4 = gm4
+###
+### DARWIN_GS_HACK enables some awful, dangerous, and slow workarounds
+### for the fact that early versions of x86-64 Darwin don't provide
+### working mechanisms for threads to address thread-local-data
+### relative to a spare segment register.  We instead use the
+### undocumented mechanism which the pthreads library uses to
+### keep pthread data in %gs, and switch %gs between pthread data
+### when running foreign code and lisp tcr data when running lisp
+### code.  Hopefully, we won't have to do this for very long.
+###
+### (Things like i386_set_ldt() are defined, but not implemented
+### correctly on the libc side and not implemented at all on the
+### Mach kernel side.)
+###
+### Apple never
+M4FLAGS = -DDARWIN -DX86 -DX8664 -DTCR_IN_GPR
+ASFLAGS = -arch x86_64 -g
+CDEFINES = -DDARWIN -DX86 -DX8664 -DTCR_IN_GPR
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS) $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< -arch x86_64 -m64 $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) $(MDYNAMIC_NO_PIC) -mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk -o $@
+
+SPOBJ = x86-spjump64.o x86-spentry64.o x86-subprims64.o 
+ASMOBJ = x86-asmutils64.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	thread_manager.o lisp-debug.o image.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= imports.o $(COBJ) x86-asmutils64.o 
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants64.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h lisptypes.h x86-constants64.h x86-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ= $(SPOBJ)
+all:	../../dx86cl64
+
+
+# No:
+
+# KSPOBJ=
+
+OSEARLYLIBS = -lcrt1.o
+OSLATELIBS = -lSystem
+
+OSMIDDLELIBS = 
+
+
+OSLIBS = $(OSEARLYLIBS) $(OSMIDDLELIBS) $(OSLATELIBS)
+
+$(KSPOBJ): tiger-sdk-check
+$(KERNELOBJ) : tiger-sdk-check
+$(DEBUGOBJ) : tiger-sdk-check
+Makefile : tiger-sdk-check
+
+../../dx86cl64:	 tiger-sdk-check $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile
+	$(LD) $(LDFLAGS) $(KSPOBJ) $(KERNELOBJ)  $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+thread_manager.o: thread_manager.c 
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../dx86cl64 
+
+# Some earlier versions of this Makefile built "subprims_r.o".  
+# (That file is now defunct.)
+clean:	cclean
+	$(RM) -f $(SPOBJ) $(KSPOBJ) subprims_r.o
+
+strip:	../../dx86cl64
+	strip -s retain ../../dx86cl64
+
+.PHONY: tiger-sdk-check
+tiger-sdk-check:
+	@test -d /Developer/SDKs/MacOSX10.4u.sdk || \
+		(echo "*** Install Xcode 10.4 support"; exit 1)
+
Index: /branches/arm/lisp-kernel/errors.s
===================================================================
--- /branches/arm/lisp-kernel/errors.s	(revision 13357)
+++ /branches/arm/lisp-kernel/errors.s	(revision 13357)
@@ -0,0 +1,236 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+ 
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+ 
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+ 
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+
+	
+
+error_reg_errnum = 0		/* "real" (typically negative) error number is in RB */
+error_udf = 1
+error_udf_call = 2
+error_throw_tag_missing = 3
+error_alloc_failed = 4
+error_stack_overflow = 5
+error_excised_function_call = 6
+error_too_many_values = 7
+error_propagate_suspend = 10
+error_interrupt = 11
+error_suspend = 12
+error_suspend_all = 13
+error_resume = 14
+error_resume_all = 15					
+error_cant_call = 17
+        
+error_type_error = 128
+
+define(`__type_error_counter__',128)
+define(`def_type_error',`
+error_object_not_$1 = __type_error_counter__
+        define(`__type_error_counter__',eval(__type_error_counter__+1))')
+
+	def_type_error(array)
+	def_type_error(bignum)
+	def_type_error(fixnum)
+	def_type_error(character)
+	def_type_error(integer)
+	def_type_error(list)
+	def_type_error(number)
+	def_type_error(sequence)
+	def_type_error(simple_string)
+	def_type_error(simple_vector)
+	def_type_error(string)
+	def_type_error(symbol)
+	def_type_error(macptr)
+	def_type_error(real)
+	def_type_error(cons)
+	def_type_error(unsigned_byte)
+	def_type_error(radix)
+	def_type_error(float)
+	def_type_error(rational)
+	def_type_error(ratio)
+	def_type_error(short_float)
+	def_type_error(double_float)
+	def_type_error(complex)
+	def_type_error(vector)
+	def_type_error(simple_base_string)
+	def_type_error(function)
+	def_type_error(unsigned_byte_16)
+	def_type_error(unsigned_byte_8)
+	def_type_error(unsigned_byte_32)
+	def_type_error(signed_byte_32)
+	def_type_error(signed_byte_16)
+	def_type_error(signed_byte_8)	
+	def_type_error(base_character)
+	def_type_error(bit)
+	def_type_error(unsigned_byte_24)
+	def_type_error(u64)
+	def_type_error(s64)
+        def_type_error(unsigned_byte_56)
+        def_type_error(simple_array_double_float_2d)
+        def_type_error(simple_array_single_float_2d)
+        def_type_error(mod_char_code_limit)
+        def_type_error(array_2d)
+        def_type_error(array_3d)
+        def_type_error(array_t)
+        def_type_error(array_bit)
+        def_type_error(array_s8)
+        def_type_error(array_u8)
+        def_type_error(array_s16)
+        def_type_error(array_u16)
+        def_type_error(array_s32)
+        def_type_error(array_u32)
+        def_type_error(array_s64)
+        def_type_error(array_u64)
+        def_type_error(array_fixnum)
+        def_type_error(array_single_float)
+        def_type_error(array_double_float)
+        def_type_error(array_char)
+        def_type_error(array_t_2d)
+        def_type_error(array_bit_2d)
+        def_type_error(array_s8_2d)
+        def_type_error(array_u8_2d)
+        def_type_error(array_s16_2d)
+        def_type_error(array_u16_2d)
+        def_type_error(array_s32_2d)
+        def_type_error(array_u32_2d)
+        def_type_error(array_s64_2d)
+        def_type_error(array_u64_2d)
+        def_type_error(array_fixnum_2d)
+        def_type_error(array_single_float_2d)
+        def_type_error(array_double_float_2d)
+        def_type_error(array_char_2d)
+        def_type_error(simple_array_t_2d)
+        def_type_error(simple_array_bit_2d)
+        def_type_error(simple_array_s8_2d)
+        def_type_error(simple_array_u8_2d)
+        def_type_error(simple_array_s16_2d)
+        def_type_error(simple_array_u16_2d)
+        def_type_error(simple_array_s32_2d)
+        def_type_error(simple_array_u32_2d)
+        def_type_error(simple_array_s64_2d)
+        def_type_error(simple_array_u64_2d)
+        def_type_error(simple_array_fixnum_2d)
+        def_type_error(simple_array_char_2d)
+        def_type_error(array_t_3d)
+        def_type_error(array_bit_3d)
+        def_type_error(array_s8_3d)
+        def_type_error(array_u8_3d)
+        def_type_error(array_s16_3d)
+        def_type_error(array_u16_3d)
+        def_type_error(array_s32_3d)
+        def_type_error(array_u32_3d)
+        def_type_error(array_s64_3d)
+        def_type_error(array_u64_3d)
+        def_type_error(array_fixnum_3d)
+        def_type_error(array_single_float_3d)
+        def_type_error(array_double_float_3d)
+        def_type_error(array_char_3d)
+        def_type_error(simple_array_t_3d)
+        def_type_error(simple_array_bit_3d)
+        def_type_error(simple_array_s8_3d)
+        def_type_error(simple_array_u8_3d)
+        def_type_error(simple_array_s16_3d)
+        def_type_error(simple_array_u16_3d)
+        def_type_error(simple_array_s32_3d)
+        def_type_error(simple_array_u32_3d)
+        def_type_error(simple_array_s64_3d)
+        def_type_error(simple_array_u64_3d)
+        def_type_error(simple_array_fixnum_3d)
+        def_type_error(simple_array_single_float_3d)
+        def_type_error(simple_array_double_float_3d)
+        def_type_error(simple_array_char_3d)
+        def_type_error(vector_t)
+        def_type_error(bit_vector)
+        def_type_error(vector_s8)
+        def_type_error(vector_u8)
+        def_type_error(vector_s16)
+        def_type_error(vector_u16)
+        def_type_error(vector_s32)
+        def_type_error(vector_u32)
+        def_type_error(vector_s64)
+        def_type_error(vector_u64)
+        def_type_error(vector_fixnum)
+        def_type_error(vector_single_float)
+        def_type_error(vector_double_float)
+        
+        
+	
+/* These are the "old" error constants that %ERR-DISP understands */
+
+define(`deferr',`
+$1 = $2<<fixnumshift')
+
+
+	deferr(XVUNBND,1)
+	deferr(XBADVEC,2)
+	deferr(XTMINPS,3)
+	deferr(XNEINPS,4)
+	deferr(XWRNGINP,5)
+	deferr(XFUNBND,6)
+	deferr(XSETBADVEC,7)
+	deferr(XCOERCE,8)
+	deferr(XWRONGSYS,9)
+	deferr(XNOMEM,10)
+	deferr(XOPENIMAGE,11)
+	deferr(XNOTFUN,13)
+	deferr(XNOCTAG,33)
+	deferr(XNOFPU,36)
+	deferr(XBADTOK,49)
+	deferr(XFLOVFL,64)
+	deferr(XDIVZRO,66)
+	deferr(XFLDZRO,66)
+	deferr(XMEMFULL,76)
+	deferr(XARRLIMIT,77)
+	deferr(XSTKOVER,75)
+	deferr(XFLEXC,98)
+	deferr(XMFULL,-41)
+
+	deferr(XARROOB,112)
+	deferr(XCONST,115)
+	deferr(XNOSPREAD,120)
+	deferr(XFASLVERS,121)
+	deferr(XNOTFASL,122)
+	deferr(XUDFCALL,123)
+	deferr(XWRONGIMAGE,124)
+
+	deferr(XNOPKG,130)
+	deferr(XBADFASL,132)
+	deferr(XSYMACC,135)
+	deferr(XEXPRTC,136)
+	deferr(XNDIMS,148)
+	deferr(XNARGS,150)
+	deferr(XBADKEYS,153)
+	deferr(XWRONGTYPE,157)
+	deferr(XBADSTRUCT,158)
+	deferr(XSTRUCTBOUNDS,159)
+	deferr(XCALLNOTLAMBDA,160)
+	deferr(XTEMPFLT,161)
+	deferr(XCALLTOOMANY,167)
+	deferr(XCALLTOOFEW,168)
+	deferr(XCALLNOMATCH,169)
+	deferr(XIMPROPERLIST,170)
+	deferr(XNOFILLPTR,171)
+	deferr(XMALADJUST,172)
+	deferr(XACCESSNTH,173)
+	deferr(XNOTELT,174)
+	deferr(XSGEXHAUSTED,175)
+	deferr(XSGNARGS,176)
+	deferr(XTOOMANYVALUES,177)
+        deferr(XSYMNOBIND,178)
+	deferr(XFOREIGNEXCEPTION,200)
+
+error_FPU_exception_double = 1024
+error_FPU_exception_short = 1025
+error_memory_full = 2048
Index: /branches/arm/lisp-kernel/freebsdx8632/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/freebsdx8632/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/freebsdx8632/.gdbinit	(revision 13357)
@@ -0,0 +1,43 @@
+define pl
+  call print_lisp_object($arg0)
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x3001
+   set $car = *((LispObj *)($l+3))
+   set $l =  *((LispObj *)($l-1))
+   pl $car
+  end
+end
+
+
+define fn
+  pl $edi
+end
+
+define arg_y
+ pl $esi
+end
+
+define arg_z
+ pl $ebx
+end
+
+define offset
+ p (int)$pc-$edi
+end
+
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGEMT pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
Index: /branches/arm/lisp-kernel/freebsdx8632/Makefile
===================================================================
--- /branches/arm/lisp-kernel/freebsdx8632/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/freebsdx8632/Makefile	(revision 13357)
@@ -0,0 +1,84 @@
+#
+#   Copyright (C) 2005-2006 Clozure Associates
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ..
+RM = /bin/rm
+AS = as
+M4 = m4
+ASFLAGS = --32
+M4FLAGS = -DFREEBSD -DX86 -DX8632 -DHAVE_TLS
+CDEFINES = -DFREEBSD -D_REENTRANT -DX86 -DX8632 -D_GNU_SOURCE -DHAVE_TLS
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+
+
+
+SPOBJ = pad.o x86-spjump32.o x86-spentry32.o x86-subprims32.o
+ASMOBJ = x86-asmutils32.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o  x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils32.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants32.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants32.h x86-exceptions.h
+
+.if $(MACHINE) == "amd64"
+CROSS = -B/usr/lib32
+.endif
+
+KSPOBJ = $(SPOBJ)
+all:	../../fx86cl
+
+
+OSLIBS = -lm -lthr $(CROSS)
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m32 -o $@
+
+LINKSCRIPTFILE = # 
+LINKSCRIPT =  # -T $(LINKSCRIPTFILE)
+
+../../fx86cl:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(LINKSCRIPTFILE)
+	$(CC) -m32 $(CDEBUG)  -Wl,--export-dynamic  $(LINKSCRIPT)  -o $@  $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../fx86cl
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../fx86cl
+	strip -g ../../fx86cl
Index: /branches/arm/lisp-kernel/freebsdx8632/fpu.h
===================================================================
--- /branches/arm/lisp-kernel/freebsdx8632/fpu.h	(revision 13357)
+++ /branches/arm/lisp-kernel/freebsdx8632/fpu.h	(revision 13357)
@@ -0,0 +1,70 @@
+/* These definitions are taken from the file /usr/include/machine/npx.h,
+   which isn't distributed with amd64 versions of FreeBSD */
+
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	from: @(#)npx.h	5.3 (Berkeley) 1/18/91
+ * $FreeBSD: src/sys/i386/include/npx.h,v 1.29.2.1 2006/07/01 00:57:55 davidxu Exp $
+ */
+
+struct  ccl_envxmm {
+	u_int16_t	en_cw;		/* control word (16bits) */
+	u_int16_t	en_sw;		/* status word (16bits) */
+	u_int16_t	en_tw;		/* tag word (16bits) */
+	u_int16_t	en_opcode;	/* opcode last executed (11 bits ) */
+	u_int32_t	en_fip;		/* floating point instruction pointer */
+	u_int16_t	en_fcs;		/* floating code segment selector */
+	u_int16_t	en_pad0;	/* padding */
+	u_int32_t	en_foo;		/* floating operand offset */
+	u_int16_t	en_fos;		/* floating operand segment selector */
+	u_int16_t	en_pad1;	/* padding */
+	u_int32_t	en_mxcsr;	/* SSE control/status register */
+	u_int32_t	en_mxcsr_mask;	/* valid bits in mxcsr */
+};
+
+struct  ccl_xmmacc {
+	u_char	xmm_bytes[16];
+};
+
+struct ccl_fpacc87 {
+	u_char	fp_bytes[10];
+};
+
+struct  ccl_savexmm {
+	struct	ccl_envxmm	sv_env;
+	struct {
+		struct ccl_fpacc87	fp_acc;
+		u_char		fp_pad[6];      /* padding */
+	} sv_fp[8];
+	struct ccl_xmmacc	sv_xmm[8];
+	u_char sv_pad[224];
+} __aligned(16);
Index: /branches/arm/lisp-kernel/freebsdx8664/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/freebsdx8664/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/freebsdx8664/.gdbinit	(revision 13357)
@@ -0,0 +1,75 @@
+define x86_lisp_string
+x/s $arg0-5
+end
+
+define x86pname
+set $temp=*((long *)((long)($arg0-6)))
+x86_lisp_string $temp
+end
+
+
+define pname
+ x86pname $arg0
+end
+
+define l
+ call print_lisp_object($arg0)
+end
+
+define lw
+ l $r13
+end
+
+define clobber_breakpoint
+  set *(short *)($pc-2)=0x9090
+end
+
+define arg_z
+ l $rsi
+end
+
+define arg_y
+ l $rdi
+end
+
+define arg_x
+ l $r8
+end
+
+define bx
+ l $rbx
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x200b
+   set $car = *((LispObj *)($l+5))
+   set $l =  *((LispObj *)($l-3))
+   l $car
+  end
+end
+
+define lbt
+ call plbt_sp($rbp)
+end
+
+define ada
+ p/x *(all_areas->succ)
+end
+
+define lregs
+ call debug_lisp_registers($arg0,0,0)
+end
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGEMT pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
Index: /branches/arm/lisp-kernel/freebsdx8664/Makefile
===================================================================
--- /branches/arm/lisp-kernel/freebsdx8664/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/freebsdx8664/Makefile	(revision 13357)
@@ -0,0 +1,81 @@
+#
+#   Copyright (C) 2005-2006 Clozure Associates
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ..
+RM = /bin/rm
+AS = as
+M4 = m4
+ASFLAGS = --64
+M4FLAGS = -DFREEBSD -DX86 -DX8664 -DHAVE_TLS
+CDEFINES = -DFREEBSD -D_REENTRANT -DX86 -DX8664 -D_GNU_SOURCE -DHAVE_TLS
+CDEBUG = -g
+COPT = #-O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+
+
+
+SPOBJ = pad.o x86-spjump64.o x86-spentry64.o x86-subprims64.o
+ASMOBJ = x86-asmutils64.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o  x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils64.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants64.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants64.h x86-exceptions.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../fx86cl64
+
+
+OSLIBS = -lm -lthr
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m64 -o $@
+
+LINKSCRIPTFILE = # ./elf_x86_64.x
+LINKSCRIPT =  # -T $(LINKSCRIPTFILE)
+
+../../fx86cl64:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(LINKSCRIPTFILE)
+	$(CC) -m64 $(CDEBUG)  -Wl,--export-dynamic  $(LINKSCRIPT)  -o $@  $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../fx86cl64
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../fx86cl64
+	strip -g ../../fx86cl64
Index: /branches/arm/lisp-kernel/gc-common.c
===================================================================
--- /branches/arm/lisp-kernel/gc-common.c	(revision 13357)
+++ /branches/arm/lisp-kernel/gc-common.c	(revision 13357)
@@ -0,0 +1,1763 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp_globals.h"
+#include "bits.h"
+#include "gc.h"
+#include "area.h"
+#include "Threads.h"
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef WINDOWS
+#include <sys/time.h>
+#endif
+
+#ifndef timeradd
+# define timeradd(a, b, result)						      \
+  do {									      \
+    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec;			      \
+    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec;			      \
+    if ((result)->tv_usec >= 1000000)					      \
+      {									      \
+	++(result)->tv_sec;						      \
+	(result)->tv_usec -= 1000000;					      \
+      }									      \
+  } while (0)
+#endif
+#ifndef timersub
+# define timersub(a, b, result)						      \
+  do {									      \
+    (result)->tv_sec = (a)->tv_sec - (b)->tv_sec;			      \
+    (result)->tv_usec = (a)->tv_usec - (b)->tv_usec;			      \
+    if ((result)->tv_usec < 0) {					      \
+      --(result)->tv_sec;						      \
+      (result)->tv_usec += 1000000;					      \
+    }									      \
+  } while (0)
+#endif
+
+void
+comma_output_decimal(char *buf, int len, natural n) 
+{
+  int nout = 0;
+
+  buf[--len] = 0;
+  do {
+    buf[--len] = n%10+'0';
+    n = n/10;
+    if (n == 0) {
+      while (len) {
+        buf[--len] = ' ';
+      }
+      return;
+    }
+    if (len == 0) return;
+    nout ++;
+    if (nout == 3) {
+      buf[--len] = ',';
+      nout = 0;
+    }
+  } while (len >= 0);
+}
+
+
+natural
+static_dnodes_for_area(area *a)
+{
+  if (a->low == tenured_area->low) {
+    return tenured_area->static_dnodes;
+  }
+  return 0;
+}
+
+Boolean GCDebug = false, GCverbose = false;
+bitvector GCmarkbits = NULL, GCdynamic_markbits = NULL;
+LispObj GCarealow = 0, GCareadynamiclow = 0;
+natural GCndnodes_in_area = 0, GCndynamic_dnodes_in_area = 0;
+LispObj GCweakvll = (LispObj)NULL;
+LispObj GCdwsweakvll = (LispObj)NULL;
+LispObj GCephemeral_low = 0;
+natural GCn_ephemeral_dnodes = 0;
+natural GCstack_limit = 0;
+
+void
+check_static_cons_freelist(char *phase)
+{
+  LispObj 
+    n,
+    base = (LispObj)static_cons_area->low, 
+    limit = static_cons_area->ndnodes;
+  natural i=0;
+
+  for (n=lisp_global(STATIC_CONSES);n!=lisp_nil;n=((cons *)untag(n))->cdr, i++) {
+    if ((fulltag_of(n) != fulltag_cons) ||
+        (area_dnode(n,base) >= limit)) {
+      Bug(NULL, "%s: static cons freelist has invalid element 0x" LISP "\n",
+          phase, i);
+    }
+  }
+}
+
+void
+reapweakv(LispObj weakv)
+{
+  /*
+    element 2 of the weak vector should be tagged as a cons: if it
+    isn't, just mark it as a root.  if it is, cdr through it until a
+    "marked" cons is encountered.  If the car of any unmarked cons is
+    marked, mark the cons which contains it; otherwise, splice the
+    cons out of the list.  N.B. : elements 0 and 1 are already marked
+    (or are immediate, etc.)
+  */
+  LispObj *prev = ((LispObj *) ptr_from_lispobj(untag(weakv))+(1+2)), cell = *prev;
+  LispObj termination_list = lisp_nil;
+  natural weak_type = (natural) deref(weakv,2);
+  Boolean alistp = ((weak_type & population_type_mask) == population_weak_alist),
+    terminatablep = ((weak_type >> population_termination_bit) != 0);
+  Boolean done = false;
+  cons *rawcons;
+  natural dnode, car_dnode;
+  bitvector markbits = GCmarkbits;
+
+  if (terminatablep) {
+    termination_list = deref(weakv,1+3);
+  }
+
+  if (fulltag_of(cell) != fulltag_cons) {
+    mark_root(cell);
+  } else if (alistp) {
+    /* weak alist */
+    while (! done) {
+      dnode = gc_area_dnode(cell);
+      if ((dnode >= GCndnodes_in_area) ||
+          (ref_bit(markbits, dnode))) {
+        done = true;
+      } else {
+        /* Cons cell is unmarked. */
+        LispObj alist_cell, thecar;
+        unsigned cell_tag;
+
+        rawcons = (cons *) ptr_from_lispobj(untag(cell));
+        alist_cell = rawcons->car;
+        cell_tag = fulltag_of(alist_cell);
+
+        if ((cell_tag == fulltag_cons) &&
+            ((car_dnode = gc_area_dnode(alist_cell)) < GCndnodes_in_area) &&
+            (! ref_bit(markbits, car_dnode)) &&
+            (is_node_fulltag(fulltag_of(thecar = car(alist_cell)))) &&
+            ((car_dnode = gc_area_dnode(thecar)) < GCndnodes_in_area) &&
+            (! ref_bit(markbits, car_dnode))) {
+          *prev = rawcons->cdr;
+          if (terminatablep) {
+            rawcons->cdr = termination_list;
+            termination_list = cell;
+          }
+        } else {
+          set_bit(markbits, dnode);
+          prev = (LispObj *)(&(rawcons->cdr));
+          mark_root(alist_cell);
+        }
+        cell = *prev;
+      }
+    }
+  } else {
+    /* weak list */
+    while (! done) {
+      dnode = gc_area_dnode(cell);
+      if ((dnode >= GCndnodes_in_area) ||
+          (ref_bit(markbits, dnode))) {
+        done = true;
+      } else {
+        /* Cons cell is unmarked. */
+        LispObj thecar;
+        unsigned cartag;
+
+        rawcons = (cons *) ptr_from_lispobj(untag(cell));
+        thecar = rawcons->car;
+        cartag = fulltag_of(thecar);
+
+        if (is_node_fulltag(cartag) &&
+            ((car_dnode = gc_area_dnode(thecar)) < GCndnodes_in_area) &&
+            (! ref_bit(markbits, car_dnode))) {
+          *prev = rawcons->cdr;
+          if (terminatablep) {
+            rawcons->cdr = termination_list;
+            termination_list = cell;
+          }
+        } else {
+          set_bit(markbits, dnode);
+          prev = (LispObj *)(&(rawcons->cdr));
+        }
+        cell = *prev;
+      }
+    }
+  }
+
+  if (terminatablep) {
+    deref(weakv,1+3) = termination_list;
+  }
+  if (termination_list != lisp_nil) {
+    deref(weakv,1) = GCweakvll;
+    GCweakvll = untag(weakv);
+  } else {
+    deref(weakv,1) = lisp_global(WEAKVLL);
+    lisp_global(WEAKVLL) = untag(weakv);
+  }
+}
+
+/* 
+  Screw: doesn't deal with finalization.
+  */
+
+void
+reaphashv(LispObj hashv)
+{
+  hash_table_vector_header
+    *hashp = (hash_table_vector_header *) ptr_from_lispobj(untag(hashv));
+  natural
+    dnode;
+  signed_natural
+    npairs = (header_element_count(hashp->header) - 
+              (hash_table_vector_header_count -1)) >> 1;
+  LispObj *pairp = (LispObj*) (hashp+1), weakelement;
+  int weak_index = (((hashp->flags & nhash_weak_value_mask) == 0) ? 0 : 1);
+  Boolean
+    keys_frozen = ((hashp->flags & nhash_keys_frozen_mask) != 0);
+  bitvector markbits = GCmarkbits;
+  int tag;
+
+  natural *tenured_low = (LispObj *)tenured_area->low;
+  natural tenured_dnodes = area_dnode(GCarealow, tenured_low);
+  natural memo_dnode = area_dnode(ptr_to_lispobj(pairp+weak_index), tenured_low);
+  Boolean
+    hashv_tenured = (memo_dnode < tenured_dnodes);
+  natural bits, bitidx, *bitsp;
+
+  if (hashv_tenured) {
+    set_bitidx_vars(tenured_area->refbits, memo_dnode, bitsp, bits, bitidx);
+  }
+
+  while (true) {
+    if (hashv_tenured) {
+      while (bits == 0) {
+        int skip = nbits_in_word - bitidx;
+        npairs -= skip;
+        if (npairs <= 0) break;
+        pairp += (skip+skip);
+        bitidx = 0;
+        bits = *++bitsp;
+      }
+      if (bits != 0) {
+        int skip = (count_leading_zeros(bits) - bitidx);
+        if (skip != 0) {
+          npairs -= skip;
+          pairp += (skip+skip);
+          bitidx += skip;
+        }
+      }
+    }
+
+    if (npairs <= 0) break;
+
+    weakelement = pairp[weak_index];
+    tag = fulltag_of(weakelement);
+    if (is_node_fulltag(tag)) {
+      dnode = gc_area_dnode(weakelement);
+      if ((dnode < GCndnodes_in_area) && 
+          ! ref_bit(markbits, dnode)) {
+        pairp[0] = slot_unbound;
+        if (keys_frozen) {
+          if (pairp[1] != slot_unbound) {
+            pairp[1] = unbound;
+          }
+        }
+        else {
+          pairp[1] = lisp_nil;
+        }
+        hashp->weak_deletions_count += (1<<fixnumshift);
+      }
+    }
+    pairp += 2;
+    --npairs;
+  }
+  deref(hashv, 1) = lisp_global(WEAKVLL);
+  lisp_global(WEAKVLL) = untag(hashv);
+}
+
+void
+traditional_dws_mark_htabv(LispObj htabv)
+{
+  /* Do nothing, just add htabv to GCweakvll */
+  LispObj *base = (LispObj *) ptr_from_lispobj(untag(htabv));
+
+  base[1] = GCweakvll;
+  GCweakvll = ptr_to_lispobj(base);
+}
+
+void
+ncircle_dws_mark_htabv(LispObj htabv)
+{
+  /* Do nothing, just add htabv to GCdwsweakvll */
+  deref(htabv,1) = GCdwsweakvll;
+  GCdwsweakvll = htabv;
+}
+
+void
+traditional_mark_weak_htabv(LispObj htabv)
+{
+  int i, skip = hash_table_vector_header_count;
+  LispObj *base = (LispObj *) ptr_from_lispobj(untag(htabv));
+
+  for (i = 2; i <= skip; i++) {
+    rmark(base[i]);
+  }
+  base[1] = GCweakvll;
+  GCweakvll = ptr_to_lispobj(base);
+}
+
+void
+ncircle_mark_weak_htabv(LispObj htabv)
+{
+  int i, skip = hash_table_vector_header_count;
+  hash_table_vector_header *hashp = (hash_table_vector_header *)(untag(htabv));
+  natural
+    npairs = (header_element_count(hashp->header) - 
+              (hash_table_vector_header_count - 1)) >> 1;
+  LispObj *pairp = (LispObj*) (hashp+1);
+  Boolean 
+    weak_on_value = ((hashp->flags & nhash_weak_value_mask) != 0);
+
+
+  for (i = 2; i <= skip; i++) {
+    rmark(deref(htabv,i));
+  }
+  
+  if (!weak_on_value) {
+    pairp++;
+  }
+  /* unconditionally mark the non-weak element of each pair */
+  while (npairs--) {
+    rmark(*pairp);
+    pairp += 2;
+  }
+  deref(htabv,1)  = GCweakvll;
+  GCweakvll = (LispObj)untag(htabv);
+}
+
+
+Boolean
+mark_weak_hash_vector(hash_table_vector_header *hashp, natural elements)
+{
+  natural flags = hashp->flags, weak_dnode, nonweak_dnode;
+  Boolean 
+    marked_new = false, 
+    weak_marked;
+  int non_weak_index = (((flags & nhash_weak_value_mask) != 0) ? 0 : 1);
+  int 
+    skip = hash_table_vector_header_count-1,
+    weak_tag,
+    nonweak_tag,
+    i;
+  signed_natural
+    npairs = (elements - skip) >> 1;
+  LispObj 
+    *pairp = (LispObj*) (hashp+1),
+    weak,
+    nonweak;
+
+  natural *tenured_low = (LispObj *)tenured_area->low;
+  natural tenured_dnodes = area_dnode(GCarealow, tenured_low);
+  natural memo_dnode = area_dnode(ptr_to_lispobj(pairp+non_weak_index), tenured_low);
+  Boolean hashv_tenured = (memo_dnode < tenured_dnodes);
+  natural bits, bitidx, *bitsp;
+
+  if (hashv_tenured) {
+    set_bitidx_vars(tenured_area->refbits, memo_dnode, bitsp, bits, bitidx);
+  }
+
+  /* Mark everything in the header */
+  
+  for (i = 2; i<= skip; i++) {
+    mark_root(deref(ptr_to_lispobj(hashp),i));
+  }
+
+  while (true) {
+    if (hashv_tenured) {
+      while (bits == 0) {
+        int skip = nbits_in_word - bitidx;
+        npairs -= skip;
+        if (npairs <= 0) break;
+        pairp += (skip+skip);
+        bitidx = 0;
+        bits = *++bitsp;
+      }
+      if (bits != 0) {
+        int skip = count_leading_zeros(bits) - bitidx;
+        if (skip != 0) {
+          npairs -= skip;
+          pairp += (skip+skip);
+          bitidx += skip;
+        }
+      }
+    }
+    if (npairs <= 0) break;
+
+    nonweak = pairp[non_weak_index];
+    weak = pairp[1-non_weak_index];
+
+    nonweak_tag = fulltag_of(nonweak);
+    if (is_node_fulltag(nonweak_tag)) {
+      nonweak_dnode = gc_area_dnode(nonweak);
+      if ((nonweak_dnode < GCndnodes_in_area) &&
+          ! ref_bit(GCmarkbits,nonweak_dnode)) {
+        weak_marked = true;
+        weak_tag = fulltag_of(weak);
+        if (is_node_fulltag(weak_tag)) {
+          weak_dnode = gc_area_dnode(weak);
+          if ((weak_dnode < GCndnodes_in_area) &&
+              ! ref_bit(GCmarkbits, weak_dnode)) {
+            weak_marked = false;
+          }
+        }
+        if (weak_marked) {
+          mark_root(nonweak);
+          marked_new = true;
+        }
+      }
+    }
+
+    pairp+=2;
+    --npairs;
+  }
+  return marked_new;
+}
+
+
+Boolean
+mark_weak_alist(LispObj weak_alist, int weak_type)
+{
+  natural
+    elements = header_element_count(header_of(weak_alist)),
+    dnode;
+  int pair_tag;
+  Boolean marked_new = false;
+  LispObj alist, pair, key, value;
+  bitvector markbits = GCmarkbits;
+
+  if (weak_type >> population_termination_bit) {
+    elements -= 1;
+  }
+  for(alist = deref(weak_alist, elements);
+      (fulltag_of(alist) == fulltag_cons) &&
+      ((dnode = gc_area_dnode(alist)) < GCndnodes_in_area) &&
+      (! ref_bit(markbits,dnode));
+      alist = cdr(alist)) {
+    pair = car(alist);
+    pair_tag = fulltag_of(pair);
+    if ((is_node_fulltag(pair_tag)) &&
+        ((dnode = gc_area_dnode(pair_tag)) < GCndnodes_in_area) &&
+        (! ref_bit(markbits,dnode))) {
+      if (pair_tag == fulltag_cons) {
+        key = car(pair);
+        if ((! is_node_fulltag(fulltag_of(key))) ||
+            ((dnode = gc_area_dnode(key)) >= GCndnodes_in_area) ||
+            ref_bit(markbits,dnode)) {
+          /* key is marked, mark value if necessary */
+          value = cdr(pair);
+          if (is_node_fulltag(fulltag_of(value)) &&
+              ((dnode = gc_area_dnode(value)) < GCndnodes_in_area) &&
+              (! ref_bit(markbits,dnode))) {
+            mark_root(value);
+            marked_new = true;
+          }
+        }
+      } else {
+          mark_root(pair);
+          marked_new = true;
+      }
+    }
+  }
+  return marked_new;
+}
+  
+void
+mark_termination_lists()
+{
+  /* 
+     Mark the termination lists in all terminatable weak vectors, which
+     are now linked together on GCweakvll, and add them to WEAKVLL,
+     which already contains all other weak vectors.
+  */
+  LispObj pending = GCweakvll,
+          *base = (LispObj *)NULL;
+
+  while (pending) {
+    base = ptr_from_lispobj(pending);
+    pending = base[1];
+
+    mark_root(base[1+3]);
+  }
+  if (base) {
+    base[1] = lisp_global(WEAKVLL);
+    lisp_global(WEAKVLL) = GCweakvll;
+  }
+
+}
+
+
+void
+traditional_markhtabvs()
+{
+  LispObj *base, this, header, pending;
+  int subtag;
+  hash_table_vector_header *hashp;
+  Boolean marked_new;
+
+  do {
+    pending = (LispObj) NULL;
+    marked_new = false;
+    
+    while (GCweakvll) {
+      base = ptr_from_lispobj(GCweakvll);
+      GCweakvll = base[1];
+      
+      header = base[0];
+      subtag = header_subtag(header);
+      
+      if (subtag == subtag_weak) {
+        natural weak_type = base[2];
+        this = ptr_to_lispobj(base) + fulltag_misc;
+        base[1] = pending;
+        pending = ptr_to_lispobj(base);
+        if ((weak_type & population_type_mask) == population_weak_alist) {
+          if (mark_weak_alist(this, weak_type)) {
+            marked_new = true;
+          }
+        }
+      } else if (subtag == subtag_hash_vector) {
+        natural elements = header_element_count(header);
+
+        hashp = (hash_table_vector_header *) base;
+        if (hashp->flags & nhash_weak_mask) {
+          base[1] = pending;
+          pending = ptr_to_lispobj(base);
+          if (mark_weak_hash_vector(hashp, elements)) {
+            marked_new = true;
+          }
+        } 
+      } else {
+        Bug(NULL, "Strange object on weak vector linked list: " LISP "\n", base);
+      }
+    }
+
+    if (marked_new) {
+      GCweakvll = pending;
+    }
+  } while (marked_new);
+
+  /* Now, everything's marked that's going to be,  and "pending" is a list
+     of populations and weak hash tables.  CDR down that list and free
+     anything that isn't marked.
+     */
+
+  while (pending) {
+    base = ptr_from_lispobj(pending);
+    pending = base[1];
+    base[1] = (LispObj)NULL;
+
+    this = ptr_to_lispobj(base) + fulltag_misc;
+
+    subtag = header_subtag(base[0]);
+    if (subtag == subtag_weak) {
+      reapweakv(this);
+    } else {
+      reaphashv(this);
+    }
+  }
+  mark_termination_lists();
+}
+
+void
+ncircle_markhtabvs()
+{
+  LispObj *base, this, header, pending = 0;
+  int subtag;
+
+  /* First, process any weak hash tables that may have
+     been encountered by the link-inverting marker; we
+     should have more stack space now. */
+
+  while (GCdwsweakvll) {
+    this = GCdwsweakvll;
+    GCdwsweakvll = deref(this,1);
+    ncircle_mark_weak_htabv(this);
+  }
+
+  while (GCweakvll) {
+    base = ptr_from_lispobj(GCweakvll);
+    GCweakvll = base[1];
+    base[1] = (LispObj)NULL;
+
+    this = ptr_to_lispobj(base) + fulltag_misc;
+
+    header = base[0];
+    subtag = header_subtag(header);
+      
+    if (subtag == subtag_weak) {
+      natural weak_type = base[2];
+      base[1] = pending;
+      pending = ptr_to_lispobj(base);
+      if ((weak_type & population_type_mask) == population_weak_alist) {
+        mark_weak_alist(this, weak_type);
+      }
+    } else if (subtag == subtag_hash_vector) {
+      reaphashv(this);
+    }
+  }
+
+  /* Now, everything's marked that's going to be,  and "pending" is a list
+     of populations.  CDR down that list and free
+     anything that isn't marked.
+     */
+
+  while (pending) {
+    base = ptr_from_lispobj(pending);
+    pending = base[1];
+    base[1] = (LispObj)NULL;
+
+    this = ptr_to_lispobj(base) + fulltag_misc;
+
+    subtag = header_subtag(base[0]);
+    if (subtag == subtag_weak) {
+      reapweakv(this);
+    } else {
+      Bug(NULL, "Bad object on pending list: " LISP "\n", this);
+    }
+  }
+
+  mark_termination_lists();
+}
+
+void
+mark_tcr_tlb(TCR *tcr)
+{
+  natural n = tcr->tlb_limit;
+  LispObj 
+    *start = tcr->tlb_pointer,
+    *end = (LispObj *) ((BytePtr)start+n),
+    node;
+
+  while (start < end) {
+    node = *start;
+    if (node != no_thread_local_binding_marker) {
+      mark_root(node);
+    }
+    start++;
+  }
+}
+
+/*
+  Mark things that're only reachable through some (suspended) TCR.
+  (This basically means the tcr's gc_context and the exception
+  frames on its xframe_list.)
+*/
+
+void
+mark_tcr_xframes(TCR *tcr)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+
+  xp = tcr->gc_context;
+  if (xp) {
+#ifndef X8632
+    mark_xp(xp);
+#else
+    mark_xp(xp, tcr->node_regs_mask);
+#endif
+  }
+#ifdef X8632
+  mark_root(tcr->save0);
+  mark_root(tcr->save1);
+  mark_root(tcr->save2);
+  mark_root(tcr->save3);
+  mark_root(tcr->next_method_context);
+#endif
+  
+  for (xframes = (xframe_list *) tcr->xframe; 
+       xframes; 
+       xframes = xframes->prev) {
+#ifndef X8632
+      mark_xp(xframes->curr);
+#else
+      mark_xp(xframes->curr, xframes->node_regs_mask);
+#endif
+  }
+}
+      
+
+void *postGCptrs = NULL;
+struct xmacptr *user_postGC_macptrs = NULL;
+
+
+void
+postGCfree(void *p)
+{
+  *(void **)p = postGCptrs;
+  postGCptrs = p;
+}
+
+void
+postGCfreexmacptr(struct xmacptr *p)
+{
+  p->class = (LispObj) user_postGC_macptrs;
+  user_postGC_macptrs = p;
+}
+
+
+xmacptr_dispose_fn xmacptr_dispose_functions[xmacptr_flag_user_last-xmacptr_flag_user_first];
+
+
+
+void
+freeGCptrs()
+{
+  void *p, *next, *addr;
+  struct xmacptr *x, *xnext;
+  int i, flags;
+  xmacptr_dispose_fn dfn;
+
+  for (p = postGCptrs; p; p = next) {
+    next = *((void **)p);
+    free(p);
+  }
+  postGCptrs = NULL;
+  
+  for (x = user_postGC_macptrs; x; x = xnext) {
+    xnext = (xmacptr *) (x->class);
+    flags = x->flags - xmacptr_flag_user_first;
+    dfn = xmacptr_dispose_functions[flags];
+    addr = (void *) x->address;
+    x->address = 0;
+    x->flags = 0;
+    x->link = 0;
+    x->class = 0;
+    if (dfn && addr) {
+      dfn(addr);
+    }
+  }
+
+  user_postGC_macptrs = NULL;
+}
+
+int
+register_xmacptr_dispose_function(void *dfn)
+{
+  int i, k;
+  
+  for( i = 0, k = xmacptr_flag_user_first; k < xmacptr_flag_user_last; i++, k++) {
+    if (xmacptr_dispose_functions[i]==NULL) {
+      xmacptr_dispose_functions[i] = dfn;
+      return k;
+    }
+    if (xmacptr_dispose_functions[i] == dfn) {
+      return k;
+    }
+  }
+  return 0;
+}
+
+void
+reap_gcable_ptrs()
+{
+  LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next, ptr;
+  xmacptr_flag flag;
+  natural dnode;
+  xmacptr *x;
+
+  while((next = *prev) != (LispObj)NULL) {
+    dnode = gc_area_dnode(next);
+    x = (xmacptr *) ptr_from_lispobj(untag(next));
+
+    if ((dnode >= GCndnodes_in_area) ||
+        (ref_bit(GCmarkbits,dnode))) {
+      prev = &(x->link);
+    } else {
+      *prev = x->link;
+      flag = (xmacptr_flag)(x->flags);
+      ptr = x->address;
+
+      if (ptr) {
+        switch (flag) {
+        case xmacptr_flag_recursive_lock:
+	  destroy_recursive_lock((RECURSIVE_LOCK)ptr_from_lispobj(ptr));
+          break;
+
+        case xmacptr_flag_ptr:
+	  postGCfree((void *)ptr_from_lispobj(ptr));
+          break;
+
+        case xmacptr_flag_rwlock:
+          rwlock_destroy((rwlock *)ptr_from_lispobj(ptr));
+          break;
+
+        case xmacptr_flag_semaphore:
+	  destroy_semaphore((void**)&(x->address));
+          break;
+
+        default:
+          if ((flag >= xmacptr_flag_user_first) &&
+              (flag < xmacptr_flag_user_last)) {
+            set_n_bits(GCmarkbits,dnode,3);
+            postGCfreexmacptr(x);
+            break;
+          }
+          /* (warn "unknown xmacptr_flag: ~s" flag) */
+          /* Unknowd, and perhaps unknowdable. */
+          /* Fall in: */
+        case xmacptr_flag_none:
+          break;
+        }
+      }
+    }
+  }
+}
+
+
+
+#if  WORD_SIZE == 64
+unsigned short *_one_bits = NULL;
+
+unsigned short
+logcount16(unsigned short n)
+{
+  unsigned short c=0;
+  
+  while(n) {
+    n = n & (n-1);
+    c++;
+  }
+  return c;
+}
+
+void
+gc_init()
+{
+  int i;
+  
+  _one_bits = malloc(sizeof(unsigned short) * (1<<16));
+
+  for (i = 0; i < (1<<16); i++) {
+    _one_bits[i] = dnode_size*logcount16(i);
+  }
+}
+
+
+#else
+const unsigned char _one_bits[256] = {
+    0*8,1*8,1*8,2*8,1*8,2*8,2*8,3*8,1*8,2*8,2*8,3*8,2*8,3*8,3*8,4*8,
+    1*8,2*8,2*8,3*8,2*8,3*8,3*8,4*8,2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,
+    1*8,2*8,2*8,3*8,2*8,3*8,3*8,4*8,2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,
+    2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,
+    1*8,2*8,2*8,3*8,2*8,3*8,3*8,4*8,2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,
+    2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,
+    2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,
+    3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,4*8,5*8,5*8,6*8,5*8,6*8,6*8,7*8,
+    1*8,2*8,2*8,3*8,2*8,3*8,3*8,4*8,2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,
+    2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,
+    2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,
+    3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,4*8,5*8,5*8,6*8,5*8,6*8,6*8,7*8,
+    2*8,3*8,3*8,4*8,3*8,4*8,4*8,5*8,3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,
+    3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,4*8,5*8,5*8,6*8,5*8,6*8,6*8,7*8,
+    3*8,4*8,4*8,5*8,4*8,5*8,5*8,6*8,4*8,5*8,5*8,6*8,5*8,6*8,6*8,7*8,
+    4*8,5*8,5*8,6*8,5*8,6*8,6*8,7*8,5*8,6*8,6*8,7*8,6*8,7*8,7*8,8*8
+};
+
+
+void
+gc_init()
+{
+}
+
+#endif
+
+
+weak_mark_fun dws_mark_weak_htabv = traditional_dws_mark_htabv;
+weak_mark_fun mark_weak_htabv = traditional_mark_weak_htabv;
+weak_process_fun markhtabvs = traditional_markhtabvs;
+
+void
+install_weak_mark_functions(natural set) {
+  switch(set) {
+  case 0:
+  default:
+    dws_mark_weak_htabv = traditional_dws_mark_htabv;
+    mark_weak_htabv = traditional_mark_weak_htabv;
+    markhtabvs = traditional_markhtabvs;
+    break;
+  case 1:
+    dws_mark_weak_htabv = ncircle_dws_mark_htabv;
+    mark_weak_htabv = ncircle_mark_weak_htabv;
+    markhtabvs = ncircle_markhtabvs;
+    break;
+  }
+}
+
+void
+init_weakvll ()
+{
+  LispObj this = lisp_global(WEAKVLL); /* all weak vectors as of last gc */
+
+  GCweakvll = (LispObj)NULL;
+  lisp_global(WEAKVLL) = (LispObj)NULL;
+
+  if (GCn_ephemeral_dnodes) {
+    /* For egc case, initialize GCweakvll with weak vectors not in the
+       GC area.  Weak vectors in the GC area will be added during marking.
+    */
+
+    LispObj *tenured_low = (LispObj *)tenured_area->low;
+    natural tenured_dnodes = area_dnode(GCarealow, tenured_low);
+    bitvector refbits = tenured_area->refbits;
+
+    while (this) {
+      LispObj *base = ptr_from_lispobj(this);
+      LispObj next = base[1];
+      natural dnode = gc_dynamic_area_dnode(this);
+      if (dnode < GCndynamic_dnodes_in_area) {
+        base[1] = (LispObj)NULL; /* drop it, might be garbage */
+      } else {
+        base[1] = GCweakvll;
+        GCweakvll = ptr_to_lispobj(base);
+        if (header_subtag(base[0]) == subtag_weak) {
+          dnode = area_dnode(&base[3], tenured_low);
+          if (dnode < tenured_dnodes) {
+            clr_bit(refbits, dnode); /* Don't treat population.data as root */
+          }
+        } else {
+          if (header_subtag(base[0]) != subtag_hash_vector)
+            Bug(NULL, "Unexpected entry " LISP " -> " LISP " on WEAKVLL", base, base[0]);
+          dnode = area_dnode(base, tenured_low);
+          if ((dnode < tenured_dnodes) && !ref_bit(refbits, dnode)) {
+            Boolean drop = true;
+            /* hash vectors get marked headers if they have any ephemeral keys */
+            /* but not if they have ephemeral values. */
+            if (((hash_table_vector_header *)base)->flags & nhash_weak_value_mask) {
+              signed_natural count = (header_element_count(base[0]) + 2) >> 1;
+              natural bits, bitidx, *bitsp;
+              set_bitidx_vars(refbits, dnode, bitsp, bits, bitidx);
+              while ((0 < count) && (bits == 0)) {
+                int skip = nbits_in_word - bitidx;
+                count -= skip;
+                bits = *++bitsp;
+                bitidx = 0;
+              }
+              count -=  (count_leading_zeros(bits) - bitidx);
+
+              if (0 < count) {
+                set_bit(refbits, dnode); /* has ephemeral values, mark header */
+                drop = false;
+              }
+            }
+            if (drop) { /* if nothing ephemeral, drop it from GCweakvll. */
+              GCweakvll = base[1];
+              base[1] = lisp_global(WEAKVLL);
+              lisp_global(WEAKVLL) = ptr_to_lispobj(base);
+            }
+          }
+        }
+      }
+      this = next;
+    }
+  }
+}
+
+  
+void
+preforward_weakvll ()
+{
+  /* reset population refbits for forwarding */
+  if (GCn_ephemeral_dnodes) {
+    LispObj this = lisp_global(WEAKVLL);
+    LispObj *tenured_low = (LispObj *)tenured_area->low;
+    natural tenured_dnodes = area_dnode(GCarealow, tenured_low);
+    bitvector refbits = tenured_area->refbits;
+
+    while (this) {
+      LispObj *base = ptr_from_lispobj(this);
+      if (header_subtag(base[0]) == subtag_weak) {
+        natural dnode = area_dnode(&base[3], tenured_low);
+        if (base[3] >= GCarealow) {
+          if (dnode < tenured_dnodes) {
+            set_bit(refbits, dnode);
+          }
+        }
+        /* might have set termination list to a new pointer */
+        if ((base[2] >> population_termination_bit) && (base[4] >= GCarealow)) {
+          if ((dnode + 1) < tenured_dnodes) {
+            set_bit(refbits, dnode+1);
+          }
+        }
+      }
+      this = base[1];
+    }
+  }
+}
+
+
+void
+forward_weakvll_links()
+{
+  LispObj *ptr = &(lisp_global(WEAKVLL)), this, new, old;
+
+  while (this = *ptr) {
+    old = this + fulltag_misc;
+    new = node_forwarding_address(old);
+    if (old != new) {
+      *ptr = untag(new);
+    }
+    ptr = &(deref(new,1));
+  }
+}
+
+
+
+
+
+LispObj
+node_forwarding_address(LispObj node)
+{
+  int tag_n;
+  natural dnode = gc_dynamic_area_dnode(node);
+
+  if ((dnode >= GCndynamic_dnodes_in_area) ||
+      (node < GCfirstunmarked)) {
+    return node;
+  }
+
+  tag_n = fulltag_of(node);
+  if (!is_node_fulltag(tag_n)) {
+    return node;
+  }
+
+  return dnode_forwarding_address(dnode, tag_n);
+}
+
+/*
+  Replace *noderef with its forwarding address if the referenced
+  object moved during compaction.  Returns true iff the slot was
+  actually updated.
+*/
+Boolean
+update_noderef(LispObj *noderef)
+{
+  LispObj current = *noderef;
+  LispObj forwarded = node_forwarding_address(current);
+
+  if (forwarded == current) {
+    return false;
+  }
+  *noderef = forwarded;
+  return true;
+}
+
+/*
+  Replace *locref (a locative, i.e. an untagged pointer into an
+  object) with its forwarding address if the target moved.
+*/
+void
+update_locref(LispObj *locref)
+{
+  LispObj current = *locref;
+  LispObj forwarded = locative_forwarding_address(current);
+
+  if (forwarded != current) {
+    *locref = forwarded;
+  }
+}
+
+/*
+  Forward the GCABLE_POINTERS list (xmacptrs whose dispose functions
+  may need to run after GC), linked through each xmacptr's 'link'
+  slot, then the user_postGC_macptrs chain, which holds untagged
+  (locative) pointers linked through the 'class' slot.
+*/
+void
+forward_gcable_ptrs()
+{
+  LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next, new;
+  struct xmacptr **xprev, *xnext, *xnew;
+
+  while ((next = *prev) != (LispObj)NULL) {
+    new = node_forwarding_address(next);
+    if (new != next) {
+      *prev = new;
+    }
+    /* chase the chain through the pre-forwarding address */
+    prev = &(((xmacptr *)ptr_from_lispobj(untag(next)))->link);
+  }
+  xprev = &user_postGC_macptrs;
+  /* explicit != NULL, matching the loop above (was a bare
+     assignment-in-condition, a -Wparentheses hazard) */
+  while ((xnext = *xprev) != NULL) {
+    xnew = (struct xmacptr *)locative_forwarding_address((LispObj)xnext);
+    if (xnew != xnext) {
+      *xprev = xnew;
+    }
+    xprev = (struct xmacptr **)(&(xnext->class));
+  }
+}
+
+/*
+  Walk the first num_memo_dnodes dnodes of area 'a' under its refbits
+  ("memoization" map), replacing each memoized node with its
+  forwarding address.  When a key moves inside a hash-table vector
+  that has nhash_track_keys_mask set, also set nhash_key_moved_mask
+  in that vector's flags so Lisp will rehash it.
+*/
+void
+forward_memoized_area(area *a, natural num_memo_dnodes)
+{
+  bitvector refbits = a->refbits;
+  LispObj *p = (LispObj *) a->low, x1, x2, new;
+  natural bits, bitidx, *bitsp, nextbit, diff, memo_dnode = 0, hash_dnode_limit = 0;
+  int tag_x1;
+  hash_table_vector_header *hashp = NULL;
+  Boolean header_p;
+
+  if (num_memo_dnodes) {
+    if (GCDebug) {
+      check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
+    }
+
+    /* This is pretty straightforward, but we have to note
+       when we move a key in a hash table vector that wants
+       us to tell it about that. */
+
+    set_bitidx_vars(refbits, 0, bitsp, bits, bitidx);
+    while (memo_dnode < num_memo_dnodes) {
+      if (bits == 0) {
+        /* no set bits left in this refmap word: skip to the next word */
+        int remain = nbits_in_word - bitidx;
+        memo_dnode += remain;
+        p += (remain+remain);
+        bits = *++bitsp;
+        bitidx = 0;
+      } else {
+        /* advance to the next set bit (the next memoized dnode) */
+        nextbit = count_leading_zeros(bits);
+        if ((diff = (nextbit - bitidx)) != 0) {
+          memo_dnode += diff;
+          bitidx = nextbit;
+          p += (diff+diff);
+        }
+        x1 = p[0];
+        x2 = p[1];
+        tag_x1 = fulltag_of(x1);
+        bits &= ~(BIT0_MASK >> bitidx);
+        header_p = (nodeheader_tag_p(tag_x1));
+
+        if (header_p &&
+            (header_subtag(x1) == subtag_hash_vector)) {
+          /* entering a hash vector: remember where its pairs end if it
+             wants key-movement tracking */
+          hashp = (hash_table_vector_header *) p;
+          if (hashp->flags & nhash_track_keys_mask) {
+            hash_dnode_limit = memo_dnode + ((header_element_count(x1)+2)>>1);
+          } else {
+            hashp = NULL;
+          }
+        }
+
+
+        if (! header_p) {
+          new = node_forwarding_address(x1);
+          if (new != x1) {
+            *p = new;
+          }
+        }
+        p++;
+
+        new = node_forwarding_address(x2);
+        if (new != x2) {
+          *p = new;
+          if (memo_dnode < hash_dnode_limit) {
+            /* If this code is reached, 'hashp' is non-NULL and pointing
+               at the header of a hash_table_vector, and 'memo_dnode' identifies
+               a pair of words inside the hash_table_vector.  It may be
+               hard for program analysis tools to recognize that, but I
+               believe that warnings about 'hashp' being NULL here can
+               be safely ignored. */
+            hashp->flags |= nhash_key_moved_mask;
+            hash_dnode_limit = 0;
+            hashp = NULL;
+          }
+        }
+        p++;
+        memo_dnode++;
+        bitidx++;
+
+      }
+    }
+  }
+}
+
+/*
+  Update every entry of a thread's thread-local-binding vector to its
+  post-GC forwarding address.  Slots holding the
+  no_thread_local_binding_marker are left untouched.
+*/
+void
+forward_tcr_tlb(TCR *tcr)
+{
+  LispObj *slot = tcr->tlb_pointer;
+  LispObj *limit = (LispObj *) ((BytePtr)slot + tcr->tlb_limit);
+
+  for (; slot < limit; slot++) {
+    if (*slot != no_thread_local_binding_marker) {
+      update_noderef(slot);
+    }
+  }
+}
+
+void
+reclaim_static_dnodes()
+{
+  natural nstatic = tenured_area->static_dnodes, 
+    i, 
+    bits, 
+    bitnum,
+    nfree = 0,
+    nstatic_conses = area_dnode(static_cons_area->high, static_cons_area->low);
+  cons *c = (cons *)tenured_area->low, *d;
+  bitvector bitsp = GCmarkbits;
+  LispObj head = lisp_global(STATIC_CONSES);
+
+  for (i = 0; i < nstatic; i+= nbits_in_word, c+= nbits_in_word) {
+    bits = *bitsp++;
+    if (bits != ALL_ONES) {
+      for (bitnum = 0; bitnum < nbits_in_word; bitnum++) {
+        if (! (bits & (BIT0_MASK>>bitnum))) {
+          d = c + bitnum;
+          if (i < nstatic_conses) {                
+            d->car = unbound;
+            d->cdr = head;
+            head = ((LispObj)d)+fulltag_cons;
+            nfree++;
+          } else {
+            d->car = 0;
+            d->cdr = 0;
+          }
+        }
+      }
+    }
+  }
+  lisp_global(STATIC_CONSES) = head;
+  lisp_global(FREE_STATIC_CONSES)+=(nfree<<fixnumshift);
+}
+
+/*
+  True iff area 'a' is non-empty (active != high) and every younger
+  area in its chain is empty.
+*/
+Boolean
+youngest_non_null_area_p (area *a)
+{
+  area *younger;
+
+  if (a->active == a->high) {
+    return false;               /* 'a' itself is empty */
+  }
+  for (younger = a->younger; younger != NULL; younger = younger->younger) {
+    if (younger->active != younger->high) {
+      return false;             /* some younger area is non-empty */
+    }
+  }
+  return true;
+}
+
+/* When set, gc() clears it and skips the pre-GC integrity check for
+   that one collection.  (Presumably set after purify() -- the setter
+   is not in this file; confirm.) */
+Boolean just_purified_p = false;
+
+/*
+  All thread's stack areas have been "normalized", as
+  has the dynamic heap.  (The "active" pointer in these areas
+  matches the stack pointer/freeptr value at the time that
+  the exception occurred.)
+*/
+
+#define get_time(when) gettimeofday(&when, NULL)
+
+
+
+#ifdef FORCE_DWS_MARK
+#warning recursive marker disabled for testing; remember to re-enable it
+#endif
+
+
+/*
+  Examine one word of a memoized dnode in static space.  If 'n' is a
+  node reference into the dynamic range [dynamic_start,
+  dynamic_start + ndynamic_dnodes dnodes), mark it as a GC root and
+  return true (the caller must keep the refbit).  Hash-vector headers
+  also return true so their dnode's refbit survives; other headers,
+  immediates, and non-dynamic references return false.
+*/
+Boolean
+mark_static_ref(LispObj n, BytePtr dynamic_start, natural ndynamic_dnodes)
+{
+  int tag_n = fulltag_of(n);
+  natural dyn_dnode;
+
+  if (nodeheader_tag_p(tag_n)) {
+    return (header_subtag(n) == subtag_hash_vector);
+  }
+ 
+  if (is_node_fulltag (tag_n)) {
+    dyn_dnode = area_dnode(n, dynamic_start);
+    if (dyn_dnode < ndynamic_dnodes) {
+      mark_root(n);             /* May or may not mark it */
+      return true;              /* but return true 'cause it's a dynamic node */
+    }
+  }
+  return false;                 /* Not a heap pointer or not dynamic */
+}
+
+/*
+  Scan the managed static area's refmap: each set bit identifies a
+  dnode that may reference the dynamic heap.  Mark such references as
+  roots (via mark_static_ref) and clear the refbit when neither word
+  of the dnode still needs it.
+
+  NOTE(review): if the scan ends partway through a refmap word, the
+  pending 'outbits' value is never stored back to *bitsp (the store
+  only happens when a word is exhausted).  That leaves some refbits
+  conservatively set rather than cleared -- confirm this is intended.
+*/
+void
+mark_managed_static_refs(area *a, BytePtr low_dynamic_address, natural ndynamic_dnodes)
+{
+  bitvector refbits = a->refbits;
+  LispObj *p = (LispObj *) a->low, x1, x2;
+  natural inbits, outbits, bits, bitidx, *bitsp, nextbit, diff, memo_dnode = 0,
+    num_memo_dnodes = a->ndnodes;
+  Boolean keep_x1, keep_x2;
+
+  if (num_memo_dnodes) {
+    if (GCDebug) {
+      check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
+    }
+
+ 
+    set_bitidx_vars(refbits, 0, bitsp, bits, bitidx);
+    inbits = outbits = bits;
+    while (memo_dnode < num_memo_dnodes) {
+      if (bits == 0) {
+        /* word exhausted: write back any cleared bits, move on */
+        int remain = nbits_in_word - bitidx;
+        memo_dnode += remain;
+        p += (remain+remain);
+        if (outbits != inbits) {
+          *bitsp = outbits;
+        }
+        bits = *++bitsp;
+        inbits = outbits = bits;
+        bitidx = 0;
+      } else {
+        /* advance to the next set refbit */
+        nextbit = count_leading_zeros(bits);
+        if ((diff = (nextbit - bitidx)) != 0) {
+          memo_dnode += diff;
+          bitidx = nextbit;
+          p += (diff+diff);
+        }
+        x1 = *p++;
+        x2 = *p++;
+        bits &= ~(BIT0_MASK >> bitidx);
+        keep_x1 = mark_static_ref(x1, low_dynamic_address, ndynamic_dnodes);
+        keep_x2 = mark_static_ref(x2, low_dynamic_address, ndynamic_dnodes);
+        if ((keep_x1 == false) && 
+            (keep_x2 == false)) {
+          /* neither word references the dynamic heap: clear the refbit */
+          outbits &= ~(BIT0_MASK >> bitidx);
+        }
+        memo_dnode++;
+        bitidx++;
+      }
+    }
+    if (GCDebug) {
+      p = (LispObj *) a->low;
+      check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
+    }
+  }
+}
+
+/*
+  Garbage-collect the dynamic heap.  'tcr' is the thread that
+  triggered the collection; all threads' stacks and the dynamic heap
+  have already been normalized.  'param' is part of the
+  gc_like_from_xp calling convention and is unused here.
+
+  If the EGC is active (OLDEST_EPHEMERAL non-zero) this collects one
+  ephemeral generation into the next older one; otherwise it's a full
+  GC of the dynamic area.
+*/
+void
+gc(TCR *tcr, signed_natural param)
+{
+  struct timeval start, stop;
+  area *a = active_dynamic_area, *to = NULL, *from = NULL, *note = NULL;
+  unsigned timeidx = 1;
+  paging_info paging_info_start;
+  LispObj
+    pkg = 0,
+    itabvec = 0;
+  BytePtr oldfree = a->active;
+  TCR *other_tcr;
+  natural static_dnodes;
+  natural weak_method = lisp_global(WEAK_GC_METHOD) >> fixnumshift;
+
+#ifndef FORCE_DWS_MARK
+  /* Let the recursive marker run until the control stack is within a
+     page of its soft limit; past that, the link-inverting marker is
+     used instead. */
+  if ((natural) (tcr->cs_limit) == CS_OVERFLOW_FORCE_LIMIT) {
+    GCstack_limit = CS_OVERFLOW_FORCE_LIMIT;
+  } else {
+    GCstack_limit = (natural)(tcr->cs_limit)+(natural)page_size;
+  }
+#else
+  GCstack_limit = CS_OVERFLOW_FORCE_LIMIT;
+#endif
+
+  GCephemeral_low = lisp_global(OLDEST_EPHEMERAL);
+  if (GCephemeral_low) {
+    GCn_ephemeral_dnodes=area_dnode(oldfree, GCephemeral_low);
+  } else {
+    GCn_ephemeral_dnodes = 0;
+  }
+  
+  if (GCn_ephemeral_dnodes) {
+    GCverbose = ((nrs_GC_EVENT_STATUS_BITS.vcell & egc_verbose_bit) != 0);
+  } else {
+    GCverbose = ((nrs_GC_EVENT_STATUS_BITS.vcell & gc_verbose_bit) != 0);
+  }
+
+  /* Pick the generation to collect into ('to'), the generation being
+     vacated ('from'), and the area whose gccount is bumped ('note'). */
+  if (GCephemeral_low) {
+    if ((oldfree-g1_area->low) < g1_area->threshold) {
+      to = g1_area;
+      note = a;
+      timeidx = 4;
+    } else if ((oldfree-g2_area->low) < g2_area->threshold) {
+      to = g2_area;
+      from = g1_area;
+      note = g1_area;
+      timeidx = 3;
+    } else {
+      to = tenured_area;
+      from = g2_area;
+      note = g2_area;
+      timeidx = 2;
+    } 
+  } else {
+    note = tenured_area;
+  }
+
+  install_weak_mark_functions(weak_method);
+  
+  if (GCverbose) {
+    char buf[16];
+
+    sample_paging_info(&paging_info_start);
+    comma_output_decimal(buf,16,area_dnode(oldfree,a->low) << dnode_shift);
+    if (GCephemeral_low) {
+      fprintf(dbgout,
+              "\n\n;;; Starting Ephemeral GC of generation %d",
+              (from == g2_area) ? 2 : (from == g1_area) ? 1 : 0); 
+    } else {
+      fprintf(dbgout,"\n\n;;; Starting full GC");
+    }
+    fprintf(dbgout, ", %s bytes allocated.\n", buf);
+  }
+
+  get_time(start);
+
+  /* The link-inverting marker might need to write to watched areas */
+  unprotect_watched_areas();
+
+  lisp_global(IN_GC) = (1<<fixnumshift);
+
+  if (just_purified_p) {
+    just_purified_p = false;
+    GCDebug = false;
+  } else {
+    GCDebug = ((nrs_GC_EVENT_STATUS_BITS.vcell & gc_integrity_check_bit) != 0);
+    if (GCDebug) {
+      check_all_areas(tcr);
+    }
+    check_static_cons_freelist("in pre-gc static-cons check");
+  }
+
+  if (from) {
+    untenure_from_area(from);
+  }
+  static_dnodes = static_dnodes_for_area(a);
+  GCmarkbits = a->markbits;
+  GCarealow = ptr_to_lispobj(a->low);
+  GCareadynamiclow = GCarealow+(static_dnodes << dnode_shift);
+  GCndnodes_in_area = gc_area_dnode(oldfree);
+
+  if (GCndnodes_in_area) {
+    GCndynamic_dnodes_in_area = GCndnodes_in_area-static_dnodes;
+    GCdynamic_markbits =
+      GCmarkbits + ((GCndnodes_in_area-GCndynamic_dnodes_in_area)>>bitmap_shift);
+
+    zero_bits(GCmarkbits, GCndnodes_in_area);
+
+    init_weakvll();
+
+    if (GCn_ephemeral_dnodes == 0) {
+      /* For GCTWA, mark the internal package hash table vector of
+       *PACKAGE*, but don't mark its contents. */
+      {
+        LispObj
+          itab;
+        natural
+          dnode, ndnodes;
+      
+        pkg = nrs_PACKAGE.vcell;
+        if ((fulltag_of(pkg) == fulltag_misc) &&
+            (header_subtag(header_of(pkg)) == subtag_package)) {
+          itab = ((package *)ptr_from_lispobj(untag(pkg)))->itab;
+          itabvec = car(itab);
+          dnode = gc_area_dnode(itabvec);
+          if (dnode < GCndnodes_in_area) {
+            ndnodes = (header_element_count(header_of(itabvec))+1) >> 1;
+            set_n_bits(GCmarkbits, dnode, ndnodes);
+          }
+        }
+      }
+    }
+
+    mark_root(lisp_global(STATIC_CONSES));
+
+    /* Mark everything reachable from the other areas (stacks, static
+       heaps). */
+    {
+      area *next_area;
+      area_code code;
+
+      /* Could make a jump table instead of the typecase */
+
+      for (next_area = a->succ; (code = next_area->code) != AREA_VOID; next_area = next_area->succ) {
+        switch (code) {
+        case AREA_TSTACK:
+          mark_tstack_area(next_area);
+          break;
+
+        case AREA_VSTACK:
+          mark_vstack_area(next_area);
+          break;
+          
+        case AREA_CSTACK:
+          mark_cstack_area(next_area);
+          break;
+
+        case AREA_STATIC:
+        case AREA_WATCHED:
+        case AREA_DYNAMIC:                  /* some heap that isn't "the" heap */
+          /* In both of these cases, we -could- use the area's "markbits"
+             bitvector as a reference map.  It's safe (but slower) to
+             ignore that map and process the entire area.
+          */
+          if (next_area->younger == NULL) {
+            mark_simple_area_range((LispObj *) next_area->low, (LispObj *) next_area->active);
+          }
+          break;
+
+        default:
+          break;
+        }
+      }
+    }
+  
+    if (GCephemeral_low) {
+      mark_memoized_area(tenured_area, area_dnode(a->low,tenured_area->low));
+    }
+
+    mark_managed_static_refs(managed_static_area,low_markable_address,area_dnode(a->active,low_markable_address));
+    
+    other_tcr = tcr;
+    do {
+      mark_tcr_xframes(other_tcr);
+      mark_tcr_tlb(other_tcr);
+      other_tcr = other_tcr->next;
+    } while (other_tcr != tcr);
+
+    /* Go back through *package*'s internal symbols, marking
+       any that aren't worthless.
+    */
+    
+    if (itabvec) {
+      natural
+        i,
+        n = header_element_count(header_of(itabvec));
+      LispObj
+        sym,
+        *raw = 1+((LispObj *)ptr_from_lispobj(untag(itabvec)));
+
+      for (i = 0; i < n; i++) {
+        sym = *raw++;
+        if (is_symbol_fulltag(sym)) {
+          lispsymbol *rawsym = (lispsymbol *)ptr_from_lispobj(untag(sym));
+          natural dnode = gc_area_dnode(sym);
+          
+          if ((dnode < GCndnodes_in_area) &&
+              (!ref_bit(GCmarkbits,dnode))) {
+            /* Symbol is in GC area, not marked.
+               Mark it if fboundp, boundp, or if
+               it has a plist or another home package.
+            */
+            
+            if (FBOUNDP(rawsym) ||
+                BOUNDP(rawsym) ||
+                (rawsym->flags != 0) || /* SPECIAL, etc. */
+                (rawsym->plist != lisp_nil) ||
+                ((rawsym->package_predicate != pkg) &&
+                 (rawsym->package_predicate != lisp_nil))) {
+              mark_root(sym);
+            }
+          }
+        }
+      }
+    }
+
+    (void)markhtabvs();
+
+    /* GCTWA: any internal symbol of *PACKAGE* still unmarked is
+       worthless; replace it with the unbound marker. */
+    if (itabvec) {
+      natural
+        i,
+        n = header_element_count(header_of(itabvec));
+      LispObj
+        sym,
+        *raw = 1+((LispObj *)ptr_from_lispobj(untag(itabvec)));
+
+      for (i = 0; i < n; i++, raw++) {
+        sym = *raw;
+        if (is_symbol_fulltag(sym)) {
+          natural dnode = gc_area_dnode(sym);
+
+          if ((dnode < GCndnodes_in_area) &&
+              (!ref_bit(GCmarkbits,dnode))) {
+            *raw = unbound_marker;
+          }
+        }
+      }
+    }
+  
+    reap_gcable_ptrs();
+
+    preforward_weakvll();
+
+    GCrelocptr = global_reloctab;
+    GCfirstunmarked = calculate_relocation();
+
+    if (!GCephemeral_low) {
+      reclaim_static_dnodes();
+    }
+
+    forward_range((LispObj *) ptr_from_lispobj(GCarealow), (LispObj *) ptr_from_lispobj(GCfirstunmarked));
+
+    other_tcr = tcr;
+    do {
+      forward_tcr_xframes(other_tcr);
+      forward_tcr_tlb(other_tcr);
+      other_tcr = other_tcr->next;
+    } while (other_tcr != tcr);
+
+    forward_gcable_ptrs();
+
+    /* Forward references held by the other areas. */
+    {
+      area *next_area;
+      area_code code;
+
+      /* Could make a jump table instead of the typecase */
+
+      for (next_area = a->succ; (code = next_area->code) != AREA_VOID; next_area = next_area->succ) {
+        switch (code) {
+        case AREA_TSTACK:
+          forward_tstack_area(next_area);
+          break;
+
+        case AREA_VSTACK:
+          forward_vstack_area(next_area);
+          break;
+
+        case AREA_CSTACK:
+          forward_cstack_area(next_area);
+          break;
+
+        case AREA_STATIC:
+        case AREA_WATCHED:
+        case AREA_DYNAMIC:                  /* some heap that isn't "the" heap */
+          if (next_area->younger == NULL) {
+            forward_range((LispObj *) next_area->low, (LispObj *) next_area->active);
+          }
+          break;
+
+        default:
+          break;
+        }
+      }
+    }
+
+    if (GCephemeral_low) {
+      forward_memoized_area(tenured_area, area_dnode(a->low, tenured_area->low));
+    }
+  
+    forward_memoized_area(managed_static_area,area_dnode(managed_static_area->active,managed_static_area->low));
+    a->active = (BytePtr) ptr_from_lispobj(compact_dynamic_heap());
+
+    forward_weakvll_links();
+
+    if (to) {
+      tenure_to_area(to);
+    }
+
+    zero_memory_range(a->active, oldfree);
+
+    resize_dynamic_heap(a->active,
+                        (GCephemeral_low == 0) ? lisp_heap_gc_threshold : 0);
+
+    /*
+      If the EGC is enabled: If there's no room for the youngest
+      generation, untenure everything.  If this was a full GC and
+      there's now room for the youngest generation, tenure everything.
+    */
+    if (a->older != NULL) {
+      natural nfree = (a->high - a->active);
+
+      if (nfree < a->threshold) {
+        untenure_from_area(tenured_area);
+      } else {
+        if (GCephemeral_low == 0) {
+          tenure_to_area(tenured_area);
+        }
+      }
+    }
+  }
+  lisp_global(GC_NUM) += (1<<fixnumshift);
+  if (note) {
+    note->gccount += (1<<fixnumshift);
+  }
+
+  if (GCDebug) {
+    check_all_areas(tcr);
+  }
+  check_static_cons_freelist("in post-gc static-cons check");
+
+  lisp_global(IN_GC) = 0;
+  
+  protect_watched_areas();
+
+  nrs_GC_EVENT_STATUS_BITS.vcell |= gc_postgc_pending;
+  get_time(stop);
+
+  /* Accumulate timing and bytes-freed statistics into the macptrs
+     hung off the TOTAL-GC-MICROSECONDS / TOTAL-BYTES-FREED nilreg
+     symbols, when Lisp has set them up. */
+  {
+    lispsymbol * total_gc_microseconds = (lispsymbol *) &(nrs_TOTAL_GC_MICROSECONDS);
+    lispsymbol * total_bytes_freed = (lispsymbol *) &(nrs_TOTAL_BYTES_FREED);
+    LispObj val;
+    struct timeval *timeinfo, elapsed = {0, 0};
+
+    val = total_gc_microseconds->vcell;
+    if ((fulltag_of(val) == fulltag_misc) &&
+        (header_subtag(header_of(val)) == subtag_macptr)) {
+      timersub(&stop, &start, &elapsed);
+      timeinfo = (struct timeval *) ptr_from_lispobj(((macptr *) ptr_from_lispobj(untag(val)))->address);
+      timeradd(timeinfo,  &elapsed, timeinfo);
+      timeradd(timeinfo+timeidx,  &elapsed, timeinfo+timeidx);
+    }
+
+    val = total_bytes_freed->vcell;
+    if ((fulltag_of(val) == fulltag_misc) &&
+        (header_subtag(header_of(val)) == subtag_macptr)) {
+      long long justfreed = oldfree - a->active;
+      *( (long long *) ptr_from_lispobj(((macptr *) ptr_from_lispobj(untag(val)))->address)) += justfreed;
+      if (GCverbose) {
+        char buf[16];
+        paging_info paging_info_stop;
+
+        sample_paging_info(&paging_info_stop);
+        if (justfreed <= heap_segment_size) {
+          justfreed = 0;
+        }
+        comma_output_decimal(buf,16,justfreed);
+        /* tv_sec/tv_usec are typically wider than int on LP64 systems;
+           printing them with %d is undefined.  Cast to long and use %ld. */
+        if (note == tenured_area) {
+          fprintf(dbgout,";;; Finished full GC. %s bytes freed in %ld.%06ld s\n\n", buf, (long)elapsed.tv_sec, (long)elapsed.tv_usec);
+        } else {
+          fprintf(dbgout,";;; Finished EGC of generation %d. %s bytes freed in %ld.%06ld s\n\n", 
+                  (from == g2_area) ? 2 : (from == g1_area) ? 1 : 0,
+                  buf, 
+                  (long)elapsed.tv_sec, (long)elapsed.tv_usec);
+        }
+        report_paging_info_delta(dbgout, &paging_info_start, &paging_info_stop);
+      }
+    }
+  }
+}
Index: /branches/arm/lisp-kernel/gc.h
===================================================================
--- /branches/arm/lisp-kernel/gc.h	(revision 13357)
+++ /branches/arm/lisp-kernel/gc.h	(revision 13357)
@@ -0,0 +1,246 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __GC_H__
+#define __GC_H__ 1
+
+#include "lisp.h"
+#include "bits.h"
+#include "lisp-exceptions.h"
+#include "memprotect.h"
+
+
+
+#ifdef PPC
+#define is_node_fulltag(f)  ((1<<(f))&((1<<fulltag_cons)|(1<<fulltag_misc)))
+#ifdef PPC64
+#define PPC64_CODE_VECTOR_PREFIX (('C'<< 24) | ('O' << 16) | ('D' << 8) | 'E')
+#else
+/*
+  A code-vector's header can't look like a valid instruction or UUO:
+  the low 8 bits must be subtag_code_vector, and the top 6 bits
+  must be 0.  That means that the maximum length of a code vector
+  is 18 bits worth of elements (~1MB.)
+*/
+
+#define code_header_mask ((0x3f<<26) | subtag_code_vector)
+#endif
+#endif
+
+#ifdef X86
+#ifdef X8664
+#define is_node_fulltag(f)  ((1<<(f))&((1<<fulltag_cons)    | \
+				       (1<<fulltag_tra_0)   | \
+				       (1<<fulltag_tra_1)   | \
+				       (1<<fulltag_misc)    | \
+				       (1<<fulltag_symbol)  | \
+				       (1<<fulltag_function)))
+#else
+#define is_node_fulltag(f)  ((1<<(f))&((1<<fulltag_cons) | \
+				       (1<<fulltag_misc) | \
+				       (1<<fulltag_tra)))
+#endif
+#endif
+
+
+extern void zero_memory_range(BytePtr,BytePtr);
+extern LispObj GCarealow, GCareadynamiclow;
+extern natural GCndnodes_in_area, GCndynamic_dnodes_in_area;
+extern bitvector GCmarkbits, GCdynamic_markbits;
+LispObj *global_reloctab, *GCrelocptr;
+LispObj GCfirstunmarked;
+
+extern natural lisp_heap_gc_threshold;
+void mark_root(LispObj);
+void mark_pc_root(LispObj);
+void mark_locative_root(LispObj);
+void rmark(LispObj);
+void postGCfree(void *);
+LispObj *skip_over_ivector(LispObj, LispObj);
+void mark_simple_area_range(LispObj *,LispObj *);
+LispObj calculate_relocation();
+LispObj locative_forwarding_address(LispObj);
+LispObj node_forwarding_address(LispObj);
+void forward_range(LispObj *, LispObj *);
+void note_memoized_references(ExceptionInformation *,LogicalAddress, LogicalAddress, BytePtr *, BytePtr *);
+void gc(TCR *, signed_natural);
+int change_hons_area_size(TCR *, signed_natural);
+void delete_protected_area(protected_area_ptr);
+Boolean egc_control(Boolean, BytePtr);
+Boolean free_segments_zero_filled_by_OS;
+
+/* a type representing one quarter of a natural word */
+#if WORD_SIZE == 64
+typedef unsigned short qnode;
+#else
+typedef unsigned char qnode;
+#endif
+
+
+#ifdef fulltag_symbol
+#define is_symbol_fulltag(x) (fulltag_of(x) == fulltag_symbol)
+#else
+#define is_symbol_fulltag(x) (fulltag_of(x) == fulltag_misc)
+#endif
+
+#define area_dnode(w,low) ((natural)(((ptr_to_lispobj(w)) - ptr_to_lispobj(low))>>dnode_shift))
+#define gc_area_dnode(w)  area_dnode(w,GCarealow)
+#define gc_dynamic_area_dnode(w) area_dnode(w,GCareadynamiclow)
+
+#if defined(PPC64) || defined(X8632)
+#define forward_marker subtag_forward_marker
+#else
+#define forward_marker fulltag_nil
+#endif
+
+#ifdef PPC64
+#define VOID_ALLOCPTR ((LispObj)(0x8000000000000000-dnode_size))
+#else
+#define VOID_ALLOCPTR ((LispObj)(-dnode_size))
+#endif
+
+#ifdef DARWIN
+#include <mach/task_info.h>
+typedef struct task_events_info paging_info;
+#else
+#ifndef WINDOWS
+#include <sys/resource.h>
+typedef struct rusage paging_info;
+#else
+typedef natural paging_info;
+#endif
+#endif
+
+#undef __argv
+#include <stdio.h>
+
+void sample_paging_info(paging_info *);
+void report_paging_info_delta(FILE*, paging_info *, paging_info *);
+
+
+#define GC_TRAP_FUNCTION_IMMEDIATE_GC (-1)
+#define GC_TRAP_FUNCTION_GC 0
+#define GC_TRAP_FUNCTION_PURIFY 1
+#define GC_TRAP_FUNCTION_IMPURIFY 2
+#define GC_TRAP_FUNCTION_FLASH_FREEZE 4
+#define GC_TRAP_FUNCTION_SAVE_APPLICATION 8
+
+#define GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD 16
+#define GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD 17
+#define GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD 18
+#define GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES 19
+#define GC_TRAP_FUNCTION_EGC_CONTROL 32
+#define GC_TRAP_FUNCTION_CONFIGURE_EGC 64
+#define GC_TRAP_FUNCTION_FREEZE 129
+#define GC_TRAP_FUNCTION_THAW 130
+
+/* NOTE(review): the objects below are tentative definitions with
+   external linkage in a header -- every .c including gc.h defines
+   them, which relies on common-symbol linking (-fcommon; modern
+   compilers default to -fno-common).  Consider 'extern' declarations
+   here plus a single definition in one translation unit.  The const
+   array _one_bits in particular is not a common symbol on all
+   toolchains -- TODO confirm where it is actually defined/initialized. */
+Boolean GCDebug, GCverbose, just_purified_p;
+bitvector GCmarkbits, GCdynamic_markbits;
+LispObj GCarealow, GCareadynamiclow;
+natural GCndnodes_in_area, GCndynamic_dnodes_in_area;
+LispObj GCweakvll,GCdwsweakvll;
+LispObj GCephemeral_low;
+natural GCn_ephemeral_dnodes;
+natural GCstack_limit;
+
+#if WORD_SIZE == 64
+unsigned short *_one_bits;
+#else
+const unsigned char _one_bits[256];
+#endif
+
+#define one_bits(x) _one_bits[x]
+
+natural static_dnodes_for_area(area *a);
+void reapweakv(LispObj weakv);
+void reaphashv(LispObj hashv);
+Boolean mark_weak_hash_vector(hash_table_vector_header *hashp, natural elements);
+Boolean mark_weak_alist(LispObj weak_alist, int weak_type);
+void mark_tcr_tlb(TCR *);
+void mark_tcr_xframes(TCR *);
+void freeGCptrs(void);
+void reap_gcable_ptrs(void);
+unsigned short logcount16(unsigned short);
+void gc_init(void);
+LispObj node_forwarding_address(LispObj);
+Boolean update_noderef(LispObj *);
+void update_locref(LispObj *);
+void forward_gcable_ptrs(void);
+void forward_memoized_area(area *, natural);
+void forward_tcr_tlb(TCR *);
+void reclaim_static_dnodes(void);
+Boolean youngest_non_null_area_p(area *);
+void gc(TCR *, signed_natural);
+
+/* backend-interface */
+
+typedef void (*weak_mark_fun) (LispObj);
+weak_mark_fun mark_weak_htabv, dws_mark_weak_htabv;
+
+typedef void (*weak_process_fun)(void);
+
+weak_process_fun markhtabvs;
+
+
+#define hash_table_vector_header_count (sizeof(hash_table_vector_header)/sizeof(LispObj))
+
+void mark_root(LispObj);
+void rmark(LispObj);
+#ifdef X8632
+void mark_xp(ExceptionInformation *, natural);
+#else
+void mark_xp(ExceptionInformation *);
+#endif
+LispObj dnode_forwarding_address(natural, int);
+LispObj locative_forwarding_address(LispObj);
+void check_refmap_consistency(LispObj *, LispObj *, bitvector);
+void check_all_areas(TCR *);
+void mark_tstack_area(area *);
+void mark_vstack_area(area *);
+void mark_cstack_area(area *);
+void mark_simple_area_range(LispObj *, LispObj *);
+void mark_memoized_area(area *, natural);
+LispObj calculate_relocation(void);
+void forward_range(LispObj *, LispObj *);
+void forward_tstack_area(area *);
+void forward_vstack_area(area *);
+void forward_cstack_area(area *);
+LispObj compact_dynamic_heap(void);
+signed_natural purify(TCR *, signed_natural);
+signed_natural impurify(TCR *, signed_natural);
+signed_natural gc_like_from_xp(ExceptionInformation *, signed_natural(*fun)(TCR *, signed_natural), signed_natural);
+
+
+/* Values stored in an xmacptr's flags word, selecting which dispose
+   action (if any) runs when the xmacptr is reaped after GC. */
+typedef enum {
+  xmacptr_flag_none = 0,        /* Maybe already disposed by Lisp */
+  xmacptr_flag_recursive_lock,  /* recursive-lock */
+  xmacptr_flag_ptr,             /* malloc/free */
+  xmacptr_flag_rwlock,          /* read/write lock */
+  xmacptr_flag_semaphore,        /* semaphore */
+  xmacptr_flag_user_first = 8,  /* first user-defined dispose fn */
+  xmacptr_flag_user_last = 16   /* exclusive upper bound */
+} xmacptr_flag;
+
+
+typedef void (*xmacptr_dispose_fn)(void *);
+
+extern xmacptr_dispose_fn xmacptr_dispose_functions[];
+
+extern bitvector global_mark_ref_bits, dynamic_mark_ref_bits, relocatable_mark_ref_bits;
+
+
+#endif                          /* __GC_H__ */
Index: /branches/arm/lisp-kernel/image.c
===================================================================
--- /branches/arm/lisp-kernel/image.c	(revision 13357)
+++ /branches/arm/lisp-kernel/image.c	(revision 13357)
@@ -0,0 +1,626 @@
+/*
+   Copyright (C) 2002-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp_globals.h"
+#include "area.h"
+#include "image.h"
+#include "gc.h"
+#include <errno.h>
+#include <unistd.h>
+#ifndef WINDOWS
+#include <sys/mman.h>
+#endif
+#include <stdio.h>
+#include <limits.h>
+
+
+
+#if defined(PPC64) || defined(X8632)
+#define RELOCATABLE_FULLTAG_MASK \
+  ((1<<fulltag_cons)|(1<<fulltag_misc))
+#else
+#ifdef X8664
+#define RELOCATABLE_FULLTAG_MASK \
+  ((1<<fulltag_cons)|(1<<fulltag_misc)|(1<<fulltag_symbol)|(1<<fulltag_function))
+#else
+#define RELOCATABLE_FULLTAG_MASK \
+  ((1<<fulltag_cons)|(1<<fulltag_nil)|(1<<fulltag_misc))
+#endif
+#endif
+
+void
+relocate_area_contents(area *a, LispObj bias)
+{
+  LispObj 
+    *start = (LispObj *)(a->low), 
+    *end = (LispObj *)(a->active),
+    low = (LispObj)image_base - bias,
+    high = ptr_to_lispobj(active_dynamic_area->active) - bias,
+    w0;
+  int fulltag;
+
+  while (start < end) {
+    w0 = *start;
+    fulltag = fulltag_of(w0);
+    if (immheader_tag_p(fulltag)) {
+      start = (LispObj *)skip_over_ivector((natural)start, w0);
+    } else {
+#ifdef X86
+      if (header_subtag(w0) == subtag_function) {
+#ifdef X8664
+        int skip = ((int) start[1])+1;
+#else
+        extern void update_self_references(LispObj *);
+        extern natural imm_word_count(LispObj);
+
+        natural skip = (natural)imm_word_count(((LispObj)start)+fulltag_misc)+1;
+        update_self_references(start);
+#endif
+     
+        start += skip;
+        if (((LispObj) start) & node_size) {
+          --start;
+        }
+        w0 = *start;
+        fulltag = fulltag_of(w0);
+      }
+#endif
+      if (header_subtag(w0) == subtag_weak) {
+        LispObj link = start[1];
+        if ((link >= low) && (link < high)) {
+          start[1] = (link+bias);
+        }
+      }
+      if ((w0 >= low) && (w0 < high) &&
+	  ((1<<fulltag) & RELOCATABLE_FULLTAG_MASK)) {
+	*start = (w0+bias);
+      }
+      w0 = *++start;
+      fulltag = fulltag_of(w0);
+      if ((w0 >= low) && (w0 < high) &&
+	  ((1<<fulltag) & RELOCATABLE_FULLTAG_MASK)) {
+	*start = (w0+bias);
+      }
+      ++start;
+    }
+  }
+  if (start > end) {
+    Bug(NULL, "Overran area bounds in relocate_area_contents");
+  }
+}
+      
+
+
+
+off_t
+seek_to_next_page(int fd)
+{
+  off_t pos = LSEEK(fd, 0, SEEK_CUR);
+  pos = align_to_power_of_2(pos, log2_page_size);
+  return LSEEK(fd, pos, SEEK_SET);
+}
+  
+/*
+  fd is positioned to EOF; header has been allocated by caller.
+  If we find a trailer (and that leads us to the header), read
+  the header & return true else return false.
+*/
+Boolean
+find_openmcl_image_file_header(int fd, openmcl_image_file_header *header)
+{
+  openmcl_image_file_trailer trailer;
+  int disp;
+  off_t pos;
+  unsigned version, flags;
+
+  pos = LSEEK(fd, 0, SEEK_END);
+  if (pos < 0) {
+    return false;
+  }
+  pos -= sizeof(trailer);
+
+  if (LSEEK(fd, pos, SEEK_SET) < 0) {
+    return false;
+  }
+  if (read(fd, &trailer, sizeof(trailer)) != sizeof(trailer)) {
+    return false;
+  }
+  if ((trailer.sig0 != IMAGE_SIG0) ||
+      (trailer.sig1 != IMAGE_SIG1) ||
+      (trailer.sig2 != IMAGE_SIG2)) {
+    return false;
+  }
+  disp = trailer.delta;
+  
+  if (disp >= 0) {
+    return false;
+  }
+  if (LSEEK(fd, disp, SEEK_CUR) < 0) {
+    return false;
+  }
+  if (read(fd, header, sizeof(openmcl_image_file_header)) !=
+      sizeof(openmcl_image_file_header)) {
+    return false;
+  }
+  if ((header->sig0 != IMAGE_SIG0) ||
+      (header->sig1 != IMAGE_SIG1) ||
+      (header->sig2 != IMAGE_SIG2) ||
+      (header->sig3 != IMAGE_SIG3)) {
+    return false;
+  }
+  version = (header->abi_version) & 0xffff;
+  if (version < ABI_VERSION_MIN) {
+    fprintf(dbgout, "Heap image is too old for this kernel.\n");
+    return false;
+  }
+  if (version > ABI_VERSION_MAX) {
+    fprintf(dbgout, "Heap image is too new for this kernel.\n");
+    return false;
+  }
+  flags = header->flags;
+  if (flags != PLATFORM) {
+    fprintf(dbgout, "Heap image was saved for another platform.\n");
+    return false;
+  }
+  return true;
+}
+
+void
+load_image_section(int fd, openmcl_image_section_header *sect)
+{
+  extern area* allocate_dynamic_area(unsigned);
+  off_t
+    pos = seek_to_next_page(fd), advance;
+  natural
+    mem_size = sect->memory_size;
+  void *addr;
+  area *a;
+
+  advance = mem_size;
+  switch(sect->code) {
+  case AREA_READONLY:
+    if (!MapFile(pure_space_active,
+		 pos,
+		 align_to_power_of_2(mem_size,log2_page_size),
+		 MEMPROTECT_RX,
+		 fd)) {
+      return;
+    }
+    a = new_area(pure_space_active, pure_space_limit, AREA_READONLY);
+    pure_space_active += mem_size;
+    a->active = pure_space_active;
+    sect->area = a;      
+    break;
+
+  case AREA_STATIC:
+    if (!MapFile(static_space_active,
+		 pos,
+		 align_to_power_of_2(mem_size,log2_page_size),
+		 MEMPROTECT_RWX,
+		 fd)) {
+      return;
+    }
+    a = new_area(static_space_active, static_space_limit, AREA_STATIC);
+    static_space_active += mem_size;
+    a->active = static_space_active;
+    sect->area = a;
+    break;
+
+  case AREA_DYNAMIC:
+    a = allocate_dynamic_area(mem_size);
+    if (!MapFile(a->low,
+		 pos,
+		 align_to_power_of_2(mem_size,log2_page_size),
+		 MEMPROTECT_RWX,
+		 fd)) {
+      return;
+    }
+
+    a->static_dnodes = sect->static_dnodes;
+    sect->area = a;
+    break;
+
+  case AREA_MANAGED_STATIC:
+    a = new_area(pure_space_limit, pure_space_limit+align_to_power_of_2(mem_size,log2_page_size), AREA_MANAGED_STATIC);
+    a->active = a->low+mem_size;
+    if (mem_size) {
+      natural
+        refbits_size = align_to_power_of_2((((mem_size>>dnode_shift)+7)>>3),
+                                           log2_page_size);
+      if (!MapFile(a->low,
+                   pos,
+                   align_to_power_of_2(mem_size,log2_page_size),
+                   MEMPROTECT_RWX,
+                   fd)) {
+        return;
+      }
+      /* Need to save/restore persistent refbits. */
+      if (!MapFile(global_mark_ref_bits,
+                   align_to_power_of_2(pos+mem_size,log2_page_size),
+                   refbits_size,
+                   MEMPROTECT_RW,
+                   fd)) {
+        return;
+      }
+      advance += refbits_size;
+    }
+    sect->area = a;
+    a->ndnodes = area_dnode(a->active, a->low);
+    managed_static_area = a;
+    lisp_global(REF_BASE) = (LispObj) a->low;
+    break;
+
+    /* In many respects, the static_cons_area is part of the dynamic
+       area; it's physically adjacent to it (immediately precedes the
+       dynamic area in memory) and its contents are subject to full
+       GC (but not compaction.)  It's maintained as a separate section
+       in the image file, at least for now. */
+
+
+  case AREA_STATIC_CONS:
+    addr = (void *) lisp_global(HEAP_START);
+    a = new_area(addr-align_to_power_of_2(mem_size,log2_page_size), addr, AREA_STATIC_CONS);
+    if (mem_size) {      
+      if (!MapFile(a->low,
+                   pos,
+                   align_to_power_of_2(mem_size,log2_page_size),
+                   MEMPROTECT_RWX,
+                   fd)) {
+        return;
+      }
+    }
+    a->ndnodes = area_dnode(a->active, a->low);
+    sect->area = a;
+    static_cons_area = a;
+    break;
+
+  default:
+    return;
+    
+  }
+  LSEEK(fd, pos+advance, SEEK_SET);
+}
+
+LispObj
+load_openmcl_image(int fd, openmcl_image_file_header *h)
+{
+  LispObj image_nil = 0;
+  area *a;
+  if (find_openmcl_image_file_header(fd, h)) {
+    int i, nsections = h->nsections;
+    openmcl_image_section_header sections[nsections], *sect=sections;
+    LispObj bias = image_base - ACTUAL_IMAGE_BASE(h);
+#if (WORD_SIZE== 64)
+    signed_natural section_data_delta = 
+      ((signed_natural)(h->section_data_offset_high) << 32L) | h->section_data_offset_low;
+#endif
+
+    if (read (fd, sections, nsections*sizeof(openmcl_image_section_header)) !=
+	nsections * sizeof(openmcl_image_section_header)) {
+      return 0;
+    }
+#if WORD_SIZE == 64
+    LSEEK(fd, section_data_delta, SEEK_CUR);
+#endif
+    for (i = 0; i < nsections; i++, sect++) {
+      load_image_section(fd, sect);
+      a = sect->area;
+      if (a == NULL) {
+	return 0;
+      }
+    }
+
+    for (i = 0, sect = sections; i < nsections; i++, sect++) {
+      a = sect->area;
+      switch(sect->code) {
+      case AREA_STATIC:
+	nilreg_area = a;
+#ifdef PPC
+#ifdef PPC64
+        image_nil = ptr_to_lispobj(a->low + (1024*4) + sizeof(lispsymbol) + fulltag_misc);
+#else
+	image_nil = (LispObj)(a->low + 8 + 8 + (1024*4) + fulltag_nil);
+#endif
+#endif
+#ifdef X86
+#ifdef X8664
+	image_nil = (LispObj)(a->low) + (1024*4) + fulltag_nil;
+#else
+	image_nil = (LispObj)(a->low) + (1024*4) + fulltag_cons;
+#endif
+#endif
+	set_nil(image_nil);
+	if (bias) {
+	  relocate_area_contents(a, bias);
+	}
+	make_dynamic_heap_executable(a->low, a->active);
+        add_area_holding_area_lock(a);
+        break;
+        
+      case AREA_READONLY:
+        if (bias && 
+            (managed_static_area->active != managed_static_area->low)) {
+          UnProtectMemory(a->low, a->active-a->low);
+          relocate_area_contents(a, bias);
+          ProtectMemory(a->low, a->active-a->low);
+        }
+        readonly_area = a;
+	add_area_holding_area_lock(a);
+	break;
+      }
+    }
+    for (i = 0, sect = sections; i < nsections; i++, sect++) {
+      a = sect->area;
+      switch(sect->code) {
+      case AREA_MANAGED_STATIC:
+        if (bias) {
+          relocate_area_contents(a, bias);
+        }
+        add_area_holding_area_lock(a);
+        break;
+      case AREA_STATIC_CONS:
+        break;
+      case AREA_DYNAMIC:
+        lower_heap_start(static_cons_area->low,a);
+        if (bias) {
+          relocate_area_contents(a, bias);
+        }
+	resize_dynamic_heap(a->active, lisp_heap_gc_threshold);
+	xMakeDataExecutable(a->low, a->active - a->low);
+	break;
+      }
+    }
+  }
+  return image_nil;
+}
+ 
+void
+prepare_to_write_dynamic_space()
+{
+  area *a = active_dynamic_area;
+  LispObj 
+    *start = (LispObj *)(a->low),
+    *end = (LispObj *) (a->active),
+    x1;
+  int tag, subtag, element_count;
+
+  while (start < end) {
+    x1 = *start;
+    tag = fulltag_of(x1);
+    if (immheader_tag_p(tag)) {
+      subtag = header_subtag(x1);
+      if (subtag == subtag_macptr) {
+        if ((start[1] >= (natural)0x10000) && (start[1] < (natural)-0x10000)) {
+          /* Leave small pointers alone */
+          *start = make_header(subtag_dead_macptr,header_element_count(x1));
+        }
+      }
+      start = (LispObj *)skip_over_ivector((natural)start, x1);
+    } else if (nodeheader_tag_p(tag)) {
+      element_count = header_element_count(x1) | 1;
+      start += (element_count+1);
+    } else {
+      start += 2;
+    }
+  }
+}
+
+  
+
+int
+write_file_and_section_headers(int fd, 
+                               openmcl_image_file_header *file_header,
+                               openmcl_image_section_header* section_headers,
+                               int nsections,
+                               off_t *header_pos)
+{
+  *header_pos = seek_to_next_page(fd);
+
+  if (LSEEK (fd, *header_pos, SEEK_SET) < 0) {
+    return errno;
+  }
+  if (write(fd, file_header, sizeof(*file_header)) != sizeof(*file_header)) {
+    return errno;
+  }
+  if (write(fd, section_headers, sizeof(section_headers[0])*nsections)
+      != (sizeof(section_headers[0])*nsections)) {
+    return errno;
+  }
+  return 0;
+}
+  
+natural
+writebuf(int fd, char *bytes, natural n)
+{
+  natural remain = n, this_size;
+  signed_natural result;
+
+  while (remain) {
+    this_size = remain;
+    if (this_size > INT_MAX) {
+      this_size = INT_MAX;
+    }
+    result = write(fd, bytes, this_size);
+    if (result < 0) {
+      return errno;
+    }
+    bytes += result;
+
+    remain -= result;
+  }
+  return 0;
+}
+
+OSErr
+save_application(unsigned fd, Boolean egc_was_enabled)
+{
+  openmcl_image_file_header fh;
+  openmcl_image_section_header sections[NUM_IMAGE_SECTIONS];
+  openmcl_image_file_trailer trailer;
+  area *areas[NUM_IMAGE_SECTIONS], *a;
+  int i, err;
+  off_t header_pos, eof_pos;
+#if WORD_SIZE == 64
+  off_t image_data_pos;
+  signed_natural section_data_delta;
+#endif
+
+  /*
+    Coerce macptrs to dead_macptrs.
+  */
+  
+  prepare_to_write_dynamic_space(active_dynamic_area);
+
+  /* 
+     If we ever support continuing after saving an image,
+     undo this .. */
+
+  if (static_cons_area->high > static_cons_area->low) {
+    active_dynamic_area->low = static_cons_area->high;
+    tenured_area->static_dnodes -= area_dnode(static_cons_area->high, static_cons_area->low);
+  }
+
+  areas[0] = nilreg_area; 
+  areas[1] = readonly_area;
+  areas[2] = active_dynamic_area;
+  areas[3] = managed_static_area;
+  areas[4] = static_cons_area;
+  for (i = 0; i < NUM_IMAGE_SECTIONS; i++) {
+    a = areas[i];
+    sections[i].code = a->code;
+    sections[i].area = NULL;
+    sections[i].memory_size  = a->active - a->low;
+    if (a == active_dynamic_area) {
+      sections[i].static_dnodes = tenured_area->static_dnodes;
+    } else {
+      sections[i].static_dnodes = 0;
+    }
+  }
+  fh.sig0 = IMAGE_SIG0;
+  fh.sig1 = IMAGE_SIG1;
+  fh.sig2 = IMAGE_SIG2;
+  fh.sig3 = IMAGE_SIG3;
+  fh.timestamp = time(NULL);
+  CANONICAL_IMAGE_BASE(&fh) = IMAGE_BASE_ADDRESS;
+  ACTUAL_IMAGE_BASE(&fh) = image_base;
+  fh.nsections = NUM_IMAGE_SECTIONS;
+  fh.abi_version=ABI_VERSION_CURRENT;
+#if WORD_SIZE == 64
+  fh.section_data_offset_high = 0;
+  fh.section_data_offset_low = 0;
+#else
+  fh.pad0[0] = fh.pad0[1] = 0;
+  fh.pad1[0] = fh.pad1[1] = fh.pad1[2] = fh.pad1[3] = 0;
+#endif
+  fh.flags = PLATFORM;
+
+#if WORD_SIZE == 64
+  image_data_pos = seek_to_next_page(fd);
+#else
+  err = write_file_and_section_headers(fd, &fh, sections, NUM_IMAGE_SECTIONS, &header_pos);
+  if (err) {
+    return err;
+  }
+#endif
+
+
+  {
+    area *g0_area = g1_area->younger;
+
+    /* Save GC config */
+    lisp_global(LISP_HEAP_THRESHOLD) = lisp_heap_gc_threshold;
+    lisp_global(G0_THRESHOLD) = g0_area->threshold;
+    lisp_global(G1_THRESHOLD) = g1_area->threshold;
+    lisp_global(G2_THRESHOLD) = g2_area->threshold;
+    lisp_global(EGC_ENABLED) = (LispObj)egc_was_enabled;
+  }
+  /*
+    lisp_global(GC_NUM) and lisp_global(FWDNUM) are persistent,
+    as is DELETED_STATIC_PAIRS.
+    Nothing else is even meaningful at this point.
+  */
+  for (i = MIN_KERNEL_GLOBAL; i < 0; i++) {
+    switch (i) {
+    case FREE_STATIC_CONSES:
+    case FWDNUM:
+    case GC_NUM:
+    case STATIC_CONSES:
+    case WEAK_GC_METHOD:
+    case LISP_HEAP_THRESHOLD:
+    case EGC_ENABLED:
+    case G0_THRESHOLD:
+    case G1_THRESHOLD:
+    case G2_THRESHOLD:
+      break;
+    case WEAKVLL:
+      break;
+    default:
+      lisp_global(i) = 0;
+    }
+  }
+
+  for (i = 0; i < NUM_IMAGE_SECTIONS; i++) {
+    natural n;
+    a = areas[i];
+    seek_to_next_page(fd);
+    n = sections[i].memory_size;
+    if (writebuf(fd, a->low, n)) {
+	return errno;
+    }
+    if (n &&  ((sections[i].code) == AREA_MANAGED_STATIC)) {
+      natural ndnodes = area_dnode(a->active, a->low);
+      natural nrefbytes = align_to_power_of_2((ndnodes+7)>>3,log2_page_size);
+
+      seek_to_next_page(fd);
+      if (writebuf(fd,(char*)a->refbits,nrefbytes)) {
+        return errno;
+      }
+    }
+  }
+
+#if WORD_SIZE == 64
+  seek_to_next_page(fd);
+  section_data_delta = -((LSEEK(fd,0,SEEK_CUR)+sizeof(fh)+sizeof(sections)) -
+                         image_data_pos);
+  fh.section_data_offset_high = (int)(section_data_delta>>32L);
+  fh.section_data_offset_low = (unsigned)section_data_delta;
+  err =  write_file_and_section_headers(fd, &fh, sections, NUM_IMAGE_SECTIONS, &header_pos);
+  if (err) {
+    return err;
+  }  
+#endif
+
+  trailer.sig0 = IMAGE_SIG0;
+  trailer.sig1 = IMAGE_SIG1;
+  trailer.sig2 = IMAGE_SIG2;
+  eof_pos = LSEEK(fd, 0, SEEK_CUR) + sizeof(trailer);
+  trailer.delta = (int) (header_pos-eof_pos);
+  if (write(fd, &trailer, sizeof(trailer)) == sizeof(trailer)) {
+#ifndef WINDOWS
+    fsync(fd);
+#endif
+    close(fd);
+    return 0;
+  } 
+  i = errno;
+  close(fd);
+  return i;
+}
+      
+
+
+
Index: /branches/arm/lisp-kernel/image.h
===================================================================
--- /branches/arm/lisp-kernel/image.h	(revision 13357)
+++ /branches/arm/lisp-kernel/image.h	(revision 13357)
@@ -0,0 +1,96 @@
+/*
+   Copyright (C) 2002-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+
+#define IMAGE_SIG0 (('O'<<24) | ('p'<<16) | ('e'<<8) | 'n')
+#define IMAGE_SIG1 (('M'<<24) | ('C'<<16) | ('L'<<8) | 'I')
+#define IMAGE_SIG2 (('m'<<24) | ('a'<<16) | ('g'<<8) | 'e')
+#define IMAGE_SIG3 (('F'<<24) | ('i'<<16) | ('l'<<8) | 'e')
+
+/* 
+   An image file contains a header (which describes the type, size,
+   and nominal memory address of one or more sections) and data for
+   each section; each section's data is page-aligned within the image
+   file, so its disk address is implicit.  The header must reside
+   entirely within a page; the first section's data starts on the page
+   after the image header, and subsequent sections start on the pages
+   after the page which contains the last byte of their predecessor's
+   data.
+
+   The image header's position relative to the start of the file is
+   arbitrary.  The image header's position relative to the end of the
+   file is indicated by the last word in the file (which is preceded
+   by the first three signature words above.)  The last word contains
+   the distance from the end-of-file to the start of the header.
+
+   As long as these alignment constraints are met, the image file can
+   have arbitrary data (or executable programs, or shell scripts)
+   prepended to it.  This is supposed to simplify distribution.
+*/
+
+typedef struct {
+  natural code;
+  area *area;
+  natural memory_size;
+  natural static_dnodes;
+} openmcl_image_section_header;
+
+typedef struct {
+  unsigned sig0, sig1, sig2, sig3;
+  unsigned timestamp;
+  unsigned canonical_image_base_32; /* IMAGE_BASE_ADDRESS */
+  unsigned actual_image_base_32;	/* Hopefully the same */
+  unsigned nsections;
+  unsigned abi_version;
+#if WORD_SIZE == 64
+  int section_data_offset_high; /* signed offset from end of
+                                         section headers to first
+                                         section's data.  May be zero. */
+  unsigned section_data_offset_low;
+  unsigned flags; 
+  natural canonical_image_base_64;
+  natural actual_image_base_64;
+#else 
+  unsigned pad0[2]; 
+  unsigned flags;
+  unsigned pad1[4];
+#endif
+} openmcl_image_file_header;
+
+#if WORD_SIZE == 64
+#define ACTUAL_IMAGE_BASE(header) ((header)->actual_image_base_64)
+#define CANONICAL_IMAGE_BASE(header) ((header)->canonical_image_base_64)
+#else
+#define ACTUAL_IMAGE_BASE(header) ((header)->actual_image_base_32)
+#define CANONICAL_IMAGE_BASE(header) ((header)->canonical_image_base_32)
+#endif
+
+typedef struct {
+  unsigned sig0, sig1, sig2;
+  int delta;
+} openmcl_image_file_trailer;
+
+LispObj
+load_openmcl_image(int, openmcl_image_file_header*);
+
+
+
+
+#define ABI_VERSION_MIN 1036
+#define ABI_VERSION_CURRENT 1036
+#define ABI_VERSION_MAX 1036
+
+#define NUM_IMAGE_SECTIONS 5    /* used to be 3 */
Index: /branches/arm/lisp-kernel/imports.s
===================================================================
--- /branches/arm/lisp-kernel/imports.s	(revision 13357)
+++ /branches/arm/lisp-kernel/imports.s	(revision 13357)
@@ -0,0 +1,120 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+	include(m4macros.m4)
+define(`PTR',`
+        __ifdef(`PPC64')
+        .quad $1
+        __else
+	 __ifdef(`X8664')
+	 .quad $1
+	 __else
+	  .long $1
+	 __endif
+        __endif
+')
+	_beginfile
+
+        	
+	.globl C(import_ptrs_base)
+define(`defimport',`
+	.globl C($1)
+        PTR(C($1))
+                
+# __line__
+')
+	.data
+import_ptrs_start:
+
+	defimport(fd_setsize_bytes)
+	defimport(do_fd_set)
+	defimport(do_fd_clr)
+	defimport(do_fd_is_set)
+	defimport(do_fd_zero)
+	defimport(xMakeDataExecutable)
+	defimport(xGetSharedLibrary)
+	defimport(xFindSymbol)
+	defimport(allocate)
+	defimport(deallocate)
+	defimport(jvm_init)
+	defimport(tcr_frame_ptr)
+	defimport(register_xmacptr_dispose_function)
+	defimport(open_debug_output)
+	defimport(get_r_debug)
+	defimport(restore_soft_stack_limit)
+	defimport(lisp_egc_control)
+	defimport(lisp_bug)
+	defimport(xNewThread)
+	defimport(cooperative_thread_startup)
+	defimport(xDisposeThread)
+	defimport(xThreadCurrentStackSpace)
+	defimport(usage_exit)
+	defimport(save_fp_context)
+	defimport(restore_fp_context)
+	defimport(put_vector_registers)
+	defimport(get_vector_registers)
+        defimport(new_semaphore)
+	defimport(wait_on_semaphore)
+	defimport(signal_semaphore)
+        defimport(destroy_semaphore)
+        defimport(new_recursive_lock)
+        defimport(lock_recursive_lock)
+        defimport(unlock_recursive_lock)
+        defimport(destroy_recursive_lock)
+        defimport(lisp_suspend_other_threads)
+        defimport(lisp_resume_other_threads)
+        defimport(lisp_suspend_tcr)
+        defimport(lisp_resume_tcr)
+        defimport(rwlock_new)
+        defimport(rwlock_destroy)
+        defimport(rwlock_rlock)
+        defimport(rwlock_wlock)
+        defimport(rwlock_unlock)
+        defimport(recursive_lock_trylock)
+	defimport(foreign_name_and_offset)
+        defimport(lisp_read)
+        defimport(lisp_write)
+        defimport(lisp_open)
+        defimport(lisp_fchmod)
+        defimport(lisp_lseek)
+        defimport(lisp_close)
+        defimport(lisp_ftruncate)
+        defimport(lisp_stat)
+        defimport(lisp_fstat)
+        defimport(lisp_futex)
+        defimport(lisp_opendir)
+        defimport(lisp_readdir)
+        defimport(lisp_closedir)
+        defimport(lisp_pipe)
+        defimport(lisp_gettimeofday)
+        defimport(lisp_sigexit)
+   
+        .globl C(import_ptrs_base)
+C(import_ptrs_base):
+	PTR(import_ptrs_start)
+
+	__ifdef(`PPC')
+        __ifdef(`LINUX')
+        __ifndef(`PPC64')
+        .globl __trampoline_setup
+	.long  __trampoline_setup
+        __endif
+        __endif
+	__endif
+
+
+
+
+	_endfile
Index: /branches/arm/lisp-kernel/kernel-globals.h
===================================================================
--- /branches/arm/lisp-kernel/kernel-globals.h	(revision 13357)
+++ /branches/arm/lisp-kernel/kernel-globals.h	(revision 13357)
@@ -0,0 +1,33 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __kernel_globals__
+#define __kernel_globals__
+#include "area.h"
+
+
+extern area *nilreg_area, *tenured_area, *g2_area, *g1_area, *managed_static_area, *readonly_area, *static_cons_area;
+extern area *all_areas;
+extern int cache_block_size;
+
+
+
+
+
+
+
+#endif /* __kernel_globals__ */
Index: /branches/arm/lisp-kernel/linuxppc/.cvsignore
===================================================================
--- /branches/arm/lisp-kernel/linuxppc/.cvsignore	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc/.cvsignore	(revision 13357)
@@ -0,0 +1,1 @@
+*~.*
Index: /branches/arm/lisp-kernel/linuxppc/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/linuxppc/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc/.gdbinit	(revision 13357)
@@ -0,0 +1,120 @@
+directory lisp-kernel
+
+define header32
+x/x $arg0-6
+end
+
+define header64
+x/x $arg0-12
+end
+
+define lisp_string32
+x/s ($arg0-2)
+end
+
+define lisp_string64
+x/s (($arg0)-4)
+end
+
+define pname32
+lisp_string32 (*($arg0-2))
+end
+
+# GDB's expression parser seems to have difficulty
+# with this unless the temporary is used.
+define pname64
+set $temp=*((long *)((long)($arg0-4)))
+lisp_string64 $temp
+end
+
+define ada 
+ p *all_areas->succ
+end
+
+define _TCR
+ p/x *(TCR *) $arg0
+end
+
+define tcr32
+ _TCR $r13
+end
+
+define tcr64
+ _TCR $r2
+end
+
+define regs32
+ p/x *(((struct pt_regs **)$arg0)[12])
+end
+
+define regs64
+ p/x * (((ExceptionInformation *)$arg0)->uc_mcontext.regs)
+end
+
+define xpGPR
+ p/x (((struct pt_regs **)$arg0)[12])->gpr[$arg1]
+end
+
+define xpPC
+ p/x ((ExceptionInformation *)$arg0)->uc_mcontext.regs->nip
+end
+
+define lisp_string
+ if $ppc64
+  lisp_string64 $arg0
+ else
+  lisp_string32 $arg0
+ end
+end
+
+define pname
+ if $ppc64
+  pname64 $arg0
+ else
+  pname32 $arg0
+ end
+end
+
+define tcr
+ if $ppc64
+  tcr64
+ else
+  tcr32
+ end
+end
+
+define regs
+ if $ppc64
+  regs64 $arg0
+ else
+  regs32 $arg0
+ end
+end
+
+define xpGPR
+ if $ppc64
+  xpGPR64 $arg0 $arg1
+ else
+  xpGPR32 $arg0 $arg1
+ end
+end
+
+define lisp
+ call print_lisp_object($arg0)
+end
+
+set $ppc64=0
+
+
+break Bug
+
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIG40 pass nostop noprint
+handle SIG41 pass nostop noprint
+handle SIG42 pass nostop noprint
+handle SIGPWR pass nostop noprint
+
+display/i $pc
Index: /branches/arm/lisp-kernel/linuxppc/Makefile
===================================================================
--- /branches/arm/lisp-kernel/linuxppc/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc/Makefile	(revision 13357)
@@ -0,0 +1,107 @@
+#
+#   Copyright (C) 1994-2001 Digitool, Inc
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ../
+RM = /bin/rm
+# Versions of GNU as >= 2.9.1 all seem to work
+# AS = gas-2.9.1
+AS = as
+M4 = m4
+ASFLAGS = -mregnames -mppc32 -maltivec
+M4FLAGS = -DLINUX -DPPC
+CDEFINES = -DLINUX -DPPC -D_REENTRANT -D_GNU_SOURCE
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+# If the linker supports a "--hash-style=" option, use traditional
+# Sysv hash tables.  (If it doesn't support that option, assume
+# that traditional hash tables will be used by default.)
+ld_has_hash_style = $(shell $(LD) --help | grep "hash-style=")
+ifeq ($(ld_has_hash_style),)
+HASH_STYLE=
+else
+HASH_STYLE="-Wl,--hash-style=sysv"
+endif
+
+# The only version of GCC I have that supports both ppc32 and ppc64
+# compilation uses the -m32 option to target ppc32.  This may not be
+# definitive; there seem to be a bewildering array of similar options
+# in other GCC versions.  It's assumed here that if "-m32" is recognized,
+# it's required as well.
+
+PPC32 = $(shell ($(CC) --help -v 2>&1 | grep -q -e "-m32 ") && /bin/echo "-m32")
+
+# Likewise, some versions of GAS may need a "-a32" flag, to force the
+# output file to be 32-bit compatible.
+
+A32 = $(shell ($(AS) --help -v 2>&1 | grep -q -e "-a32") && /bin/echo "-a32")
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS) $(A32) $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) $(PPC32) -o $@
+
+SPOBJ = pad.o ppc-spjump.o ppc-spentry.o ppc-subprims.o
+ASMOBJ = ppc-asmutils.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o ppc-gc.o bits.o  ppc-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o plbt.o ppc_print.o
+KERNELOBJ= $(COBJ) ppc-asmutils.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 ppc-constants.s ppc-macros.s errors.s ppc-uuo.s ppc-constants32.s
+
+CHEADERS = area.h bits.h ppc-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h ppc-constants32.h ppc-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ = $(SPOBJ)
+all:	../../ppccl
+
+
+# No:
+
+# KSPOBJ=
+# all:	../../ppccl ../../subprims.so
+
+OSLIBS = -ldl -lm -lpthread
+
+
+../../ppccl:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ)
+	$(CC) $(PPC32) $(CDEBUG)  -Wl,--export-dynamic $(HASH_STYLE) -o $@ -T ./elf32ppclinux.x $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../ppccl
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../ppccl
+	strip -g ../../ppccl
Index: /branches/arm/lisp-kernel/linuxppc/elf32ppclinux.x
===================================================================
--- /branches/arm/lisp-kernel/linuxppc/elf32ppclinux.x	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc/elf32ppclinux.x	(revision 13357)
@@ -0,0 +1,253 @@
+OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc",
+	      "elf32-powerpc")
+OUTPUT_ARCH(powerpc:common)
+ENTRY(_start)
+SEARCH_DIR(/lib); SEARCH_DIR(/usr/lib); SEARCH_DIR(/usr/local/lib); SEARCH_DIR(/usr/powerpc-linux/lib);
+/* Do we need any of these for elf?
+   __DYNAMIC = 0;    */
+VERSION {default { global : __trampoline_setup ;  } ;}
+SECTIONS
+{
+  PROVIDE (__executable_start = 0x00010000);
+  . = 0x00010000 + SIZEOF_HEADERS;
+  .pad : { pad.o(.text) }
+  .subprims ALIGN(0x1000)    :  
+  {
+   ppc-spjump.o(.text)
+   ppc-spentry.o(.text)   
+   ppc-subprims.o(.text)
+  }
+  /* Read-only sections, merged into text segment: */
+/*  . = 0x10000000; */
+  .interp     : { *(.interp) 	}
+  .hash          : { *(.hash)		}
+  .dynsym        : { *(.dynsym)		}
+  .dynstr        : { *(.dynstr)		}
+  .gnu.version   : { *(.gnu.version)	}
+  .gnu.version_d   : { *(.gnu.version_d)	}
+  .gnu.version_r   : { *(.gnu.version_r)	}
+  .rel.init      : { *(.rel.init)	}
+  .rela.init     : { *(.rela.init)	}
+  .rel.text      :
+    {
+      *(.rel.text)
+      *(.rel.text.*)
+      *(.rel.gnu.linkonce.t*)
+    }
+  .rela.text     :
+    {
+      *(.rela.text)
+      *(.rela.text.*)
+      *(.rela.gnu.linkonce.t*)
+    }
+  .rel.fini      : { *(.rel.fini)	}
+  .rela.fini     : { *(.rela.fini)	}
+  .rel.rodata    :
+    {
+      *(.rel.rodata)
+      *(.rel.rodata.*)
+      *(.rel.gnu.linkonce.r*)
+    }
+  .rela.rodata   :
+    {
+      *(.rela.rodata)
+      *(.rela.rodata.*)
+      *(.rela.gnu.linkonce.r*)
+    }
+  .rel.data      :
+    {
+      *(.rel.data)
+      *(.rel.data.*)
+      *(.rel.gnu.linkonce.d*)
+    }
+  .rela.data     :
+    {
+      *(.rela.data)
+      *(.rela.data.*)
+      *(.rela.gnu.linkonce.d*)
+    }
+  .rel.ctors     : { *(.rel.ctors)	}
+  .rela.ctors    : { *(.rela.ctors)	}
+  .rel.dtors     : { *(.rel.dtors)	}
+  .rela.dtors    : { *(.rela.dtors)	}
+  .rel.got       : { *(.rel.got)		}
+  .rela.got      : { *(.rela.got)		}
+  .rel.sdata     :
+    {
+      *(.rel.sdata)
+      *(.rel.sdata.*)
+      *(.rel.gnu.linkonce.s*)
+    }
+  .rela.sdata     :
+    {
+      *(.rela.sdata)
+      *(.rela.sdata.*)
+      *(.rela.gnu.linkonce.s*)
+    }
+  .rel.sbss      : { *(.rel.sbss)		}
+  .rela.sbss     : { *(.rela.sbss)	}
+  .rel.sdata2    : { *(.rel.sdata2)	}
+  .rela.sdata2   : { *(.rela.sdata2)	}
+  .rel.sbss2     : { *(.rel.sbss2)	}
+  .rela.sbss2    : { *(.rela.sbss2)	}
+  .rel.bss       : { *(.rel.bss)		}
+  .rela.bss      : { *(.rela.bss)		}
+  .rel.plt       : { *(.rel.plt)		}
+  .rela.plt      : { *(.rela.plt)		}
+  .init          : 
+  { 
+    KEEP (*(.init))
+  } =0
+  .text      :
+  {
+    *(.text)
+    *(.text.*)
+    *(.stub)
+    /* .gnu.warning sections are handled specially by elf32.em.  */
+    *(.gnu.warning)
+    *(.gnu.linkonce.t*)
+  } =0
+  .fini      :
+  {
+    KEEP (*(.fini))
+  } =0
+  PROVIDE (__etext = .);
+  PROVIDE (_etext = .);
+  PROVIDE (etext = .);
+  .rodata   : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r*) }
+  .rodata1   : { *(.rodata1) }
+  .sdata2   : { *(.sdata2) }
+  .sbss2   : { *(.sbss2) }
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up.  */
+  . = ALIGN(0x10000) + (. & (0x10000 - 1));
+  /* Ensure the __preinit_array_start label is properly aligned.  We
+     could instead move the label definition inside the section, but
+     the linker would then create the section even if it turns out to
+     be empty, which isn't pretty.  */
+  . = ALIGN(32 / 8);
+  PROVIDE (__preinit_array_start = .);
+  .preinit_array     : { *(.preinit_array) }
+  PROVIDE (__preinit_array_end = .);
+  PROVIDE (__init_array_start = .);
+  .init_array     : { *(.init_array) }
+  PROVIDE (__init_array_end = .);
+  PROVIDE (__fini_array_start = .);
+  .fini_array     : { *(.fini_array) }
+  PROVIDE (__fini_array_end = .);
+  .data    :
+  {
+    *(.data)
+    *(.data.*)
+    *(.gnu.linkonce.d*)
+    SORT(CONSTRUCTORS)
+  }
+  .data1   : { *(.data1) }
+  .eh_frame : { *(.eh_frame) }
+  .gcc_except_table : { *(.gcc_except_table) }
+  .got1		: { *(.got1) }
+  .got2		: { *(.got2) }
+  .ctors   : 
+  {
+    /* gcc uses crtbegin.o to find the start of
+       the constructors, so we make sure it is
+       first.  Because this is a wildcard, it
+       doesn't matter if the user does not
+       actually link against crtbegin.o; the
+       linker won't look for a file to match a
+       wildcard.  The wildcard also means that it
+       doesn't matter which directory crtbegin.o
+       is in.  */
+    KEEP (*crtbegin.o(.ctors))
+    /* We don't want to include the .ctor section from
+       the crtend.o file until after the sorted ctors.
+       The .ctor section from the crtend file contains the
+       end of ctors marker and it must be last */
+    KEEP (*(EXCLUDE_FILE (*crtend.o ) .ctors))
+    KEEP (*(SORT(.ctors.*)))
+    KEEP (*(.ctors))
+  }
+   .dtors         :
+  {
+    KEEP (*crtbegin.o(.dtors))
+    KEEP (*(EXCLUDE_FILE (*crtend.o ) .dtors))
+    KEEP (*(SORT(.dtors.*)))
+    KEEP (*(.dtors))
+  }
+  .got		  : { *(.got.plt) *(.got) }
+  .dynamic       : { *(.dynamic) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata     : 
+  {
+    PROVIDE (_SDA_BASE_ = .);
+    *(.sdata) 
+    *(.sdata.*)
+    *(.gnu.linkonce.s.*)
+  }
+  _edata = .;
+  PROVIDE (edata = .);
+  __bss_start = .;
+  .sbss      :
+  {
+    PROVIDE (__sbss_start = .);
+    PROVIDE (___sbss_start = .);
+    *(.dynsbss)
+    *(.sbss)
+    *(.sbss.*)
+    *(.scommon)
+    PROVIDE (__sbss_end = .);
+    PROVIDE (___sbss_end = .);
+  }
+  .plt      : { *(.plt)	}
+  .bss       :
+  {
+   *(.dynbss)
+   *(.bss)
+   *(.bss.*)
+   *(COMMON)
+   /* Align here to ensure that the .bss section occupies space up to
+      _end.  Align after .bss to ensure correct alignment even if the
+      .bss section disappears because there are no input sections.  */
+   . = ALIGN(32 / 8);
+  }
+  . = ALIGN(32 / 8);
+  _end = .;
+  PROVIDE (end = .);
+  /* Stabs debugging sections.  */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /DISCARD/	: { *(.fixup) }
+  /* These must appear regardless of  .  */
+}
Index: /branches/arm/lisp-kernel/linuxppc64/.cvsignore
===================================================================
--- /branches/arm/lisp-kernel/linuxppc64/.cvsignore	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc64/.cvsignore	(revision 13357)
@@ -0,0 +1,1 @@
+*~.*
Index: /branches/arm/lisp-kernel/linuxppc64/Makefile
===================================================================
--- /branches/arm/lisp-kernel/linuxppc64/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc64/Makefile	(revision 13357)
@@ -0,0 +1,95 @@
+#
+#   Copyright (C) 1994-2001 Digitool, Inc
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ../
+RM = /bin/rm
+# Versions of GNU as >= 2.9.1 all seem to work
+# AS = gas-2.9.1
+AS = as
+M4 = m4
+ASFLAGS = -mregnames -mppc64 -a64 -maltivec
+M4FLAGS = -DLINUX -DPPC -DPPC64
+CDEFINES = -DLINUX -D_REENTRANT -DPPC -DPPC64 -D_GNU_SOURCE
+CDEBUG = -g
+COPT = -O2
+# word size issues are a little more relevant on a 64-bit platform
+# than elsewhere, but most gcc format warnings are still nonsense.
+WFORMAT = -Wno-format
+
+# If the linker supports a "--hash-style=" option, use traditional
+# Sysv hash tables.  (If it doesn't support that option, assume
+# that traditional hash tables will be used by default.)
+ld_has_hash_style = $(shell $(LD) --help | grep "hash-style=")
+ifeq ($(ld_has_hash_style),)
+HASH_STYLE=
+else
+HASH_STYLE="-Wl,--hash-style=sysv"
+endif
+
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m64 -o $@
+
+SPOBJ = pad.o ppc-spjump.o ppc-spentry.o ppc-subprims.o
+ASMOBJ = ppc-asmutils.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o ppc-gc.o bits.o  ppc-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o plbt.o ppc_print.o
+KERNELOBJ= $(COBJ) ppc-asmutils.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 ppc-constants.s ppc-macros.s errors.s ppc-uuo.s ppc-constants64.s
+
+CHEADERS = area.h bits.h ppc-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h ppc-constants64.h ppc-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ = $(SPOBJ)
+all:	../../ppccl64
+
+
+# No:
+
+# KSPOBJ=
+# all:	../../ppccl64 ../../subprims.so
+
+OSLIBS = -ldl -lm -lpthread
+
+
+../../ppccl64:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ)
+	$(CC) -m64 $(CDEBUG)  -Wl,--export-dynamic $(HASH_STYLE)  -o $@ -T ./elf64ppc.x $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../ppccl64
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../ppccl64
+	strip -g ../../ppccl64
Index: /branches/arm/lisp-kernel/linuxppc64/elf64ppc.x
===================================================================
--- /branches/arm/lisp-kernel/linuxppc64/elf64ppc.x	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxppc64/elf64ppc.x	(revision 13357)
@@ -0,0 +1,229 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc",
+	      "elf64-powerpc")
+OUTPUT_ARCH(powerpc:common64)
+ENTRY(_start)
+SEARCH_DIR("/usr/local/lib64"); SEARCH_DIR("/lib64"); SEARCH_DIR("/usr/lib64"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
+/* Do we need any of these for elf?
+   __DYNAMIC = 0;    */
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  PROVIDE (__executable_start = 0x00010000); . = 0x00010000 + SIZEOF_HEADERS;
+  .pad : { pad.o(.text) }
+  .subprims ALIGN(0x1000)    :  
+  {
+   ppc-spjump.o(.text)
+   ppc-spentry.o(.text)
+   ppc-subprims.o(.text)
+  }
+  .interp         : { *(.interp) }
+  .hash           : { *(.hash) }
+  .dynsym         : { *(.dynsym) }
+  .dynstr         : { *(.dynstr) }
+  .gnu.version    : { *(.gnu.version) }
+  .gnu.version_d  : { *(.gnu.version_d) }
+  .gnu.version_r  : { *(.gnu.version_r) }
+  .rel.dyn        :
+    {
+      *(.rel.init)
+      *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+      *(.rel.fini)
+      *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+      *(.rel.data.rel.ro*)
+      *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+      *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+      *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+      *(.rel.ctors)
+      *(.rel.dtors)
+      *(.rel.got)
+      *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*)
+      *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*)
+      *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*)
+      *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*)
+      *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+    }
+  .rela.dyn       :
+    {
+      *(.rela.init)
+      *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+      *(.rela.fini)
+      *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+      *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+      *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+      *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+      *(.rela.ctors)
+      *(.rela.dtors)
+      *(.rela.got)
+      *(.rela.toc)
+      *(.rela.opd)
+      *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*)
+      *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*)
+      *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*)
+      *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*)
+      *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+    }
+  .rel.plt        : { *(.rel.plt) }
+  .rela.plt       : { *(.rela.plt) }
+  .rela.tocbss	  : { *(.rela.tocbss) }
+  .init           :
+  {
+    KEEP (*(.init))
+  } =0x60000000
+  .text           :
+  {
+    *(.text .stub .text.* .gnu.linkonce.t.*)
+    KEEP (*(.text.*personality*))
+    /* .gnu.warning sections are handled specially by elf32.em.  */
+    *(.gnu.warning)
+    *(.sfpr .glink)
+  } =0x60000000
+  .fini           :
+  {
+    KEEP (*(.fini))
+  } =0x60000000
+  PROVIDE (__etext = .);
+  PROVIDE (_etext = .);
+  PROVIDE (etext = .);
+  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+  .rodata1        : { *(.rodata1) }
+  .sdata2         : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) }
+  .sbss2          : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
+  .eh_frame_hdr : { *(.eh_frame_hdr) }
+  .eh_frame       : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+  .gcc_except_table   : ONLY_IF_RO { KEEP (*(.gcc_except_table)) *(.gcc_except_table.*) }
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up.  */
+  . = ALIGN (0x10000) - ((0x10000 - .) & (0x10000 - 1)); . = DATA_SEGMENT_ALIGN (0x10000, 0x1000);
+  /* Exception handling  */
+  .eh_frame       : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+  .gcc_except_table   : ONLY_IF_RW { KEEP (*(.gcc_except_table)) *(.gcc_except_table.*) }
+  /* Thread Local Storage sections  */
+  .tdata	  : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss		  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+  /* Ensure the __preinit_array_start label is properly aligned.  We
+     could instead move the label definition inside the section, but
+     the linker would then create the section even if it turns out to
+     be empty, which isn't pretty.  */
+  . = ALIGN(64 / 8);
+  PROVIDE (__preinit_array_start = .);
+  .preinit_array     : { KEEP (*(.preinit_array)) }
+  PROVIDE (__preinit_array_end = .);
+  PROVIDE (__init_array_start = .);
+  .init_array     : { KEEP (*(.init_array)) }
+  PROVIDE (__init_array_end = .);
+  PROVIDE (__fini_array_start = .);
+  .fini_array     : { KEEP (*(.fini_array)) }
+  PROVIDE (__fini_array_end = .);
+  .ctors          :
+  {
+    /* gcc uses crtbegin.o to find the start of
+       the constructors, so we make sure it is
+       first.  Because this is a wildcard, it
+       doesn't matter if the user does not
+       actually link against crtbegin.o; the
+       linker won't look for a file to match a
+       wildcard.  The wildcard also means that it
+       doesn't matter which directory crtbegin.o
+       is in.  */
+    KEEP (*crtbegin*.o(.ctors))
+    /* We don't want to include the .ctor section from
+       the crtend.o file until after the sorted ctors.
+       The .ctor section from the crtend file contains the
+       end of ctors marker and it must be last */
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+    KEEP (*(SORT(.ctors.*)))
+    KEEP (*(.ctors))
+  }
+  .dtors          :
+  {
+    KEEP (*crtbegin*.o(.dtors))
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+    KEEP (*(SORT(.dtors.*)))
+    KEEP (*(.dtors))
+  }
+  .jcr            : { KEEP (*(.jcr)) }
+  .data.rel.ro : { *(.data.rel.ro.local) *(.data.rel.ro*) }
+  .dynamic        : { *(.dynamic) }
+/*  . = DATA_SEGMENT_RELRO_END (0, .); */
+  .data           :
+  {
+    *(.data .data.* .gnu.linkonce.d.*)
+    KEEP (*(.gnu.linkonce.d.*personality*))
+    SORT(CONSTRUCTORS)
+  }
+  .data1          : { *(.data1) }
+  .toc1		 ALIGN(8) : { *(.toc1) }
+  .opd		 ALIGN(8) : { KEEP (*(.opd)) }
+  .got		ALIGN(8) : { *(.got .toc) }
+  /* We want the small data sections together, so single-instruction offsets
+     can access them all, and initialized data all before uninitialized, so
+     we can shorten the on-disk segment size.  */
+  .sdata          :
+  {
+    *(.sdata .sdata.* .gnu.linkonce.s.*)
+  }
+  _edata = .;
+  PROVIDE (edata = .);
+  __bss_start = .;
+  .tocbss	 ALIGN(8) : { *(.tocbss)}
+  .sbss           :
+  {
+    PROVIDE (__sbss_start = .);
+    PROVIDE (___sbss_start = .);
+    *(.dynsbss)
+    *(.sbss .sbss.* .gnu.linkonce.sb.*)
+    *(.scommon)
+    PROVIDE (__sbss_end = .);
+    PROVIDE (___sbss_end = .);
+  }
+  .plt            : { *(.plt) }
+  .bss            :
+  {
+   *(.dynbss)
+   *(.bss .bss.* .gnu.linkonce.b.*)
+   *(COMMON)
+   /* Align here to ensure that the .bss section occupies space up to
+      _end.  Align after .bss to ensure correct alignment even if the
+      .bss section disappears because there are no input sections.  */
+   . = ALIGN(64 / 8);
+  }
+  . = ALIGN(64 / 8);
+  _end = .;
+  PROVIDE (end = .);
+  . = DATA_SEGMENT_END (.);
+  /* Stabs debugging sections.  */
+  .stab          0 : { *(.stab) }
+  .stabstr       0 : { *(.stabstr) }
+  .stab.excl     0 : { *(.stab.excl) }
+  .stab.exclstr  0 : { *(.stab.exclstr) }
+  .stab.index    0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment       0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /DISCARD/ : { *(.note.GNU-stack) }
+}
Index: /branches/arm/lisp-kernel/linuxx8632/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/linuxx8632/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxx8632/.gdbinit	(revision 13357)
@@ -0,0 +1,46 @@
+define pl
+  call print_lisp_object($arg0)
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x3001
+   set $car = *((LispObj *)($l+3))
+   set $l =  *((LispObj *)($l-1))
+   pl $car
+  end
+end
+
+
+define fn
+  pl $edi
+end
+
+define arg_y
+ pl $esi
+end
+
+define arg_z
+ pl $ebx
+end
+
+define offset
+ p (int)$pc-$edi
+end
+
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIG40 pass nostop noprint
+handle SIG41 pass nostop noprint
+handle SIG42 pass nostop noprint
+handle SIGPWR pass nostop noprint
+handle SIGQUIT pass nostop noprint
+
Index: /branches/arm/lisp-kernel/linuxx8632/Makefile
===================================================================
--- /branches/arm/lisp-kernel/linuxx8632/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxx8632/Makefile	(revision 13357)
@@ -0,0 +1,88 @@
+#
+#   Copyright (C) 2008 Clozure Associates and contributors
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ../
+RM = /bin/rm
+AS = as
+M4 = m4
+ASFLAGS = --32
+M4FLAGS = -DLINUX -DX86 -DX8632 -DHAVE_TLS
+CDEFINES = -DLINUX -D_REENTRANT -DX86 -DX8632 -D_GNU_SOURCE -DHAVE_TLS -DUSE_FUTEX #-DGC_INTEGRITY_CHECKING -DDISABLE_EGC
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+# If the linker supports a "--hash-style=" option, use traditional
+# SysV hash tables.  (If it doesn't support that option, assume
+# that traditional hash tables will be used by default.)
+ld_has_hash_style = $(shell $(LD) --help | grep "hash-style=")
+ifeq ($(ld_has_hash_style),)
+HASH_STYLE=
+else
+HASH_STYLE="-Wl,--hash-style=sysv"
+endif
+
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m32 -o $@
+
+SPOBJ = pad.o x86-spjump32.o x86-spentry32.o x86-subprims32.o
+ASMOBJ = x86-asmutils32.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils32.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants32.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants32.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../lx86cl
+
+
+OSLIBS = -ldl -lm -lpthread
+LINK_SCRIPT = # ./elf_x86_32.x
+USE_LINK_SCRIPT = # -T $(LINK_SCRIPT)
+
+../../lx86cl:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile  $(LINK_SCRIPT)
+	$(CC)  -m32 $(CDEBUG)  -Wl,--export-dynamic $(HASH_STYLE) -o $@ $(USE_LINK_SCRIPT) $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../lx86cl
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../lx86cl
+	strip -g ../../lx86cl
Index: /branches/arm/lisp-kernel/linuxx8664/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/linuxx8664/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxx8664/.gdbinit	(revision 13357)
@@ -0,0 +1,83 @@
+define x86_lisp_string
+x/s $arg0-5
+end
+
+define gtra
+br *$r10
+cont
+end
+
+define x86pname
+set $temp=*((long *)((long)($arg0-6)))
+x86_lisp_string $temp
+end
+
+
+define pname
+ x86pname $arg0
+end
+
+define l
+ call print_lisp_object($arg0)
+end
+
+define lw
+ l $r13
+end
+
+define clobber_breakpoint
+  set *(short *)($pc-2)=0x9090
+end
+
+define arg_z
+ l $rsi
+end
+
+define arg_y
+ l $rdi
+end
+
+define arg_x
+ l $r8
+end
+
+define bx
+ l $rbx
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x200b
+   set $car = *((LispObj *)($l+5))
+   set $l =  *((LispObj *)($l-3))
+   l $car
+  end
+end
+
+define lbt
+ call plbt_sp($rbp)
+end
+
+define ada
+ p/x *(all_areas->succ)
+end
+
+define lregs
+ call debug_lisp_registers($arg0,0,0)
+end
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIG40 pass nostop noprint
+handle SIG41 pass nostop noprint
+handle SIG42 pass nostop noprint
+handle SIGPWR pass nostop noprint
+handle SIGQUIT pass nostop noprint
+
Index: /branches/arm/lisp-kernel/linuxx8664/Makefile
===================================================================
--- /branches/arm/lisp-kernel/linuxx8664/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxx8664/Makefile	(revision 13357)
@@ -0,0 +1,88 @@
+#
+#   Copyright (C) 2005 Clozure Associates
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ../
+RM = /bin/rm
+AS = as
+M4 = m4
+ASFLAGS = --64
+M4FLAGS = -DLINUX -DX86 -DX8664 -DHAVE_TLS
+CDEFINES = -DLINUX -D_REENTRANT -DX86 -DX8664 -D_GNU_SOURCE -DHAVE_TLS -DUSE_FUTEX #-DDISABLE_EGC
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+# If the linker supports a "--hash-style=" option, use traditional
+# SysV hash tables.  (If it doesn't support that option, assume
+# that traditional hash tables will be used by default.)
+ld_has_hash_style = $(shell $(LD) --help | grep "hash-style=")
+ifeq ($(ld_has_hash_style),)
+HASH_STYLE=
+else
+HASH_STYLE="-Wl,--hash-style=sysv"
+endif
+
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m64 -o $@
+
+SPOBJ = pad.o x86-spjump64.o x86-spentry64.o x86-subprims64.o
+ASMOBJ = x86-asmutils64.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils64.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants64.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants64.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../lx86cl64
+
+
+OSLIBS = -ldl -lm -lpthread
+LINK_MAP = ./elf_x86_64.x
+USE_LINK_MAP = # -T ./elf_x86_64.x
+
+../../lx86cl64:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile  $(LINK_MAP)
+	$(CC)  -m64 $(CDEBUG)  -Wl,--export-dynamic $(HASH_STYLE) -o $@ $(USE_LINK_MAP) $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../lx86cl64
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../lx86cl64
+	strip -g ../../lx86cl64
Index: /branches/arm/lisp-kernel/linuxx8664/elf_x86_64.x
===================================================================
--- /branches/arm/lisp-kernel/linuxx8664/elf_x86_64.x	(revision 13357)
+++ /branches/arm/lisp-kernel/linuxx8664/elf_x86_64.x	(revision 13357)
@@ -0,0 +1,196 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64",
+	      "elf64-x86-64")
+OUTPUT_ARCH(i386:x86-64)
+ENTRY(_start)
+SEARCH_DIR("/usr/x86_64-linux-gnu/lib64"); SEARCH_DIR("/usr/local/lib64"); SEARCH_DIR("/lib64"); SEARCH_DIR("/usr/lib64"); SEARCH_DIR("/usr/x86_64-linux-gnu/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
+/* Do we need any of these for elf?
+   __DYNAMIC = 0;    */
+SECTIONS
+{
+  /* Read-only sections, merged into text segment: */
+  PROVIDE (__executable_start = 0x400000); . = 0x400000 + SIZEOF_HEADERS;
+  .interp         : { *(.interp) }
+  .hash           : { *(.hash) }
+  .dynsym         : { *(.dynsym) }
+  .dynstr         : { *(.dynstr) }
+  .gnu.version    : { *(.gnu.version) }
+  .gnu.version_d  : { *(.gnu.version_d) }
+  .gnu.version_r  : { *(.gnu.version_r) }
+  .rel.dyn        :
+    {
+      *(.rel.init)
+      *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+      *(.rel.fini)
+      *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+      *(.rel.data.rel.ro*)
+      *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+      *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+      *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+      *(.rel.ctors)
+      *(.rel.dtors)
+      *(.rel.got)
+      *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+    }
+  .rela.dyn       :
+    {
+      *(.rela.init)
+      *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+      *(.rela.fini)
+      *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+      *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+      *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+      *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+      *(.rela.ctors)
+      *(.rela.dtors)
+      *(.rela.got)
+      *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+    }
+  .rel.plt        : { *(.rel.plt) }
+  .rela.plt       : { *(.rela.plt) }
+  .init           :
+  {
+    KEEP (*(.init))
+  } =0x90909090
+  .plt            : { *(.plt) }
+  .subprims 0x410000:
+  {
+    x86-spjump64.o(.text)
+    x86-spentry64.o(.text)
+    x86-subprims64.o(.text)
+  }
+  .text           :
+  {
+    *(.text .stub .text.* .gnu.linkonce.t.*)
+    KEEP (*(.text.*personality*))
+    /* .gnu.warning sections are handled specially by elf32.em.  */
+    *(.gnu.warning)
+  } =0x90909090
+  .fini           :
+  {
+    KEEP (*(.fini))
+  } =0x90909090
+  PROVIDE (__etext = .);
+  PROVIDE (_etext = .);
+  PROVIDE (etext = .);
+  .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+  .rodata1        : { *(.rodata1) }
+  .eh_frame_hdr : { *(.eh_frame_hdr) }
+  .eh_frame       : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+  .gcc_except_table   : ONLY_IF_RO { KEEP (*(.gcc_except_table)) *(.gcc_except_table.*) }
+  /* Adjust the address for the data segment.  We want to adjust up to
+     the same address within the page on the next page up.  */
+  . = ALIGN (0x100000) - ((0x100000 - .) & (0x100000 - 1)); . = DATA_SEGMENT_ALIGN (0x100000, 0x1000);
+  /* Exception handling  */
+  .eh_frame       : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+  .gcc_except_table   : ONLY_IF_RW { KEEP (*(.gcc_except_table)) *(.gcc_except_table.*) }
+  /* Thread Local Storage sections  */
+  .tdata	  : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss		  : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+  /* Ensure the __preinit_array_start label is properly aligned.  We
+     could instead move the label definition inside the section, but
+     the linker would then create the section even if it turns out to
+     be empty, which isn't pretty.  */
+  . = ALIGN(64 / 8);
+  PROVIDE (__preinit_array_start = .);
+  .preinit_array     : { KEEP (*(.preinit_array)) }
+  PROVIDE (__preinit_array_end = .);
+  PROVIDE (__init_array_start = .);
+  .init_array     : { KEEP (*(.init_array)) }
+  PROVIDE (__init_array_end = .);
+  PROVIDE (__fini_array_start = .);
+  .fini_array     : { KEEP (*(.fini_array)) }
+  PROVIDE (__fini_array_end = .);
+  .ctors          :
+  {
+    /* gcc uses crtbegin.o to find the start of
+       the constructors, so we make sure it is
+       first.  Because this is a wildcard, it
+       doesn't matter if the user does not
+       actually link against crtbegin.o; the
+       linker won't look for a file to match a
+       wildcard.  The wildcard also means that it
+       doesn't matter which directory crtbegin.o
+       is in.  */
+    KEEP (*crtbegin*.o(.ctors))
+    /* We don't want to include the .ctor section from
+       from the crtend.o file until after the sorted ctors.
+       The .ctor section from the crtend file contains the
+       end of ctors marker and it must be last */
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
+    KEEP (*(SORT(.ctors.*)))
+    KEEP (*(.ctors))
+  }
+  .dtors          :
+  {
+    KEEP (*crtbegin*.o(.dtors))
+    KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
+    KEEP (*(SORT(.dtors.*)))
+    KEEP (*(.dtors))
+  }
+  .jcr            : { KEEP (*(.jcr)) }
+  .data.rel.ro : { *(.data.rel.ro.local) *(.data.rel.ro*) }
+  .dynamic        : { *(.dynamic) }
+  .got            : { *(.got) }
+  . = DATA_SEGMENT_RELRO_END (24, .);
+  .got.plt        : { *(.got.plt) }
+  .data           :
+  {
+    *(.data .data.* .gnu.linkonce.d.*)
+    KEEP (*(.gnu.linkonce.d.*personality*))
+    SORT(CONSTRUCTORS)
+  }
+  .data1          : { *(.data1) }
+  _edata = .;
+  PROVIDE (edata = .);
+  __bss_start = .;
+  .bss            :
+  {
+   *(.dynbss)
+   *(.bss .bss.* .gnu.linkonce.b.*)
+   *(COMMON)
+   /* Align here to ensure that the .bss section occupies space up to
+      _end.  Align after .bss to ensure correct alignment even if the
+      .bss section disappears because there are no input sections.  */
+   . = ALIGN(64 / 8);
+  }
+  . = ALIGN(64 / 8);
+  _end = .;
+  PROVIDE (end = .);
+  . = DATA_SEGMENT_END (.);
+  /* Stabs debugging sections.  */
+  .stab          0 : { *(.stab) }
+  .stabstr       0 : { *(.stabstr) }
+  .stab.excl     0 : { *(.stab.excl) }
+  .stab.exclstr  0 : { *(.stab.exclstr) }
+  .stab.index    0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment       0 : { *(.comment) }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section so we begin them at 0.  */
+  /* DWARF 1 */
+  .debug          0 : { *(.debug) }
+  .line           0 : { *(.line) }
+  /* GNU DWARF 1 extensions */
+  .debug_srcinfo  0 : { *(.debug_srcinfo) }
+  .debug_sfnames  0 : { *(.debug_sfnames) }
+  /* DWARF 1.1 and DWARF 2 */
+  .debug_aranges  0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  /* DWARF 2 */
+  .debug_info     0 : { *(.debug_info .gnu.linkonce.wi.*) }
+  .debug_abbrev   0 : { *(.debug_abbrev) }
+  .debug_line     0 : { *(.debug_line) }
+  .debug_frame    0 : { *(.debug_frame) }
+  .debug_str      0 : { *(.debug_str) }
+  .debug_loc      0 : { *(.debug_loc) }
+  .debug_macinfo  0 : { *(.debug_macinfo) }
+  /* SGI/MIPS DWARF 2 extensions */
+  .debug_weaknames 0 : { *(.debug_weaknames) }
+  .debug_funcnames 0 : { *(.debug_funcnames) }
+  .debug_typenames 0 : { *(.debug_typenames) }
+  .debug_varnames  0 : { *(.debug_varnames) }
+  /DISCARD/ : { *(.note.GNU-stack) }
+}
+
Index: /branches/arm/lisp-kernel/lisp-debug.c
===================================================================
--- /branches/arm/lisp-kernel/lisp-debug.c	(revision 13357)
+++ /branches/arm/lisp-kernel/lisp-debug.c	(revision 13357)
@@ -0,0 +1,1256 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+#include "area.h"
+#include "Threads.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+
+#ifdef WINDOWS
+#include <fcntl.h>
+#else
+#include <sys/socket.h>
+#include <dlfcn.h>
+#endif
+#include <sys/stat.h>
+
+FILE *dbgout = NULL;
+
+typedef enum {
+  debug_continue,		/* stay in the repl */
+  debug_exit_success,		/* return 0 from lisp_Debugger */
+  debug_exit_fail,		/* return non-zero from lisp_Debugger */
+  debug_kill
+} debug_command_return;
+
+
+Boolean
+open_debug_output(int fd)
+{
+  FILE *f = fdopen(fd, "w");
+  
+  if (f) {
+    if (setvbuf(f, NULL, _IONBF, 0) == 0) {
+#ifdef WINDOWS
+      if (fileno(stdin) < 0) {
+        stdin->_file = 0;
+      }
+#endif
+      dbgout = f;
+      return true;
+    }
+    fclose(f);
+  }
+  return false;
+}
+
+
+typedef debug_command_return (*debug_command) (ExceptionInformation *,
+					       siginfo_t *,
+					       int);
+
+#define DEBUG_COMMAND_FLAG_REQUIRE_XP 1 /* function  */
+#define DEBUG_COMMAND_FLAG_AUX_REGNO  (2 | DEBUG_COMMAND_FLAG_REQUIRE_XP)
+#define DEBUG_COMMAND_FLAG_AUX_SPR (4 | DEBUG_COMMAND_FLAG_REQUIRE_XP)
+#define DEBUG_COMMAND_REG_FLAGS 7
+#define DEBUG_COMMAND_FLAG_EXCEPTION_ENTRY_ONLY 8
+#define DEBUG_COMMAND_FLAG_EXCEPTION_REASON_ARG 16
+
+typedef struct {
+  debug_command f;
+  char *help_text;
+  unsigned flags;
+  char *aux_prompt;
+  int c;
+} debug_command_entry;
+
+
+extern
+debug_command_entry debug_command_entries[];
+
+Boolean lisp_debugger_in_foreign_code = false;
+
+#ifndef WINDOWS
+Boolean
+stdin_is_dev_null()
+{
+  struct stat fd0stat, devnullstat;
+
+  if (fstat(fileno(stdin),&fd0stat)) {
+    return true;
+  }
+  if (stat("/dev/null",&devnullstat)) {
+    return true;
+  }
+  return ((fd0stat.st_ino == devnullstat.st_ino) &&
+          (fd0stat.st_dev == devnullstat.st_dev));
+}
+#endif
+
+#ifdef WINDOWS
+Boolean
+stdin_is_dev_null()
+{
+  HANDLE stdIn;
+  stdIn = GetStdHandle(STD_INPUT_HANDLE);
+  return (stdIn == NULL);
+}
+#endif
+
+
+
+
+char *
+foreign_name_and_offset(natural addr, int *delta)
+{
+#ifndef WINDOWS
+  Dl_info info;
+#endif
+  char *ret = NULL;
+
+  if (delta) {
+    *delta = 0;
+  }
+#ifndef WINDOWS
+  if (dladdr((void *)addr, &info)) {
+    ret = (char *)info.dli_sname;
+    if (delta) {
+      *delta = ((natural)addr - (natural)info.dli_saddr);
+    }
+  }
+#endif
+  return ret;
+}
+
+
+#if defined(LINUX) || defined(SOLARIS)
+#define fpurge __fpurge
+#endif
+
+#ifdef WINDOWS
+void
+fpurge (FILE* file)
+{
+}
+#endif
+
+int
+readc()
+{
+  unsigned tries = 1000;
+  int c;
+
+  while (tries) {
+    c = getchar();
+    switch(c) {
+    case '\n':
+      continue;
+    case '\r':
+      continue;
+    case EOF:
+      if (ferror(stdin)) {
+	if ((errno == EINTR) || (errno == EIO)) {
+	  clearerr(stdin);
+	  tries--;
+	  continue;
+	}
+      }
+      /* fall through */
+    default:
+      return c;
+    }
+  }
+  return EOF;
+}
+
+#ifdef X8664
+#ifdef LINUX
+char* Iregnames[] = {"r8 ","r9 ","r10","r11","r12","r13","r14","r15",
+		     "rdi","rsi","rbp", "rbx", "rdx", "rax", "rcx","rsp"};
+#endif
+#ifdef SOLARIS
+char* Iregnames[] = {"r15 ","r14 ","r13","r12","r11","r10","r9 ","r8 ",
+		     "rdi","rsi","rbp", "rbx", "rdx", "rcx", "rax","rsp"};
+#endif
+#ifdef FREEBSD
+char* Iregnames[] = {"???", "rdi", "rsi", "rdx", "rcx", "r8 ", "r9 ", "rax",
+                     "rbx", "rbp", "r10", "r11", "r12", "r13", "r14", "r15",
+                     "???", "???", "???", "???", "???", "???", "???", "rsp"};
+#endif
+#ifdef DARWIN
+char* Iregnames[] = {"rax", "rbx", "rcx", "rdx", "rdi", "rsi",
+                     "rbp", "rsp", "r8 ", "r9 ", "r10", "r11", "r12", "r13",
+                     "r14", "r15", "rip", "rfl"};
+#endif
+#ifdef WINDOWS
+char* Iregnames[] = {"rax ","rcx ","rdx","rbx","rsp","rbp","rsi","rdi",
+		     "r8","r9","r10", "r11", "r12", "r13", "r14","r15"};
+#endif
+#endif
+
+#ifdef X8632
+#ifdef DARWIN
+char *Iregnames[] = {"eax", "ebx", "ecx", "edx", "edi", "esi",
+		     "ebp", "???", "efl", "eip"};
+#endif
+#ifdef LINUX
+char *Iregnames[] = {"???", "???", "???", "???",
+                     "edi", "esi", "ebp", "esp",
+                     "ebx", "edx", "ecx", "eax",
+                     "???", "???", "eip", "???", "efl"};
+#endif
+#ifdef WINDOWS
+char *Iregnames[] = {"edi", "esi", "ebx", "edx", "ecx", "eax",
+                     "ebp", "eip", "???", "efl", "esp"};
+#endif
+#ifdef FREEBSD
+char *Iregnames[] = {"???", "???", "???", "???", "???",
+                     "edi", "esi", "ebp", "ebx", "edx", 
+		     "ecx", "eax", "???", "???", "eip",
+		     "???", "efl", "esp"};
+#endif
+#ifdef SOLARIS
+char *Iregnames[] = {"???", "???", "???", "???", "???",
+                     "edi", "esi", "ebp", "???", "ebx",
+                     "edx", "ecx", "eax", "???", "???",
+                     "eip", "???", "efl", "esp"};
+#endif
+#endif
+
+#ifdef X8632
+int bit_for_regnum(int r)
+{
+  switch (r) {
+  case REG_EAX: return 1<<0;
+  case REG_ECX: return 1<<1;
+  case REG_EDX: return 1<<2;
+  case REG_EBX: return 1<<3;
+  case REG_ESP: return 1<<4;
+  case REG_EBP: return 1<<5;
+  case REG_ESI: return 1<<6;
+  case REG_EDI: return 1<<7;
+  }
+}
+#endif
+
+void
+show_lisp_register(ExceptionInformation *xp, char *label, int r)
+{
+
+  extern char* print_lisp_object(LispObj);
+
+  LispObj val = xpGPR(xp, r);
+
+#ifdef PPC
+  fprintf(dbgout, "r%02d (%s) = %s\n", r, label, print_lisp_object(val));
+#endif
+#ifdef X8664
+  fprintf(dbgout, "%%%s (%s) = %s\n",Iregnames[r], label, print_lisp_object(val));
+#endif
+#ifdef X8632
+  {
+    TCR *tcr = get_tcr(false);
+    char *s;
+
+    if (r == REG_EDX && (xpGPR(xp, REG_EFL) & EFL_DF))
+      s = "marked as unboxed (DF set)";
+    else if (tcr && (tcr->node_regs_mask & bit_for_regnum(r)) == 0)
+      s = "marked as unboxed (node_regs_mask)";
+    else
+      s = print_lisp_object(val);
+
+    fprintf(dbgout, "%%%s (%s) = %s\n", Iregnames[r], label, s);
+  }
+#endif
+
+}
+
+
+void
+describe_memfault(ExceptionInformation *xp, siginfo_t *info)
+{
+#ifdef PPC
+  void *addr = (void *)xpDAR(xp);
+  natural dsisr = xpDSISR(xp);
+
+  fprintf(dbgout, "%s operation to %s address 0x%lx\n",
+	  dsisr & (1<<25) ? "Write" : "Read",
+	  dsisr & (1<<27) ? "protected" : "unmapped",
+	  addr);
+#endif
+}
+
+#ifdef PPC
+void
+describe_ppc_illegal(ExceptionInformation *xp)
+{
+  pc where = xpPC(xp);
+  opcode the_uuo = *where;
+  Boolean described = false;
+
+  if (IS_UUO(the_uuo)) {
+    unsigned 
+      minor = UUO_MINOR(the_uuo),
+      errnum = 0x3ff & (the_uuo >> 16);
+
+    switch(minor) {
+    case UUO_INTERR:
+      switch (errnum) {
+      case error_udf_call:
+        fprintf(dbgout, "ERROR: undefined function call: %s\n",
+                print_lisp_object(xpGPR(xp,fname)));
+        described = true;
+        break;
+        
+      default:
+        fprintf(dbgout, "ERROR: lisp error %d\n", errnum);
+        described = true;
+        break;
+      }
+      break;
+      
+    default:
+      break;
+    }
+  }
+  if (!described) {
+    fprintf(dbgout, "Illegal instruction (0x%08x) at 0x%lx\n",
+            the_uuo, where);
+  }
+}
+#endif
+
+#ifdef PPC
+void
+describe_ppc_trap(ExceptionInformation *xp)
+{
+  pc where = xpPC(xp);
+  opcode the_trap = *where, instr;
+  int err_arg2, ra, rs;
+  Boolean identified = false;
+
+  if ((the_trap & OP_MASK) == OP(major_opcode_TRI)) {
+    /* TWI/TDI.  If the RA field is "nargs", that means that the
+       instruction is either a number-of-args check or an
+       event-poll.  Otherwise, the trap is some sort of
+       typecheck. */
+
+    if (RA_field(the_trap) == nargs) {
+      switch (TO_field(the_trap)) {
+      case TO_NE:
+	if (xpGPR(xp, nargs) < D_field(the_trap)) {
+	  fprintf(dbgout, "Too few arguments (no opt/rest)\n");
+	} else {
+	  fprintf(dbgout, "Too many arguments (no opt/rest)\n");
+	}
+	identified = true;
+	break;
+	
+      case TO_GT:
+	fprintf(dbgout, "Event poll !\n");
+	identified = true;
+	break;
+	
+      case TO_HI:
+	fprintf(dbgout, "Too many arguments (with opt)\n");
+	identified = true;
+	break;
+	
+      case TO_LT:
+	fprintf(dbgout, "Too few arguments (with opt/rest/key)\n");
+	identified = true;
+	break;
+	
+      default:                /* some weird trap, not ours. */
+	identified = false;
+	break;
+      }
+    } else {
+      /* A type or boundp trap of some sort. */
+      switch (TO_field(the_trap)) {
+      case TO_EQ:
+	/* Boundp traps are of the form:
+	   treqi rX,unbound
+	   where some preceding instruction is of the form:
+	   lwz/ld rX,symbol.value(rY).
+	   The error message should try to say that rY is unbound. */
+	
+	if (D_field(the_trap) == unbound) {
+#ifdef PPC64
+	  instr = scan_for_instr(LD_instruction(RA_field(the_trap),
+                                                unmasked_register,
+                                                offsetof(lispsymbol,vcell)-fulltag_misc),
+				 D_RT_IMM_MASK,
+				 where);
+#else
+	  instr = scan_for_instr(LWZ_instruction(RA_field(the_trap),
+						 unmasked_register,
+						 offsetof(lispsymbol,vcell)-fulltag_misc),
+				 D_RT_IMM_MASK,
+				 where);
+#endif
+	  if (instr) {
+	    ra = RA_field(instr);
+	    if (lisp_reg_p(ra)) {
+	      fprintf(dbgout, "Unbound variable: %s\n",
+		      print_lisp_object(xpGPR(xp,ra)));
+	      identified = true;	
+	    }
+	  }
+	}
+	break;
+	
+      case TO_NE:
+	/* A type check.  If the type (the immediate field of the trap
+	   instruction) is a header type, an "lbz
+	   rX,misc_header_offset(rY)" should precede it, in which case
+	   we say that "rY is not of header type <type>."  If the type
+	   is not a header type, then rX should have been set by a
+	   preceding "clrlwi rX,rY,29/30".  In that case, scan
+	   backwards for an RLWINM instruction that set rX and report
+	   that rY isn't of the indicated type. */
+	err_arg2 = D_field(the_trap);
+	if (nodeheader_tag_p(err_arg2) ||
+	    immheader_tag_p(err_arg2)) {
+	  instr = scan_for_instr(LBZ_instruction(RA_field(the_trap),
+						 unmasked_register,
+						 misc_subtag_offset),
+				 D_RT_IMM_MASK,
+				 where);
+	  if (instr) {
+	    ra = RA_field(instr);
+	    if (lisp_reg_p(ra)) {
+	      fprintf(dbgout, "value 0x%lX is not of the expected header type 0x%02X\n", xpGPR(xp, ra), err_arg2);
+	      identified = true;
+	    }
+	  }
+	} else {		
+	  /* Not a header type, look for rlwinm whose RA field matches the_trap's */
+	  instr = scan_for_instr((OP(major_opcode_RLWINM) | (the_trap & RA_MASK)),
+				 (OP_MASK | RA_MASK),
+				 where);
+	  if (instr) {
+	    rs = RS_field(instr);
+	    if (lisp_reg_p(rs)) {
+	      fprintf(dbgout, "value 0x%lX is not of the expected type 0x%02X\n",
+		      xpGPR(xp, rs), err_arg2);
+	      identified = true;
+	    }
+	  }
+	}
+	break;
+      }
+    }
+  } else {
+    /* a "TW <to>,ra,rb" instruction."
+       twltu sp,rN is stack-overflow on SP.
+       twgeu rX,rY is subscript out-of-bounds, which was preceded
+       by an "lwz rM,misc_header_offset(rN)" instruction.
+       rM may or may not be the same as rY, but no other header
+       would have been loaded before the trap. */
+    switch (TO_field(the_trap)) {
+    case TO_LO:
+      if (RA_field(the_trap) == sp) {
+	fprintf(dbgout, "Stack overflow! Run away! Run away!\n");
+	identified = true;
+      }
+      break;
+      
+    case (TO_HI|TO_EQ):
+      instr = scan_for_instr(OP(major_opcode_LWZ) | (D_MASK & misc_header_offset),
+			     (OP_MASK | D_MASK),
+			     where);
+      if (instr) {
+	ra = RA_field(instr);
+	if (lisp_reg_p(ra)) {
+	  fprintf(dbgout, "Bad index %d for vector %lX length %d\n",
+		  unbox_fixnum(xpGPR(xp, RA_field(the_trap))),
+		  xpGPR(xp, ra),
+		  unbox_fixnum(xpGPR(xp, RB_field(the_trap))));
+	  identified = true;
+	}
+      }
+      break;
+    }
+  }
+
+  if (!identified) {
+    fprintf(dbgout, "Unknown trap: 0x%08x\n", the_trap);
+  }
+
+
+}
+#endif
+
+debug_command_return
+debug_lisp_registers(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  if (lisp_debugger_in_foreign_code == false) {
+#ifdef PPC
+    TCR *xpcontext = (TCR *)ptr_from_lispobj(xpGPR(xp, rcontext));
+
+    fprintf(dbgout, "rcontext = 0x%lX ", xpcontext);
+    if (!active_tcr_p(xpcontext)) {
+      fprintf(dbgout, "(INVALID)\n");
+    } else {
+      fprintf(dbgout, "\nnargs = %d\n", xpGPR(xp, nargs) >> fixnumshift);
+      show_lisp_register(xp, "fn", fn);
+      show_lisp_register(xp, "arg_z", arg_z);
+      show_lisp_register(xp, "arg_y", arg_y);
+      show_lisp_register(xp, "arg_x", arg_x);
+      show_lisp_register(xp, "temp0", temp0);
+      show_lisp_register(xp, "temp1/next_method_context", temp1);
+      show_lisp_register(xp, "temp2/nfn", temp2);
+      show_lisp_register(xp, "temp3/fname", temp3);
+      /*    show_lisp_register(xp, "new_fn", new_fn); */
+      show_lisp_register(xp, "save0", save0);
+      show_lisp_register(xp, "save1", save1);
+      show_lisp_register(xp, "save2", save2);
+      show_lisp_register(xp, "save3", save3);
+      show_lisp_register(xp, "save4", save4);
+      show_lisp_register(xp, "save5", save5);
+      show_lisp_register(xp, "save6", save6);
+      show_lisp_register(xp, "save7", save7);
+    }
+#endif
+#ifdef X8664
+
+    show_lisp_register(xp, "arg_z", Iarg_z);
+    show_lisp_register(xp, "arg_y", Iarg_y);
+    show_lisp_register(xp, "arg_x", Iarg_x);
+    fprintf(dbgout,"------\n");
+    show_lisp_register(xp, "fn", Ifn);
+    fprintf(dbgout,"------\n");
+    show_lisp_register(xp, "save0", Isave0);
+    show_lisp_register(xp, "save1", Isave1);
+    show_lisp_register(xp, "save2", Isave2);
+    show_lisp_register(xp, "save3", Isave3);
+    fprintf(dbgout,"------\n");
+    show_lisp_register(xp, "temp0", Itemp0);
+    show_lisp_register(xp, "temp1", Itemp1);
+    show_lisp_register(xp, "temp2", Itemp2);
+    fprintf(dbgout,"------\n");
+    if (tag_of(xpGPR(xp,Inargs)) == tag_fixnum) {
+      fprintf(dbgout,"%%rcx (nargs) = %ld (maybe)\n", unbox_fixnum(xpGPR(xp,Inargs)&0xffff));
+    }
+#endif
+
+#ifdef X8632
+  show_lisp_register(xp, "arg_z", Iarg_z);
+  show_lisp_register(xp, "arg_y", Iarg_y);
+  fprintf(dbgout,"------\n");
+  show_lisp_register(xp, "fn", Ifn);
+  fprintf(dbgout,"------\n");
+  show_lisp_register(xp, "temp0", Itemp0);
+  show_lisp_register(xp, "temp1", Itemp1);
+  fprintf(dbgout,"------\n");
+  if (tag_of(xpGPR(xp,Inargs)) == tag_fixnum) {
+    fprintf(dbgout,"%%edx (nargs) = %d (maybe)\n", unbox_fixnum(xpGPR(xp,Inargs)));
+  }
+#endif
+  }
+  
+  return debug_continue;
+}
+
+#ifdef PPC
+debug_command_return
+debug_advance_pc(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  adjust_exception_pc(xp,4);
+  return debug_continue;
+}
+#endif
+
+debug_command_return
+debug_identify_exception(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+#ifdef PPC
+  pc program_counter = xpPC(xp);
+  opcode instruction = 0;
+
+  switch (arg) {
+  case SIGILL:
+  case SIGTRAP:
+    instruction = *program_counter;
+    if (major_opcode_p(instruction, major_opcode_TRI) ||
+	X_opcode_p(instruction,major_opcode_X31,minor_opcode_TR)) {
+      describe_ppc_trap(xp);
+    } else {
+      describe_ppc_illegal(xp);
+    }
+    break;
+  case SIGSEGV:
+  case SIGBUS:
+    describe_memfault(xp, info);
+    break;
+  default:
+    break;
+  }
+#endif
+  return debug_continue;
+}
+
+char *
+debug_get_string_value(char *prompt)
+{
+  static char buf[128];
+  char *p, *res;
+
+  do {
+    fpurge(stdin);
+    fprintf(dbgout, "\n %s :",prompt);
+    buf[0] = 0;
+    res = fgets(buf, sizeof(buf), stdin);
+  } while (0);
+  p = res ? strchr(res, '\n') : NULL;
+  if (p) {
+    *p = 0;
+    return buf;
+  }
+  return NULL;
+}
+
+natural
+debug_get_natural_value(char *prompt)
+{
+  char s[32], *res;
+  int n;
+  natural val;
+
+  do {
+    fpurge(stdin);
+    fprintf(dbgout, "\n  %s :", prompt);
+    s[0]=0;
+    res = fgets(s, 24, stdin);
+    n = sscanf(s, "%lu", &val);
+  } while (n != 1);
+  return val;
+}
+
+unsigned
+debug_get_u5_value(char *prompt)
+{
+  char s[32], *res;
+  int n;
+  unsigned val;
+
+  do {
+    fpurge(stdin);
+    fprintf(dbgout, "\n  %s :", prompt);
+    res = fgets(s, 24, stdin);
+    n = sscanf(res, "%i", &val);
+  } while ((n != 1) || (val > 31));
+  return val;
+}
+
+debug_command_return
+debug_show_symbol(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  char *pname = debug_get_string_value("symbol name");
+  extern void *plsym(ExceptionInformation *,char*);
+  
+  if (pname != NULL) {
+    plsym(xp, pname);
+  }
+  return debug_continue;
+}
+
+debug_command_return
+debug_thread_info(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  TCR * tcr = get_tcr(false);
+  
+  if (tcr) {
+    area *vs_area = tcr->vs_area, *cs_area = tcr->cs_area;
+
+    fprintf(dbgout, "Current Thread Context Record (tcr) = 0x" LISP "\n", tcr);
+    fprintf(dbgout, "Control (C) stack area:  low = 0x" LISP ", high = 0x" LISP "\n",
+            (cs_area->low), (cs_area->high));
+    fprintf(dbgout, "Value (lisp) stack area: low = 0x" LISP ", high = 0x" LISP "\n",
+            (u64_t)(natural)(vs_area->low), (u64_t)(natural)vs_area->high);
+    fprintf(dbgout, "Exception stack pointer = 0x" LISP "\n",
+#ifdef PPC
+            (u64_t) (natural)(xpGPR(xp,1))
+#endif
+#ifdef X86
+            (u64_t) (natural)(xpGPR(xp,Isp))
+#endif
+            );
+  }
+  return debug_continue;
+}
+      
+
+debug_command_return
+debug_set_gpr(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  char buf[32];
+  natural val;
+
+  sprintf(buf, "value for GPR %d", arg);
+  val = debug_get_natural_value(buf);
+  xpGPR(xp,arg) = val;
+  return debug_continue;
+}
+
+debug_command_return
+debug_show_registers(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+
+
+#ifdef PPC
+#ifdef PPC64
+  int a, b;
+  for (a = 0, b = 16; a < 16; a++, b++) {
+    fprintf(dbgout,"r%02d = 0x%016lX    r%02d = 0x%016lX\n",
+	    a, xpGPR(xp, a),
+	    b, xpGPR(xp, b));
+  }
+  
+  fprintf(dbgout, "\n PC = 0x%016lX     LR = 0x%016lX\n",
+          xpPC(xp), xpLR(xp));
+  fprintf(dbgout, "CTR = 0x%016lX    CCR = 0x%08X\n",
+          xpCTR(xp), xpCCR(xp));
+  fprintf(dbgout, "XER = 0x%08X            MSR = 0x%016lX\n",
+          xpXER(xp), xpMSR(xp));
+  fprintf(dbgout,"DAR = 0x%016lX  DSISR = 0x%08X\n",
+	  xpDAR(xp), xpDSISR(xp));
+#else
+  int a, b, c, d;
+  for (a = 0, b = 8, c = 16, d = 24; a < 8; a++, b++, c++, d++) {
+    fprintf(dbgout,"r%02d = 0x%08X  r%02d = 0x%08X  r%02d = 0x%08X  r%02d = 0x%08X\n",
+	    a, xpGPR(xp, a),
+	    b, xpGPR(xp, b),
+	    c, xpGPR(xp, c),
+	    d, xpGPR(xp, d));
+  }
+  fprintf(dbgout, "\n PC = 0x%08X   LR = 0x%08X  CTR = 0x%08X  CCR = 0x%08X\n",
+	  xpPC(xp), xpLR(xp), xpCTR(xp), xpCCR(xp));
+  fprintf(dbgout, "XER = 0x%08X  MSR = 0x%08X  DAR = 0x%08X  DSISR = 0x%08X\n",
+	  xpXER(xp), xpMSR(xp), xpDAR(xp), xpDSISR(xp));
+#endif
+#endif
+
+#ifdef X8664
+  fprintf(dbgout,"%%rax = 0x" ZLISP "      %%r8  = 0x" ZLISP "\n", xpGPR(xp,REG_RAX),xpGPR(xp,REG_R8));
+  fprintf(dbgout,"%%rcx = 0x" ZLISP "      %%r9  = 0x" ZLISP "\n", xpGPR(xp,REG_RCX),xpGPR(xp,REG_R9));
+  fprintf(dbgout,"%%rdx = 0x" ZLISP "      %%r10 = 0x" ZLISP "\n", xpGPR(xp,REG_RDX),xpGPR(xp,REG_R10));
+  fprintf(dbgout,"%%rbx = 0x" ZLISP "      %%r11 = 0x" ZLISP "\n", xpGPR(xp,REG_RBX),xpGPR(xp,REG_R11));
+  fprintf(dbgout,"%%rsp = 0x" ZLISP "      %%r12 = 0x" ZLISP "\n", xpGPR(xp,REG_RSP),xpGPR(xp,REG_R12));
+  fprintf(dbgout,"%%rbp = 0x" ZLISP "      %%r13 = 0x" ZLISP "\n", xpGPR(xp,REG_RBP),xpGPR(xp,REG_R13));
+  fprintf(dbgout,"%%rsi = 0x" ZLISP "      %%r14 = 0x" ZLISP "\n", xpGPR(xp,REG_RSI),xpGPR(xp,REG_R14));
+  fprintf(dbgout,"%%rdi = 0x" ZLISP "      %%r15 = 0x" ZLISP "\n", xpGPR(xp,REG_RDI),xpGPR(xp,REG_R15));
+  fprintf(dbgout,"%%rip = 0x" ZLISP "   %%rflags = 0x%08lx\n",
+	  xpGPR(xp, Iip), eflags_register(xp));
+#endif
+
+#ifdef X8632
+  unsigned short rcs,rds,res,rfs,rgs,rss;
+#ifdef DARWIN
+  rcs = xp->uc_mcontext->__ss.__cs;
+  rds = xp->uc_mcontext->__ss.__ds;
+  res = xp->uc_mcontext->__ss.__es;
+  rfs = xp->uc_mcontext->__ss.__fs;
+  rgs = xp->uc_mcontext->__ss.__gs;
+  rss = xp->uc_mcontext->__ss.__ss;
+#define DEBUG_SHOW_X86_SEGMENT_REGISTERS
+#endif
+#ifdef LINUX
+  rcs = xp->uc_mcontext.gregs[REG_CS];
+  rds = xp->uc_mcontext.gregs[REG_DS];
+  res = xp->uc_mcontext.gregs[REG_ES];
+  rfs = xp->uc_mcontext.gregs[REG_FS];
+  rgs = xp->uc_mcontext.gregs[REG_GS];
+  rss = xp->uc_mcontext.gregs[REG_SS];
+#define DEBUG_SHOW_X86_SEGMENT_REGISTERS
+#endif
+#ifdef FREEBSD
+  rcs = xp->uc_mcontext.mc_cs;
+  rds = xp->uc_mcontext.mc_ds;
+  res = xp->uc_mcontext.mc_es;
+  rfs = xp->uc_mcontext.mc_fs;
+  rgs = xp->uc_mcontext.mc_gs;
+  rss = xp->uc_mcontext.mc_ss;
+#define DEBUG_SHOW_X86_SEGMENT_REGISTERS
+#endif
+#ifdef SOLARIS
+  rcs = xp->uc_mcontext.gregs[CS];
+  rds = xp->uc_mcontext.gregs[DS];
+  res = xp->uc_mcontext.gregs[ES];
+  rfs = xp->uc_mcontext.gregs[FS];
+  rgs = xp->uc_mcontext.gregs[GS];
+  rss = xp->uc_mcontext.gregs[SS];
+#define DEBUG_SHOW_X86_SEGMENT_REGISTERS
+#endif
+#ifdef WINDOWS
+  rcs = xp->SegCs;
+  rds = xp->SegDs;
+  res = xp->SegEs;
+  rfs = xp->SegFs;
+  rgs = xp->SegGs;
+  rss = xp->SegSs;
+#define DEBUG_SHOW_X86_SEGMENT_REGISTERS
+#endif
+
+
+
+  fprintf(dbgout, "%%eax = 0x" ZLISP "\n", xpGPR(xp, REG_EAX));
+  fprintf(dbgout, "%%ecx = 0x" ZLISP "\n", xpGPR(xp, REG_ECX));
+  fprintf(dbgout, "%%edx = 0x" ZLISP "\n", xpGPR(xp, REG_EDX));
+  fprintf(dbgout, "%%ebx = 0x" ZLISP "\n", xpGPR(xp, REG_EBX));
+  fprintf(dbgout, "%%esp = 0x" ZLISP "\n", xpGPR(xp, REG_ESP));
+  fprintf(dbgout, "%%ebp = 0x" ZLISP "\n", xpGPR(xp, REG_EBP));
+  fprintf(dbgout, "%%esi = 0x" ZLISP "\n", xpGPR(xp, REG_ESI));
+  fprintf(dbgout, "%%edi = 0x" ZLISP "\n", xpGPR(xp, REG_EDI));
+  fprintf(dbgout, "%%eip = 0x" ZLISP "\n", xpGPR(xp, REG_EIP));
+  fprintf(dbgout, "%%eflags = 0x" ZLISP "\n", xpGPR(xp, REG_EFL));
+#ifdef DEBUG_SHOW_X86_SEGMENT_REGISTERS
+  fprintf(dbgout,"\n");
+  fprintf(dbgout, "%%cs = 0x%04x\n", rcs);
+  fprintf(dbgout, "%%ds = 0x%04x\n", rds);
+  fprintf(dbgout, "%%ss = 0x%04x\n", rss);
+  fprintf(dbgout, "%%es = 0x%04x\n", res);
+  fprintf(dbgout, "%%fs = 0x%04x\n", rfs);
+  fprintf(dbgout, "%%gs = 0x%04x\n", rgs);
+
+#endif
+
+#endif
+
+  return debug_continue;
+}
+
+debug_command_return
+debug_show_fpu(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  double *dp;
+  int *np, i;
+#ifdef PPC
+  dp = xpFPRvector(xp);
+  np = (int *) dp;
+  
+  for (i = 0; i < 32; i++, np+=2) {
+    fprintf(dbgout, "f%02d : 0x%08X%08X (%f)\n", i,  np[0], np[1], *dp++);
+  }
+  fprintf(dbgout, "FPSCR = %08X\n", xpFPSCR(xp));
+#endif
+#ifdef X8664
+#ifdef LINUX
+  struct _libc_xmmreg * xmmp = &(xp->uc_mcontext.fpregs->_xmm[0]);
+#endif
+#ifdef DARWIN
+  struct xmm {
+    char fpdata[16];
+  };
+  struct xmm *xmmp = (struct xmm *)(xpFPRvector(xp));
+#endif
+#ifdef WINDOWS
+  struct xmm {
+    char fpdata[16];
+  };
+  struct xmm *xmmp; /* XXX: actually get them */
+#endif
+#ifdef FREEBSD
+  struct xmmacc *xmmp = xpXMMregs(xp);
+#endif
+#ifdef SOLARIS
+  upad128_t *xmmp = xpXMMregs(xp);
+#endif
+  float *sp;
+
+
+  for (i = 0; i < 16; i++, xmmp++) {
+    sp = (float *) xmmp;
+    dp = (double *) xmmp;
+    np = (int *) xmmp;
+    fprintf(dbgout, "f%02d: 0x%08x (%e), 0x%08x%08x (%e)\n", i, *np, (double)(*sp), np[1], np[0], *dp);
+  }
+  fprintf(dbgout, "mxcsr = 0x%08x\n",
+#ifdef LINUX
+          xp->uc_mcontext.fpregs->mxcsr
+#endif
+#ifdef DARWIN
+          UC_MCONTEXT(xp)->__fs.__fpu_mxcsr
+#endif
+#ifdef FREEBSD
+          (((struct savefpu *)(&(xp)->uc_mcontext.mc_fpstate))->sv_env.en_mxcsr)
+#endif
+#ifdef SOLARIS
+	  xp->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xstatus
+#endif
+#ifdef WINDOWS
+          *(xpMXCSRptr(xp))
+#endif
+          );
+#endif  
+#ifdef X8632
+#ifdef DARWIN
+  struct xmm {
+    char fpdata[8];
+  };
+  struct xmm *xmmp = (struct xmm *)(xpFPRvector(xp));
+
+  for (i = 0; i < 8; i++, xmmp++) {
+    float *sp = (float *)xmmp;
+    dp = (double *)xmmp;
+    np = (int *)xmmp;
+    fprintf(dbgout, "f%1d: 0x%08x (%e), 0x%08x%08x (%e)\n", i, *np,
+	    (double)(*sp), np[1], np[0], *dp);
+  }
+  fprintf(dbgout, "mxcsr = 0x%08x\n", UC_MCONTEXT(xp)->__fs.__fpu_mxcsr);
+#endif
+#endif
+
+  return debug_continue;
+}
+
+debug_command_return
+debug_kill_process(ExceptionInformation *xp, siginfo_t *info, int arg) {
+  return debug_kill;
+}
+
+debug_command_return
+debug_win(ExceptionInformation *xp, siginfo_t *info, int arg) {
+  return debug_exit_success;
+}
+
+debug_command_return
+debug_lose(ExceptionInformation *xp, siginfo_t *info, int arg) {
+  return debug_exit_fail;
+}
+
+debug_command_return
+debug_help(ExceptionInformation *xp, siginfo_t *info, int arg) {
+  debug_command_entry *entry;
+
+  for (entry = debug_command_entries; entry->f; entry++) {
+    /* If we have an XP or don't need one, call the function */
+    if (xp || !(entry->flags & DEBUG_COMMAND_FLAG_REQUIRE_XP)) {
+      fprintf(dbgout, "(%c)  %s\n", entry->c, entry->help_text);
+    }
+  }
+  return debug_continue;
+}
+	      
+
+  
+
+debug_command_return
+debug_backtrace(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  extern LispObj current_stack_pointer();
+  extern void plbt_sp(LispObj);
+  extern void plbt(ExceptionInformation *);
+
+  if (xp) {
+    plbt(xp);
+  } else {
+    plbt_sp(current_stack_pointer());
+  }
+  return debug_continue;
+}
+
+debug_command_return
+debug_thread_reset(ExceptionInformation *xp, siginfo_t *info, int arg)
+{
+  reset_lisp_process(xp);
+  return debug_exit_success;
+}
+
+
+debug_command_entry debug_command_entries[] = 
+{
+  {debug_set_gpr,
+   "Set specified GPR to new value",
+   DEBUG_COMMAND_FLAG_AUX_REGNO,
+   "GPR to set (0-31) ?",
+   'G'},
+#ifdef PPC
+  {debug_advance_pc,
+   "Advance the program counter by one instruction (use with caution!)",
+   DEBUG_COMMAND_FLAG_REQUIRE_XP | DEBUG_COMMAND_FLAG_EXCEPTION_ENTRY_ONLY,
+   NULL,
+   'A'},
+  {debug_identify_exception,
+   "Describe the current exception in greater detail",
+   DEBUG_COMMAND_FLAG_REQUIRE_XP | DEBUG_COMMAND_FLAG_EXCEPTION_ENTRY_ONLY |
+   DEBUG_COMMAND_FLAG_EXCEPTION_REASON_ARG,
+   NULL,
+   'D'},
+#endif
+  {debug_show_registers, 
+   "Show raw GPR/SPR register values", 
+   DEBUG_COMMAND_FLAG_REQUIRE_XP,
+   NULL,
+   'R'},
+  {debug_lisp_registers,
+   "Show Lisp values of tagged registers",
+   DEBUG_COMMAND_FLAG_REQUIRE_XP,
+   NULL,
+   'L'},
+  {debug_show_fpu,
+   "Show FPU registers",
+   DEBUG_COMMAND_FLAG_REQUIRE_XP,
+   NULL,
+   'F'},
+  {debug_show_symbol,
+   "Find and describe symbol matching specified name",
+   0,
+   NULL,
+   'S'},
+  {debug_backtrace,
+   "Show backtrace",
+   0,
+   NULL,
+   'B'},
+  {debug_thread_info,
+   "Show info about current thread",
+   0,
+   NULL,
+   'T'},
+  {debug_win,
+   "Exit from this debugger, asserting that any exception was handled",
+   0,
+   NULL,
+   'X'},
+#ifdef DARWIN
+  {debug_lose,
+   "Propagate the exception to another handler (debugger or OS)",
+   DEBUG_COMMAND_FLAG_REQUIRE_XP | DEBUG_COMMAND_FLAG_EXCEPTION_ENTRY_ONLY,
+   NULL,
+   'P'},
+#endif
+#if 0
+  {debug_thread_reset,
+   "Reset current thread (as if in response to stack overflow)",
+   DEBUG_COMMAND_FLAG_REQUIRE_XP,
+   NULL,
+   'T'},
+#endif
+  {debug_kill_process,
+   "Kill Clozure CL process",
+   0,
+   NULL,
+   'K'},
+  {debug_help,
+   "Show this help",
+   0,
+   NULL,
+   '?'},
+  /* end-of-table */
+  {NULL,
+   NULL,
+   0,
+   NULL,
+   0}
+};
+
+debug_command_return
+apply_debug_command(ExceptionInformation *xp, int c, siginfo_t *info, int why) 
+{
+  if (c == EOF) {
+    return debug_kill;
+  } else {
+    debug_command_entry *entry;
+    debug_command f;
+    c = toupper(c);
+
+    for (entry = debug_command_entries; (f = entry->f) != NULL; entry++) {
+      if (toupper(entry->c) == c) {
+	/* If we have an XP or don't need one, call the function */
+	if ((xp || !(entry->flags & DEBUG_COMMAND_FLAG_REQUIRE_XP)) &&
+	    ((why > debug_entry_exception) || 
+	     !(entry->flags & DEBUG_COMMAND_FLAG_EXCEPTION_ENTRY_ONLY))) {
+	  int arg = 0;
+	  if ((entry->flags & DEBUG_COMMAND_REG_FLAGS)
+	      == DEBUG_COMMAND_FLAG_AUX_REGNO) {
+	    arg = debug_get_u5_value("register number");
+	  }
+	  if (entry->flags & DEBUG_COMMAND_FLAG_EXCEPTION_REASON_ARG) {
+	    arg = why;
+	  }
+	  return (f)(xp, info, arg);
+	}
+	break;
+      }
+    }
+    return debug_continue;
+  }
+}
+
+debug_identify_function(ExceptionInformation *xp, siginfo_t *info) 
+{
+#ifdef PPC
+  if (xp) {
+    if (active_tcr_p((TCR *)(ptr_from_lispobj(xpGPR(xp, rcontext))))) {
+      LispObj f = xpGPR(xp, fn), codev;
+      pc where = xpPC(xp);
+      
+      if (!(codev = register_codevector_contains_pc(f, where))) {
+        f = xpGPR(xp, nfn);
+        codev =  register_codevector_contains_pc(f, where);
+      }
+      if (codev) {
+        fprintf(dbgout, " While executing: %s\n", print_lisp_object(f));
+      }
+    } else {
+      int disp;
+      char *foreign_name;
+      natural where = (natural)xpPC(xp);
+
+      fprintf(dbgout, " In foreign code at address 0x" ZLISP "\n", where);
+      foreign_name = foreign_name_and_offset(where, &disp);
+      if (foreign_name) {
+        fprintf(dbgout, "  [%s + %d]\n", foreign_name, disp);
+      }
+    }
+  }
+#endif
+}
+
+#ifndef WINDOWS
+extern pid_t main_thread_pid;
+#endif
+
+
+OSStatus
+lisp_Debugger(ExceptionInformation *xp, 
+	      siginfo_t *info, 
+	      int why, 
+              Boolean in_foreign_code,
+	      char *message, 
+	      ...)
+{
+  va_list args;
+  debug_command_return state = debug_continue;
+
+
+  if (stdin_is_dev_null()) {
+    return -1;
+  }
+
+  va_start(args,message);
+  vfprintf(dbgout, message, args);
+  fprintf(dbgout, "\n");
+  va_end(args);
+
+  if (threads_initialized) {
+    suspend_other_threads(false);
+  }
+
+  lisp_debugger_in_foreign_code = in_foreign_code;
+  if (in_foreign_code) {    
+    char *foreign_name;
+    int disp;
+    fprintf(dbgout, "Exception occurred while executing foreign code\n");
+    foreign_name = foreign_name_and_offset((natural)xpPC(xp), &disp);
+    if (foreign_name) {
+      fprintf(dbgout, " at %s + %d\n", foreign_name, disp);
+    }
+  }
+
+  if (xp) {
+    if (why > debug_entry_exception) {
+      debug_identify_exception(xp, info, why);
+    }
+    debug_identify_function(xp, info);
+  }
+  if (lisp_global(BATCH_FLAG)) {
+#ifdef WINDOWS
+    fprintf(dbgout, "Current Process Id %d\n", (int)GetCurrentProcessId());
+#else
+    fprintf(dbgout, "Main thread pid %d\n", main_thread_pid);
+#endif
+    debug_thread_info(xp, info, 0);
+    if (xp) {
+      debug_show_registers(xp, info, 0);
+      debug_lisp_registers(xp, info, 0);
+      debug_show_fpu(xp, info, 0);
+    }
+    debug_backtrace(xp, info, 0);
+    abort();
+  }
+
+  fprintf(dbgout, "? for help\n");
+  while (state == debug_continue) {
+#ifdef WINDOWS
+    fprintf(dbgout, "[%d] Clozure CL kernel debugger: ", (int)GetCurrentProcessId());
+#else
+    fprintf(dbgout, "[%d] Clozure CL kernel debugger: ", main_thread_pid);
+#endif
+    fflush(dbgout);             /* dbgout should be unbuffered, so this shouldn't be necessary.  But it can't hurt ... */
+    state = apply_debug_command(xp, readc(), info, why);
+  }
+  switch (state) {
+  case debug_exit_success:
+    if (threads_initialized) {
+      resume_other_threads(false);
+    }
+    return 0;
+  case debug_exit_fail:
+    if (threads_initialized) {
+      resume_other_threads(false);
+    }
+    return -1;
+  case debug_kill:
+    terminate_lisp();
+  default:
+    return 0;
+  }
+}
+
+void
+Bug(ExceptionInformation *xp, const char *format, ...)
+{
+  va_list args;
+  char s[512];
+ 
+  va_start(args, format);
+  vsnprintf(s, sizeof(s),format, args);
+  va_end(args);
+  lisp_Debugger(xp, NULL, debug_entry_bug, false, s);
+
+}
+
+void
+FBug(ExceptionInformation *xp, const char *format, ...)
+{
+  va_list args;
+  char s[512];
+ 
+  va_start(args, format);
+  vsnprintf(s, sizeof(s),format, args);
+  va_end(args);
+  lisp_Debugger(xp, NULL, debug_entry_bug, true, s);
+
+}
+
+void
+lisp_bug(char *string)
+{
+  Bug(NULL, "Bug in Clozure CL system code:\n%s", string);
+}
+
Index: /branches/arm/lisp-kernel/lisp-errors.h
===================================================================
--- /branches/arm/lisp-kernel/lisp-errors.h	(revision 13357)
+++ /branches/arm/lisp-kernel/lisp-errors.h	(revision 13357)
@@ -0,0 +1,163 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __ERRORS_X
+#define __ERRORS_X 1
+
+
+#define error_reg_regnum 0
+#define error_udf 1
+#define error_udf_call 2
+#define error_throw_tag_missing 3
+#define error_alloc_failed 4
+#define error_stack_overflow 5
+#define error_excised_function_call 6
+#define error_too_many_values 7
+#define error_propagate_suspend 10
+#define error_interrupt 11
+#define error_suspend 12
+#define error_suspend_all 13
+#define error_resume 14
+#define error_resume_all 15 
+#define error_kill 16
+#define error_cant_call 17
+#define error_allocate_list 18
+
+#define error_type_error 128
+
+typedef enum {
+  error_object_not_array = error_type_error,
+  error_object_not_bignum,
+  error_object_not_fixnum,
+  error_object_not_character,
+  error_object_not_integer,
+  error_object_not_list,
+  error_object_not_number,
+  error_object_not_sequence,
+  error_object_not_simple_string,
+  error_object_not_simple_vector,
+  error_object_not_string,
+  error_object_not_symbol,
+  error_object_not_macptr,
+  error_object_not_real,
+  error_object_not_cons,
+  error_object_not_unsigned_byte,
+  error_object_not_radix,
+  error_object_not_float,
+  error_object_not_rational,
+  error_object_not_ratio,
+  error_object_not_short_float,
+  error_object_not_double_float,
+  error_object_not_complex,
+  error_object_not_vector,
+  error_object_not_simple_base_string,
+  error_object_not_function,
+  error_object_not_unsigned_byte_16,
+  error_object_not_unsigned_byte_8,
+  error_object_not_unsigned_byte_32,
+  error_object_not_signed_byte_32,
+  error_object_not_signed_byte_16,
+  error_object_not_signed_byte_8,	
+  error_object_not_base_character,
+  error_object_not_bit,
+  error_object_not_unsigned_byte_24,
+  error_object_not_u64,
+  error_object_not_s64,
+  error_object_not_unsigned_byte_56,
+  error_object_not_simple_array_double_float_2d,
+  error_object_not_simple_array_single_float_2d,
+  error_object_not_mod_char_code_limit,
+  error_object_not_array_2d,
+  error_object_not_array_3d,
+  error_object_not_array_t,
+  error_object_not_array_bit,
+  error_object_not_array_s8,
+  error_object_not_array_u8,
+  error_object_not_array_s16,
+  error_object_not_array_u16,
+  error_object_not_array_s32,
+  error_object_not_array_u32,
+  error_object_not_array_s64,
+  error_object_not_array_u64,
+  error_object_not_array_fixnum,
+  error_object_not_array_single_float,
+  error_object_not_array_double_float,
+  error_object_not_array_char,
+  error_object_not_array_t_2d,
+  error_object_not_array_bit_2d,
+  error_object_not_array_s8_2d,
+  error_object_not_array_u8_2d,
+  error_object_not_array_s16_2d,
+  error_object_not_array_u16_2d,
+  error_object_not_array_s32_2d,
+  error_object_not_array_u32_2d,
+  error_object_not_array_s64_2d,
+  error_object_not_array_u64_2d,
+  error_object_not_array_fixnum_2d,
+  error_object_not_array_single_float_2d,
+  error_object_not_array_double_float_2d,
+  error_object_not_array_char_2d,
+  error_object_not_simple_array_t_2d,
+  error_object_not_simple_array_bit_2d,
+  error_object_not_simple_array_s8_2d,
+  error_object_not_simple_array_u8_2d,
+  error_object_not_simple_array_s16_2d,
+  error_object_not_simple_array_u16_2d,
+  error_object_not_simple_array_s32_2d,
+  error_object_not_simple_array_u32_2d,
+  error_object_not_simple_array_s64_2d,
+  error_object_not_simple_array_u64_2d,
+  error_object_not_simple_array_fixnum_2d,
+  error_object_not_simple_array_char_2d,
+  error_object_not_array_t_3d,
+  error_object_not_array_bit_3d,
+  error_object_not_array_s8_3d,
+  error_object_not_array_u8_3d,
+  error_object_not_array_s16_3d,
+  error_object_not_array_u16_3d,
+  error_object_not_array_s32_3d,
+  error_object_not_array_u32_3d,
+  error_object_not_array_s64_3d,
+  error_object_not_array_u64_3d,
+  error_object_not_array_fixnum_3d,
+  error_object_not_array_single_float_3d,
+  error_object_not_array_double_float_3d,
+  error_object_not_array_char_3d,
+  error_object_not_simple_array_t_3d,
+  error_object_not_simple_array_bit_3d,
+  error_object_not_simple_array_s8_3d,
+  error_object_not_simple_array_u8_3d,
+  error_object_not_simple_array_s16_3d,
+  error_object_not_simple_array_u16_3d,
+  error_object_not_simple_array_s32_3d,
+  error_object_not_simple_array_u32_3d,
+  error_object_not_simple_array_s64_3d,
+  error_object_not_simple_array_u64_3d,
+  error_object_not_simple_array_fixnum_3d,
+  error_object_not_simple_array_single_float_3d,
+  error_object_not_simple_array_double_float_3d,
+  error_object_not_simple_array_char_3d
+} type_error;
+
+#define error_FPU_exception_double 1024
+#define error_FPU_exception_short 1025
+
+#define error_memory_full 2048
+
+
+
+#endif /* __ERRORS_X */
Index: /branches/arm/lisp-kernel/lisp-exceptions.h
===================================================================
--- /branches/arm/lisp-kernel/lisp-exceptions.h	(revision 13357)
+++ /branches/arm/lisp-kernel/lisp-exceptions.h	(revision 13357)
@@ -0,0 +1,158 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __lisp_exceptions_h__
+#define __lisp_exceptions_h__ 1
+
+
+#include <stdlib.h>
+#include "memprotect.h"
+#include "gc.h"
+
+#ifdef WINDOWS
+#include <windows.h>
+#endif
+
+typedef enum {
+  kDebugger,
+  kContinue,
+  kExit
+} ErrAction;
+
+
+#ifdef WINDOWS
+typedef EXCEPTION_RECORD siginfo_t;  /* Not even close to being the right thing to do */
+#endif
+
+
+void
+zero_page(BytePtr);
+
+void
+zero_heap_segment(BytePtr);
+
+extern protected_area_ptr AllProtectedAreas;
+
+protected_area_ptr find_protected_area(BytePtr);
+
+OSStatus
+lisp_Debugger(ExceptionInformation *, siginfo_t *, int, Boolean, char *, ...);
+
+OSStatus
+handle_protection_violation(ExceptionInformation *, siginfo_t *, TCR *, int);
+
+protected_area_ptr 
+new_protected_area(BytePtr, BytePtr, lisp_protection_kind, natural, Boolean);
+
+void
+unprotect_area_prefix(protected_area_ptr, size_t);
+
+void
+protect_area_prefix(protected_area_ptr, size_t);
+
+void
+protect_area(protected_area_ptr);
+
+
+Boolean
+resize_dynamic_heap(BytePtr, natural);
+
+OSStatus
+PMCL_exception_handler(int, ExceptionInformation *, TCR *, siginfo_t *, int);
+
+TCR*
+get_tcr(Boolean);
+
+ErrAction
+error_action( void );
+
+void
+install_pmcl_exception_handlers(void);
+
+void
+unprotect_all_areas(void);
+
+void
+exception_cleanup(void);
+
+void
+exception_init();
+
+
+#define debug_entry_exception 0
+#define debug_entry_bug -1
+#define debug_entry_dbg -2
+
+#ifdef WINDOWS
+#define ALLOW_EXCEPTIONS(context) // blank stare for now
+#else
+#define ALLOW_EXCEPTIONS(context) \
+  pthread_sigmask(SIG_SETMASK, &context->uc_sigmask, NULL);
+#endif
+
+void
+Fatal(StringPtr, StringPtr);
+
+
+Ptr
+allocate(natural);
+
+Ptr
+zalloc(natural);
+
+void
+deallocate(Ptr);
+
+
+
+void
+non_fatal_error( char * );
+
+void Bug(ExceptionInformation *, const char *format_string, ...);
+void FBug(ExceptionInformation *, const char *format_string, ...);
+signed_natural gc_from_xp(ExceptionInformation *, signed_natural);
+signed_natural purify_from_xp(ExceptionInformation *, signed_natural);
+signed_natural impurify_from_xp(ExceptionInformation *, signed_natural);
+
+
+
+void
+adjust_exception_pc(ExceptionInformation *, int);
+
+size_t
+symbol_name( unsigned, char *, size_t );
+
+
+size_t
+exception_fn_name( ExceptionInformation *, int, char *, size_t );
+
+
+
+#ifdef PPC
+#include "ppc-exceptions.h"
+#endif
+
+#ifdef X86
+#include "x86-exceptions.h"
+#endif
+
+void suspend_other_threads(Boolean);
+void resume_other_threads(Boolean);
+
+
+#endif /* __lisp_exceptions_h__ */
+
Index: /branches/arm/lisp-kernel/lisp.h
===================================================================
--- /branches/arm/lisp-kernel/lisp.h	(revision 13357)
+++ /branches/arm/lisp-kernel/lisp.h	(revision 13357)
@@ -0,0 +1,135 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __lisp__
+#define __lisp__
+
+
+
+#include "lisptypes.h"
+#ifndef LOWMEM_BIAS
+#define LOWMEM_BIAS 0
+#endif
+
+#ifdef PPC
+#include "ppc-constants.h"
+#endif
+#ifdef X86
+#include "x86-constants.h"
+#endif
+#include "macros.h"
+
+extern Boolean use_mach_exception_handling;
+
+extern int page_size, log2_page_size;
+
+static inline natural
+_align_to_power_of_2(natural n, unsigned power)
+{
+  natural align = (1<<power) -1;
+
+  return (n+align) & ~align;
+}
+
+#define align_to_power_of_2(n,p) _align_to_power_of_2(((natural)(n)),p)
+
+static inline natural
+_truncate_to_power_of_2(natural n, unsigned power)
+{
+  return n & ~((1<<power) -1);
+}
+
+#define truncate_to_power_of_2(n,p) _truncate_to_power_of_2((natural)(n),p)
+
+LispObj start_lisp(TCR*, LispObj);
+
+size_t
+ensure_stack_limit(size_t);
+
+char *
+print_lisp_object(LispObj);
+
+#include "kernel-globals.h"
+#endif
+
+#define PLATFORM_WORD_SIZE_32 0
+#define PLATFORM_WORD_SIZE_64 64
+#define PLATFORM_CPU_PPC (0<<3)
+#define PLATFORM_CPU_SPARC (1<<3)
+#define PLATFORM_CPU_X86 (2<<3)
+#define PLATFORM_OS_VXWORKS 0
+#define PLATFORM_OS_LINUX 1
+#define PLATFORM_OS_SOLARIS 2
+#define PLATFORM_OS_DARWIN 3
+#define PLATFORM_OS_FREEBSD 4
+#define PLATFORM_OS_WINDOWS 5
+
+#ifdef LINUX
+#define PLATFORM_OS PLATFORM_OS_LINUX
+#endif
+
+#ifdef DARWIN
+#define PLATFORM_OS PLATFORM_OS_DARWIN
+#endif
+
+#ifdef FREEBSD
+#define PLATFORM_OS PLATFORM_OS_FREEBSD
+#endif
+
+#ifdef SOLARIS
+#define PLATFORM_OS PLATFORM_OS_SOLARIS
+#endif
+
+#ifdef WINDOWS
+#define PLATFORM_OS PLATFORM_OS_WINDOWS
+#endif
+
+#ifdef PPC
+#define PLATFORM_CPU PLATFORM_CPU_PPC
+#endif
+
+#ifdef X86
+#define PLATFORM_CPU PLATFORM_CPU_X86
+#endif
+
+#if (WORD_SIZE == 32)
+#define PLATFORM_WORD_SIZE PLATFORM_WORD_SIZE_32
+#endif
+
+#if (WORD_SIZE == 64)
+#define PLATFORM_WORD_SIZE PLATFORM_WORD_SIZE_64
+#endif
+
+#define PLATFORM (PLATFORM_OS|PLATFORM_CPU|PLATFORM_WORD_SIZE)
+
+#ifdef WINDOWS
+Boolean check_for_embedded_image (wchar_t *);
+#else
+Boolean check_for_embedded_image (char *);
+#endif
+natural xStackSpace();
+void init_threads(void *, TCR *);
+
+#ifdef WINDOWS
+void wperror(char *);
+#endif
+
+void ensure_static_conses(ExceptionInformation *, TCR *,natural);
+
+#include <stdio.h>
+
+extern FILE *dbgout;
Index: /branches/arm/lisp-kernel/lisp.s
===================================================================
--- /branches/arm/lisp-kernel/lisp.s	(revision 13357)
+++ /branches/arm/lisp-kernel/lisp.s	(revision 13357)
@@ -0,0 +1,68 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+ 
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+ 
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+ 
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+	include(m4macros.m4)        
+        ifdef(`LOWMEM_BIAS',`
+`LOWMEM_BIAS' = LOWMEM_BIAS
+',`
+`LOWMEM_BIAS' = 0
+')
+        undefine(`LOWMEM_BIAS')
+        /* DWARF2 exception fsm */
+        DW_CFA_advance_loc = 0x40   
+        DW_CFA_offset = 0x80
+        DW_CFA_restore = 0xc0
+        DW_CFA_nop = 0x00
+        DW_CFA_set_loc = 0x01
+        DW_CFA_advance_loc1 = 0x02
+        DW_CFA_advance_loc2 = 0x03
+        DW_CFA_advance_loc4 = 0x04
+        DW_CFA_offset_extended = 0x05
+        DW_CFA_restore_extended = 0x06
+        DW_CFA_undefined = 0x07
+        DW_CFA_same_value = 0x08
+        DW_CFA_register = 0x09
+        DW_CFA_remember_state = 0x0a
+        DW_CFA_restore_state = 0x0b
+        DW_CFA_def_cfa = 0x0c
+        DW_CFA_def_cfa_register = 0x0d
+        DW_CFA_def_cfa_offset = 0x0e
+        /* DWARF 3.  */
+        DW_CFA_def_cfa_expression = 0x0f
+        DW_CFA_expression = 0x10
+        DW_CFA_offset_extended_sf = 0x11
+        DW_CFA_def_cfa_sf = 0x12
+        DW_CFA_def_cfa_offset_sf = 0x13
+        DW_CFA_val_offset = 0x14
+        DW_CFA_val_offset_sf = 0x15
+        DW_CFA_val_expression = 0x16
+        /* SGI/MIPS specific.  */
+        DW_CFA_MIPS_advance_loc8 = 0x1d
+        /* GNU extensions.  */
+        DW_CFA_GNU_window_save = 0x2d
+        DW_CFA_GNU_args_size = 0x2e
+        DW_CFA_GNU_negative_offset_extended = 0x2f
+
+        ifdef(`PPC',`
+         include(ppc-constants.s)
+         include(ppc-macros.s)
+	 include(ppc-uuo.s)
+        ')
+	ifdef(`X86',`
+         include(x86-constants.s)
+         include(x86-macros.s)
+	 include(x86-uuo.s)
+	')
+
Index: /branches/arm/lisp-kernel/lisp_globals.h
===================================================================
--- /branches/arm/lisp-kernel/lisp_globals.h	(revision 13357)
+++ /branches/arm/lisp-kernel/lisp_globals.h	(revision 13357)
@@ -0,0 +1,147 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __lisp_globals__
+#define __lisp_globals__
+
+
+extern LispObj lisp_nil;
+
+#define GET_TCR (-1)		/* address of get_tcr() for callbacks */
+#define TCR_COUNT (-2)		/* next tcr's tcr_id */
+#define INTERRUPT_SIGNAL  (-3)  /* signal to use for PROCESS-INTERRUPT */
+#define KERNEL_IMPORTS (-4)	/* some things we need to have imported for us. */
+#define OBJC_2_PERSONALITY (-5) /* A good listener.  Doesn't say much */
+#define SAVETOC (-6)	        /* Saved TOC register, for some platforms */
+#define SAVER13 (-7)		/* Saved (global) r13, on some platforms */
+#define SUBPRIMS_BASE (-8)	/* where the dynamic subprims wound up */
+#define RET1VALN (-9)		/* magic multiple-values return address */
+#define TCR_KEY (-10)     	/* tsd key for per-thread tcr */
+#define TCR_AREA_LOCK (-11)       /* all_areas/tcr queue lock */
+#define EXCEPTION_LOCK (-12)	/* serialize exception handling */
+#define STATIC_CONSES (-13)
+#define DEFAULT_ALLOCATION_QUANTUM (-14)
+#define INTFLAG (-15)
+#define GC_INHIBIT_COUNT (-16)
+#define REFBITS (-17)
+#define OLDSPACE_DNODE_COUNT (-18) /* count of dynamic dnodes older than generation 0 */
+#define ALTIVEC_PRESENT (-19)   /* non-zero if AltiVec present. */
+#define FWDNUM (-20)            /* fixnum: GC "forwarder" call count. */
+#define GC_NUM (-21)            /* fixnum: GC call count. */
+#define GCABLE_POINTERS (-22)   /* linked-list of weak macptrs. */
+#define HEAP_START (-23)        /* start of lisp heap */
+#define HEAP_END (-24)          /* end of lisp heap */
+#define STATICALLY_LINKED (-25)        /* non-zero if -static */
+#define STACK_SIZE (-26)        /* from the command line */
+#define OBJC_2_BEGIN_CATCH (-27)  /* address of ObjC 2.0 objc_begin_catch() */
+#define KERNEL_PATH (-28)       /* real executable name */
+#define ALL_AREAS (-29)         /* doubly-linked list of stack & heap areas */
+#define LEXPR_RETURN (-30)      /* magic &lexpr cleanup code */
+#define LEXPR_RETURN1V (-31)    /* single-value &lexpr cleanup code */
+#define IN_GC (-32)             /* non-zero when lisp addresses may be invalid */
+#define FREE_STATIC_CONSES (-33)     /* length of freelist */
+#define OBJC_2_END_CACTCH (-34)          /* address of ObjC 2.0 objc_end_catch() */
+#define SHORT_FLOAT_ZERO (-35)  /* low half of 1.0d0 */
+#define DOUBLE_FLOAT_ONE (-36)  /* high half of 1.0d0 */
+#define STATIC_CONS_AREA (-37)	/* static_cons_area */
+#define LISP_EXIT_HOOK (-38)	/* install foreign exception handling */
+#define OLDEST_EPHEMERAL (-39)  /* doubleword address of oldest ephemeral object or 0 */
+#define TENURED_AREA (-40)      /* the tenured area */
+#define REF_BASE (-41)          /* start of oldest pointer-bearing area */
+#define ARGV (-42)              /* pointer to &argv[0] */
+#define HOST_PLATFORM (-43)	/* for platform-specific initialization */
+#define BATCH_FLAG (-44)	/* -b arg */
+#define UNWIND_RESUME (-45)	/* address of _Unwind_Resume from libobjc */
+#define WEAK_GC_METHOD (-46)	/* weak GC algorithm */
+#define IMAGE_NAME (-47)	/* --image-name arg */
+#define INITIAL_TCR (-48)	/* initial thread tcr */
+#define WEAKVLL (-49)           /* all populations as of last GC */
+
+#define MIN_KERNEL_GLOBAL WEAKVLL
+
+/* These are only non-zero when an image is being saved or loaded */
+
+#if (WORD_SIZE==64)
+#define LISP_HEAP_THRESHOLD (-511)
+#define EGC_ENABLED (-510)
+#define G0_THRESHOLD (-509)
+#define G1_THRESHOLD (-508)
+#define G2_THRESHOLD (-507)
+#else
+#define LISP_HEAP_THRESHOLD (-1023)
+#define EGC_ENABLED (-1022)
+#define G0_THRESHOLD (-1021)
+#define G1_THRESHOLD (-1020)
+#define G2_THRESHOLD (-1019)
+#endif
+
+#ifdef PPC
+#ifdef PPC64
+#define lisp_global(g) (((LispObj *) (0x3000+(LOWMEM_BIAS)))[(g)])
+#define nrs_symbol(s) (((lispsymbol *) (0x3000+(LOWMEM_BIAS)))[(s)])
+#else
+#define lisp_global(g) (((LispObj *) (nil_value-fulltag_nil))[(g)])
+#define nrs_symbol(s) (((lispsymbol *) (nil_value+(8-fulltag_nil)+8))[(s)])
+#endif
+#endif
+
+#ifdef X8664
+#define lisp_global(g) (((LispObj *) (0x13000+(LOWMEM_BIAS)))[(g)])
+#define nrs_symbol(s) (((lispsymbol *) (0x13020+(LOWMEM_BIAS)))[(s)])
+#endif
+
+#ifdef X8632
+#define lisp_global(g) (((LispObj *) (0x13000+(LOWMEM_BIAS)))[(g)])
+#define nrs_symbol(s) (((lispsymbol *) (0x13008+(LOWMEM_BIAS)))[(s)])
+#endif
+
+#define nrs_T 				(nrs_symbol(0))		/* t */
+#define nrs_NILSYM			(nrs_symbol(1))		/* nil */
+#define nrs_ERRDISP			(nrs_symbol(2))		/* %err-disp */
+#define nrs_CMAIN			(nrs_symbol(3))		/* cmain */
+#define nrs_EVAL			(nrs_symbol(4))		/* eval */
+#define nrs_APPEVALFN			(nrs_symbol(5))		/* apply-evaluated-function */
+#define nrs_ERROR			(nrs_symbol(6))		/* error */
+#define nrs_DEFUN			(nrs_symbol(7))		/* %defun */
+#define nrs_DEFVAR			(nrs_symbol(8))		/* %defvar */
+#define nrs_DEFCONSTANT			(nrs_symbol(9))		/* %defconstant */
+#define nrs_MACRO			(nrs_symbol(10))	/* %macro */
+#define nrs_KERNELRESTART		(nrs_symbol(11))	/* %kernel-restart */
+#define nrs_PACKAGE			(nrs_symbol(12))	/* *package* */
+#define nrs_TOTAL_BYTES_FREED           (nrs_symbol(13))        /* *total-bytes-freed* */
+#define nrs_KALLOWOTHERKEYS		(nrs_symbol(14))	/* :allow-other-keys */
+#define nrs_TOPLCATCH			(nrs_symbol(15))	/* %toplevel-catch% */
+#define nrs_TOPLFUNC			(nrs_symbol(16))	/* %toplevel-function% */
+#define nrs_CALLBACKS			(nrs_symbol(17))	/* %pascal-functions% */
+#define nrs_ALLMETEREDFUNS		(nrs_symbol(18))	/* *all-metered-functions* */
+#define nrs_TOTAL_GC_MICROSECONDS       (nrs_symbol(19))        /* *total-gc-microseconds* */
+#define nrs_BUILTIN_FUNCTIONS           (nrs_symbol(20))        /* %builtin-functions% */
+#define nrs_UDF				(nrs_symbol(21))	/* %unbound-function% */
+#define nrs_INIT_MISC			(nrs_symbol(22))        /* %init-misc% */
+#define nrs_MACRO_CODE                  (nrs_symbol(23))        /* %macro-code% */
+#define nrs_CLOSURE_CODE		(nrs_symbol(24))        /* %closure-code% */
+#define nrs_NEW_GCABLE_PTR		(nrs_symbol(25))	/* %new-gcable-ptr */
+#define nrs_GC_EVENT_STATUS_BITS	(nrs_symbol(26))	/* *gc-event-status-bits* */
+#define nrs_POST_GC_HOOK		(nrs_symbol(27))	/* *post-gc-hook* */
+#define nrs_HANDLERS			(nrs_symbol(28))	/* %handlers% */
+#define nrs_ALL_PACKAGES		(nrs_symbol(29))	/* %all-packages% */
+#define nrs_KEYWORD_PACKAGE		(nrs_symbol(30))	/* *keyword-package* */
+#define nrs_FINALIZATION_ALIST		(nrs_symbol(31))	/* %finalization-alist% */
+#define nrs_FOREIGN_THREAD_CONTROL      (nrs_symbol(32))        /* %foreign-thread-control */
+#define num_nilreg_symbols 33
+#define nilreg_symbols_end ((BytePtr) &(nrs_symbol(num_nilreg_symbols)))
+#endif
Index: /branches/arm/lisp-kernel/lispdcmd.c
===================================================================
--- /branches/arm/lisp-kernel/lispdcmd.c	(revision 13357)
+++ /branches/arm/lisp-kernel/lispdcmd.c	(revision 13357)
@@ -0,0 +1,47 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+/*
+  MCL-PPC dcmd utilities.
+*/
+
+#include "lispdcmd.h"
+
+
+
+
+void
+display_buffer(char *buf)
+{
+  fprintf(dbgout, "%s\n", buf);
+}
+
+int
+Dprintf(const char *format, ...)
+{
+  char buf[512];
+  va_list args;
+  int res;
+
+  va_start(args, format);
+  res = vsnprintf(buf, sizeof(buf), format, args);
+  if (res >= 0) {
+    display_buffer(buf);
+  }
+  return res;
+}
+
Index: /branches/arm/lisp-kernel/lispdcmd.h
===================================================================
--- /branches/arm/lisp-kernel/lispdcmd.h	(revision 13357)
+++ /branches/arm/lisp-kernel/lispdcmd.h	(revision 13357)
@@ -0,0 +1,31 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "lisp.h"
+#include "area.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+
+/* More-or-less like c printf(); */
+int Dprintf(const char *format, ...);
+
+
+char *
+print_lisp_object(LispObj);
Index: /branches/arm/lisp-kernel/lisptypes.h
===================================================================
--- /branches/arm/lisp-kernel/lisptypes.h	(revision 13357)
+++ /branches/arm/lisp-kernel/lisptypes.h	(revision 13357)
@@ -0,0 +1,238 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __lisptypes__
+#define __lisptypes__
+
+#include <sys/types.h>
+#define WORD_SIZE 32
+#ifdef PPC64
+#undef WORD_SIZE
+#define WORD_SIZE 64
+#endif
+#ifdef X8664
+#undef WORD_SIZE
+#define WORD_SIZE 64
+#endif
+
+
+#ifdef WINDOWS
+#include <windows.h>
+typedef long long s64_t;
+typedef unsigned long long u64_t;
+typedef signed long s32_t;
+typedef unsigned long u32_t;
+typedef signed short s16_t;
+typedef unsigned short u16_t;
+typedef signed char s8_t;
+typedef unsigned char u8_t;
+#else
+
+#include <stdint.h>
+
+#ifdef SOLARIS
+/* Solaris doesn't laugh and play like the other children */
+typedef int64_t s64_t;
+typedef uint64_t u64_t;
+typedef int32_t s32_t;
+typedef uint32_t u32_t;
+typedef int16_t s16_t;
+typedef uint16_t u16_t;
+typedef int8_t s8_t;
+typedef uint8_t u8_t;
+#else
+typedef int64_t s64_t;
+typedef u_int64_t u64_t;
+typedef int32_t s32_t;
+typedef u_int32_t u32_t;
+typedef int16_t s16_t;
+typedef u_int16_t u16_t;
+typedef int8_t s8_t;
+typedef u_int8_t u8_t;
+#endif
+#endif
+
+#if WORD_SIZE == 64
+typedef u64_t LispObj;
+typedef u64_t natural;
+typedef s64_t signed_natural;
+typedef u64_t unsigned_of_pointer_size;
+#else
+typedef u32_t LispObj;
+typedef u32_t natural;
+typedef s32_t signed_natural;
+typedef u32_t unsigned_of_pointer_size;
+#endif
+
+
+#ifdef DARWIN
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <AvailabilityMacros.h>
+
+#ifdef PPC
+#if MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4
+#define __ss ss
+#define __es es
+#define __fs fs
+
+#define __srr0 srr0
+#define __srr1 srr1
+#define __r0 r0
+#define __r1 r1
+#define __r3 r3
+#define __r4 r4
+#define __r5 r5
+#define __r6 r6
+#define __r13 r13
+#define __cr cr
+#define __xer xer
+#define __lr lr
+#define __ctr ctr
+
+#define __dar dar
+#define __dsisr dsisr
+#define __exception exception
+
+#define __fpregs fpregs
+#define __fpscr fpscr
+#endif
+
+#if WORD_SIZE == 64
+#ifdef _STRUCT_UCONTEXT64
+typedef _STRUCT_UCONTEXT64 ExceptionInformation;
+typedef _STRUCT_MCONTEXT64 *MCONTEXT_T;
+#else /* _STRUCT_UCONTEXT64 */
+typedef struct ucontext64 ExceptionInformation;
+typedef struct mcontext64 *MCONTEXT_T;
+#endif /* _STRUCT_UCONTEXT64 */
+#define UC_MCONTEXT(UC) UC->uc_mcontext64
+#else /* WORD_SIZE */
+#ifdef _STRUCT_UCONTEXT
+typedef _STRUCT_UCONTEXT ExceptionInformation;
+typedef _STRUCT_MCONTEXT *MCONTEXT_T;
+#else
+typedef struct ucontext ExceptionInformation;
+typedef struct mcontext *MCONTEXT_T;
+#endif
+#define UC_MCONTEXT(UC) UC->uc_mcontext
+#endif /* WORD_SIZE */
+
+
+
+#endif /* PPC */
+
+#ifdef X8664
+#if MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4
+/* Broken <i386/ucontext.h> in Mac OS 10.4u SDK */
+struct mcontext64 {
+	x86_exception_state64_t	__es;
+	x86_thread_state64_t 	__ss;	
+	x86_float_state64_t	__fs;
+};
+
+typedef struct mcontext64 *MCONTEXT_T;
+typedef ucontext64_t ExceptionInformation;
+#define UC_MCONTEXT(UC) UC->uc_mcontext64
+#define __rax rax
+#define __fpu_mxcsr fpu_mxcsr
+#define __fpu_xmm0 fpu_xmm0
+#define __rsp rsp
+#define __trapno trapno
+#define __faultvaddr faultvaddr
+#define __err err
+#define __rip rip
+#define __rsi rsi
+#define __rdi rdi
+#define __rdx rdx
+#define __rcx rcx
+#define __r8 r8
+#define __rflags rflags
+#else /* post-10.4 */
+typedef mcontext_t MCONTEXT_T;
+typedef ucontext_t ExceptionInformation;
+#define UC_MCONTEXT(UC) UC->uc_mcontext
+#endif
+#endif
+
+#ifdef X8632
+/* Assume rational <i386/ucontext.h> */
+/* Sadly, we can't make that assumption, since Apple renamed things
+   for Leopard. Yow!  Are we standards-compliant yet ? */
+/* In the long term, we probably want to use the leopard-compliant
+   names (with leading __ prefixes).  In the shorter term, we want
+   kernels compiled on Leopard to run on Tiger (and not reference
+   foo$UNIX2003 and similar nonsense, and that means getting the old
+   names (without leading __ prefixes.)  Confused yet ? */
+
+#if MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4
+#define __ss ss
+#define __ds ds
+#define __es es
+#define __cs cs
+#define __fs fs
+#define __gs gs
+#define __eax eax
+#define __esp esp
+#define __eip eip
+#define __eflags eflags
+#define __fpu_xmm0 fpu_xmm0
+#define __fpu_mxcsr fpu_mxcsr
+#define __fpu_stmm0 fpu_stmm0
+#define __trapno trapno
+#define __err err
+#define __faultvaddr faultvaddr
+#endif
+
+#define UC_MCONTEXT(UC) UC->uc_mcontext
+typedef mcontext_t MCONTEXT_T;
+typedef ucontext_t ExceptionInformation;
+#endif
+
+#endif /* #ifdef DARWIN */
+
+#ifdef LINUX
+typedef struct ucontext ExceptionInformation;
+#endif
+
+#ifdef FREEBSD
+typedef struct __ucontext ExceptionInformation;
+#endif
+
+#ifdef SOLARIS
+typedef struct ucontext ExceptionInformation;
+#endif
+
+#ifdef WINDOWS
+typedef CONTEXT ExceptionInformation;
+#endif
+
+typedef u32_t lisp_char_code;
+
+typedef int OSStatus, OSErr;
+#define noErr ((OSErr) 0)
+typedef int Boolean;
+typedef void *LogicalAddress;
+typedef char *Ptr, *BytePtr, *StringPtr;
+typedef unsigned int UInt32;
+
+
+
+#define true 1
+#define false 0
+
+#endif /*__lisptypes__ */
Index: /branches/arm/lisp-kernel/m4macros.m4
===================================================================
--- /branches/arm/lisp-kernel/m4macros.m4	(revision 13357)
+++ /branches/arm/lisp-kernel/m4macros.m4	(revision 13357)
@@ -0,0 +1,353 @@
+changecom(`/*',`*/')
+
+
+
+/*   Copyright (C) 1994-2001 Digitool, Inc  */
+/*   This file is part of Clozure CL.    */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+
+
+/*  BSD debugging information (line numbers, etc) is a little different  */
+/*  from ELF/SVr4 debugging information.  There are probably lots more  */
+/*  differences, but this helps us to distinguish between what LinuxPPC  */
+/*  (ELF/SVr4) wants and what Darwin (BSD) wants.  */
+
+
+define(`BSDstabs',`1')
+define(`ELFstabs',`2')
+define(`COFFstabs',`3')
+undefine(`EABI')
+undefine(`POWEROPENABI')
+undefine(`rTOC')
+
+
+ifdef(`DARWIN',`define(`SYSstabs',`BSDstabs')
+		define(`DarwinAssembler',`')
+                define(`CNamesNeedUnderscores',`')
+	        define(`LocalLabelPrefix',`L')
+	        define(`StartTextLabel',`Ltext0')
+	        define(`EndTextLabel',`Letext')
+                ifdef(`PPC',`
+		define(`POWEROPENABI',`')')
+                ifdef(`X86',`
+                define(`SYSCALL_SETS_CARRY_ON_ERROR',`')
+		define(`SSE2_MATH_LIB',`')')
+')
+
+ifdef(`LINUX',`define(`SYSstabs',`ELFstabs')
+	       define(`HaveWeakSymbols',`')
+	       define(`LocalLabelPrefix',`.L')
+	       define(`StartTextLabel',`.Ltext0')
+	       define(`EndTextLabel',`.Letext')
+               ifdef(`PPC64',`
+               define(`POWEROPENABI',`')
+               define(`rTOC',`r2')', `
+	       define(`EABI',`')')')
+
+ifdef(`FREEBSD',`define(`SYSstabs',`ELFstabs')
+	       define(`HaveWeakSymbols',`')
+	       define(`LocalLabelPrefix',`.L')
+	       define(`StartTextLabel',`.Ltext0')
+	       define(`EndTextLabel',`.Letext')'
+                ifdef(`X86',`
+                define(`SYSCALL_SETS_CARRY_ON_ERROR',`')')
+)
+
+ifdef(`SOLARIS',`define(`SYSstabs',`ELFstabs')
+	       define(`HaveWeakSymbols',`')
+	       define(`LocalLabelPrefix',`.L')
+	       define(`StartTextLabel',`.Ltext0')
+	       define(`EndTextLabel',`.Letext')')
+
+ifdef(`WINDOWS',`define(`SYSstabs',`COFFstabs')
+               define(`CNamesNeedUnderscores',`')
+               define(`LocalLabelPrefix',`L')
+	       define(`StartTextLabel',`Ltext0')
+	       define(`EndTextLabel',`Letext')')
+
+
+/*  Names exported to (or imported from) C may need leading underscores.  */
+/*  Still.  After all these years.  Why ?  */
+
+define(`C',`ifdef(`CNamesNeedUnderscores',``_'$1',`$1')')
+
+define(`_linecounter_',0)
+
+define(`_emit_BSD_source_line_stab',`
+ifdef(`X86',`
+# __line__ "__file__" 1',`
+	.stabd 68,0,$1
+')')
+
+
+/*  We don't really do "weak importing" of symbols from a separate  */
+/*  subprims library anymore; if we ever do and the OS supports it,  */
+/*  here's how to say that we want it ...  */
+
+define(`WEAK',`ifdef(`HaveWeakSymbols',`
+	.weak $1
+',`
+	.globl $1
+')')
+
+define(`_emit_ELF_source_line_stab',`
+  define(`_linecounter_',incr(_linecounter_))
+	.stabn 68,0,$1,`.LM'_linecounter_`-'__func_name
+`.LM'_linecounter_:
+')
+
+define(`_emit_COFF_source_line_stab',`
+        _emit_ELF_source_line_stab($1)
+')
+
+
+define(`emit_source_line_stab',`
+	ifelse(eval(SYSstabs),
+             eval(BSDstabs),
+  	      `_emit_BSD_source_line_stab($1)',
+              eval(SYSstabs),
+              eval(ELFstabs),
+              `_emit_ELF_source_line_stab($1)',
+              `_emit_COFF_source_line_stab($1)')')
+
+
+
+
+
+
+/*  Assemble a reference to the high half of a 32-bit constant,  */
+/*  possibly adjusted for sign-extension of the low half.  */
+
+
+define(`HA',`ifdef(`DARWIN',`ha16($1)',`$1@ha')')
+
+ 
+/*  Likewise for the low half, and for the high half without  */
+/*  concern for sign-extension of the low half.  */
+
+define(`LO',`ifdef(`DARWIN',`lo16($1)',`$1@l')')
+define(`HI',`ifdef(`DARWIN',`hi16($1)',`$1@hi')')
+
+/*  Note that m4 macros that could be expanded in the .text segment  */
+/*  need to advertise the current line number after they have finished  */
+/*  expanding.  That shouldn't be too onerous, if only because there  */
+/*  should not be too many of them.  */
+
+
+define(`N_FUN',36)
+define(`N_SO',100)
+
+/*    I wish that there was a less-dumb way of doing this.  */
+
+define(`pwd0',esyscmd(`/bin/pwd'))
+define(`__pwd__',substr(pwd0,0,decr(len(pwd0)))`/')
+
+/*   _beginfile() -- gets line/file in synch, generates N_SO for file,  */
+/*   starts .text section  */
+
+
+define(`_beginfile',`
+	.stabs "__pwd__",N_SO,0,0,StartTextLabel()
+	.stabs "__file__",N_SO,0,0,StartTextLabel()
+ifdef(`PPC64',`
+ifdef(`DARWIN',`
+        .machine ppc64
+')')
+	.text
+StartTextLabel():
+# __line__ "__file__"
+')
+
+define(`_endfile',`
+	.stabs "",N_SO,0,0,EndTextLabel()
+EndTextLabel():
+# __line__
+')
+
+define(`_startfn',`define(`__func_name',$1)
+# __line__
+	ifelse(eval(SYSstabs),eval(ELFstabs),`
+	.type $1,@function
+')
+
+$1:
+ifdef(`WINDOWS',`
+	.def	$1;	.scl	2;	.type	32;	.endef
+',`
+        .stabd 68,0,__line__
+')
+	.stabs "$1:F1",36,0,__line__,$1
+	.set func_start,$1
+# __line__ "__file__" 1 ')
+
+
+
+define(`_exportfn',`
+	.globl $1
+	_startfn($1)
+ifdef(`PPC64',`
+ifdef(`LINUX',`
+        .global `.'$1
+`.'$1:
+')')
+# __line__
+')
+
+
+define(`_endfn',`
+LocalLabelPrefix`'__func_name`999':
+ifdef(`WINDOWS',`
+',`
+	.stabs "",36,0,0,LocalLabelPrefix`'__func_name`999'-__func_name
+	.line __line__
+	ifelse(eval(SYSstabs),eval(ELFstabs),`
+        .size __func_name,LocalLabelPrefix`'__func_name`999'-__func_name
+')
+')
+	undefine(`__func_name')
+')
+
+
+/* _struct(name,start_offset)  */
+/*   This just generates a bunch of assembler equates; m4  */
+/*   doesn't remember much of it ..  */
+
+define(`_struct', `define(`__struct_name',$1)
+ define(`_struct_org_name', _$1_org) 
+ define(`_struct_base_name', _$1_base)
+	.set _struct_org_name,$2
+	.set _struct_base_name,_struct_org_name
+ ifelse($3,`',`
+  undefine(`_struct_fixed_size_name')
+  ',`
+  define(`_struct_fixed_size_name', _$1_fixed_size)
+	.set _struct_fixed_size_name,$3
+  ')
+')
+
+define(`_struct_pad',`
+	.set _struct_org_name,_struct_org_name + $1
+')
+ 
+define(`_struct_label',`
+	.set __struct_name`.'$1, _struct_org_name
+')
+
+/*  _field(name,size)   */
+define(`_field',`_struct_label($1) _struct_pad($2)')
+
+define(`_halfword', `_field($1, 2)')
+define(`_word', `_field($1, 4)')
+define(`_dword',`_field($1, 8)')
+define(`_node', `_field($1, node_size)')
+
+define(`_ends',`ifdef(`_struct_fixed_size_name',`
+	.set  __struct_name`.size',_struct_fixed_size_name
+	',`
+	.set  __struct_name`.size', _struct_org_name-_struct_base_name
+	')
+')
+
+
+/*   Lisp fixed-size objects always have a 1-word header  */
+/*   and are always accessed from a "fulltag_misc"-tagged pointer.  */
+/*   We also want to define STRUCT_NAME.element_count for each  */
+/*   such object.  */
+
+
+define(`_structf',`
+	_struct($1,ifelse($2,`',-misc_bias,$2))
+        _node(header)
+')
+
+define(`_endstructf',`
+	.set __struct_name.`element_count',((_struct_org_name-node_size)-_struct_base_name)/node_size
+	_ends
+')
+
+
+define(`__',`emit_source_line_stab(__line__)
+	$@
+	')
+
+define(`__local_label_counter__',0)
+define(`__macro_label_counter__',0)
+
+define(`new_local_labels',
+  `define(`__local_label_counter__',incr(__local_label_counter__))')
+
+define(`new_macro_labels',
+  `define(`__macro_label_counter__',incr(__macro_label_counter__))')
+
+define(`_local_label',`LocalLabelPrefix()`'$1')
+
+define(`local_label',`_local_label($1`'__local_label_counter__)')
+
+define(`macro_label',`_local_label($1`'__macro_label_counter__)')
+
+
+/* The Darwin assembler doesn't seem to support .ifdef/.ifndef, but  */
+/* does understand .if.    */
+/* Note that using M4's own ifdef is certainly possible, but it's  */
+/* hard to generate source line information when doing so.  */
+
+  
+define(`__ifdef',`ifdef(`$1',`.if 1',`.if 0')')
+define(`__ifndef',`ifdef(`$1',`.if 0',`.if 1')')
+define(`__else',`.else')
+define(`__endif',`.endif')
+define(`__if',`.if $1')
+
+define(`equate_if_defined',`ifdef($1,`
+`$1' = 1
+',`
+`$1' = 0
+')')
+
+equate_if_defined(`DARWIN')
+equate_if_defined(`LINUX')
+equate_if_defined(`FREEBSD')
+equate_if_defined(`SOLARIS')
+equate_if_defined(`WIN_64')
+equate_if_defined(`PPC64')
+equate_if_defined(`X8664')
+equate_if_defined(`WIN_32')
+equate_if_defined(`WINDOWS')
+
+equate_if_defined(`HAVE_TLS')
+/* DARWIN_GS_HACK is hopefully short-lived */
+equate_if_defined(`DARWIN_GS_HACK')
+
+equate_if_defined(`TCR_IN_GPR')
+
+/* Well, so much for that. Maybe this will go away soon ? */
+equate_if_defined(`WIN32_ES_HACK')
+equate_if_defined(`SYSCALL_SETS_CARRY_ON_ERROR')
+
+
+
+/* We use (more-or-less) a PowerOpen C frame, except on LinuxPPC32  */
+
+define(`USE_POWEROPEN_C_FRAME',`')
+undefine(`USE_EABI_C_FRAME')
+
+ifdef(`LINUX',`
+ifdef(`PPC64',`',`
+define(`USE_EABI_C_FRAME',`')
+undefine(`USE_POWEROPEN_C_FRAME')
+')')
+
+
+
+
Index: /branches/arm/lisp-kernel/macros.h
===================================================================
--- /branches/arm/lisp-kernel/macros.h	(revision 13357)
+++ /branches/arm/lisp-kernel/macros.h	(revision 13357)
@@ -0,0 +1,116 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+/* Totally different content than 'macros.s' */
+
+
+
+#ifndef __macros__
+#define __macros__
+
+#define ptr_to_lispobj(p) ((LispObj)(p))
+#define ptr_from_lispobj(o) ((LispObj*)(o))
+#define lisp_reg_p(reg)  ((reg) >= fn)
+
+#define fulltag_of(o)  ((o) & fulltagmask)
+#define tag_of(o) ((o) & tagmask)
+#define untag(o) ((o) & ~fulltagmask)
+#define node_aligned(o) ((o) & ~tagmask)
+#define indirect_node(o) (*(LispObj *)(node_aligned(o)))
+
+#define deref(o,n) ((((LispObj*) (untag((LispObj)o))))[(n)])
+#define header_of(o) deref(o,0)
+
+#define header_subtag(h) ((h) & subtagmask)
+#define header_element_count(h) ((h) >> num_subtag_bits)
+#define make_header(subtag,element_count) ((subtag)|((element_count)<<num_subtag_bits))
+
+#define unbox_fixnum(x) ((signed_natural)(((signed_natural)(x))>>fixnum_shift))
+#define box_fixnum(x) ((LispObj)((signed_natural)(x)<<fixnum_shift))
+
+#define car(x) (((cons *)ptr_from_lispobj(untag(x)))->car)
+#define cdr(x) (((cons *)ptr_from_lispobj(untag(x)))->cdr)
+
+/* "sym" is an untagged pointer to a symbol */
+#define BOUNDP(sym)  ((((lispsymbol *)(sym))->vcell) != undefined)
+
+/* Likewise. */
+#define FBOUNDP(sym) ((((lispsymbol *)(sym))->fcell) != nrs_UDF.vcell)
+
+#ifdef PPC
+#ifdef PPC64
+#define nodeheader_tag_p(tag) (((tag) & lowtag_mask) == lowtag_nodeheader)
+#define immheader_tag_p(tag) (((tag) & lowtag_mask) == lowtag_immheader)
+#else
+#define nodeheader_tag_p(tag) (tag == fulltag_nodeheader)
+#define immheader_tag_p(tag) (tag == fulltag_immheader)
+#endif
+#endif
+
+#ifdef X86
+#ifdef X8664
+#define NODEHEADER_MASK ((1<<(fulltag_nodeheader_0)) | \
+			 (1<<(fulltag_nodeheader_1)))
+#define nodeheader_tag_p(tag) ((1<<(tag)) &  NODEHEADER_MASK)
+
+#define IMMHEADER_MASK ((1<<fulltag_immheader_0) | \
+			(1UL<<fulltag_immheader_1) |			\
+			(1UL<<fulltag_immheader_2))
+
+#define immheader_tag_p(tag) ((1<<(tag)) & IMMHEADER_MASK)
+#else
+#define nodeheader_tag_p(tag) (tag == fulltag_nodeheader)
+#define immheader_tag_p(tag) (tag == fulltag_immheader)
+#endif
+#endif
+
+#ifdef VC
+#define inline
+#define __attribute__(x)
+#endif
+
+/* lfuns */
+#define lfun_bits(f) (deref(f,header_element_count(header_of(f))))
+#define named_function_p(f) (!(lfun_bits(f)&(1<<(29+fixnum_shift))))
+#define named_function_name(f) (deref(f,-1+header_element_count(header_of(f))))
+
+#define TCR_INTERRUPT_LEVEL(tcr) \
+  (((signed_natural *)((tcr)->tlb_pointer))[INTERRUPT_LEVEL_BINDING_INDEX])
+#endif
+
+#ifdef WINDOWS
+#define LSEEK(fd,offset,how) _lseeki64(fd,offset,how)
+#else
+#define LSEEK(fd,offset,how) lseek(fd,offset,how)
+#endif
+
+/* We can't easily and unconditionally use format strings like "0x%lx"
+   to print lisp objects: the "l" might not match the word size, and
+   neither would (necessarily) something like "0x%llx".  We can at 
+   least exploit the fact that on all current platforms, "ll" ("long long")
+   is the size of a 64-bit lisp object and "l" ("long") is the size of
+   a 32-bit lisp object. */
+
+#if (WORD_SIZE == 64)
+#define LISP "%llx"
+#define ZLISP "%016llx"
+#define DECIMAL "%lld"
+#else
+#define LISP "%lx"
+#define ZLISP "%08x"
+#define DECIMAL "%ld"
+#endif
Index: /branches/arm/lisp-kernel/memory.c
===================================================================
--- /branches/arm/lisp-kernel/memory.c	(revision 13357)
+++ /branches/arm/lisp-kernel/memory.c	(revision 13357)
@@ -0,0 +1,980 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+#include "Threads.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#ifdef LINUX
+#include <strings.h>
+#include <fpu_control.h>
+#include <linux/prctl.h>
+#endif
+
+#ifndef WINDOWS
+#include <sys/mman.h>
+#endif
+
+#define DEBUG_MEMORY 0
+
+void
+allocation_failure(Boolean pointerp, natural size)
+{
+  char buf[64];
+  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
+  Fatal(":   Kernel memory allocation failure.  ", buf);
+}
+
+void
+fatal_oserr(StringPtr param, OSErr err)
+{
+  char buf[64];
+  sprintf(buf," - operating system error %d.", err);
+  Fatal(param, buf);
+}
+
+
+Ptr
+allocate(natural size)
+{
+  return (Ptr) malloc(size);
+}
+
+void
+deallocate(Ptr p)
+{
+  free((void *)p);
+}
+
+Ptr
+zalloc(natural size)
+{
+  Ptr p = allocate(size);
+  if (p != NULL) {
+    memset(p, 0, size);
+  }
+  return p;
+}
+
+#ifdef DARWIN
+#if WORD_SIZE == 64
+#define vm_region vm_region_64
+#endif
+
+/*
+  Check to see if the specified address is unmapped by trying to get
+  information about the mapped address at or beyond the target.  If
+  the difference between the target address and the next mapped address
+  is >= len, we can safely mmap len bytes at addr.
+*/
+Boolean
+address_unmapped_p(char *addr, natural len)
+{
+  vm_address_t vm_addr = (vm_address_t)addr;
+  vm_size_t vm_size;
+#if WORD_SIZE == 64
+  vm_region_basic_info_data_64_t vm_info;
+#else
+  vm_region_basic_info_data_t vm_info;
+#endif
+#if WORD_SIZE == 64
+  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
+#else
+  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
+#endif
+  mach_port_t vm_object_name = (mach_port_t) 0;
+  kern_return_t kret;
+
+  kret = vm_region(mach_task_self(),
+		   &vm_addr,
+		   &vm_size,
+#if WORD_SIZE == 64
+                   VM_REGION_BASIC_INFO_64,
+#else
+		   VM_REGION_BASIC_INFO,
+#endif
+		   (vm_region_info_t)&vm_info,
+		   &vm_info_size,
+		   &vm_object_name);
+  if (kret != KERN_SUCCESS) {
+    return false;
+  }
+
+  return vm_addr >= (vm_address_t)(addr+len);
+}
+#endif
+
+
+  /*
+    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
+    likely to reside near the beginning of an unmapped block of memory
+    that's at least 1GB in size.  We'd like to load the heap image's
+    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
+    that'd allow us to file-map those sections (and would enable us to
+    avoid having to relocate references in the data sections.)
+
+    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
+    by creating an anonymous mapping with mmap().
+
+    If we try to insist that mmap() map a 1GB block at
+    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
+    mmap() will gleefully clobber any mapped memory that's already
+    there.  (That region's empty at this writing, but some future
+    version of the OS might decide to put something there.)
+
+    If we don't specify MAP_FIXED, mmap() is free to treat the address
+    we give it as a hint; Linux seems to accept the hint if doing so
+    wouldn't cause a problem.  Naturally, that behavior's too useful
+    for Darwin (or perhaps too inconvenient for it): it'll often
+    return another address, even if the hint would have worked fine.
+
+    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
+    would conflict with anything.  Until we discover a need to do 
+    otherwise, we'll assume that if Linux's mmap() fails to take the
+    hint, it's because of a legitimate conflict.
+
+    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
+    to implement an address_unmapped_p() for Linux.
+  */
+
+LogicalAddress
+ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
+{
+  LogicalAddress start;
+  Boolean fixed_map_ok = false;
+#ifdef DARWIN
+  fixed_map_ok = address_unmapped_p(want,totalsize);
+#endif
+#ifdef SOLARIS
+  fixed_map_ok = true;
+#endif
+  raise_limit();
+#ifdef WINDOWS
+  start = VirtualAlloc((void *)want,
+		       totalsize + heap_segment_size,
+		       MEM_RESERVE,
+		       PAGE_NOACCESS);
+  if (!start) {
+#if DEBUG_MEMORY    
+    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
+#endif
+    start = VirtualAlloc(0,
+			 totalsize + heap_segment_size,
+			 MEM_RESERVE,
+			 PAGE_NOACCESS);
+    if (!start) {
+      return NULL;
+    }
+  }
+#else
+  start = mmap((void *)want,
+	       totalsize + heap_segment_size,
+	       PROT_NONE,
+	       MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
+	       -1,
+	       0);
+  if (start == MAP_FAILED) {
+    return NULL;
+  }
+
+  if (start != want) {
+    munmap(start, totalsize+heap_segment_size);
+    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
+    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
+      return NULL;
+    }
+  }
+  mprotect(start, totalsize, PROT_NONE);
+#endif
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
+#endif
+  return start;
+}
+
+int
+CommitMemory (LogicalAddress start, natural len) 
+{
+  LogicalAddress rc;
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
+#endif
+#ifdef WINDOWS
+  if ((start < ((LogicalAddress)nil_value)) &&
+      (((LogicalAddress)nil_value) < (start+len))) {
+    /* nil area is in the executable on Windows; ensure range is
+       read-write */
+    DWORD as_if_i_care;
+    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
+      return false;
+    }
+    return true;
+  }
+  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+  if (!rc) {
+    wperror("CommitMemory VirtualAlloc");
+    return false;
+  }
+  return true;
+#else
+  int i, err;
+  void *addr;
+
+  for (i = 0; i < 3; i++) {
+    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
+    if (addr == start) {
+      return true;
+    } else {
+      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
+    }
+  }
+  return false;
+#endif
+}
+
+void
+UnCommitMemory (LogicalAddress start, natural len) {
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
+#endif
+#ifdef WINDOWS
+  int rc = VirtualFree(start, len, MEM_DECOMMIT);
+  if (!rc) {
+    wperror("UnCommitMemory VirtualFree");
+    Fatal("mmap error", "");
+    return;
+  }
+#else
+  if (len) {
+    madvise(start, len, MADV_DONTNEED);
+    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
+	!= start) {
+      int err = errno;
+      Fatal("mmap error", "");
+      fprintf(dbgout, "errno = %d", err);
+    }
+  }
+#endif
+}
+
+
+LogicalAddress
+MapMemory(LogicalAddress addr, natural nbytes, int protection)
+{
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
+#endif
+#ifdef WINDOWS
+  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
+#else
+  {
+    int flags = MAP_PRIVATE|MAP_ANON;
+
+    if (addr > 0) flags |= MAP_FIXED;
+    return mmap(addr, nbytes, protection, flags, -1, 0);
+  }
+#endif
+}
+
+LogicalAddress
+MapMemoryForStack(natural nbytes)
+{
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
+#endif
+#ifdef WINDOWS
+  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
+#else
+  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
+#endif
+}
+
+int
+UnMapMemory(LogicalAddress addr, natural nbytes)
+{
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
+#endif
+#ifdef WINDOWS
+  /* Can't MEM_RELEASE here because we only want to free a chunk */
+  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
+#else
+  return munmap(addr, nbytes);
+#endif
+}
+
+/* Make nbytes at addr read+execute only.  Return conventions differ
+   by platform: the POSIX branch returns 0 on success and errno on
+   failure; the Windows branch returns VirtualProtect's BOOL (nonzero
+   on success).  Both branches call Bug() on failure. */
+int
+ProtectMemory(LogicalAddress addr, natural nbytes)
+{
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
+#endif
+#ifdef WINDOWS
+  DWORD oldProtect;
+  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);
+  
+  if(!status) {
+    wperror("ProtectMemory VirtualProtect");
+    /* NOTE(review): 'status' here is the (false) VirtualProtect
+       result, not errno; GetLastError() would be more informative. */
+    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
+  }
+  return status;
+#else
+  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);
+  
+  if (status) {
+    /* Preserve errno across the Bug() call by capturing it now. */
+    status = errno;
+    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
+  }
+  return status;
+#endif
+}
+
+/* Restore full read/write/execute access to [addr, addr+nbytes).
+   Returns the underlying VirtualProtect/mprotect result. */
+int
+UnProtectMemory(LogicalAddress addr, natural nbytes)
+{
+#if DEBUG_MEMORY
+  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
+#endif
+#ifdef WINDOWS
+  DWORD prior;
+  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &prior);
+#else
+  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
+#endif
+}
+
+/* Copy nbytes of the file 'fd', starting at offset 'pos', into memory
+   at addr (POSIX: map it MAP_PRIVATE|MAP_FIXED with 'permissions').
+   Returns true on success, false on failure.  The Windows path reads
+   the file contents instead of mapping; the caller's file position is
+   restored afterwards. */
+int
+MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) 
+{
+#ifdef WINDOWS
+#if 0
+  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
+  HANDLE hFile, hFileMapping;
+  LPVOID rc;
+  DWORD desiredAccess;
+
+  if (permissions == MEMPROTECT_RWX) {
+    permissions |= PAGE_WRITECOPY;
+    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
+  } else {
+    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
+  }
+
+  hFile = _get_osfhandle(fd);
+  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
+				   (nbytes >> 32), (nbytes & 0xffffffff), NULL);
+  
+  if (!hFileMapping) {
+    wperror("CreateFileMapping");
+    return false;
+  }
+
+  rc = MapViewOfFileEx(hFileMapping,
+		       desiredAccess,
+		       (pos >> 32),
+		       (pos & 0xffffffff),
+		       nbytes,
+		       addr);
+#else
+  /* read() returns ssize_t: -1 on error, 0 at EOF.  Accumulating it
+     into an unsigned size_t (as the original did) converts -1 to
+     SIZE_MAX, so errors went undetected and corrupted 'total'. */
+  ssize_t count;
+  size_t total = 0;
+  size_t opos;
+
+  opos = LSEEK(fd, 0, SEEK_CUR);
+  CommitMemory(addr, nbytes);
+  LSEEK(fd, pos, SEEK_SET);
+
+  while (total < nbytes) {
+    count = read(fd, addr + total, nbytes - total);
+    if (count <= 0) {
+      /* 0 => premature EOF, -1 => read error.  Restore the caller's
+         file position before failing. */
+      LSEEK(fd, opos, SEEK_SET);
+      return false;
+    }
+    total += (size_t)count;
+  }
+
+  LSEEK(fd, opos, SEEK_SET);
+
+  return true;
+#endif
+#else
+  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
+#endif
+}
+
+/* Drop any memory protection currently in effect on P. */
+void
+unprotect_area(protected_area_ptr p)
+{
+  if (p->nprot != 0) {
+    UnProtectMemory(p->start, p->nprot);
+    p->nprot = 0;
+  }
+}
+
+/* Allocate and initialize a protected_area record covering
+   [start, end), push it onto the global AllProtectedAreas list,
+   and apply the protection immediately if 'now' is set.
+   Returns NULL if allocation fails. */
+protected_area_ptr
+new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
+{
+  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));
+
+  if (p != NULL) {
+    p->start = start;
+    p->end = end;
+    p->nprot = 0;                 /* nothing protected yet */
+    p->protsize = protsize;
+    p->why = reason;
+    p->next = AllProtectedAreas;
+    AllProtectedAreas = p;
+    if (now) {
+      protect_area(p);
+    }
+  }
+  return p;
+}
+
+/*
+  Un-protect the first nbytes bytes in specified area.
+  Note that this may cause the area to be empty.
+*/
+void
+unprotect_area_prefix(protected_area_ptr area, size_t delta)
+{
+  /* Shrink the protected region by delta bytes from the front;
+     re-protect only if protsize bytes still fit before 'end'. */
+  unprotect_area(area);
+  area->start += delta;
+  if ((area->start + area->protsize) <= area->end) {
+    protect_area(area);
+  }
+}
+
+
+/*
+  Extend the protected area, causing the preceding nbytes bytes
+  to be included and protected.
+*/
+/* Grow the protected region downward by delta bytes and re-apply
+   the protection. */
+void
+protect_area_prefix(protected_area_ptr p, size_t delta)
+{
+  unprotect_area(p);
+  p->start -= delta;
+  protect_area(p);
+}
+
+/* Head of the global singly-linked list of protected areas. */
+protected_area_ptr
+AllProtectedAreas = NULL;
+
+
+/* 
+  This does a linear search.  Areas aren't created all that often;
+  if there get to be very many of them, some sort of tree search
+  might be justified.
+*/
+
+/* Linear search of AllProtectedAreas for the area whose
+   [start, end) range contains addr; NULL if none does. */
+protected_area_ptr
+find_protected_area(BytePtr addr)
+{
+  protected_area_ptr q;
+
+  for (q = AllProtectedAreas; q != NULL; q = q->next) {
+    if ((addr >= q->start) && (addr < q->end)) {
+      return q;
+    }
+  }
+  return NULL;
+}
+
+
+/* Zero every byte in the half-open range [start, end). */
+void
+zero_memory_range(BytePtr start, BytePtr end)
+{
+  size_t nbytes = (size_t)(end - start);
+
+#ifdef WINDOWS
+  ZeroMemory(start, nbytes);
+#else
+  bzero(start, nbytes);
+#endif
+}
+
+
+  
+
+/* 
+   Grow or shrink the dynamic area.  Or maybe not.
+   Whether or not the end of (mapped space in) the heap changes,
+   ensure that everything between the freeptr and the heap end
+   is mapped and read/write.  (It'll incidentally be zeroed.)
+*/
+/* Grow or shrink the dynamic area so that there are at least
+   free_space_size bytes (rounded up to a heap segment) beyond
+   'newfree'.  Returns true on success (including when nothing
+   needed to change); grow failures propagate grow_dynamic_area's
+   result.  The original fell off the end without returning a value
+   (undefined behavior) whenever no resize was needed. */
+Boolean
+resize_dynamic_heap(BytePtr newfree, 
+		    natural free_space_size)
+{
+  area *a = active_dynamic_area;
+  BytePtr newlimit;
+
+  if (free_space_size) {
+    BytePtr lowptr = a->active;
+    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
+					    log2_heap_segment_size);
+    if (newlimit > a->high) {
+      return grow_dynamic_area(newlimit-a->high);
+    } else if ((lowptr + free_space_size) < a->high) {
+      shrink_dynamic_area(a->high-newlimit);
+      return true;
+    }
+  }
+  /* Heap already has the requested free space; nothing to do. */
+  return true;
+}
+
+/* Apply protection to the first protsize bytes of P, unless it is
+   already protected or has nothing to protect. */
+void
+protect_area(protected_area_ptr p)
+{
+  natural len = p->protsize;
+
+  if ((len != 0) && (p->nprot == 0)) {
+    ProtectMemory(p->start, len);
+    p->nprot = len;
+  }
+}
+
+
+/* Zero one page (page_size bytes) starting at 'start'. */
+void
+zero_page(BytePtr start)
+{
+  extern int page_size;
+#ifdef PPC
+  extern void zero_cache_lines(BytePtr, size_t, size_t);
+  /* NOTE(review): arguments appear to be (addr, nlines, linesize) —
+     confirm against the PPC assembly definition. */
+  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
+#else
+  memset(start, 0, page_size);
+#endif
+}
+
+/* area management */
+
+
+/* Allocate a zeroed area descriptor for [lowaddr, highaddr).
+   Dynamic areas start out empty (active == low); everything else
+   starts out full (active == high).  Returns NULL if zalloc fails.
+   Caller must allocate markbits when allocating heap! */
+area *
+new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
+{
+  area *a = (area *) zalloc(sizeof(area));
+
+  if (a != NULL) {
+    a->low = lowaddr;
+    a->high = highaddr;
+    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
+    a->code = code;
+    a->ndnodes = area_dnode(highaddr, lowaddr);
+  }
+  return a;
+}
+
+/* Splice new_area into the doubly-linked area list immediately
+   before 'before'; returns new_area. */
+static area *
+add_area_before(area *new_area, area *before)
+{
+  area *prev = before->pred;
+
+  new_area->succ = before;
+  new_area->pred = prev;
+  before->pred = new_area;
+  prev->succ = new_area;
+  return new_area;
+}
+
+/*
+  The active dynamic area comes first.
+  Static areas follow dynamic areas.
+  Stack areas follow static areas.
+  Readonly areas come last.
+*/
+
+/*
+  If we already own the area_lock (or during initialization), it's safe
+  to add an area.
+*/
+
+
+/* Insert new_area into all_areas, which is kept sorted by
+   descending area code (dynamic first, readonly last; see the
+   ordering comment above).  Caller must already hold
+   TCR_AREA_LOCK, or be running during initialization. */
+void
+add_area_holding_area_lock(area *new_area)
+{
+  area *that = all_areas;
+  int
+    thiscode = (int)(new_area->code),
+    thatcode;
+
+  /* Cdr down the linked list */
+  do {
+    that = that->succ;
+    thatcode = (int)(that->code);
+  } while (thiscode < thatcode);
+  add_area_before(new_area, that);
+}
+
+/*
+  In general, we need to own the area lock before adding an area.
+*/
+/* Add an area to the global list, taking and releasing the area
+   lock around the insertion.  The original called LOCK twice —
+   never releasing the lock — which would deadlock the next taker;
+   the second call must be UNLOCK (cf. condemn_area_chain). */
+void
+add_area(area *new_area, TCR *tcr)
+{
+  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
+  add_area_holding_area_lock(new_area);
+  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
+}  
+
+/*
+  Search areas "forward" from the header's successor, until
+  an area containing ADDR is found or an area with code < MINCODE
+  is encountered.
+  This walks the area list visiting heaps (dynamic, then static)
+  first, then stacks.
+
+*/
+/* Walk the area list forward (heaps first, then stacks) from the
+   header, returning the area containing addr, or NULL if an area
+   with code < mincode is reached first.  Codes decrease along the
+   list (see add_area_holding_area_lock), so the early exit is safe. */
+static area *
+find_area_forward(BytePtr addr, area_code mincode)
+{
+  area *p, *header = all_areas;
+
+  for (p = header->succ; p != header; p = p->succ) {
+    area_code pcode = p->code;
+    if (pcode < mincode) {
+      return NULL;
+    }
+    if (pcode >= AREA_READONLY) {
+      /* Heap-like area: occupied part is [low, active). */
+      if ((addr >= p->low) &&
+          (addr < p->active)) {
+        return p;
+      }
+    } else {
+      /* Stack-like area (grows down): occupied part is [active, high). */
+      if ((addr >= p->active) &&
+          (addr < p->high)) {
+        return p;
+      }
+    }
+  }
+  return NULL;
+}
+
+/* Walk the area list backward (stacks first) from the header,
+   returning the area containing addr, or NULL if an area with
+   code > maxcode is reached first. */
+static area *
+find_area_backward(BytePtr addr, area_code maxcode)
+{
+  area *p, *header = all_areas;
+
+  for (p = header->pred; p != header; p = p->pred) {
+    area_code pcode = p->code;
+
+    if (pcode > maxcode) {
+      return NULL;
+    }
+    if (pcode >= AREA_READONLY) {
+      /* Heap-like area: occupied part is [low, active). */
+      if ((addr >= p->low) &&
+          (addr < p->active)) {
+        return p;
+      }
+    } else {
+      /* Stack-like area (grows down): occupied part is [active, high). */
+      if ((addr >= p->active) &&
+          (addr < p->high)) {
+        return p;
+      }
+    }
+  }
+  return NULL;
+}
+
+/* Find the area of any kind containing addr, or NULL. */
+area *
+area_containing(BytePtr addr)
+{
+  return find_area_forward(addr, AREA_VOID);
+}
+
+/* Find the heap (readonly or newer) area containing addr, or NULL. */
+area *
+heap_area_containing(BytePtr addr)
+{
+  return find_area_forward(addr, AREA_READONLY);
+}
+
+/* Find the stack area containing addr, or NULL. */
+area *
+stack_area_containing(BytePtr addr)
+{
+  return find_area_backward(addr, AREA_TSTACK);
+}
+
+/*
+  Make everything "younger" than the start of the target area
+  belong to that area; all younger areas will become empty, and
+  the dynamic area will have to lose some of its markbits (they
+  get zeroed and become part of the tenured area's refbits.)
+
+  The active dynamic area must have been "normalized" (e.g., its
+  active pointer must match the free pointer) before this is called.
+
+  If the target area is 'tenured_area' (the oldest ephemeral generation),
+  zero its refbits and update YOUNGEST_EPHEMERAL.
+
+*/
+
+void
+tenure_to_area(area *target)
+{
+  /* See the block comment above for the contract.  The active
+     dynamic area must already be normalized (active == free ptr).
+     NOTE(review): tenured_low and dynamic_dnodes are computed but
+     never used. */
+  area *a = active_dynamic_area, *child;
+  BytePtr 
+    curfree = a->active,
+    target_low = target->low,
+    tenured_low = tenured_area->low;
+  natural 
+    dynamic_dnodes = area_dnode(curfree, a->low),
+    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
+  bitvector 
+    refbits = tenured_area->refbits,
+    markbits = a->markbits,
+    new_markbits;
+
+  /* Target absorbs everything allocated so far. */
+  target->high = target->active = curfree;
+  target->ndnodes = area_dnode(curfree, target_low);
+
+  /* Every generation younger than target becomes empty. */
+  for (child = target->younger; child != a; child = child->younger) {
+    child->high = child->low = child->active = curfree;
+    child->ndnodes = 0;
+  }
+
+  /* The dynamic area now starts at the current free pointer. */
+  a->low = curfree;
+  a->ndnodes = area_dnode(a->high, curfree);
+
+  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);
+  
+  if (target == tenured_area) {
+    zero_bits(refbits, new_tenured_dnodes);
+    /* NOTE(review): the block comment above says YOUNGEST_EPHEMERAL,
+       but the code (here and in untenure_from_area) consistently
+       uses OLDEST_EPHEMERAL — the comment is likely stale. */
+    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
+  } else {
+    /* Need more (zeroed) refbits & fewer markbits */
+    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
+  }
+   
+  a->markbits = new_markbits;
+  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
+}
+
+
+
+/*
+  Make everything younger than the oldest byte in 'from' belong to 
+  the youngest generation.  If 'from' is 'tenured_area', this means
+  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
+  to 0 to indicate this.)
+  
+  Some tenured_area refbits become dynamic area markbits in the process;
+  it's not necessary to zero them, since the GC will do that.
+*/
+
+void
+untenure_from_area(area *from)
+{
+  /* See the block comment above: everything younger than 'from'
+     rejoins the youngest generation.  A no-op unless some tenuring
+     has actually happened (OLDEST_EPHEMERAL non-zero). */
+  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
+    area *a = active_dynamic_area, *child;
+    BytePtr curlow = from->low;
+    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);
+    
+    /* Empty 'from' and every younger generation. */
+    for (child = from; child != a; child = child->younger) {
+      child->low = child->active = child->high = curlow;
+      child->ndnodes = 0;
+    }
+    
+    /* The dynamic area reclaims the untenured space. */
+    a->low = curlow;
+    a->ndnodes = area_dnode(a->high, curlow);
+    
+    /* Former refbits beyond the (shrunken) tenured prefix become
+       dynamic-area markbits; the GC zeroes them as needed. */
+    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
+    if (from == tenured_area) {
+      /* Everything's in the dynamic area */
+      lisp_global(OLDEST_EPHEMERAL) = 0;
+      /* NOTE(review): oldspace count is derived from
+         managed_static_area here — confirm this is intended. */
+      lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);
+
+    }
+  }
+}
+
+
+/* Enable or disable the ephemeral GC.  EGC is considered active iff
+   the active dynamic area has an older generation linked in.  If
+   curfree is non-NULL it is installed as the dynamic area's active
+   pointer before any tenuring.  Returns the resulting state. */
+Boolean
+egc_control(Boolean activate, BytePtr curfree)
+{
+  area *a = active_dynamic_area;
+  Boolean egc_is_active = (a->older != NULL);
+
+  if (activate != egc_is_active) {
+    if (curfree != NULL) {
+      a->active = curfree;
+    }
+    if (activate) {
+      /* Link in generation 1, then tenure everything allocated so far. */
+      a->older = g1_area;
+      tenure_to_area(tenured_area);
+      egc_is_active = true;
+    } else {
+      untenure_from_area(tenured_area);
+      a->older = NULL;
+      egc_is_active = false;
+    }
+  }
+  return egc_is_active;
+}
+
+/*
+  Lisp ff-calls this; it needs to set the active area's active pointer
+  correctly.
+*/
+
+/* Entry point for lisp ff-calls: toggle the EGC, using the dynamic
+   area's current active pointer as the free pointer. */
+Boolean
+lisp_egc_control(Boolean activate)
+{
+  area *a = active_dynamic_area;
+  return egc_control(activate, (BytePtr) a->active);
+}
+
+
+
+
+  
+/* Splice the protected_area_ptr out of the list and dispose of it. */
+/* Splice the protected_area_ptr out of AllProtectedAreas (if it is
+   on the list), drop any active protection, and free it. */
+void
+delete_protected_area(protected_area_ptr p)
+{
+  protected_area_ptr *link, q;
+
+  if (p->nprot) {
+    UnProtectMemory((LogicalAddress)p->start, p->nprot);
+  }
+
+  for (link = &AllProtectedAreas; (q = *link) != NULL; link = &(q->next)) {
+    if (q == p) {
+      *link = p->next;
+      break;
+    }
+  }
+
+  deallocate((Ptr)p);
+}
+
+
+
+
+/* 
+  Unlink the area from all_areas.
+  Unprotect and dispose of any hard/soft protected_areas.
+  If the area has a handle, dispose of that as well.
+  */
+
+/* Unlink the area from all_areas, unprotect and dispose of its
+   hard/soft protected_areas, free its stack handle (if any), and
+   free the descriptor itself.  Caller holds TCR_AREA_LOCK. */
+void
+condemn_area_holding_area_lock(area *a)
+{
+  void free_stack(void *);
+  area *prev = a->pred, *next = a->succ;
+  Ptr h = a->h;
+  protected_area_ptr p;
+
+  /* Unlink from the doubly-linked all_areas list. */
+  prev->succ = next;
+  next->pred = prev;
+
+  p = a->softprot;
+  if (p) delete_protected_area(p);
+
+  p = a->hardprot;
+
+  if (p) delete_protected_area(p);
+
+  if (h) free_stack(h);
+  deallocate((Ptr)a);
+}
+
+
+
+/* Condemn an area, taking and releasing the area lock.  The original
+   called LOCK twice — never releasing the lock — which would deadlock
+   the next taker; the second call must be UNLOCK (cf.
+   condemn_area_chain). */
+void
+condemn_area(area *a, TCR *tcr)
+{
+  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
+  condemn_area_holding_area_lock(a);
+  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
+}
+
+
+
+
+/*
+  condemn an area and all the other areas that can be reached
+  via the area.older & area.younger links.
+  This is the function in the ppc::kernel-import-condemn-area slot,
+  called by free-stack-area
+  */
+void
+condemn_area_chain(area *a, TCR *tcr)
+{
+  area *older;
+
+  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
+
+  /* Walk to the youngest area in the chain... */
+  for (; a->younger; a = a->younger) ;
+  /* ...then condemn each area from youngest to oldest, saving the
+     'older' link before the area is freed. */
+  for (;a;) {
+    older = a->older;
+    condemn_area_holding_area_lock(a);
+    a = older;
+  }
+  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
+}
+
+
+/* Write-protect every AREA_WATCHED area, walking the area list
+   from the active dynamic area until the AREA_VOID header. */
+void
+protect_watched_areas()
+{
+  area *a;
+
+  for (a = active_dynamic_area; a->code != AREA_VOID; a = a->succ) {
+    if (a->code == AREA_WATCHED) {
+      ProtectMemory(a->low, a->high - a->low);
+    }
+  }
+}
+
+/* Remove protection from every AREA_WATCHED area, walking the area
+   list from the active dynamic area until the AREA_VOID header. */
+void
+unprotect_watched_areas()
+{
+  area *a;
+
+  for (a = active_dynamic_area; a->code != AREA_VOID; a = a->succ) {
+    if (a->code == AREA_WATCHED) {
+      UnProtectMemory(a->low, a->high - a->low);
+    }
+  }
+}
Index: /branches/arm/lisp-kernel/memprotect.h
===================================================================
--- /branches/arm/lisp-kernel/memprotect.h	(revision 13357)
+++ /branches/arm/lisp-kernel/memprotect.h	(revision 13357)
@@ -0,0 +1,132 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __memprotect_h__
+#define __memprotect_h__
+
+
+
+#include "lisptypes.h"
+#ifdef PPC
+#include "ppc-constants.h"
+#endif
+#include <signal.h>
+#ifndef WINDOWS
+#ifdef DARWIN
+#include <sys/ucontext.h>
+#else
+#include <ucontext.h>
+#endif
+#endif
+
+#ifdef WINDOWS
+#define MAP_FAILED ((void *)(-1))
+
+#define MEMPROTECT_NONE PAGE_NOACCESS
+#define MEMPROTECT_RO   PAGE_READONLY
+#define MEMPROTECT_RW   PAGE_READWRITE
+#define MEMPROTECT_RX   PAGE_EXECUTE_READ
+#define MEMPROTECT_RWX  PAGE_EXECUTE_READWRITE
+
+#else
+
+#define MEMPROTECT_NONE PROT_NONE
+#define MEMPROTECT_RO   PROT_READ
+#define MEMPROTECT_RW   (PROT_READ|PROT_WRITE)
+#define MEMPROTECT_RX   (PROT_READ|PROT_EXEC)
+#define MEMPROTECT_RWX  (PROT_READ|PROT_WRITE|PROT_EXEC)
+#ifndef MAP_GROWSDOWN
+#define MAP_GROWSDOWN (0)
+#endif
+
+
+#endif
+
+LogicalAddress
+ReserveMemoryForHeap(LogicalAddress want, natural totalsize);
+
+int
+CommitMemory (LogicalAddress start, natural len);
+
+void
+UnCommitMemory (LogicalAddress start, natural len);
+
+LogicalAddress
+MapMemory(LogicalAddress addr, natural nbytes, int protection);
+
+LogicalAddress
+MapMemoryForStack(natural nbytes);
+
+int
+UnMapMemory(LogicalAddress addr, natural nbytes);
+
+int
+ProtectMemory(LogicalAddress, natural);
+
+int
+UnProtectMemory(LogicalAddress, natural);
+
+int
+MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd);
+
+/* Reason a region of memory is protected; stored in
+   protected_area.why and used to index protection_handlers[]. */
+typedef enum {
+  kNotProtected,		/* At least not at the moment. */
+  kVSPsoftguard,
+  kSPsoftguard,
+  kTSPsoftguard,
+  kSPhardguard,			/* Touch one and die. */
+  kVSPhardguard,
+  kTSPhardguard,
+  kHEAPsoft,			/* Uninitialized page in the heap */
+  kHEAPhard,			/* The end-of-the-line in the heap */
+  /* Phony last entry. */
+  kNumProtectionKinds
+  } lisp_protection_kind;
+
+/* One node of the global AllProtectedAreas list: a page-aligned
+   region that is (or can be) memory-protected. */
+typedef
+struct protected_area {
+  struct protected_area *next;
+  BytePtr start;                /* first byte (page-aligned) that might be protected */
+  BytePtr end;                  /* last byte (page-aligned) that could be protected */
+  unsigned nprot;               /* Might be 0 */
+  unsigned protsize;            /* number of bytes to protect */
+  lisp_protection_kind why;
+} protected_area, *protected_area_ptr;
+
+
+/* Various functions that try to respond to a protection violation */
+typedef 
+  OSStatus (protection_handler)(ExceptionInformation *, protected_area_ptr, BytePtr);
+
+protection_handler 
+  do_spurious_wp_fault,
+  do_soft_stack_overflow,
+  do_hard_stack_overflow,
+  do_tenured_space_write,
+  do_heap_soft_probe,
+  do_heap_hard_probe;
+
+extern protection_handler
+  *protection_handlers[];
+
+
+void
+exception_cleanup(void);
+
+
+  
+#endif /* __memprotect_h__ */
Index: /branches/arm/lisp-kernel/pad.s
===================================================================
--- /branches/arm/lisp-kernel/pad.s	(revision 13357)
+++ /branches/arm/lisp-kernel/pad.s	(revision 13357)
@@ -0,0 +1,6 @@
+	.globl openmcl_low_address
+openmcl_low_address:
+        nop
+        
+
+
Index: /branches/arm/lisp-kernel/plbt.c
===================================================================
--- /branches/arm/lisp-kernel/plbt.c	(revision 13357)
+++ /branches/arm/lisp-kernel/plbt.c	(revision 13357)
@@ -0,0 +1,318 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lispdcmd.h"
+#ifdef LINUX
+#define __USE_GNU 1
+#include <dlfcn.h>
+#endif
+
+#ifdef DARWIN
+#if 0
+#undef undefined
+#include <stdint.h>
+#include <mach-o/dyld.h>
+#include <mach-o/nlist.h>
+
+typedef struct dl_info {
+  const char      *dli_fname;     /* Pathname of shared object */
+  void            *dli_fbase;     /* Base address of shared object */
+  const char      *dli_sname;     /* Name of nearest symbol */
+  void            *dli_saddr;     /* Address of nearest symbol */
+} Dl_info;
+
+/* Minimal dladdr() emulation for Darwin: walk the dyld image list to
+   find the image containing p, locate its __LINKEDIT segment to get
+   the symbol/string tables, then scan for the nearest preceding
+   symbol.  Returns 1 on success, 0 on failure.  (Currently dead
+   code: only compiled under the #if 0 above.) */
+int
+darwin_dladdr(void *p, Dl_info *info)
+{
+  unsigned long i;
+  unsigned long j;
+  uint32_t count = _dyld_image_count();
+  struct mach_header *mh = 0;
+  struct load_command *lc = 0;
+  unsigned long addr = 0;
+  unsigned long table_off = (unsigned long)0;
+  int found = 0;
+
+  if (!info)
+    return 0;
+  info->dli_fname = 0;
+  info->dli_fbase = 0;
+  info->dli_sname = 0;
+  info->dli_saddr = 0;
+  /* Some of this was swiped from code posted by Douglas Davidson
+   * <ddavidso AT apple DOT com> to darwin-development AT lists DOT
+   * apple DOT com and slightly modified
+   */
+  /* Pass 1: find the image whose segments contain the (un-slid)
+     address. */
+  for (i = 0; i < count; i++) {
+    addr = (unsigned long)p - _dyld_get_image_vmaddr_slide(i);
+    mh = (struct mach_header *)_dyld_get_image_header(i);
+    if (mh) {
+      lc = (struct load_command *)((char *)mh + sizeof(struct mach_header));
+      for (j = 0; j < mh->ncmds; j++, lc = (struct load_command *)((char *)lc + lc->cmdsize)) {
+	if (LC_SEGMENT == lc->cmd &&
+	    addr >= ((struct segment_command *)lc)->vmaddr &&
+	    addr <
+	    ((struct segment_command *)lc)->vmaddr + ((struct segment_command *)lc)->vmsize) {
+	  info->dli_fname = _dyld_get_image_name(i);
+	  info->dli_fbase = (void *)mh;
+	  found = 1;
+	  break;
+	}
+      }
+      if (found) {
+	    break;
+      }
+    }
+  }
+  if (!found) {
+    return 0;
+  }
+  /* Pass 2: find __LINKEDIT to compute the file-offset -> address
+     translation for the symbol and string tables. */
+  lc = (struct load_command *)((char *)mh + sizeof(struct mach_header));
+  for (j = 0; 
+       j < mh->ncmds; 
+       j++, lc = (struct load_command *)((char *)lc + lc->cmdsize)) {
+    if (LC_SEGMENT == lc->cmd) {
+      if (!strcmp(((struct segment_command *)lc)->segname, "__LINKEDIT"))
+	break;
+    }
+  }
+  table_off =
+    ((unsigned long)((struct segment_command *)lc)->vmaddr) -
+    ((unsigned long)((struct segment_command *)lc)->fileoff) + _dyld_get_image_vmaddr_slide(i);
+  
+  /* Pass 3: scan the symbol table for the nearest symbol at or
+     below addr. */
+  lc = (struct load_command *)((char *)mh + sizeof(struct mach_header));
+  for (j = 0; 
+       j < mh->ncmds; 
+       j++, lc = (struct load_command *)((char *)lc + lc->cmdsize)) {
+    if (LC_SYMTAB == lc->cmd) {
+      struct nlist *symtable = (struct nlist *)(((struct symtab_command *)lc)->symoff + table_off);
+      unsigned long numsyms = ((struct symtab_command *)lc)->nsyms;
+      struct nlist *nearest = NULL;
+      unsigned long diff = 0xffffffff;
+      unsigned long strtable = (unsigned long)(((struct symtab_command *)lc)->stroff + table_off);
+      for (i = 0; i < numsyms; i++) {
+	/* fprintf(dbgout,"%s : 0x%08x, 0x%x\n",(char *)(strtable + symtable->n_un.n_strx) ,symtable->n_value, symtable->n_type); */
+	/* Ignore the following kinds of Symbols */
+	if ((!symtable->n_value)	/* Undefined */
+	    || (symtable->n_type & N_STAB)	/* Debug symbol */
+	    || ((symtable->n_type & N_TYPE) != N_SECT)	/* Absolute, indirect, ... */
+	    ) {
+	  symtable++;
+	  continue;
+	}
+	if ((addr >= symtable->n_value) && 
+	    (diff >= addr - (symtable->n_value ))) {
+	  diff = addr- (unsigned long)symtable->n_value;
+	  nearest = symtable;
+	}
+	symtable++;
+      }
+      if (nearest) {
+	info->dli_saddr = nearest->n_value + ((void *)p - addr);
+	info->dli_sname = (char *)(strtable + nearest->n_un.n_strx);
+      }
+    }
+  }
+  return 1;
+}
+
+#define dladdr darwin_dladdr
+#else
+#include <dlfcn.h>
+#endif
+#endif
+
+
+
+extern Boolean lisp_frame_p(lisp_frame *);
+
+/* Print a one-line description of a lisp stack frame: its address,
+   saved PC, and either the containing function (with a byte offset
+   into its code vector) or, for frames with no function, the nearest
+   subprimitive symbol found via dladdr. */
+void
+print_lisp_frame(lisp_frame *frame)
+{
+  LispObj fun = frame->savefn, pc = frame->savelr;
+  int delta = 0;
+  Dl_info info;
+  char *spname;
+
+  if ((fun == 0) || (fun == fulltag_misc)) {
+    /* No lisp function: try to name the subprimitive via dladdr. */
+    spname = "unknown ?";
+#ifndef STATIC
+    if (dladdr((void *)ptr_from_lispobj(pc), &info)) {
+      spname = (char *)(info.dli_sname);
+#ifdef DARWIN
+      /* NOTE(review): reads the byte before dli_sname — presumably
+         to recover the Mach-O leading '_'; confirm dli_sname always
+         points into a larger string table. */
+      if (spname[-1] != '_') {
+        --spname;
+      }
+#endif
+    }
+#endif
+#ifdef PPC64
+    Dprintf("(#x%016lX) #x%016lX : (subprimitive %s)", frame, pc, spname);
+#else
+    Dprintf("(#x%08X) #x%08X : (subprimitive %s)", frame, pc, spname);
+#endif
+  } else {
+    if ((fulltag_of(fun) != fulltag_misc) ||
+        (header_subtag(header_of(fun)) != subtag_function)) {
+#ifdef PPC64
+      Dprintf("(#x%016lX) #x%016lX : (not a function!)", frame, pc);
+#else
+      Dprintf("(#x%08X) #x%08X : (not a function!)", frame, pc);
+#endif
+    } else {
+      LispObj code_vector = deref(fun, 1);
+      
+      /* Compute the PC's offset into the function's code vector,
+         if the PC actually falls inside it. */
+      if ((pc >= (code_vector+misc_data_offset)) &&
+          (pc < ((code_vector+misc_data_offset)+(header_element_count(header_of(code_vector))<<2)))) {
+        delta = (pc - (code_vector+misc_data_offset));
+      }
+#ifdef PPC64
+      Dprintf("(#x%016lX) #x%016lX : %s + %d", frame, pc, print_lisp_object(fun), delta);
+#else
+      Dprintf("(#x%08X) #x%08X : %s + %d", frame, pc, print_lisp_object(fun), delta);
+#endif
+    }
+  }
+}
+
+
+/* Print a one-line description of a foreign (C) stack frame:
+   its address, saved return PC, and — when dladdr can resolve it —
+   the nearest symbol name plus offset. */
+void
+print_foreign_frame(void *frame)
+{
+#ifdef LINUX
+  natural pc = (natural) (((eabi_c_frame *)frame)->savelr);
+#endif
+#ifdef DARWIN
+  natural pc = (natural) (((c_frame *)frame)->savelr);
+#endif
+  Dl_info foreign_info;
+
+#ifndef STATIC
+  if (dladdr((void *)pc, &foreign_info)) {
+    Dprintf(
+#ifdef PPC64
+"(#x%016lx) #x%016lX : %s + %d"
+#else
+"(#x%08x) #x%08X : %s + %d"
+#endif
+, frame, pc, foreign_info.dli_sname,
+	    pc-((long)foreign_info.dli_saddr));
+  } else {
+#endif
+    Dprintf(
+#ifdef PPC64
+"(#x%016X) #x%016X : foreign code (%s)"
+#else
+"(#x%08X) #x%08X : foreign code (%s)"
+#endif
+, frame, pc, "unknown");
+#ifndef STATIC
+  }
+#endif
+}
+
+
+/* Walk frames from "start" to "end". 
+   Say whatever can be said about foreign frames and lisp frames.
+*/
+
+/* Walk frames from "start" to "end", printing whatever can be said
+   about each foreign or lisp frame.  Stops early on a backlink that
+   points downward (corrupt frame chain). */
+void
+walk_stack_frames(lisp_frame *start, lisp_frame *end) 
+{
+  lisp_frame *next;
+  Dprintf("\n");
+  while (start < end) {
+
+    if (lisp_frame_p(start)) {
+      print_lisp_frame(start);
+    } else {
+#ifdef DARWIN
+      print_foreign_frame((c_frame *)start);
+#else
+      print_foreign_frame((eabi_c_frame *)start);
+#endif
+    }
+    
+    next = start->backlink;
+    if (next == 0) {
+      next = end;
+    }
+    if (next < start) {
+      /* %p with (void *) casts: passing pointers to %x is undefined
+         behavior and truncates on 64-bit targets. */
+      fprintf(dbgout, "Bad frame! (%p < %p)\n", (void *)next, (void *)start);
+      break;
+    }
+    start = next;
+  }
+}
+
+/* Describe tcr's interrupt state as a static string: a negative
+   interrupt level means disabled, possibly with an interrupt
+   pending. */
+char *
+interrupt_level_description(TCR *tcr)
+{
+  signed_natural level = (signed_natural) TCR_INTERRUPT_LEVEL(tcr);
+
+  if (level >= 0) {
+    return "enabled";
+  }
+  return tcr->interrupt_pending ? "disabled(pending)" : "disabled";
+}
+
+/* Backtrace every thread other than the caller's.  The interrupt
+   description is computed per-thread inside the loop; the original
+   computed it once from the first TCR and printed that stale value
+   for every thread. */
+void
+walk_other_areas()
+{
+  TCR *start = (TCR *)get_tcr(true), *tcr = start->next;
+  area *a;
+
+  while (tcr != start) {
+    char *ilevel = interrupt_level_description(tcr);
+
+    a = tcr->cs_area;
+    Dprintf("\n\n TCR = 0x%lx, cstack area #x%lx,  native thread ID = 0x%lx, interrupts %s", tcr, a,  tcr->native_thread_id, ilevel);
+    walk_stack_frames((lisp_frame *) (a->active), (lisp_frame *) (a->high));
+    tcr = tcr->next;
+  }
+}
+
+/* Print a lisp backtrace starting from the given stack pointer:
+   first the current thread's frames (if currentSP falls inside its
+   control-stack area), then every other thread's. */
+void
+plbt_sp(LispObj currentSP)
+{
+  area *cs_area;
+  
+{
+    TCR *tcr = (TCR *)get_tcr(true);
+    char *ilevel = interrupt_level_description(tcr);
+    cs_area = tcr->cs_area;
+    if ((((LispObj) ptr_to_lispobj(cs_area->low)) > currentSP) ||
+        (((LispObj) ptr_to_lispobj(cs_area->high)) < currentSP)) {
+      Dprintf("\nStack pointer [#x%lX] in unknown area.", currentSP);
+    } else {
+      fprintf(dbgout, "current thread: tcr = 0x%lx, native thread ID = 0x%lx, interrupts %s\n", tcr, tcr->native_thread_id, ilevel);
+      walk_stack_frames((lisp_frame *) ptr_from_lispobj(currentSP), (lisp_frame *) (cs_area->high));
+      walk_other_areas();
+    }
+  } 
+}
+
+  
+/* Print a lisp backtrace from the stack pointer recorded in xp. */
+void
+plbt(ExceptionInformation *xp)
+{
+  plbt_sp(xpGPR(xp, sp));
+}
+    
Index: /branches/arm/lisp-kernel/plprint.c
===================================================================
--- /branches/arm/lisp-kernel/plprint.c	(revision 13357)
+++ /branches/arm/lisp-kernel/plprint.c	(revision 13357)
@@ -0,0 +1,30 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lispdcmd.h"
+
+
+/* Debugger command: print a lisp object, provided the lisp image is
+   actually mapped (lisp NIL resolvable in this process). */
+void
+plprint(ExceptionInformation *xp, LispObj obj)
+{
+  if (lisp_nil != (LispObj) NULL) {
+    Dprintf("\n%s", print_lisp_object(obj));
+  } else {
+    fprintf(dbgout,"can't find lisp NIL; lisp process not active process ?\n");
+  }
+}
+
Index: /branches/arm/lisp-kernel/plsym.c
===================================================================
--- /branches/arm/lisp-kernel/plsym.c	(revision 13357)
+++ /branches/arm/lisp-kernel/plsym.c	(revision 13357)
@@ -0,0 +1,128 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lispdcmd.h"
+
/*
 * Kernel-debugger helper: print a symbol's name/address, its value
 * cell, and (when bound) its function cell to the debug stream.
 */
void
describe_symbol(LispObj sym)
{
  lispsymbol *rawsym = (lispsymbol *)ptr_from_lispobj(untag(sym));
  LispObj function = rawsym->fcell;
#ifdef fulltag_symbol
  /* On platforms with a distinct symbol fulltag, retag the
     misc-tagged pointer as a symbol before printing. */
  sym += (fulltag_symbol-fulltag_misc);
#endif
  Dprintf("Symbol %s at #x%llX", print_lisp_object(sym), (u64_t) sym);
  Dprintf("  value    : %s", print_lisp_object(rawsym->vcell));
  /* Only print the function cell when it isn't the undefined-function
     marker (nrs_UDF). */
  if (function != nrs_UDF.vcell) {
    Dprintf("  function : %s", print_lisp_object(function));
  }
}
+  
+int
+compare_lisp_string_to_c_string(lisp_char_code *lisp_string,
+                                char *c_string,
+                                natural n)
+{
+  natural i;
+  for (i = 0; i < n; i++) {
+    if (lisp_string[i] != (lisp_char_code)(c_string[i])) {
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/*
+  Walk the heap until we find a symbol
+  whose pname matches "name".  Return the 
+  tagged symbol or NULL.
+*/
+
LispObj
find_symbol_in_range(LispObj *start, LispObj *end, char *name)
{
  LispObj header, tag;
  int n = strlen(name);
  char *s = name;
  lisp_char_code *p;
  while (start < end) {
    header = *start;
    tag = fulltag_of(header);
    if (header_subtag(header) == subtag_symbol) {
      /* deref(...,1): the symbol's pname is the first node after the
         header. */
      LispObj 
        pname = deref(ptr_to_lispobj(start), 1),
        pname_header = header_of(pname);
      /* Cheap checks (subtag, length) first; only compare characters
         when those match. */
      if ((header_subtag(pname_header) == subtag_simple_base_string) &&
          (header_element_count(pname_header) == n)) {
        p = (lisp_char_code *) ptr_from_lispobj(pname + misc_data_offset);
        if (compare_lisp_string_to_c_string(p, s, n) == 0) {
          /* Found it: return the tagged symbol. */
          return (ptr_to_lispobj(start))+fulltag_misc;
        }
      }
    }
    /* Step to the next heap object. */
    if (nodeheader_tag_p(tag)) {
      /* Node vector: header word + elements, truncated to an even
         word count (whole dnodes) — TODO confirm rounding intent. */
      start += (~1 & (2 + header_element_count(header)));
    } else if (immheader_tag_p(tag)) {
      start = (LispObj *) skip_over_ivector((natural)start, header);
    } else {
      /* Anything else (e.g. a cons) occupies one dnode = two words. */
      start += 2;
    }
  }
  return (LispObj)NULL;
}
+
+LispObj 
+find_symbol(char *name)
+{
+  area *a =  ((area *) (ptr_from_lispobj(lisp_global(ALL_AREAS))))->succ;
+  area_code code;
+  LispObj sym = 0;
+
+  while ((code = a->code) != AREA_VOID) {
+    if ((code == AREA_STATIC) ||
+        (code == AREA_DYNAMIC)) {
+      sym = find_symbol_in_range((LispObj *)(a->low), (LispObj *)(a->active), name);
+      if (sym) {
+        break;
+      }
+    }
+    a = a->succ;
+  }
+  return sym;
+}
+
+    
+void 
+plsym(ExceptionInformation *xp, char *pname) 
+{
+  natural address = 0;
+
+  address = find_symbol(pname);
+  if (address == 0) {
+    Dprintf("Can't find symbol.");
+    return;
+  }
+  
+  if ((fulltag_of(address) == fulltag_misc) &&
+      (header_subtag(header_of(address)) == subtag_symbol)){
+    describe_symbol(address);
+  } else {
+    fprintf(dbgout, "Not a symbol.\n");
+  }
+  return;
+}
+
Index: /branches/arm/lisp-kernel/pmcl-kernel.c
===================================================================
--- /branches/arm/lisp-kernel/pmcl-kernel.c	(revision 13357)
+++ /branches/arm/lisp-kernel/pmcl-kernel.c	(revision 13357)
@@ -0,0 +1,2483 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifdef DARWIN
+/*	dyld.h included here because something in "lisp.h" causes
+    a conflict (actually I think the problem is in "constants.h")
+*/
+#include <mach-o/dyld.h>
+
+#endif
+#include "lisp.h"
+#include "lisp_globals.h"
+#include "gc.h"
+#include "area.h"
+#include <stdlib.h>
+#include <string.h>
+#include "lisp-exceptions.h"
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef WINDOWS
+#include <sys/mman.h>
+#endif
+#include <fcntl.h>
+#include <signal.h>
+#include <errno.h>
+#ifndef WINDOWS
+#include <sys/utsname.h>
+#include <unistd.h>
+#endif
+
+#ifdef LINUX
+#include <mcheck.h>
+#include <dirent.h>
+#include <dlfcn.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <link.h>
+#include <elf.h>
+
+/* 
+   The version of <asm/cputable.h> provided by some distributions will
+   claim that <asm-ppc64/cputable.h> doesn't exist.  It may be present
+   in the Linux kernel source tree even if it's not copied to
+   /usr/include/asm-ppc64.  Hopefully, this will be straightened out
+   soon (and/or the PPC_FEATURE_HAS_ALTIVEC constant will be defined
+   in a less volatile place.)  Until that's straightened out, it may
+   be necessary to install a copy of the kernel header in the right
+   place and/or persuade <asm/cputable> to lighten up a bit.
+*/
+
+#ifdef PPC
+#ifndef PPC64
+#include <asm/cputable.h>
+#endif
+#ifndef PPC_FEATURE_HAS_ALTIVEC
+#define PPC_FEATURE_HAS_ALTIVEC 0x10000000
+#endif
+#endif
+#endif
+
+Boolean use_mach_exception_handling = 
+#ifdef DARWIN
+  true
+#else
+  false
+#endif
+;
+
+#ifdef DARWIN
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <mach/mach_types.h>
+#include <mach/message.h>
+#include <mach/vm_region.h>
+#include <mach/port.h>
+#include <sys/sysctl.h>
+#include <dlfcn.h>
+#endif
+
+#if defined(FREEBSD) || defined(SOLARIS)
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <dlfcn.h>
+#include <elf.h> 
+#include <link.h>
+#endif
+
+#include <ctype.h>
+#ifndef WINDOWS
+#include <sys/select.h>
+#endif
+#include "Threads.h"
+
+#include <fenv.h>
+#include <sys/stat.h>
+
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE (0)
+#endif
+
+#ifdef WINDOWS
+#include <windows.h>
+#include <stdio.h>
/*
 * perror() analogue for Windows: render the calling thread's last
 * Win32 error code as text and write "<message>: <code> <text>" to
 * the kernel debug stream.
 */
void
wperror(char* message)
{
  char* buffer;
  DWORD last_error = GetLastError();
  
  /* FORMAT_MESSAGE_ALLOCATE_BUFFER makes the system allocate
     'buffer'; it must be released with LocalFree below. */
  FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER|
		FORMAT_MESSAGE_FROM_SYSTEM|
		FORMAT_MESSAGE_IGNORE_INSERTS,
		NULL,
		last_error,
		MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
		(LPTSTR)&buffer,
		0, NULL);
  fprintf(dbgout, "%s: 0x%x %s\n", message, (unsigned) last_error, buffer);
  LocalFree(buffer);
}
+#endif
+
+LispObj lisp_nil = (LispObj) 0;
+bitvector global_mark_ref_bits = NULL, dynamic_mark_ref_bits = NULL, relocatable_mark_ref_bits = NULL;
+
+
+/* These are all "persistent" : they're initialized when
+   subprims are first loaded and should never change. */
+extern LispObj ret1valn;
+extern LispObj nvalret;
+extern LispObj popj;
+
+LispObj text_start = 0;
+
+/* A pointer to some of the kernel's own data; also persistent. */
+
+extern LispObj import_ptrs_base;
+
+
+
+void
+xMakeDataExecutable(void *, unsigned long);
+
+void
+make_dynamic_heap_executable(LispObj *p, LispObj *q)
+{
+  void * cache_start = (void *) p;
+  natural ncacheflush = (natural) q - (natural) p;
+
+  xMakeDataExecutable(cache_start, ncacheflush);  
+}
+      
/*
 * Try to ensure the initial thread can use a control stack of at
 * least 'stack_size' bytes plus guard pages, raising the soft
 * RLIMIT_STACK if necessary.  Returns the size actually arranged
 * (on Windows: the existing stack size minus the guard regions).
 */
size_t
ensure_stack_limit(size_t stack_size)
{
#ifdef WINDOWS
  /* The stack was sized at thread creation; just report how much is
     usable beyond the guard regions. */
  extern void os_get_current_thread_stack_bounds(void **, natural*);
  natural totalsize;
  void *ignored;
  
  os_get_current_thread_stack_bounds(&ignored, &totalsize);

  return (size_t)totalsize-(size_t)(CSTACK_HARDPROT+CSTACK_SOFTPROT);

#else
  struct rlimit limits;
  rlim_t cur_stack_limit, max_stack_limit;
 
  /* Account for the guard pages protected at the stack bottom. */
  stack_size += (CSTACK_HARDPROT+CSTACK_SOFTPROT);
  getrlimit(RLIMIT_STACK, &limits);
  cur_stack_limit = limits.rlim_cur;
  max_stack_limit = limits.rlim_max;
  /* Clamp the request to the hard limit. */
  if (stack_size > max_stack_limit) {
    stack_size = max_stack_limit;
  }
  if (cur_stack_limit < stack_size) {
    limits.rlim_cur = stack_size;
    errno = 0;
    if (setrlimit(RLIMIT_STACK, &limits)) {
      /* Shouldn't happen: stack_size was clamped to rlim_max above. */
      int e = errno;
      fprintf(dbgout, "errno = %d\n", e);
      Fatal(": Stack resource limit too small", "");
    }
  }
#endif
  return stack_size;
}
+
+
+/* This should write-protect the bottom of the stack.
+   Doing so reliably involves ensuring that everything's unprotected on exit.
+*/
+
+BytePtr
+allocate_lisp_stack(natural useable,
+                    unsigned softsize,
+                    unsigned hardsize,
+                    lisp_protection_kind softkind,
+                    lisp_protection_kind hardkind,
+                    Ptr *h_p,
+                    BytePtr *base_p,
+                    protected_area_ptr *softp,
+                    protected_area_ptr *hardp)
+{
+  void *allocate_stack(natural);
+  void free_stack(void *);
+  natural size = useable+softsize+hardsize;
+  natural overhead;
+  BytePtr base, softlimit, hardlimit;
+  Ptr h = allocate_stack(size+4095);
+  protected_area_ptr hprotp = NULL, sprotp;
+
+  if (h == NULL) {
+    return NULL;
+  }
+  if (h_p) *h_p = h;
+  base = (BytePtr) align_to_power_of_2( h, log2_page_size);
+  hardlimit = (BytePtr) (base+hardsize);
+  softlimit = hardlimit+softsize;
+
+  overhead = (base - (BytePtr) h);
+  if (hardsize) {
+    hprotp = new_protected_area((BytePtr)base,hardlimit,hardkind, hardsize, true);
+    if (hprotp == NULL) {
+      if (base_p) *base_p = NULL;
+      if (h_p) *h_p = NULL;
+      deallocate(h);
+      return NULL;
+    }
+    if (hardp) *hardp = hprotp;
+  }
+  if (softsize) {
+    sprotp = new_protected_area(hardlimit,softlimit, softkind, softsize, true);
+    if (sprotp == NULL) {
+      if (base_p) *base_p = NULL;
+      if (h_p) *h_p = NULL;
+      if (hardp) *hardp = NULL;
+      if (hprotp) delete_protected_area(hprotp);
+      free_stack(h);
+      return NULL;
+    }
+    if (softp) *softp = sprotp;
+  }
+  if (base_p) *base_p = base;
+  return (BytePtr) ((natural)(base+size));
+}
+
+/*
+  This should only called by something that owns the area_lock, or
+  by the initial thread before other threads exist.
+*/
/*
 * Allocate a lisp stack (see allocate_lisp_stack) and register it as
 * a new area of 'stack_type'.  Returns the area, or NULL if the
 * stack couldn't be allocated.
 */
area *
allocate_lisp_stack_area(area_code stack_type,
                         natural usable,
                         unsigned softsize, 
                         unsigned hardsize, 
                         lisp_protection_kind softkind, 
                         lisp_protection_kind hardkind)

{
  BytePtr base, bottom;
  Ptr h;
  area *a = NULL;
  protected_area_ptr soft_area=NULL, hard_area=NULL;

  /* 'bottom' is the high end of the downward-growing stack, or NULL
     on failure. */
  bottom = allocate_lisp_stack(usable, 
                               softsize, 
                               hardsize, 
                               softkind, 
                               hardkind, 
                               &h, 
                               &base,
                               &soft_area, 
                               &hard_area);

  if (bottom) {
    a = new_area(base, bottom, stack_type);
    /* Guard regions sit at the area's low end. */
    a->hardlimit = base+hardsize;
    a->softlimit = base+hardsize+softsize;
    a->h = h;
    a->softprot = soft_area;
    a->hardprot = hard_area;
    add_area_holding_area_lock(a);
  }
  return a;
}
+
+/*
+  Also assumes ownership of the area_lock 
+*/
/*
 * Register an existing C stack (whose high end is 'bottom' and which
 * extends 'size' bytes below it) as an AREA_CSTACK area, placing the
 * hard/soft guard limits just above its page-rounded low end.
 */
area*
register_cstack_holding_area_lock(BytePtr bottom, natural size)
{
  /* Round the stack's low end up to a page boundary. */
  BytePtr lowlimit = (BytePtr) (((((natural)bottom)-size)+4095)&~4095);
  area *a = new_area((BytePtr) bottom-size, bottom, AREA_CSTACK);
  a->hardlimit = lowlimit+CSTACK_HARDPROT;
  a->softlimit = a->hardlimit+CSTACK_SOFTPROT;
#ifdef USE_SIGALTSTACK
  /* Overflow on this stack is detected via an alternate signal stack. */
  setup_sigaltstack(a);
#endif
  add_area_holding_area_lock(a);
  return a;
}
+  
+
+area*
+allocate_vstack_holding_area_lock(natural usable)
+{
+  return allocate_lisp_stack_area(AREA_VSTACK, 
+				  usable > MIN_VSTACK_SIZE ?
+				  usable : MIN_VSTACK_SIZE,
+                                  VSTACK_SOFTPROT,
+                                  VSTACK_HARDPROT,
+                                  kVSPsoftguard,
+                                  kVSPhardguard);
+}
+
+area *
+allocate_tstack_holding_area_lock(natural usable)
+{
+  return allocate_lisp_stack_area(AREA_TSTACK, 
+                                  usable > MIN_TSTACK_SIZE ?
+				  usable : MIN_TSTACK_SIZE,
+                                  TSTACK_SOFTPROT,
+                                  TSTACK_HARDPROT,
+                                  kTSPsoftguard,
+                                  kTSPhardguard);
+}
+
+
+/* It's hard to believe that max & min don't exist already */
/* Return the smaller of two unsigned values. */
unsigned unsigned_min(unsigned x, unsigned y)
{
  return (x <= y) ? x : y;
}
+
/* Return the larger of two unsigned values. */
unsigned unsigned_max(unsigned x, unsigned y)
{
  return (x >= y) ? x : y;
}
+
+#if WORD_SIZE == 64
+#ifdef DARWIN
+#define MAXIMUM_MAPPABLE_MEMORY (512L<<30L)
+#endif
+#ifdef FREEBSD
+#define MAXIMUM_MAPPABLE_MEMORY (512L<<30L)
+#endif
+#ifdef SOLARIS
+#define MAXIMUM_MAPPABLE_MEMORY (512L<<30L)
+#endif
+#ifdef LINUX
+#ifdef X8664
+#define MAXIMUM_MAPPABLE_MEMORY (512L<<30L)
+#endif
+#ifdef PPC
+#define MAXIMUM_MAPPABLE_MEMORY (512L<<30L)
+#endif
+#endif
+#ifdef WINDOWS
+/* Supposedly, the high-end version of Vista allow 128GB of pageable memory */
+#define MAXIMUM_MAPPABLE_MEMORY (512LL<<30LL)
+#endif
+#else
+#ifdef DARWIN
+#define MAXIMUM_MAPPABLE_MEMORY ((1U<<31)-2*heap_segment_size)
+#endif
+#ifdef LINUX
+#ifdef X86
+#define MAXIMUM_MAPPABLE_MEMORY (9U<<28)
+#else
+#define MAXIMUM_MAPPABLE_MEMORY (1U<<30)
+#endif
+#endif
+#ifdef WINDOWS
+#define MAXIMUM_MAPPABLE_MEMORY (1U<<30)
+#endif
+#ifdef FREEBSD
+#define MAXIMUM_MAPPABLE_MEMORY (1U<<30)
+#endif
+#ifdef SOLARIS
+#define MAXIMUM_MAPPABLE_MEMORY (1U<<30)
+#endif
+#endif
+
+natural
+reserved_area_size = MAXIMUM_MAPPABLE_MEMORY;
+
+area 
+  *nilreg_area=NULL,
+  *tenured_area=NULL, 
+  *g2_area=NULL, 
+  *g1_area=NULL,
+  *managed_static_area=NULL,
+  *static_cons_area=NULL,
+  *readonly_area=NULL;
+
+area *all_areas=NULL;
+int cache_block_size=32;
+
+
+#if WORD_SIZE == 64
+#define DEFAULT_LISP_HEAP_GC_THRESHOLD (32<<20)
+#define G2_AREA_THRESHOLD (8<<20)
+#define G1_AREA_THRESHOLD (4<<20)
+#define G0_AREA_THRESHOLD (2<<20)
+#else
+#define DEFAULT_LISP_HEAP_GC_THRESHOLD (16<<20)
+#define G2_AREA_THRESHOLD (4<<20)
+#define G1_AREA_THRESHOLD (2<<20)
+#define G0_AREA_THRESHOLD (1<<20)
+#endif
+
+#define MIN_DYNAMIC_SIZE (DEFAULT_LISP_HEAP_GC_THRESHOLD *2)
+
+#if (WORD_SIZE == 32)
+#define DEFAULT_INITIAL_STACK_SIZE (1<<20)
+#else
+#define DEFAULT_INITIAL_STACK_SIZE (2<<20)
+#endif
+
+natural
+lisp_heap_gc_threshold = DEFAULT_LISP_HEAP_GC_THRESHOLD;
+
+natural 
+initial_stack_size = DEFAULT_INITIAL_STACK_SIZE;
+
+natural
+thread_stack_size = 0;
+
+
+/*
+  'start' should be on a segment boundary; 'len' should be
+  an integral number of segments.  remap the entire range.
+*/
+
/* Thin wrapper over the platform's UnCommitMemory(); see the
   alignment requirements in the comment above. */
void 
uncommit_pages(void *start, size_t len)
{
  UnCommitMemory(start, len);
}
+
+#define TOUCH_PAGES_ON_COMMIT 0
+
/*
 * When TOUCH_PAGES_ON_COMMIT is nonzero, touch every page in
 * [start, start+len) and return false if any touch fails; otherwise
 * this compiles to a no-op that always returns true.
 * Assumes len is a multiple of page_size — TODO confirm callers.
 */
Boolean
touch_all_pages(void *start, size_t len)
{
#if TOUCH_PAGES_ON_COMMIT
  extern Boolean touch_page(void *);
  char *p = (char *)start;

  while (len) {
    if (!touch_page(p)) {
      return false;
    }
    len -= page_size;
    p += page_size;
  }
#endif
  return true;
}
+
+Boolean
+commit_pages(void *start, size_t len)
+{
+  if (len != 0) {
+    if (CommitMemory(start, len)) {
+      if (touch_all_pages(start, len)) {
+	return true;
+      }
+    }
+  }
+  return true;
+}
+
+area *
+find_readonly_area()
+{
+  area *a;
+
+  for (a = active_dynamic_area->succ; a != all_areas; a = a->succ) {
+    if (a->code == AREA_READONLY) {
+      return a;
+    }
+  }
+  return NULL;
+}
+
/*
 * Prepare the readonly area to grow by 'more' bytes: unprotect any
 * partial page at its active end and commit the pages the growth
 * will cover.  Returns the area, or NULL when there is no readonly
 * area, not enough reserved room, or the commit fails.
 */
area *
extend_readonly_area(natural more)
{
  area *a;
  unsigned mask;
  BytePtr new_start, new_end;

  if ((a = find_readonly_area()) != NULL) {
    if ((a->active + more) > a->high) {
      /* Not enough room left in the area. */
      return NULL;
    }
    /* If 'active' isn't page-aligned, make its partial page
       writable again. */
    mask = ((natural)a->active) & (page_size-1);
    if (mask) {
      UnProtectMemory(a->active-mask, page_size);
    }
    new_start = (BytePtr)(align_to_power_of_2(a->active,log2_page_size));
    new_end = (BytePtr)(align_to_power_of_2(a->active+more,log2_page_size));
    if (!CommitMemory(new_start, new_end-new_start)) {
      return NULL;
    }
    return a;
  }
  return NULL;
}
+
+LispObj image_base=0;
+BytePtr pure_space_start, pure_space_active, pure_space_limit;
+BytePtr static_space_start, static_space_active, static_space_limit;
+
/*
 * Raise the process's address-space limit (RLIMIT_AS) to its hard
 * maximum so large heap reservations aren't refused.  No-op where
 * RLIMIT_AS doesn't exist; setrlimit failure is deliberately
 * ignored (best effort).
 */
void
raise_limit()
{
#ifdef RLIMIT_AS
  struct rlimit r;
  if (getrlimit(RLIMIT_AS, &r) == 0) {
    r.rlim_cur = r.rlim_max;
    setrlimit(RLIMIT_AS, &r);
    /* Could limit heaplimit to rlim_max here if smaller? */
  }
#endif
} 
+
+
/*
 * Reserve the big address range that everything else (pure space,
 * the lisp heap, mark bits, relocation table) is carved from.
 * Returns the reserved area, or NULL if the mapping failed and the
 * request was above the absolute minimum; exits if even the minimum
 * can't be mapped.
 */
area *
create_reserved_area(natural totalsize)
{
  Ptr h;
  natural base;
  BytePtr 
    end, 
    lastbyte, 
    start, 
    want = (BytePtr)IMAGE_BASE_ADDRESS;
  area *reserved;
  Boolean fatal = false;

  totalsize = align_to_power_of_2((void *)totalsize, log2_heap_segment_size);
    
  /* Raise undersized requests to the minimum; failure to map the
     minimum is fatal. */
  if (totalsize < (PURESPACE_RESERVE + MIN_DYNAMIC_SIZE)) {
    totalsize = PURESPACE_RESERVE + MIN_DYNAMIC_SIZE;
    fatal = true;
  }

  start = ReserveMemoryForHeap(want, totalsize);

  if (start == NULL) {
    if (fatal) {
      perror("minimal initial mmap");
      exit(1);
    }
    return NULL;
  }

  h = (Ptr) start;
  base = (natural) start;
  image_base = base;
  lastbyte = (BytePtr) (start+totalsize);
  /* Static space lives at a fixed address; pure space sits at the
     base of the reservation. */
  static_space_start = static_space_active = (BytePtr)STATIC_BASE_ADDRESS;
  static_space_limit = static_space_start + STATIC_RESERVE;
  pure_space_start = pure_space_active = start;
  pure_space_limit = start + PURESPACE_SIZE;
  start += PURESPACE_RESERVE;

  /*
    Allocate mark bits here.  They need to be 1/64 the size of the
     maximum useable area of the heap (+ 3 words for the EGC.)
  */
  end = lastbyte;
  end = (BytePtr) ((natural)((((natural)end) - ((totalsize+63)>>6)) & ~4095));

  global_mark_ref_bits = (bitvector)end;
  /* The relocation table gets an equal-sized region just below the
     mark bits. */
  end = (BytePtr) ((natural)((((natural)end) - ((totalsize+63) >> 6)) & ~4095));
  global_reloctab = (LispObj *) end;
  reserved = new_area(start, end, AREA_VOID);
  /* The root of all evil is initially linked to itself. */
  reserved->pred = reserved->succ = reserved;
  all_areas = reserved;
  return reserved;
}
+
+void *
+allocate_from_reserved_area(natural size)
+{
+  area *reserved = reserved_area;
+  BytePtr low = reserved->low, high = reserved->high;
+  natural avail = high-low;
+  
+  size = align_to_power_of_2(size, log2_heap_segment_size);
+
+  if (size > avail) {
+    return NULL;
+  }
+  reserved->low += size;
+  reserved->active = reserved->low;
+  reserved->ndnodes -= (size>>dnode_shift);
+  return low;
+}
+
+
+
+BytePtr reloctab_limit = NULL, markbits_limit = NULL;
+BytePtr low_relocatable_address = NULL, high_relocatable_address = NULL,
+  low_markable_address = NULL, high_markable_address = NULL;
+
+void
+map_initial_reloctab(BytePtr low, BytePtr high)  
+{
+  natural ndnodes, reloctab_size, n;
+
+  low_relocatable_address = low; /* will never change */
+  high_relocatable_address = high;
+  ndnodes = area_dnode(high,low);
+  reloctab_size = (sizeof(LispObj)*(((ndnodes+((1<<bitmap_shift)-1))>>bitmap_shift)+1));
+  
+  reloctab_limit = (BytePtr)align_to_power_of_2(((natural)global_reloctab)+reloctab_size,log2_page_size);
+  CommitMemory(global_reloctab,reloctab_limit-(BytePtr)global_reloctab);
+}
+
/*
 * Commit mark bits covering the dnodes in [low, high).  The bits for
 * this range start 'prefix_size' bytes into the reserved mark-bit
 * block, leaving room below for addresses between pure_space_limit
 * and 'low' to be added later without moving anything (see
 * lower_heap_start()).
 */
void
map_initial_markbits(BytePtr low, BytePtr high)
{
  natural
    prefix_dnodes = area_dnode(low, pure_space_limit),
    ndnodes = area_dnode(high, low),
    prefix_size = (prefix_dnodes+7)>>3,
    markbits_size = (3*sizeof(LispObj))+((ndnodes+7)>>3),
    n;
  low_markable_address = low;
  high_markable_address = high;
  dynamic_mark_ref_bits = (bitvector)(((BytePtr)global_mark_ref_bits)+prefix_size);
  relocatable_mark_ref_bits = dynamic_mark_ref_bits;
  n = align_to_power_of_2(markbits_size,log2_page_size);
  markbits_limit = ((BytePtr)dynamic_mark_ref_bits)+n;
  CommitMemory(dynamic_mark_ref_bits,n);
}
+    
/*
 * Extend area 'a' downward so it starts at 'new_low': commit the
 * extra mark bits (which sit just below the existing ones), then
 * update the area's accounting, HEAP_START, and the static cons
 * area's dnode count.
 */
void
lower_heap_start(BytePtr new_low, area *a)
{
  natural new_dnodes = area_dnode(low_markable_address,new_low);

  if (new_dnodes) {
    natural n = (new_dnodes+7)>>3;

    BytePtr old_markbits = (BytePtr)dynamic_mark_ref_bits,
      new_markbits = old_markbits-n;
    CommitMemory(new_markbits,n);
    dynamic_mark_ref_bits = (bitvector)new_markbits;
    if (a->refbits) {
      a->refbits= dynamic_mark_ref_bits;
    }
    /* The newly covered space is accounted as static dnodes. */
    a->static_dnodes += new_dnodes;
    a->ndnodes += new_dnodes;
    a->low = new_low;
    low_markable_address = new_low;
    lisp_global(HEAP_START) = (LispObj)new_low;
    static_cons_area->ndnodes = area_dnode(static_cons_area->high,new_low);
  }
}
+
/*
 * After the heap grows, commit and unprotect the relocation table
 * and mark bits out to the sizes implied by the current HEAP_END.
 * reloctab_limit/markbits_limit are high-water marks, so pages that
 * are already prepared aren't touched again.
 */
void
ensure_gc_structures_writable()
{
  natural 
    ndnodes = area_dnode(lisp_global(HEAP_END),low_relocatable_address),
    markbits_size = (3*sizeof(LispObj))+((ndnodes+7)>>3),
    reloctab_size = (sizeof(LispObj)*(((ndnodes+((1<<bitmap_shift)-1))>>bitmap_shift)+1)),
    n;
  BytePtr 
    new_reloctab_limit = (BytePtr)align_to_power_of_2(((natural)global_reloctab)+reloctab_size,log2_page_size),
    new_markbits_limit = (BytePtr)align_to_power_of_2(((natural)relocatable_mark_ref_bits)+markbits_size,log2_page_size);

  if (new_reloctab_limit > reloctab_limit) {
    n = new_reloctab_limit - reloctab_limit;
    CommitMemory(reloctab_limit, n);
    UnProtectMemory(reloctab_limit, n);
    reloctab_limit = new_reloctab_limit;
  }
  
  if (new_markbits_limit > markbits_limit) {
    n = new_markbits_limit-markbits_limit;
    CommitMemory(markbits_limit, n);
    UnProtectMemory(markbits_limit, n);
    markbits_limit = new_markbits_limit;
  }
}
+
+
/*
 * Create the initial dynamic (lisp heap) area: take 'initsize' bytes
 * (rounded up to a whole segment) from the reserved area, commit
 * them, and set up the relocation table, mark bits, and the
 * HEAP_START/HEAP_END globals.  Exits if the reservation is too
 * small.
 */
area *
allocate_dynamic_area(natural initsize)
{
  natural totalsize = align_to_power_of_2(initsize, log2_heap_segment_size);
  BytePtr start, end;
  area *a;

  start = allocate_from_reserved_area(totalsize);
  if (start == NULL) {
    fprintf(dbgout, "reserved area too small to load heap image\n");
    exit(1);
  }
  end = start + totalsize;
  a = new_area(start, end, AREA_DYNAMIC);
  /* Only 'initsize' bytes are active; the rounding slop isn't. */
  a->active = start+initsize;
  add_area_holding_area_lock(a);
  CommitMemory(start, end-start);
  a->h = start;
  a->softprot = NULL;
  a->hardprot = NULL;
  map_initial_reloctab(a->low, a->high);
  map_initial_markbits(a->low, a->high);
  lisp_global(HEAP_START) = ptr_to_lispobj(a->low);
  lisp_global(HEAP_END) = ptr_to_lispobj(a->high);
  return a;
 }
+
+
/*
 * Grow the active dynamic area upward by 'delta' bytes (rounded up
 * to a whole segment), taking the space from the reserved area.
 * Returns false when the reservation is exhausted or the pages
 * can't be committed.
 */
Boolean
grow_dynamic_area(natural delta)
{
  area *a = active_dynamic_area, *reserved = reserved_area;
  natural avail = reserved->high - reserved->low;
  
  delta = align_to_power_of_2(delta, log2_heap_segment_size);
  if (delta > avail) {
    return false;
  }

  if (!commit_pages(a->high,delta)) {
    return false;
  }


  if (!allocate_from_reserved_area(delta)) {
    return false;
  }


  a->high += delta;
  a->ndnodes = area_dnode(a->high, a->low);
  lisp_global(HEAP_END) += delta;
  /* Grow the GC side tables to match the new heap end. */
  ensure_gc_structures_writable();
  return true;
}
+
+/*
+  As above.  Pages that're returned to the reserved_area are
+  "condemned" (e.g, we try to convince the OS that they never
+  existed ...)
+*/
/*
 * Shrink the active dynamic area by 'delta' bytes (rounded up to a
 * whole segment), returning the pages to the OS and to the reserved
 * area.  Always returns true.  NOTE(review): assumes delta doesn't
 * exceed the area's current size — confirm callers guarantee this.
 */
Boolean
shrink_dynamic_area(natural delta)
{
  area *a = active_dynamic_area, *reserved = reserved_area;
  
  delta = align_to_power_of_2(delta, log2_heap_segment_size);

  a->high -= delta;
  a->ndnodes = area_dnode(a->high, a->low);
  a->hardlimit = a->high;
  /* Decommit the surrendered pages and grow the reservation back
     down over them. */
  uncommit_pages(a->high, delta);
  reserved->low -= delta;
  reserved->ndnodes += (delta>>dnode_shift);
  lisp_global(HEAP_END) -= delta;
  return true;
}
+
+
+
+#ifndef WINDOWS
+void
+user_signal_handler (int signum, siginfo_t *info, ExceptionInformation *context)
+{
+  if (signum == SIGINT) {
+    lisp_global(INTFLAG) = (((signum<<8) + 1) << fixnumshift);
+  }
+  else if (signum == SIGTERM) {
+    lisp_global(INTFLAG) = (((signum<<8) + 2) << fixnumshift);
+  }
+  else if (signum == SIGQUIT) {
+    lisp_global(INTFLAG) = (((signum<<8) + 2) << fixnumshift);
+  }
+#ifdef DARWIN
+  DarwinSigReturn(context);
+#endif
+}
+
+#endif
+
/*
 * Arrange for user-initiated interrupts (SIGINT/SIGTERM, or console
 * control events on Windows) to be reported to lisp.
 * NOTE(review): user_signal_handler also handles SIGQUIT, but no
 * handler is installed for it here — confirm whether that's
 * intentional.
 */
void
register_user_signal_handler()
{
#ifdef WINDOWS
  extern BOOL CALLBACK ControlEventHandler(DWORD);

  /* Console events arrive via ControlEventHandler, not SIGINT. */
  signal(SIGINT, SIG_IGN);

  SetConsoleCtrlHandler(ControlEventHandler,TRUE);
#else
  install_signal_handler(SIGINT, (void *)user_signal_handler);
  install_signal_handler(SIGTERM, (void *)user_signal_handler);
#endif
}
+
+
+
/*
 * Return a page-aligned approximation of the highest address of the
 * initial thread's stack.
 */
BytePtr
initial_stack_bottom()
{
#ifndef WINDOWS
  /* Walk past the environment strings, which traditionally sit at
     the very top of the initial stack.  NOTE(review): assumes the
     environ strings are laid out contiguously, ending at an empty
     string — confirm per platform. */
  extern char **environ;
  char *p = *environ;
  while (*p) {
    p += (1+strlen(p));
  }
  return (BytePtr)((((natural) p) +4095) & ~4095);
#endif
#ifdef WINDOWS
  /* No environ trick on Windows; round the current SP up a page. */
  return (BytePtr)((current_stack_pointer() + 4095) & ~ 4095);
#endif
}
+
+
+
+  
+Ptr fatal_spare_ptr = NULL;
+
+
/*
 * Print a fatal-error message to the debug stream and terminate the
 * process immediately (no atexit handlers).
 */
void
Fatal(StringPtr param0, StringPtr param1)
{

  /* Release the emergency reserve first — presumably kept so some
     memory is available during fatal-error handling. */
  if (fatal_spare_ptr) {
    deallocate(fatal_spare_ptr);
    fatal_spare_ptr = NULL;
  }
  fprintf(dbgout, "Fatal error: %s\n%s\n", param0, param1);
  _exit(-1);
}
+
+OSErr application_load_err = noErr;
+
+area *
+set_nil(LispObj);
+
+
+/* Check for the existence of a file named by 'path'; return true
+   if it seems to exist, without checking size, permissions, or
+   anything else. */
+Boolean
+probe_file(char *path)
+{
+  struct stat st;
+
+  return (stat(path,&st) == 0);
+}
+
+
+#ifdef WINDOWS
+/* Chop the trailing ".exe" from the kernel image name */
wchar_t *
chop_exe_suffix(wchar_t *path)
{
  int len = wcslen(path);
  /* NOTE(review): malloc result is used unchecked — confirm that's
     acceptable this early in startup. */
  wchar_t *copy = malloc((len+1)*sizeof(wchar_t)), *tail;

  wcscpy(copy,path);
  /* Truncate at the last '.' — this removes whatever extension is
     present, not just ".exe". */
  tail = wcsrchr(copy, '.');
  if (tail) {
    *tail = 0;
  }
  return copy;
}
+#endif
+
+#ifdef WINDOWS
/* Windows variant: return a freshly-malloc'd "<path>.image" (caller
   owns the result), or NULL if allocation fails. */
wchar_t *
path_by_appending_image(wchar_t *path)
{
  int len = wcslen(path) + wcslen(L".image") + 1;
  wchar_t *copy = (wchar_t *) malloc(len*sizeof(wchar_t));

  if (copy) {
    wcscpy(copy, path);
    wcscat(copy, L".image");
  }
  return copy;
}
+#else
/* Return a freshly-malloc'd "<path>.image" (caller owns the
   result), or NULL if allocation fails. */
char *
path_by_appending_image(char *path)
{
  size_t total = strlen(path) + strlen(".image") + 1;
  char *result = (char *) malloc(total);

  if (result != NULL) {
    strcpy(result, path);
    strcat(result, ".image");
  }
  return result;
}
+#endif
+
/*
 * Return a malloc'd copy of 'path' whose final component (after the
 * last '/') has the case of every letter inverted; NULL if
 * allocation fails.  Fixes two issues in the original: the
 * <ctype.h> calls received a plain (possibly signed) char, which is
 * undefined behavior for negative values (CERT STR37-C) — arguments
 * now go through unsigned char; and POSIX strdup is replaced with
 * portable malloc+memcpy.
 */
char *
case_inverted_path(char *path)
{
  size_t len = strlen(path) + 1;
  char *copy = malloc(len), *base, *work, c;

  if (copy == NULL) {
    return NULL;
  }
  memcpy(copy, path, len);
  /* Point 'base' just past the last '/', i.e. at the final
     pathname component. */
  base = work = copy;
  while (*work) {
    if (*work++ == '/') {
      base = work;
    }
  }
  /* Invert the case of the final component in place. */
  work = base;
  while ((c = *work) != '\0') {
    if (islower((unsigned char)c)) {
      *work++ = (char)toupper((unsigned char)c);
    } else {
      *work++ = (char)tolower((unsigned char)c);
    }
  }
  return copy;
}
+/* 
+   The underlying file system may be case-insensitive (e.g., HFS),
+   so we can't just case-invert the kernel's name.
+   Tack ".image" onto the end of the kernel's name.  Much better ...
+*/
+#ifdef WINDOWS
/* Windows variant: "<kernel-name-minus-extension>.image". */
wchar_t *
default_image_name(wchar_t *orig)
{
  wchar_t *path = chop_exe_suffix(orig);
  wchar_t *image_name = path_by_appending_image(path);
  return image_name;
}
+#else
/* Non-Windows variant: "<kernel-name>.image", falling back to the
   historical case-inverted kernel name if that file exists and the
   ".image" file doesn't. */
char *
default_image_name(char *orig)
{
#ifdef WINDOWS
  /* NOTE(review): dead code — this function is in the #else
     (non-Windows) arm, so WINDOWS can't be defined here. */
  char *path = chop_exe_suffix(orig);
#else
  char *path = orig;
#endif
  char *image_name = path_by_appending_image(path);
#if !defined(WINDOWS) && !defined(DARWIN)
  /* Legacy fallback: a case-inverted kernel name (e.g. "lx86cl" ->
     "LX86CL") used by older releases. */
  if (!probe_file(image_name)) {
    char *legacy = case_inverted_path(path);
    if (probe_file(legacy)) {
      image_name = legacy;
    }
  }
#endif
  return image_name;
}
+
+
+
+char *program_name = NULL;
+#ifdef WINDOWS
+wchar_t *real_executable_name = NULL;
+#else
+char *real_executable_name = NULL;
+#endif
+
+#ifndef WINDOWS
+
+char *
+ensure_real_path(char *path)
+{
+  char buf[PATH_MAX*2], *p, *q;
+  int n;
+
+  p = realpath(path, buf);
+  
+  if (p == NULL) {
+    return path;
+  }
+  n = strlen(p);
+  q = malloc(n+1);
+  strcpy(q,p);
+  return q;
+}
+
/*
 * Determine the kernel executable's path, preferring an OS-specific
 * query (dyld on Darwin, /proc on Linux and Solaris) and falling
 * back to argv[0].  The result may be freshly allocated or argv0
 * itself.
 */
char *
determine_executable_name(char *argv0)
{
#ifdef DARWIN
  uint32_t len = 1024;
  char exepath[1024], *p = NULL;

  /* _NSGetExecutablePath returns nonzero when the buffer is too
     small; fall back to argv0 in that case. */
  if (_NSGetExecutablePath(exepath, (void *)&len) == 0) {
    p = malloc(len+1);
    memmove(p, exepath, len);
    p[len]=0;
    return ensure_real_path(p);
  } 
  return ensure_real_path(argv0);
#endif
#ifdef LINUX
  char exepath[PATH_MAX], *p;
  int n;

  /* readlink() doesn't NUL-terminate; copy and terminate manually. */
  if ((n = readlink("/proc/self/exe", exepath, PATH_MAX)) > 0) {
    p = malloc(n+1);
    memmove(p,exepath,n);
    p[n]=0;
    return p;
  }
  return argv0;
#endif
#ifdef FREEBSD
  return ensure_real_path(argv0);
#endif
#ifdef SOLARIS
  char exepath[PATH_MAX], proc_path[PATH_MAX], *p;
  int n;

  snprintf(proc_path,PATH_MAX-1,"/proc/%d/path/a.out",getpid());

  if ((n = readlink(proc_path, exepath, PATH_MAX)) > 0) {
    p = malloc(n+1);
    memmove(p,exepath,n);
    p[n]=0;
    return p;
  }
  return ensure_real_path(argv0);
#endif
  /* Platforms without a specific query above. */
  return ensure_real_path(argv0);
}
+#endif
+
+#ifdef WINDOWS
/*
 * Windows variant: query the module file name, doubling the buffer
 * until it fits.  GetModuleFileNameW returning exactly 'nsize'
 * indicates truncation.  NOTE(review): malloc/realloc results are
 * used unchecked.
 */
wchar_t *
determine_executable_name()
{
  DWORD nsize = 512, result;
  wchar_t *buf = malloc(nsize*sizeof(wchar_t));

  do {
    result = GetModuleFileNameW(NULL, buf, nsize);
    if (result == nsize) {
      nsize *= 2;
      buf = realloc(buf,nsize*sizeof(wchar_t));
    } else {
      return buf;
    }
  } while (1);
}
+
+
/*
 * Windows variant: canonicalize via GetFullPathNameW, retrying with
 * a larger buffer when needed.  Returns 'path' itself on failure, a
 * freshly-allocated copy on success.
 */
wchar_t *
ensure_real_path(wchar_t *path)
{
  int bufsize = 256, n;

  do {
    wchar_t buf[bufsize];

    n = GetFullPathNameW(path,bufsize,buf,NULL);
    if (n == 0) {
      return path;
    }

    /* n >= bufsize means the buffer was too small and n is the
       required size; retry with that size. */
    if (n < bufsize) {
      int i;
      wchar_t *q = calloc(n+1,sizeof(wchar_t));

      for (i = 0; i < n; i++) {
        q[i] = buf[i];
      }
      return q;
    }
    bufsize = n+1;
  } while (1);
}
+
/*
 * Print the kernel's usage summary — preceded by 'herald' when
 * non-empty and including any caller-supplied 'other_args' text —
 * then exit with 'exit_status'.  Never returns.
 */
void
usage_exit(char *herald, int exit_status, char* other_args)
{
  if (herald && *herald) {
    fprintf(dbgout, "%s\n", herald);
  }
  fprintf(dbgout, "usage: %s <options>\n", program_name);
  fprintf(dbgout, "\t or %s <image-name>\n", program_name);
  fprintf(dbgout, "\t where <options> are one or more of:\n");
  if (other_args && *other_args) {
    fputs(other_args, dbgout);
  }
  fprintf(dbgout, "\t-R, --heap-reserve <n>: reserve <n> (default: %lld)\n",
	  (u64_t) reserved_area_size);
  fprintf(dbgout, "\t\t bytes for heap expansion\n");
  fprintf(dbgout, "\t-S, --stack-size <n>: set  size of initial thread's control stack to <n>\n");
  fprintf(dbgout, "\t-Z, --thread-stack-size <n>: set default size of first (listener)  thread's stacks based on <n>\n");
  fprintf(dbgout, "\t-b, --batch: exit when EOF on *STANDARD-INPUT*\n");
  fprintf(dbgout, "\t--no-sigtrap : obscure option for running under GDB\n");
  fprintf(dbgout, "\t-I, --image-name <image-name>\n");
#ifndef WINDOWS
  fprintf(dbgout, "\t and <image-name> defaults to %s\n", 
	  default_image_name(program_name));
#endif
  fprintf(dbgout, "\n");
  _exit(exit_status);
}
+
/* Command-line state, set by process_options()/main() below. */
int no_sigtrap = 0;             /* --no-sigtrap: option for running under GDB */
#ifdef WINDOWS
wchar_t *image_name = NULL;     /* heap image pathname (UTF-16 on Windows) */
#else
char *image_name = NULL;        /* heap image pathname */
#endif
int batch_flag = 0;             /* -b/--batch: exit on EOF on stdin */
+
+
+natural
+parse_numeric_option(char *arg, char *argname, natural default_val)
+{
+  char *tail;
+  natural val = 0;
+
+  val = strtoul(arg, &tail, 0);
+  switch(*tail) {
+  case '\0':
+    break;
+    
+  case 'M':
+  case 'm':
+    val = val << 20;
+    break;
+    
+  case 'K':
+  case 'k':
+    val = val << 10;
+    break;
+    
+  case 'G':
+  case 'g':
+    val = val << 30;
+    break;
+    
+  default:
+    fprintf(dbgout, "couldn't parse %s argument %s", argname, arg);
+    val = default_val;
+    break;
+  }
+  return val;
+}
+  
+
+
+/* 
+   The set of arguments recognized by the kernel is
+   likely to remain pretty small and pretty simple.
+   This removes everything it recognizes from argv;
+   remaining args will be processed by lisp code.
+*/
+
/* Scan ARGV for kernel-recognized options, record their values in
   the option globals above, and splice each recognized option (and
   its value) out of ARGV; whatever remains is later handed to lisp
   code.  On Windows, SHADOW is the UTF-16 vector from
   CommandLineToArgvW and is kept element-for-element in sync with
   ARGV so pathname-valued options can be captured in wide form; on
   other platforms SHADOW is NULL. */
void
process_options(int argc, char *argv[], wchar_t *shadow[])
{
  int i, j, k, num_elide, flag, arg_error;
  char *arg, *val;
  wchar_t *warg, *wval;
#ifdef DARWIN
  extern int NXArgc;
#endif

  for (i = 1; i < argc;) {
    arg = argv[i];
    if (shadow) {
      warg = shadow[i];
    }
    arg_error = 0;
    if (*arg != '-') {
      /* Not an option: leave it for lisp. */
      i++;
    } else {
      num_elide = 0;
      val = NULL;
      /* Each option accepts either the glued short form ("-Ifoo",
         elide 1 arg) or a separate value ("-I foo"/"--image-name
         foo", elide 2 args). */
      if ((flag = (strncmp(arg, "-I", 2) == 0)) ||
	  (strcmp (arg, "--image-name") == 0)) {
	if (flag && arg[2]) {
	  val = arg+2;          
          if (shadow) {
            wval = warg+2;
          }
	  num_elide = 1;
	} else {
	  if ((i+1) < argc) {
	    val = argv[i+1];
            if (shadow) {
              wval = shadow[i+1];
            }
	    num_elide = 2;
	  } else {
	    arg_error = 1;
	  }
	}
	if (val) {
          /* NOTE(review): wval is only consumed here under WINDOWS,
             where main() always passes a non-NULL shadow vector. */
#ifdef WINDOWS
          image_name = wval;
#else
	  image_name = val;
#endif
	}
      } else if ((flag = (strncmp(arg, "-R", 2) == 0)) ||
		 (strcmp(arg, "--heap-reserve") == 0)) {
	natural reserved_size = reserved_area_size;

	if (flag && arg[2]) {
	  val = arg+2;
	  num_elide = 1;
	} else {
	  if ((i+1) < argc) {
	    val = argv[i+1];
	    num_elide = 2;
	  } else {
	    arg_error = 1;
	  }
	}

	if (val) {
	  reserved_size = parse_numeric_option(val, 
					       "-R/--heap-reserve", 
					       reserved_area_size);
	}

	/* Silently ignore requests larger than the platform can map. */
	if (reserved_size <= MAXIMUM_MAPPABLE_MEMORY) {
	  reserved_area_size = reserved_size;
	}

      } else if ((flag = (strncmp(arg, "-S", 2) == 0)) ||
		 (strcmp(arg, "--stack-size") == 0)) {
	natural stack_size;

	if (flag && arg[2]) {
	  val = arg+2;
	  num_elide = 1;
	} else {
	  if ((i+1) < argc) {
	    val = argv[i+1];
	    num_elide = 2;
	  } else {
	    arg_error = 1;
	  }
	}

	if (val) {
	  stack_size = parse_numeric_option(val, 
					    "-S/--stack-size", 
					    initial_stack_size);
	  

	  if (stack_size >= MIN_CSTACK_SIZE) {
	    initial_stack_size = stack_size;
	  }
	}

      } else if ((flag = (strncmp(arg, "-Z", 2) == 0)) ||
		 (strcmp(arg, "--thread-stack-size") == 0)) {
	natural stack_size;

	if (flag && arg[2]) {
	  val = arg+2;
	  num_elide = 1;
	} else {
	  if ((i+1) < argc) {
	    val = argv[i+1];
	    num_elide = 2;
	  } else {
	    arg_error = 1;
	  }
	}

	if (val) {
	  stack_size = parse_numeric_option(val, 
					    "-Z/--thread-stack-size", 
					    thread_stack_size);
	  

	  if (stack_size >= MIN_CSTACK_SIZE) {
	   thread_stack_size = stack_size;
	  }
          /* Clamp to the largest value representable as a fixnum. */
          if (thread_stack_size >= (1LL<<((WORD_SIZE-fixnumshift)-1))) {
            thread_stack_size = (1LL<<((WORD_SIZE-fixnumshift)-1))-1;
          }
          
	}

      } else if (strcmp(arg, "--no-sigtrap") == 0) {
	no_sigtrap = 1;
	num_elide = 1;
      } else if ((strcmp(arg, "-b") == 0) ||
		 (strcmp(arg, "--batch") == 0)) {
	batch_flag = 1;
	num_elide = 1;
      } else if (strcmp(arg,"--") == 0) {
        /* "--": stop scanning; everything after is for lisp. */
        break;
      } else {
	i++;
      }
      if (arg_error) {
	usage_exit("error in program arguments", 1, "");
      }
      /* Shift the consumed option (and value) out of argv (and the
         shadow vector), keeping the trailing NULL terminator. */
      if (num_elide) {
	for (j = i+num_elide, k=i; j < argc; j++, k++) {
	  argv[k] = argv[j];
          if (shadow) {
            shadow[k] = shadow[j];
          }
	}
	argc -= num_elide;
#ifdef DARWIN
	NXArgc -= num_elide;
#endif
	argv[argc] = NULL;
        if (shadow) {
          shadow[argc] = NULL;
        }
      }
    }
  }
}
+
#ifdef WINDOWS
/* Abort the lisp process immediately. */
void
terminate_lisp()
{
  _exit(EXIT_FAILURE);
}
#else
/* Process id of the initial thread; recorded in main(). */
pid_t main_thread_pid = (pid_t)0;

/* Abort the lisp process immediately: SIGKILL the process (via the
   main thread's pid) and _exit in case the signal doesn't land. */
void
terminate_lisp()
{
  kill(main_thread_pid, SIGKILL);
  _exit(-1);
}
#endif
+
/* Minimum supported OS release per platform; enforced at startup by
   check_os_version() below. */
#ifdef DARWIN
#define min_os_version "8.0"    /* aka Tiger */
#endif
#ifdef LINUX
#ifdef PPC
#define min_os_version "2.2"
#endif
#ifdef X86
#define min_os_version "2.6"
#endif
#endif
#ifdef FREEBSD
#define min_os_version "6.0"
#endif
#ifdef SOLARIS
#define min_os_version "5.10"
#endif
+
#ifdef PPC
#if defined(PPC64) || !defined(DARWIN)
/* ld64 on Darwin doesn't offer anything close to reliable control
   over the layout of a program in memory.  About all that we can
   be assured of is that the canonical subprims jump table address
   (currently 0x5000) is unmapped.  Map that page, and copy the
   actual spjump table there. */


/* Copy the spjump table from wherever the linker placed it to
   SPJUMP_TARGET_ADDRESS, rewriting each entry's low 26 bits so the
   branch still reaches the same target from its new location.
   NOTE(review): this assumes every word in the table is a PPC I-form
   branch with AA/LK clear -- confirm against the spjump table's
   definition in the assembly sources. */
void
remap_spjump()
{
  extern opcode spjump_start, spjump_end;
  pc new,
    old = &spjump_start,
    limit = &spjump_end,
    work;
  opcode instr;
  void *target;
  int disp;
  
  /* Nothing to do if the table is already at the canonical address
     (mapping over it would destroy it). */
  if (old != (pc)SPJUMP_TARGET_ADDRESS) {
    new = mmap((pc) SPJUMP_TARGET_ADDRESS,
               0x1000,
               PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_PRIVATE | MAP_ANON | MAP_FIXED,
               -1,
               0);
    if (new != (pc) SPJUMP_TARGET_ADDRESS) {
      perror("remap spjump");
      _exit(1);
    }
    
    for (work = new; old < limit; work++, old++) {
      instr = *old;
      /* Displacement relative to the instruction's old address ... */
      disp = instr & ((1<<26)-1);
      target = (void*)old+disp;
      /* ... recomputed relative to its new address. */
      disp = target-(void *)work;
      *work = ((instr >> 26) << 26) | disp;
    }
    /* Flush so the instruction fetch sees the copied code. */
    xMakeDataExecutable(new, (void*)work-(void*)new);
    ProtectMemory(new, 0x1000);
  }
}
#endif
#endif
+
#ifdef X86
#ifdef WINDOWS

/* By using linker tricks, we ensure there's memory between 0x11000
   and 0x21000, so we just need to fix permissions and copy the spjump
   table. */

/* Make the page at SPJUMP_TARGET_ADDRESS writable/executable and
   copy the spjump table there; no-op when the linker already put the
   table at that address. */
void
remap_spjump()
{
  extern opcode spjump_start;
  DWORD old_protect;

  if ((void *)(&spjump_start) != (void *) SPJUMP_TARGET_ADDRESS) {
    if (!VirtualProtect((pc) SPJUMP_TARGET_ADDRESS,
                        0x1000,
                        PAGE_EXECUTE_READWRITE,
                        &old_protect)) {
      wperror("VirtualProtect spjump");
      _exit(1);
    }
    memmove((pc) SPJUMP_TARGET_ADDRESS, &spjump_start, 0x1000);
  }
}
+#else
+void
+remap_spjump()
+{
+  extern opcode spjump_start;
+  pc new = mmap((pc) SPJUMP_TARGET_ADDRESS,
+                0x1000,
+                PROT_READ | PROT_WRITE | PROT_EXEC,
+                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                -1,
+                0),
+    old = &spjump_start;
+  if (new == (pc)-1) {
+    perror("remap spjump");
+    _exit(1);
+  }
+  memmove(new, old, 0x1000);
+}
+#endif
+#endif
+
+
/* Compare the running OS release (from uname) against min_os_version
   component-by-component and exit with a message if it's too old.
   On x8632 FreeBSD, also set rcontext_readonly when running on an
   amd64 kernel (NOTE(review): presumably because segment-register
   handling differs under the 64-bit kernel -- confirm). */
void
check_os_version(char *progname)
{
#ifdef WINDOWS
  /* We should be able to run with any version of Windows that actually gets here executing the binary, so don't do anything for now. */
#else
  struct utsname uts;
  long got, want;
  char *got_end,*want_end;
#ifdef X8632
  extern Boolean rcontext_readonly;
#endif

  want = strtoul(min_os_version,&want_end,10);

  uname(&uts);
  got = strtoul(uts.release,&got_end,10);
#ifdef X8632
#ifdef FREEBSD
  if (!strcmp(uts.machine,"amd64")) {
    rcontext_readonly = true;
  }
#endif
#endif
  /* While components are equal, advance to the next dotted component
     of both version strings; a missing component counts as 0. */
  while (got == want) {
    if (*want_end == '.') {
      want = strtoul(want_end+1,&want_end,10);
      got = 0;
      if (*got_end == '.') {
        got = strtoul(got_end+1,&got_end,10);
      } else {
        break;
      }
    } else {
      break;
    }
  }

  if (got < want) {
    fprintf(dbgout, "\n%s requires %s version %s or later; the current version is %s.\n", progname, uts.sysname, min_os_version, uts.release);
    exit(1);
  }
#endif
}
+
#ifdef X86
/*
  This should determine the cache block size.  It should also
  probably complain if we don't have (at least) SSE2.
*/
extern int cpuid(natural, natural*, natural*, natural*);

/* CPUID leaf 1 EDX feature bits the lisp runtime depends on. */
#define X86_FEATURE_CMOV    (1<<15)
#define X86_FEATURE_CLFLUSH (1<<19)
#define X86_FEATURE_MMX     (1<<23)
#define X86_FEATURE_SSE     (1<<25)
#define X86_FEATURE_SSE2    (1<<26)

#define X86_REQUIRED_FEATURES (X86_FEATURE_CMOV|X86_FEATURE_MMX|X86_FEATURE_SSE|X86_FEATURE_SSE2)

/* Record the cache line size and return true iff the CPU has all of
   X86_REQUIRED_FEATURES; otherwise report what's missing to dbgout
   and return false. */
Boolean
check_x86_cpu()
{
  natural eax, ebx, ecx, edx;

  eax = cpuid(0, &ebx, &ecx, &edx);

  if (eax >= 1) {
    eax = cpuid(1, &ebx, &ecx, &edx);
    /* CPUID.1:EBX[15:8] is the CLFLUSH line size in 8-byte units, so
       ((ebx >> 8) & 0xff) * 8 == (ebx & 0xff00) >> 5 bytes. */
    cache_block_size = (ebx & 0xff00) >> 5;
    if ((X86_REQUIRED_FEATURES & edx) == X86_REQUIRED_FEATURES) {
      return true;
    }
    /* It's very unlikely that SSE2 would be present and other things
       that we want wouldn't.  If they don't have MMX or CMOV either,
       might as well tell them. */
    if ((edx & X86_FEATURE_SSE2) == 0) {
      fprintf(dbgout, "This CPU doesn't support the SSE2 instruction set\n");
    }
    if ((edx & X86_FEATURE_MMX) == 0) {
      fprintf(dbgout, "This CPU doesn't support the MMX instruction set\n");
    }
    if ((edx & X86_FEATURE_CMOV) == 0) {
      fprintf(dbgout, "This CPU doesn't support the CMOV instruction\n");
    }
    
  }
  return false;
}
#endif
+
/* atexit() handler (registered in main()): if the exiting thread has
   a TCR, reset its value and temp stacks and its dynamic state, then
   re-enter lisp one last time.  NOTE(review): presumably this lets
   lisp run its own shutdown code when C calls exit() -- confirm
   against start_lisp's second-argument semantics. */
void
lazarus()
{
  TCR *tcr = get_tcr(false);
  if (tcr) {
    /* Some threads may be dying; no threads should be created. */
    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
    /* Empty the value stack and temp stack, discard any dynamic
       state (catch frames, special bindings, exception frames). */
    tcr->vs_area->active = tcr->vs_area->high - node_size;
    tcr->save_vsp = (LispObj *)(tcr->vs_area->active);
    tcr->ts_area->active = tcr->ts_area->high;
    tcr->save_tsp = (LispObj *)(tcr->ts_area->active);
    tcr->catch_top = 0;
    tcr->db_link = 0;
    tcr->xframe = 0;
    start_lisp(tcr, 0);
  }
}
+
#ifdef LINUX
#ifdef X8664
#include <asm/prctl.h>
#include <sys/prctl.h>

/* Lisp wants the %gs register for its own purposes on x86-64 Linux.
   If the C library appears to be using %gs (and not %fs) for thread
   storage -- i.e. %gs points at the current thread -- print an
   explanation naming the libc version and refuse to run. */
void
ensure_gs_available(char *progname)
{
  LispObj fs_addr = 0L, gs_addr = 0L, cur_thread = (LispObj)pthread_self();
  char *gnu_get_libc_version(void);
  
  arch_prctl(ARCH_GET_GS, &gs_addr);
  arch_prctl(ARCH_GET_FS, &fs_addr);
  if ((gs_addr == cur_thread) && (fs_addr != cur_thread)) {
    fprintf(dbgout, "The installed C library - version %s - seems to be using the %%gs register for thread storage.\n\"%s\" cannot run, since it expects to be\nable to use that register for its own purposes.\n", gnu_get_libc_version(),progname);
    _exit(1);
  }
}
#endif
#endif
+
/* True when the platform spuriously sets FP exception status bits;
   determined at startup by check_bogus_fp_exceptions(). */
Boolean 
bogus_fp_exceptions = false;
+
+typedef
+float (*float_arg_returns_float)(float);
+
+float
+fcallf(float_arg_returns_float fun, float arg)
+{
+  return fun(arg);
+}
+
/* (x8664 only) Probe whether calling a libm routine with a benign
   argument leaves spurious exception status bits in MXCSR; if so,
   record the fact in bogus_fp_exceptions.  The caller's MXCSR is
   restored before returning. */
void
check_bogus_fp_exceptions()
{
#ifdef X8664
  float asinf(float),result;
    

  /* 0x1f80 is the power-on MXCSR value: all exceptions masked, all
     status flags clear. */
  natural save_mxcsr = get_mxcsr(), post_mxcsr;
  set_mxcsr(0x1f80);

  result = fcallf(asinf, 1.0);
  post_mxcsr = get_mxcsr();
  set_mxcsr(save_mxcsr);
  /* Any status bit other than "inexact" after asinf(1.0) is bogus. */
  if (post_mxcsr & (FE_ALL_EXCEPT & (~FE_INEXACT))) {
    bogus_fp_exceptions = true;
  }
#endif
}
+
+#ifdef WINDOWS
+char *
+utf_16_to_utf_8(wchar_t *utf_16)
+{
+  int utf8len = WideCharToMultiByte(CP_UTF8,
+                                    0,
+                                    utf_16,
+                                    -1,
+                                    NULL,
+                                    0,
+                                    NULL,
+                                    NULL);
+
+  char *utf_8 = malloc(utf8len);
+
+  WideCharToMultiByte(CP_UTF8,
+                      0,
+                      utf_16,
+                      -1,
+                      utf_8,
+                      utf8len,
+                      NULL,
+                      NULL);
+
+  return utf_8;
+}
+
/* Convert the first ARGC entries of a wide (UTF-16) argument vector
   into a freshly allocated, NULL-terminated vector of UTF-8 strings;
   NULL entries stay NULL. */
char **
wide_argv_to_utf_8(wchar_t *wide_argv[], int argc)
{
  char** argv = calloc(argc+1,sizeof(char *));
  int i;

  for (i = 0; i < argc; i++) {
    if (wide_argv[i]) {
      argv[i] = utf_16_to_utf_8(wide_argv[i]);
    } else {
      argv[i] = NULL;
    }
  }
  return argv;
}
#endif
+
+
+  
+
+
/* Kernel entry point: probe the OS/CPU, process kernel command-line
   options, reserve and initialize the lisp heap, load the heap
   image, populate the kernel lisp_globals, wire up the GC
   generations, build the initial thread's TCR, and transfer control
   to lisp.  Does not return. */
int
main(int argc, char *argv[]
#ifndef WINDOWS
, char *envp[], void *aux
#endif
)
{
  extern int page_size;
  natural default_g0_threshold = G0_AREA_THRESHOLD,
    default_g1_threshold = G1_AREA_THRESHOLD,
    default_g2_threshold = G2_AREA_THRESHOLD,
    lisp_heap_threshold_from_image = 0;
  Boolean egc_enabled =
#ifdef DISABLE_EGC
    false
#else
    true
#endif
    ;
  Boolean lisp_heap_threshold_set_from_command_line = false;
  wchar_t **utf_16_argv = NULL;

#ifdef PPC
  extern int altivec_present;
#endif
#ifdef WINDOWS
  extern LispObj load_image(wchar_t *);
#else
  extern LispObj load_image(char *);
#endif
  area *a;
  BytePtr stack_base, current_sp = (BytePtr) current_stack_pointer();
  TCR *tcr;

  dbgout = stderr;

#ifdef WINDOWS
  /* Put the CRT and stdout/stderr into binary mode, bring up the
     winsock and I/O layers, and refetch the command line as UTF-16
     (the char argv may be a lossy conversion). */
  {
    int wide_argc;
    extern void init_winsock(void);
    extern void init_windows_io(void);

    _fmode = O_BINARY;
    _setmode(1, O_BINARY);
    _setmode(2, O_BINARY);
    setvbuf(dbgout, NULL, _IONBF, 0);
    init_winsock();
    init_windows_io();
    utf_16_argv = CommandLineToArgvW(GetCommandLineW(),&wide_argc);
  }
#endif

  check_os_version(argv[0]);
#ifdef WINDOWS
  real_executable_name = determine_executable_name();
#else
  real_executable_name = determine_executable_name(argv[0]);
#endif
  page_size = getpagesize(); /* Implement with GetSystemInfo on Windows w/o MinGW */

  check_bogus_fp_exceptions();
#ifdef LINUX
#ifdef X8664
  ensure_gs_available(real_executable_name);
#endif
#endif
  /* Move the subprims jump table to its canonical address where the
     linker can't place it there itself. */
#if (defined(DARWIN) && defined(PPC64)) || (defined(LINUX) && defined(PPC))|| defined(X8664) || (defined(X8632) && !defined(DARWIN))
  remap_spjump();
#endif

#ifdef PPC
#ifdef LINUX
  /* Learn the D-cache line size and AltiVec availability from the
     ELF auxiliary vector the kernel passed us. */
  {
    ElfW(auxv_t) *av = aux;
    int hwcap, done = false;
    
    if (av) {
      do {
	switch (av->a_type) {
	case AT_DCACHEBSIZE:
	  cache_block_size = av->a_un.a_val;
	  break;

	case AT_HWCAP:
	  hwcap = av->a_un.a_val;
	  altivec_present = ((hwcap & PPC_FEATURE_HAS_ALTIVEC) != 0);
	  break;

	case AT_NULL:
	  done = true;
	  break;
	}
	av++;
      } while (!done);
    }
  }
#endif
#ifdef DARWIN
  /* Same information via sysctl on Darwin. */
  {
    unsigned value = 0;
    size_t len = sizeof(value);
    int mib[2];
    
    mib[0] = CTL_HW;
    mib[1] = HW_CACHELINE;
    if (sysctl(mib,2,&value,&len, NULL, 0) != -1) {
      if (len == sizeof(value)) {
	cache_block_size = value;
      }
    }
    mib[1] = HW_VECTORUNIT;
    value = 0;
    len = sizeof(value);
    if (sysctl(mib,2,&value,&len, NULL, 0) != -1) {
      if (len == sizeof(value)) {
	altivec_present = value;
      }
    }
  }
#endif
#endif

#ifdef X86
  if (!check_x86_cpu()) {
    fprintf(dbgout, "CPU doesn't support required features\n");
    exit(1);
  }
#endif

#ifdef SOLARIS
#ifdef X8632
  {
    extern void solaris_ldt_init(void);
    solaris_ldt_init();
  }
#endif
#endif

#ifndef WINDOWS
  main_thread_pid = getpid();
#endif
  tcr_area_lock = (void *)new_recursive_lock();

  /* "<program> <image-name>" is shorthand for "-I <image-name>";
     anything else goes through the option parser. */
  program_name = argv[0];
  if ((argc == 2) && (*argv[1] != '-')) {
#ifdef WINDOWS
    image_name = utf_16_argv[1];
#else
    image_name = argv[1];
#endif
    argv[1] = NULL;
#ifdef WINDOWS
    utf_16_argv[1] = NULL;
#endif
  } else {
    process_options(argc,argv,utf_16_argv);
  }
  if (lisp_heap_gc_threshold != DEFAULT_LISP_HEAP_GC_THRESHOLD) {
    lisp_heap_threshold_set_from_command_line = true;
  }

  initial_stack_size = ensure_stack_limit(initial_stack_size);
  /* No image named explicitly: use an image embedded in the
     executable if there is one, else the default image name. */
  if (image_name == NULL) {
    if (check_for_embedded_image(real_executable_name)) {
      image_name = real_executable_name;
    } else {
      image_name = default_image_name(real_executable_name);
    }
  }

  /* Reserve address space for the heap, settling for 90% of the
     previous request each time the reservation fails. */
  while (1) {
    if (create_reserved_area(reserved_area_size)) {
      break;
    }
    reserved_area_size = reserved_area_size *.9;
  }

  gc_init();

  set_nil(load_image(image_name));
  /* Heap-sizing parameters saved in the image override the built-in
     defaults, but not values given explicitly on the command line. */
  lisp_heap_threshold_from_image = lisp_global(LISP_HEAP_THRESHOLD);
  if (lisp_heap_threshold_from_image) {
    if ((!lisp_heap_threshold_set_from_command_line) &&
        (lisp_heap_threshold_from_image != lisp_heap_gc_threshold)) {
      lisp_heap_gc_threshold = lisp_heap_threshold_from_image;
      resize_dynamic_heap(active_dynamic_area->active,lisp_heap_gc_threshold);
    }
    /* If lisp_heap_threshold_from_image was set, other image params are
       valid. */
    default_g0_threshold = lisp_global(G0_THRESHOLD);
    default_g1_threshold = lisp_global(G1_THRESHOLD);
    default_g2_threshold = lisp_global(G2_THRESHOLD);
    egc_enabled = lisp_global(EGC_ENABLED);
  }

  lisp_global(TCR_AREA_LOCK) = ptr_to_lispobj(tcr_area_lock);

#ifdef X86
  lisp_global(SUBPRIMS_BASE) = (LispObj)((1<<16)+(5<<10));
#else
  lisp_global(SUBPRIMS_BASE) = (LispObj)(5<<10);
#endif
  lisp_global(RET1VALN) = (LispObj)&ret1valn;
  lisp_global(LEXPR_RETURN) = (LispObj)&nvalret;
  lisp_global(LEXPR_RETURN1V) = (LispObj)&popj;
  lisp_global(ALL_AREAS) = ptr_to_lispobj(all_areas);
  lisp_global(DEFAULT_ALLOCATION_QUANTUM) = log2_heap_segment_size << fixnumshift;
  lisp_global(STACK_SIZE) = thread_stack_size<<fixnumshift;


  exception_init();

  

  /* Publish pathnames and argv to lisp (as UTF-8 on Windows). */
#ifdef WINDOWS
  lisp_global(IMAGE_NAME) = ptr_to_lispobj(utf_16_to_utf_8(ensure_real_path(image_name)));
  lisp_global(KERNEL_PATH) = ptr_to_lispobj(utf_16_to_utf_8(real_executable_name));
  lisp_global(ARGV) = ptr_to_lispobj(wide_argv_to_utf_8(utf_16_argv, argc));
#else
  lisp_global(IMAGE_NAME) = ptr_to_lispobj(ensure_real_path(image_name));
  lisp_global(KERNEL_PATH) = ptr_to_lispobj(real_executable_name);
  lisp_global(ARGV) = ptr_to_lispobj(argv);
#endif
  lisp_global(KERNEL_IMPORTS) = (LispObj)import_ptrs_base;

  lisp_global(GET_TCR) = (LispObj) get_tcr;
  *(double *) &(lisp_global(DOUBLE_FLOAT_ONE)) = (double) 1.0;

  lisp_global(HOST_PLATFORM) = (LispObj) PLATFORM << fixnumshift;

  lisp_global(BATCH_FLAG) = (batch_flag << fixnumshift);

  a = active_dynamic_area;

  /* Wire up the GC generations: the active dynamic area (g0) ->
     g1 -> g2 -> tenured, all initially empty at the heap's low end. */
  if (nilreg_area != NULL) {
    BytePtr lowptr = (BytePtr) a->low;

    /* Create these areas as AREA_STATIC, change them to AREA_DYNAMIC */
    g1_area = new_area(lowptr, lowptr, AREA_STATIC);
    g2_area = new_area(lowptr, lowptr, AREA_STATIC);
    tenured_area = new_area(lowptr, lowptr, AREA_STATIC);
    add_area_holding_area_lock(tenured_area);
    add_area_holding_area_lock(g2_area);
    add_area_holding_area_lock(g1_area);

    g1_area->code = AREA_DYNAMIC;
    g2_area->code = AREA_DYNAMIC;
    tenured_area->code = AREA_DYNAMIC;

/*    a->older = g1_area; */ /* Not yet: this is what "enabling the EGC" does. */
    g1_area->younger = a;
    g1_area->older = g2_area;
    g2_area->younger = g1_area;
    g2_area->older = tenured_area;
    tenured_area->younger = g2_area;
    tenured_area->refbits = dynamic_mark_ref_bits;
    managed_static_area->refbits = global_mark_ref_bits;
    a->markbits = dynamic_mark_ref_bits;
    /* The image's static dnodes now logically belong to the tenured
       generation. */
    tenured_area->static_dnodes = a->static_dnodes;
    a->static_dnodes = 0;
    tenured_area->static_used = a->static_used;
    a->static_used = 0;
    lisp_global(TENURED_AREA) = ptr_to_lispobj(tenured_area);
    lisp_global(STATIC_CONS_AREA) = ptr_to_lispobj(static_cons_area);
    lisp_global(REFBITS) = ptr_to_lispobj(global_mark_ref_bits);
    g2_area->threshold = default_g2_threshold;
    g1_area->threshold = default_g1_threshold;
    a->threshold = default_g0_threshold;
  }

  /* Create the initial thread's TCR and hand the current C stack
     over to the thread system. */
  tcr = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
  stack_base = initial_stack_bottom()-xStackSpace();
  init_threads((void *)(stack_base), tcr);
  thread_init_tcr(tcr, current_sp, current_sp-stack_base);

  if (lisp_global(STATIC_CONSES) == 0) {
    lisp_global(STATIC_CONSES) = lisp_nil;
  }

  lisp_global(EXCEPTION_LOCK) = ptr_to_lispobj(new_recursive_lock());
  enable_fp_exceptions();
  register_user_signal_handler();

#ifdef PPC
  lisp_global(ALTIVEC_PRESENT) = altivec_present << fixnumshift;
#endif
#if STATIC
  lisp_global(STATICALLY_LINKED) = 1 << fixnumshift;
#endif
  tcr->prev = tcr->next = tcr;
#ifndef WINDOWS
  lisp_global(INTERRUPT_SIGNAL) = (LispObj) box_fixnum(SIGNAL_FOR_PROCESS_INTERRUPT);
#endif
  /* Push the image's saved toplevel function onto the value stack
     for start_lisp, then reset its home cell to NIL. */
  tcr->vs_area->active -= node_size;
  *(--tcr->save_vsp) = nrs_TOPLFUNC.vcell;
  nrs_TOPLFUNC.vcell = lisp_nil;
#ifdef GC_INTEGRITY_CHECKING
  (nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit);
#endif
  if (egc_enabled) {
    egc_control(true, NULL);
  } else {
    lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);
  }
  atexit(lazarus);
  start_lisp(TCR_TO_TSD(tcr), 0);
  _exit(0);
}
+
+area *
+set_nil(LispObj r)
+{
+
+  if (lisp_nil == (LispObj)NULL) {
+
+    lisp_nil = r;
+  }
+  return NULL;
+}
+
+
/* Make NBYTES bytes starting at START safely executable: on non-x86
   (PPC) targets, flush the covering cache lines so freshly written
   instructions become visible to instruction fetch; x86 needs no
   explicit flush here. */
void
xMakeDataExecutable(void *start, unsigned long nbytes)
{
#ifndef X86
  extern void flush_cache_lines();
  natural ustart = (natural) start, base, end;
  
  /* Round [start, start+nbytes) outward to cache-line boundaries. */
  base = (ustart) & ~(cache_block_size-1);
  end = (ustart + nbytes + cache_block_size - 1) & ~(cache_block_size-1);
  flush_cache_lines(base, (end-base)/cache_block_size, cache_block_size);
#endif
}
+
/* Total control-stack reservation for the initial thread: the
   requested stack size plus the hard and soft guard regions. */
natural
xStackSpace()
{
  return initial_stack_size+CSTACK_HARDPROT+CSTACK_SOFTPROT;
}
+
#ifndef DARWIN
#ifdef WINDOWS
extern void *windows_open_shared_library(char *);

/* Open the shared library at PATH; MODE is ignored on Windows. */
void *
xGetSharedLibrary(char *path, int mode)
{
  return windows_open_shared_library(path);
}
#else
/* Open the shared library at PATH with the given dlopen() MODE. */
void *
xGetSharedLibrary(char *path, int mode)
{
  return dlopen(path, mode);
}
#endif
#else
/* Darwin: open the shared library at PATH.  On success *resultType
   is set non-zero and a handle is returned; on failure *resultType
   is set to 0 and the return value is an error string. */
void *
xGetSharedLibrary(char *path, int *resultType)
{
#if 0
  /* Legacy NSObjectFileImage/NSAddImage-based loader, disabled in
     favor of the dlopen() path below. */
  NSObjectFileImageReturnCode code;
  NSObjectFileImage	         moduleImage;
  NSModule		         module;
  const struct mach_header *     header;
  const char *                   error;
  void *                         result;
  /* not thread safe */
  /*
  static struct {
    const struct mach_header  *header;
    NSModule	              *module;
    const char                *error;
  } results;	
  */
  result = NULL;
  error = NULL;

  /* first try to open this as a bundle */
  code = NSCreateObjectFileImageFromFile(path,&moduleImage);
  if (code != NSObjectFileImageSuccess &&
      code != NSObjectFileImageInappropriateFile &&
      code != NSObjectFileImageAccess)
    {
      /* compute error strings */
      switch (code)
	{
	case NSObjectFileImageFailure:
	  error = "NSObjectFileImageFailure";
	  break;
	case NSObjectFileImageArch:
	  error = "NSObjectFileImageArch";
	  break;
	case NSObjectFileImageFormat:
	  error = "NSObjectFileImageFormat";
	  break;
	case NSObjectFileImageAccess:
	  /* can't find the file */
	  error = "NSObjectFileImageAccess";
	  break;
	default:
	  error = "unknown error";
	}
      *resultType = 0;
      return (void *)error;
    }
  if (code == NSObjectFileImageInappropriateFile ||
      code == NSObjectFileImageAccess ) {
    /* the pathname might be a partial pathane (hence the access error)
       or it might be something other than a bundle, if so perhaps
       it is a .dylib so now try to open it as a .dylib */

    /* protect against redundant loads, Gary Byers noticed possible
       heap corruption if this isn't done */
    header = NSAddImage(path, NSADDIMAGE_OPTION_RETURN_ON_ERROR |
			NSADDIMAGE_OPTION_WITH_SEARCHING |
			NSADDIMAGE_OPTION_RETURN_ONLY_IF_LOADED);
    if (!header)
      header = NSAddImage(path, NSADDIMAGE_OPTION_RETURN_ON_ERROR |
			  NSADDIMAGE_OPTION_WITH_SEARCHING);
    result = (void *)header;
    *resultType = 1;
  }
  else if (code == NSObjectFileImageSuccess) {
    /* we have a sucessful module image
       try to link it, don't bind symbols privately */

    module = NSLinkModule(moduleImage, path,
			  NSLINKMODULE_OPTION_RETURN_ON_ERROR | NSLINKMODULE_OPTION_BINDNOW);
    NSDestroyObjectFileImage(moduleImage);	
    result = (void *)module;
    *resultType = 2;
  }
  if (!result)
    {
      /* compute error string */
      NSLinkEditErrors ler;
      int lerno;
      const char* file;
      NSLinkEditError(&ler,&lerno,&file,&error);
      if (error) {
	result = (void *)error;
	*resultType = 0;
      }
    }
  return result;
#else
  const char *                   error;
  void *                         result;

  result = dlopen(path, RTLD_NOW | RTLD_GLOBAL);
  
  if (result == NULL) {
    error = dlerror();
    *resultType = 0;
    return (void *)error;
  }
  *resultType = 1;
  return result;
#endif
}
#endif
+
+
+
/* Size of an fd_set in bytes (FD_SETSIZE bits, 8 bits per byte), so
   callers can allocate fd_sets without knowing the C type. */
int
fd_setsize_bytes()
{
  int bits_per_byte = 8;

  return FD_SETSIZE / bits_per_byte;
}
+
/* Function wrapper around the FD_SET macro. */
void
do_fd_set(int fd, fd_set *fdsetp)
{
  FD_SET(fd, fdsetp);
}
+
/* Function wrapper around the FD_CLR macro. */
void
do_fd_clr(int fd, fd_set *fdsetp)
{
  FD_CLR(fd, fdsetp);
}
+
/* Function wrapper around the FD_ISSET macro; returns the macro's
   raw (zero / non-zero) value. */
int
do_fd_is_set(int fd, fd_set *fdsetp)
{
  return FD_ISSET(fd,fdsetp);
}
+
+
/* Function wrapper around the FD_ZERO macro. */
void
do_fd_zero(fd_set *fdsetp)
{
  FD_ZERO(fdsetp);
}
+
+#include "image.h"
+
+
+
/* Return true if the file at PATH (typically the kernel executable
   itself) contains an embedded lisp heap image, i.e. if an openmcl
   image file header can be located in it. */
Boolean
check_for_embedded_image (
#ifdef WINDOWS
                          wchar_t *path
#else
                          char *path
#endif
                          )
{
#ifdef WINDOWS
  int fd = wopen(path, O_RDONLY);
#else  
  int fd = open(path, O_RDONLY);
#endif

  Boolean image_is_embedded = false;

  if (fd >= 0) {
    openmcl_image_file_header h;

    if (find_openmcl_image_file_header (fd, &h)) {
      image_is_embedded = true;
    }
    close (fd);
  }
  return image_is_embedded;
}
+
+LispObj
+load_image(
+#ifdef WINDOWS
+           wchar_t * path
+#else
+           char *path
+#endif
+)
+{
+#ifdef WINDOWS
+  int fd = wopen(path, O_RDONLY, 0666), err;
+#else
+  int fd = open(path, O_RDONLY, 0666), err;
+#endif
+  LispObj image_nil = 0;
+
+  if (fd > 0) {
+    openmcl_image_file_header ih;
+
+    errno = 0;
+    image_nil = load_openmcl_image(fd, &ih);
+    /* We -were- using a duplicate fd to map the file; that
+       seems to confuse Darwin (doesn't everything ?), so
+       we'll instead keep the original file open.
+    */
+    err = errno;
+    if (!image_nil) {
+      close(fd);
+    }
+#ifdef WINDOWS
+    /* We currently don't actually map the image, and leaving the file
+       open seems to make it difficult to write to reliably. */
+    if (image_nil) {
+      close(fd);
+    }
+#endif
+  } else {
+    err = errno;
+  }
+  if (image_nil == 0) {
+    if (err == 0) {
+      fprintf(dbgout, "Couldn't load lisp heap image from %s\n", path);
+    } else {
+      fprintf(dbgout, "Couldn't load lisp heap image from %s:\n%s\n", path, strerror(err));
+    }
+    exit(-1);
+  }
+  return image_nil;
+}
+
/* Set errno to VAL and return -1, the conventional "syscall failed"
   result, so callers can report a specific error. */
int
set_errno(int val)
{
  int failure = -1;

  errno = val;
  return failure;
}
+
/* A horrible hack to allow us to initialize a JVM instance from lisp.
   On Darwin, creating a JVM instance clobbers the thread's existing
   Mach exception infrastructure, so we save and restore it here.
*/

/* Type of the JVM-creation entry point invoked below. */
typedef int (*jvm_initfunc)(void*,void*,void*);

/* Call F(ARG0,ARG1,ARG2) and return its result; on Darwin,
   re-establish this thread's lisp Mach exception port afterwards
   (see comment above). */
int
jvm_init(jvm_initfunc f,void*arg0,void*arg1,void*arg2)
{
  int result = -1;
  TCR *tcr = get_tcr(1);
#ifdef DARWIN
  extern kern_return_t tcr_establish_lisp_exception_port(TCR *);
#endif
  
  result = f(arg0,arg1,arg2);
#ifdef DARWIN
  tcr_establish_lisp_exception_port(tcr);
#endif
  return result;
}
+  
+
+
+
/* Look up NAME in the shared library HANDLE, papering over platform
   differences: on Darwin, NULL or -1 handles mean RTLD_DEFAULT and a
   name with a leading underscore is retried without it; on Windows,
   defer to windows_find_symbol(). */
void *
xFindSymbol(void* handle, char *name)
{
#if defined(LINUX) || defined(FREEBSD) || defined(SOLARIS)
  return dlsym(handle, name);
#endif
#ifdef DARWIN
#if 1
  void *result;

  if ((handle == NULL) || (handle == ((void *) -1))) {
    handle = RTLD_DEFAULT;
  }    
  result = dlsym(handle, name);
  /* Retry without a leading underscore: C symbol names carry one in
     Mach-O but dlsym() expects the unadorned name. */
  if ((result == NULL) && (*name == '_')) {
    result = dlsym(handle, name+1);
  }
  return result;
#else
  /* Legacy _dyld-based lookup, disabled. */
  natural address = 0;

  if ((handle == NULL) ||
      (handle == (void *)-1) ||
      (handle == (void *)-2)){
    if (NSIsSymbolNameDefined(name)) { /* Keep dyld_lookup from crashing */
      _dyld_lookup_and_bind(name, (void *) &address, (void*) NULL);
    }
    return (void *)address;
  }
  Bug(NULL, "How did this happen ?");
#endif
#endif
#ifdef WINDOWS
  extern void *windows_find_symbol(void *, char *);
  return windows_find_symbol(handle, name);
#endif
}
+
/* Return the address of the dynamic linker's r_debug structure by
   scanning our own _DYNAMIC array for its DT_DEBUG entry (ELF
   platforms only); NULL when absent or unsupported. */
void *
get_r_debug()
{
#if defined(LINUX) || defined(FREEBSD) || defined(SOLARIS)
#if WORD_SIZE == 64
  extern Elf64_Dyn _DYNAMIC[];
  Elf64_Dyn *dp;
#else
  extern Elf32_Dyn _DYNAMIC[];
  Elf32_Dyn *dp;
#endif
  int tag;

  for (dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++) {
    if (tag == DT_DEBUG) {
      return (void *)(dp->d_un.d_ptr);
    }
  }
#endif
  return NULL;
}
+
+
#ifdef DARWIN
/* Snapshot the process's paging statistics (Mach task events info). */
void
sample_paging_info(paging_info *stats)
{
  mach_msg_type_number_t count = TASK_EVENTS_INFO_COUNT;

  task_info(mach_task_self(),
            TASK_EVENTS_INFO,
            (task_info_t)stats,
            &count);
}

/* Print the difference between two paging snapshots to OUT. */
void
report_paging_info_delta(FILE *out, paging_info *start, paging_info *stop)
{
  fprintf(out,";;; %d soft faults, %d faults, %d pageins\n\n",
          stop->cow_faults-start->cow_faults,
          stop->faults-start->faults,
          stop->pageins-start->pageins);
}

#else
#ifdef WINDOWS
/* Not implemented on Windows. */
void
sample_paging_info(paging_info *stats)
{
}

void
report_paging_info_delta(FILE *out, paging_info *start, paging_info *stop)
{
}
#else
/* Snapshot the process's paging statistics via getrusage(). */
void
sample_paging_info(paging_info *stats)
{
  getrusage(RUSAGE_SELF, stats);
}

/* Print the difference between two paging snapshots to OUT. */
void
report_paging_info_delta(FILE *out, paging_info *start, paging_info *stop)
{
  fprintf(out,";;; %ld soft faults, %ld faults, %ld pageins\n\n",
          stop->ru_minflt-start->ru_minflt,
          stop->ru_majflt-start->ru_majflt,
          stop->ru_nswap-start->ru_nswap);
}

#endif
#endif
+
/* Grow the static cons area downward by N dnodes (one cons each),
   thread the new cells (cars set to unbound) onto the front of the
   STATIC_CONSES freelist, and bump FREE_STATIC_CONSES.  Every area
   whose low/high bound coincided with the old boundary is adjusted
   to the new one. */
void
allocate_static_conses(natural n)
{
  BytePtr old_low = static_cons_area->low,
    new_low = old_low - (n<<dnode_shift);
  cons *c;
  natural i;
  LispObj prev;

  CommitMemory(new_low,old_low-new_low);

  static_cons_area->low = new_low;
  lower_heap_start(new_low, tenured_area);
  /* what a mess this is ... */
  if (active_dynamic_area->low == old_low) {
    active_dynamic_area->low = new_low;
  }
  if (!active_dynamic_area->older) {
    active_dynamic_area->markbits = tenured_area->refbits;
  }
  if (g1_area->low == old_low) {
    g1_area->low = new_low;
  }
  if (g1_area->high == old_low) {
    g1_area->high = new_low;
  }
  if (g2_area->low == old_low) {
    g2_area->low = new_low;
  }
  if (g2_area->high == old_low) {
    g2_area->high = new_low;
  }
  /* Chain the fresh conses onto the existing freelist, newest first. */
  for (i=0, prev=lisp_global(STATIC_CONSES), c=(cons *)new_low;
       i < n;
       i++, c++) {
    c->car = unbound;
    c->cdr = prev;
    prev = ((LispObj)c)+fulltag_cons;
  }
  lisp_global(STATIC_CONSES)=prev;
  lisp_global(FREE_STATIC_CONSES)+=(n<<fixnumshift);
}
+void
+ensure_static_conses(ExceptionInformation *xp, TCR *tcr, natural nconses)
+{
+  area *a = active_dynamic_area;
+  natural nbytes = nconses>>dnode_shift, have;
+  BytePtr p = a->high-nbytes;
+
+  if (p < a->active) {
+    untenure_from_area(tenured_area);
+    gc_from_xp(xp, 0L);
+  }
+
+  have = unbox_fixnum(lisp_global(FREE_STATIC_CONSES));
+  if (have < nconses) {
+    if ((a->high-a->active)>nbytes) {
+      shrink_dynamic_area(nbytes);
+    }
+    allocate_static_conses(nconses);
+    tcr->bytes_allocated += nbytes;
+  }
+}
+      
Index: /branches/arm/lisp-kernel/ppc-asmutils.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-asmutils.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-asmutils.s	(revision 13357)
@@ -0,0 +1,458 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+	
+
+	include(lisp.s)
+
+	_beginfile
+/*  Zero R4 cache lines, starting at address in R3.  Each line is assumed to be */
+/* R5 bytes wide. */
+/* r3 = base address, r4 = line count, r5 = line size in bytes. */
+_exportfn(C(zero_cache_lines))
+	__(cmpri(cr0,r4,0))
+	__(mtctr r4)            /* loop count -> CTR */
+	__(beqlr)               /* nothing to do if r4 == 0 */
+1:
+	__(DCBZL(0,r3))         /* zero the cache line at r3 */
+	__(add r3,r3,r5)        /* step to the next line */
+	__(bdnz 1b)
+	__(blr)
+_endfn
+
+/*  Flush R4 cache lines, starting at address in R3.  Each line is */
+/* assumed to be R5 bytes wide. */
+/* r3 = base address, r4 = line count, r5 = line size.  Typical use */
+/* is after code has been written to memory: push the data-cache */
+/* lines to storage, then invalidate the matching icache lines. */
+_exportfn(C(flush_cache_lines))
+	__(cmpri(cr0,r4,0))
+	__(mtctr r4)
+        __(mr r6,r3)            /* save base for the second pass */
+	__(beqlr)
+1:
+	__(dcbst 0,r3)          /* store dcache line to memory */
+        __(add r3,r3,r5)
+        __(bdnz 1b)
+	__(sync)                /* wait until dcbst's get to memory */
+        __(mr r3,r6)
+        __(mtctr r4)
+2:      
+	__(icbi 0,r3)           /* invalidate icache line */
+	__(add r3,r3,r5)
+	__(bdnz 2b)
+        __(sync)
+	__(isync)               /* discard any stale prefetched insns */
+	__(blr)
+/* The strange reference to "exp" is supposed to force the kernel to */
+/* load libm, so lisp code can use it.   Under Darwin, the functionality */
+/* of libm is contained in libsystem, along with libc & everything else. */
+
+        __ifndef(`DARWIN')
+        .data
+        __ifdef(`PPC64')
+        .quad exp
+        __else
+        .long exp
+        __endif
+        .text        
+        __endif
+_endfn
+
+/* Probe the page at r3 for writability: store to it, store zero */
+/* back, and return 1.  The exported touch_page_end label bounds */
+/* this code; presumably a fault handler recognizes PCs in the */
+/* touch_page..touch_page_end range and returns 0 instead -- */
+/* NOTE(review): confirm against the exception-handling code. */
+_exportfn(C(touch_page))
+        __(str(r3,0(r3)))
+        __(li r4,0)
+        __(str(r4,0(r3)))
+        __(li r3,1) /* can't assume that low 32 bits of r3 are non-zero */
+        .globl C(touch_page_end)
+C(touch_page_end):
+        __(blr)
+_endfn
+                                
+/* Return the current C stack pointer in r3. */
+_exportfn(C(current_stack_pointer))
+	__(mr r3,sp)
+	__(blr)
+_endfn
+	
+/* Return the number of leading zero bits in the word-sized */
+/* argument in r3 (64-bit count on PPC64, 32-bit otherwise). */
+_exportfn(C(count_leading_zeros))
+        __ifdef(`PPC64')
+        __(cntlzd r3,r3)
+        __else
+	__(cntlzw r3,r3)
+        __endif
+	__(blr)
+_endfn
+
+/* Do nothing, quickly. */
+_exportfn(C(noop))
+	__(blr)
+_endfn
+
+/* Set the FPSCR from the 32-bit value in r3.  The value is stored */
+/* at 12(sp) so that the doubleword loaded from 8(sp) carries it in */
+/* its low word (big-endian), which is the part mtfsf uses. */
+_exportfn(C(set_fpscr))
+	__(stru(sp,-32(sp)))
+	__(stw r3,12(sp))
+	__(lfd f0,8(sp))
+	__(mtfsf 0xff,f0)
+	__(la sp,32(sp))
+	__(blr)
+_endfn
+
+
+/* Return the current FPSCR value in r3 (the low word of the */
+/* doubleword image that mffs produces). */
+_exportfn(C(get_fpscr))
+	__(stru(sp,-32(sp)))
+        __(mffs f0)
+        __(stfd f0,8(sp))
+        __(lwz r3,12(sp))
+	__(la sp,32(sp))
+	__(blr)
+_endfn
+                
+
+/* The Linux kernel is constantly enabling and disabling the FPU and enabling */
+/* FPU exceptions.  We can't touch the FPU without turning off the FPSCR`FEX' */
+/* bit and we can't turn off the FPSCR`FEX' bit without touching the FPU. */
+/* Force a distinguished exception, and let the handler for that exception */
+/* zero the fpscr in its exception context. */
+
+_exportfn(C(zero_fpscr))
+	__(uuo_zero_fpscr())    /* distinguished trap; the handler zeros the FPSCR in the exception context */
+	__(blr)
+_endfn
+	
+	
+/* Store f0-f31 followed by the FPSCR image into the buffer at r3 */
+/* (33 consecutive doublewords).  f0 is reloaded from the buffer at */
+/* the end, so no FPR is clobbered from the caller's viewpoint. */
+_exportfn(C(save_fp_context))
+	__(subi r4,r3,8)        /* pre-decrement base for the stfdu chain */
+	__(stfdu f0,8(r4))
+	__(stfdu f1,8(r4))
+	__(stfdu f2,8(r4))
+	__(stfdu f3,8(r4))
+	__(stfdu f4,8(r4))
+	__(stfdu f5,8(r4))
+	__(stfdu f6,8(r4))
+	__(stfdu f7,8(r4))
+	__(stfdu f8,8(r4))
+	__(stfdu f9,8(r4))
+	__(stfdu f10,8(r4))
+	__(stfdu f11,8(r4))
+	__(stfdu f12,8(r4))
+	__(stfdu f13,8(r4))
+	__(stfdu f14,8(r4))
+	__(stfdu f15,8(r4))
+	__(stfdu f16,8(r4))
+	__(stfdu f17,8(r4))
+	__(stfdu f18,8(r4))
+	__(stfdu f19,8(r4))
+	__(stfdu f20,8(r4))
+	__(stfdu f21,8(r4))
+	__(stfdu f22,8(r4))
+	__(stfdu f23,8(r4))
+	__(stfdu f24,8(r4))
+	__(stfdu f25,8(r4))
+	__(stfdu f26,8(r4))
+	__(stfdu f27,8(r4))
+	__(stfdu f28,8(r4))
+	__(stfdu f29,8(r4))
+	__(stfdu f30,8(r4))
+	__(stfdu f31,8(r4))
+	__(mffs f0)
+	__(stfd f0,8(r4))       /* FPSCR image follows f31 */
+	__(lfd f0,0(r3))        /* restore the caller's f0 */
+	__(blr)
+_endfn
+
+/* Load f1-f31 and the FPSCR from the buffer at r3 (the layout */
+/* written by save_fp_context), then reload f0 itself from the */
+/* start of the buffer. */
+_exportfn(C(restore_fp_context))
+	__(mr r4,r3)
+	__(lfdu f1,8(r4))
+	__(lfdu f2,8(r4))
+	__(lfdu f3,8(r4))
+	__(lfdu f4,8(r4))
+	__(lfdu f5,8(r4))
+	__(lfdu f6,8(r4))
+	__(lfdu f7,8(r4))
+	__(lfdu f8,8(r4))
+	__(lfdu f9,8(r4))
+	__(lfdu f10,8(r4))
+	__(lfdu f11,8(r4))
+	__(lfdu f12,8(r4))
+	__(lfdu f13,8(r4))
+	__(lfdu f14,8(r4))
+	__(lfdu f15,8(r4))
+	__(lfdu f16,8(r4))
+	__(lfdu f17,8(r4))
+	__(lfdu f18,8(r4))
+	__(lfdu f19,8(r4))
+	__(lfdu f20,8(r4))
+	__(lfdu f21,8(r4))
+	__(lfdu f22,8(r4))
+	__(lfdu f23,8(r4))
+	__(lfdu f24,8(r4))
+	__(lfdu f25,8(r4))
+	__(lfdu f26,8(r4))
+	__(lfdu f27,8(r4))
+	__(lfdu f28,8(r4))
+	__(lfdu f29,8(r4))
+	__(lfdu f30,8(r4))
+	__(lfdu f31,8(r4))
+	__(lfd f0,8(r4))        /* saved FPSCR image */
+	__(mtfsf 0xff,f0)
+	__(lfd f0,0(r3))        /* finally restore f0 */
+	__(blr)
+_endfn
+
+
+
+/* Atomically store new value (r5) in *r3, if old value == expected. */
+/* Return actual old value. */
+
+_exportfn(C(store_conditional))
+        __(mr r6,r3)
+1:      __(lrarx(r3,0,r6))      /* load-reserved: old value -> r3 */
+        __(cmpw r3,r4)          /* matches expected value (r4)? */
+        __(bne- 2f)
+        __(strcx(r5,0,r6))      /* store-conditional the new value */
+        __(bne- 1b)             /* reservation lost: retry */
+        __(isync)
+        __(blr)
+2:      __(li r0,RESERVATION_DISCHARGE)
+        __(strcx(r0,0,r0))      /* discharge the dangling reservation */
+        __(blr)
+_endfn
+
+/* Atomically store new_value(r4) in *r3 ;  return previous contents */
+/* of *r3. */
+
+_exportfn(C(atomic_swap))
+        __(sync)                /* order prior stores before the swap */
+1:	__(lrarx(r5,0,r3))      /* old value -> r5 */
+	__(strcx(r4,0,r3))
+	__(bne- 1b)             /* reservation lost: retry */
+	__(isync)
+	__(mr r3,r5)            /* return previous contents */
+	__(blr)
+_endfn
+
+/* Logior the value in *r3 with the value in r4 (presumably a bitmask with exactly 1 */
+/* bit set.)  Return non-zero if any of the bits in that bitmask were already set. */
+        
+_exportfn(C(atomic_ior))
+        __(sync)
+1:	__(lrarx(r5,0,r3))
+        __(or r6,r4,r5)
+	__(strcx(r6,0,r3))
+	__(bne- 1b)             /* reservation lost: retry */
+	__(isync)
+	__(and r3,r4,r5)        /* non-zero iff some mask bit was already set */
+	__(blr)
+_endfn
+
+
+/* Logand the value in *r3 with the value in r4 (presumably a bitmask with exactly 1 */
+/* bit set.)  Return the value now in *r3 (for some value of "now"). */
+
+_exportfn(C(atomic_and))
+        __(sync)
+1:	__(lrarx(r5,0,r3))
+        __(and r6,r4,r5)
+	__(strcx(r6,0,r3))
+	__(bne- 1b)             /* reservation lost: retry */
+	__(isync)
+	__(mr r3,r6)            /* return the value just stored */
+	__(blr)
+_endfn
+                
+	
+        __ifdef(`DARWIN')
+/* The instruction word 0 is illegal; executing it presumably traps */
+/* to an exception handler that recognizes these specific PCs and */
+/* performs the named operation -- NOTE(review): confirm against */
+/* the Darwin exception-handling code. */
+_exportfn(C(enable_fp_exceptions))
+        __(.long 0)
+        __(blr)
+_endfn
+        
+_exportfn(C(disable_fp_exceptions))
+        __(.long 0)
+        __(blr)
+_endfn
+
+_exportfn(C(pseudo_sigreturn))
+	__(.long 0)
+	__(b C(pseudo_sigreturn))  /* spin if the handler ever returns here */
+_endfn
+        __endif
+	
+/* Copy all 32 Altivec registers (+ VSCR & VRSAVE) to the buffer */
+/* in r3.  If the buffer's non-NULL, it's aligned and big enough, */
+/* and Altivec is present. */
+
+/* Buffer layout: v0..v31 at offsets 0,16,...,496; VSCR image at */
+/* offset 512.  NOTE(review): VRSAVE is stored via "stw r5,8(r4)", */
+/* which addresses 8+r4 (r4 holds the plain offset 528 here), i.e. */
+/* an absolute address rather than r3-relative -- looks suspicious; */
+/* verify against the consumer of this buffer. */
+_exportfn(C(put_vector_registers))
+	__(cmpri(r3,0))
+	__(li r4,0)
+	__(beqlr)
+	__(stvx v0,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v1,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v2,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v3,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v4,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v5,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v6,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v7,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v8,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v9,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v10,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v11,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v12,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v13,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v14,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v15,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v16,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v17,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v18,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v19,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v20,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v21,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v22,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v23,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v24,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v25,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v26,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v27,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v28,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v29,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v30,r3,r4)
+	__(la r4,16(r4))
+	__(stvx v31,r3,r4)
+	__(la r4,16(r4))
+	__(mfvscr v0)
+	__(stvx v0,r3,r4)       /* VSCR image at offset 32*16 */
+	__(mfspr r5,256)        /* SPR 256 is VRSAVE */
+	__(stw r5,8(r4))
+	__(blr)
+_endfn
+
+/* Inverse of put_vector_registers: reload VSCR, VRSAVE, and */
+/* v31..v0 from the buffer at r3 (same layout).  NOTE(review): */
+/* "lwz r5,8(r4)" mirrors put_vector_registers' r4-relative (i.e. */
+/* absolute) VRSAVE address -- verify it matches the writer. */
+_exportfn(C(get_vector_registers))
+	__(cmpri(r3,0))
+	__(li r4,32*16)         /* start at the VSCR slot */
+	__(beqlr)
+	__(lvx v0,r3,r4)
+	__(mtvscr v0)
+	__(lwz r5,8(r4))
+	__(mtspr 256,r5)        /* SPR 256 is VRSAVE */
+	__(la r4,-16(r4))
+	__(lvx v31,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v30,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v29,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v28,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v27,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v26,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v25,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v24,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v23,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v22,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v21,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v20,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v19,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v18,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v17,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v16,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v15,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v14,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v13,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v12,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v11,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v10,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v9,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v8,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v7,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v6,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v5,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v4,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v3,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v2,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v1,r3,r4)
+	__(la r4,-16(r4))
+	__(lvx v0,r3,r4)
+	__(blr)
+_endfn
+
+/* Some versions of Linux don't implement madvise().  That's */
+/* not catastrophic, but some versions of glibc will make a */
+/* big deal out of that at link time.  This is here to try */
+/* to fool those versions of glibc. */
+
+        __ifdef(`LINUX')
+	.globl set_errno
+/* Invoke the madvise system call directly (syscall number 205); */
+/* if the kernel flagged an error (summary-overflow set by sc), */
+/* tail-call set_errno with the error code in r3. */
+_exportfn(C(madvise))
+	__(li r0,205)	/* _NR_madvise; see /usr/include/asm/unistd.h */
+	__(sc)
+	__(bnslr)               /* no error: return the result */
+	__(b set_errno)
+_endfn
+        __endif
+
+	_endfile
Index: /branches/arm/lisp-kernel/ppc-constants.h
===================================================================
--- /branches/arm/lisp-kernel/ppc-constants.h	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-constants.h	(revision 13357)
@@ -0,0 +1,92 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __ppc_constants__
+#define __ppc_constants__ 1
+
+/*  Register usage: */
+#define rzero 0
+#define sp 1
+#define linux_sys_reg 2  /* volatile reg on Darwin ; thread ptr on Linux32, TOC on
+                                Linux64. */
+#define imm0 3
+#define imm1 4
+#define imm2 5
+#define imm3 6
+#define imm4 7
+#define imm5 8
+#define allocptr 9
+#define allocbase 10
+#define nargs 11
+#define tsp 12
+#define loc_pc 14		/*  code vector locative */
+#define vsp 15		
+#define fn 16
+#define temp3 17
+#define temp2 18
+#define temp1 19
+#define temp0 20	
+#define arg_x 21
+#define arg_y 22
+#define arg_z 23
+#define save7 24
+#define save6 25
+#define save5 26
+#define save4 27
+#define save3 28
+#define save2 29
+#define save1 30
+#define save0 31
+
+#define vfp save0	/*  frame pointer if needed (stack consing). */
+#define fname temp3
+#define nfn temp2
+#define next_method_context temp1
+#define closure_data temp0
+
+
+#define BA_MASK ((unsigned) ((-1<<26) | (1<<1)))
+#define BA_VAL  ((unsigned) ((18<<26) | (1<<1)))
+
+#define TCR_FLAG_BIT_FOREIGN fixnumshift
+#define TCR_FLAG_BIT_AWAITING_PRESET (fixnumshift+1)
+#define TCR_FLAG_BIT_ALT_SUSPEND (fixnumshift+2)
+#define TCR_FLAG_BIT_PROPAGATE_EXCEPTION (fixnumshift+3)
+#define TCR_FLAG_BIT_SUSPEND_ACK_PENDING (fixnumshift+4)
+#define TCR_FLAG_BIT_PENDING_EXCEPTION (fixnumshift+5)
+#define TCR_FLAG_BIT_FOREIGN_EXCEPTION (fixnumshift+6)
+#define TCR_FLAG_BIT_PENDING_SUSPEND (fixnumshift+7)
+
+#define TCR_STATE_FOREIGN (1)
+#define TCR_STATE_LISP    (0)
+#define TCR_STATE_EXCEPTION_WAIT (2)
+#define TCR_STATE_EXCEPTION_RETURN (4)
+
+#ifdef PPC64
+#include "ppc-constants64.h"
+#else
+#include "ppc-constants32.h"
+#endif
+
+#define dnode_size (node_size*2)
+#define dnode_shift (node_shift+1)
+
+#define INTERRUPT_LEVEL_BINDING_INDEX (1)
+
+#endif /* __ppc_constants__ */
+
+
Index: /branches/arm/lisp-kernel/ppc-constants.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-constants.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-constants.s	(revision 13357)
@@ -0,0 +1,239 @@
+/* Copyright (C) 2004-2009 Clozure Associates */
+/* This file is part of Clozure CL. */
+ 
+/* Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/* License , known as the LLGPL and distributed with Clozure CL as the */
+/* file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/* which is distributed with Clozure CL as the file "LGPL".  Where these */
+/* conflict, the preamble takes precedence. */
+ 
+/* Clozure CL is referenced in the preamble as the "LIBRARY." */
+ 
+/* The LLGPL is also available online at */
+/* http://opensource.franz.com/preamble.html */
+
+
+/* Register usage: */
+
+
+define(`rzero',`r0')	
+define(`sp',`r1')
+
+define(`imm0',`r3')
+define(`imm1',`r4')
+define(`imm2',`r5')
+define(`imm3',`r6')
+define(`imm4',`r7')
+define(`imm5',`r8')
+define(`allocptr',`r9')
+define(`allocbase',`r10')
+define(`nargs',`r11')
+define(`tsp',`r12')      /* temp-consing stack. */
+
+define(`loc_pc',`r14') 	 /* code vector locative */
+define(`vsp',`r15')
+define(`fn',`r16')
+define(`temp3',`r17')
+define(`temp2',`r18')
+define(`temp1',`r19')
+define(`temp0',`r20')
+define(`arg_x',`r21')
+define(`arg_y',`r22')
+define(`arg_z',`r23')
+define(`save7',`r24')
+define(`save6',`r25')
+define(`save5',`r26')
+define(`save4',`r27')
+define(`save3',`r28')
+define(`save2',`r29')
+define(`save1',`r30')
+define(`save0',`r31')
+
+define(`fname',`temp3')
+define(`nfn',`temp2')
+define(`next_method_context',`temp1')
+define(`first_nvr',`save7')
+define(`second_nvr',`save6')        
+define(`third_nvr',`save5')
+define(`fourth_nvr',`save4')        
+define(`fifth_nvr',`save3')
+define(`sixth_nvr',`save2')        
+define(`seventh_nvr',`save1')
+define(`eighth_nvr',`save0')        
+define(`nargregs',`3')
+	
+r0 = 0
+r1 = 1
+r2 = 2
+r3 = 3
+r4 = 4
+r5 = 5
+r6 = 6
+r7 = 7
+r8 = 8
+r9 = 9
+r10 = 10
+r11 = 11
+r12 = 12
+r13 = 13
+r14 = 14
+r15 = 15
+r16 = 16
+r17 = 17
+r18 = 18
+r19 = 19
+r20 = 20
+r21 = 21
+r22 = 22
+r23 = 23
+r24 = 24
+r25 = 25
+r26 = 26
+r27 = 27
+r28 = 28
+r29 = 29
+r30 = 30
+r31 = 31
+
+/* Lisp code keeps 0.0 in fp_zero */
+define(`fp_zero',`f31')   /* a non-volatile reg as far as FFI is concerned. */
+define(`fp_s32conv',`f30')   /* for s32->fp conversion */
+	
+/* registers, as used in destructuring-bind/macro-bind */
+
+define(`whole_reg',`temp1')
+define(`arg_reg',`temp3')
+define(`keyvect_reg',`temp2')
+define(`mask_req_start',`24')
+define(`mask_req_width',`8')
+define(`mask_opt_start',`16')
+define(`mask_opt_width',`8')
+define(`mask_key_start',`8')
+define(`mask_key_width',`8')
+define(`mask_initopt',`7')
+define(`mask_keyp',`6') /*  note that keyp can be true even when 0 keys. */
+define(`mask_aok',`5')
+define(`mask_restp',`4')
+
+ifdef(`DARWIN',`
+	define(`STACK_ALIGN',16)
+	define(`STACK_ALIGN_MASK',15)
+',`
+	define(`STACK_ALIGN',8)
+	define(`STACK_ALIGN_MASK',7)
+')
+
+/* Indices in %builtin-functions% */
+_builtin_plus = 0	/* +-2 */
+_builtin_minus = 1	/* --2 */
+_builtin_times = 2	/* *-2 */
+_builtin_div = 3	/* /-2 */
+_builtin_eq = 4		/* =-2 */
+_builtin_ne = 5		/* /=-2 */
+_builtin_gt = 6		/* >-2 */
+_builtin_ge = 7		/* >=-2 */
+_builtin_lt = 8		/* <-2 */
+_builtin_le = 9		/* <=-2 */
+_builtin_eql = 10	/* eql */
+_builtin_length = 11	/* length */
+_builtin_seqtype = 12	/* sequence-type */
+_builtin_assq = 13	/* assq */
+_builtin_memq = 14	/* memq */
+_builtin_logbitp = 15	/* logbitp */
+_builtin_logior = 16	/* logior-2 */
+_builtin_logand = 17	/* logand-2 */
+_builtin_ash = 18	/* ash */
+_builtin_negate = 19	/* %negate */
+_builtin_logxor = 20	/* logxor-2 */
+_builtin_aref1 = 21	/* %aref1 */
+_builtin_aset1 = 22	/* %aset1 */
+
+	/* FPSCR status bits */
+fpscr_FX = 0
+fpscr_FEX = 1
+fpscr_VX = 2
+fpscr_OX = 3
+fpscr_UX = 4
+fpscr_ZX = 5
+fpscr_XX = 6
+	/* FPSCR control bits */
+fpscr_VE = 24
+fpscr_OE = 25
+fpscr_UE = 26
+fpscr_ZE = 27
+fpscr_XE = 28
+	
+
+/* This should be (a) an (UNSIGNED-BYTE 16) and (b) one less than */
+/* TSTACK_SOFTPROT (defined in "area.h") */
+		
+tstack_alloc_limit = 0xffff
+        
+define(`TCR_STATE_FOREIGN',1)
+define(`TCR_STATE_LISP',0)
+define(`TCR_STATE_EXCEPTION_WAIT',2)
+define(`TCR_STATE_EXCEPTION_RETURN',4)
+
+        
+
+        	
+ifdef(`PPC64',`
+        include(ppc-constants64.s)
+',`
+        include(ppc-constants32.s)
+')
+
+num_lisp_globals = 49		 /* MUST UPDATE THIS !!! */
+	
+	_struct(lisp_globals,lisp_globals_limit-(num_lisp_globals*node_size))
+	 _node(weakvll)                 /* all populations as of last GC */
+	 _node(initial_tcr)	        /* initial thread tcr */
+	 _node(image_name)	        /* --image-name argument */
+	 _node(BADfpscr_save_high)      /* high word of FP reg used to save FPSCR */
+	 _node(unwind_resume)           /* _Unwind_Resume */
+	 _node(batch_flag)	        /* -b */
+	 _node(host_platform)	        /* for runtime platform-specific stuff */
+	 _node(argv)			/* address of argv`0' */
+	 _node(ref_base)		        /* start of oldest pointer-bearing area */
+	 _node(tenured_area) 		/* the tenured_area */
+	 _node(oldest_ephemeral) 	/* dword address of oldest ephemeral object or 0 */
+	 _node(lisp_exit_hook)		/* install foreign exception_handling */
+	 _node(lisp_return_hook)	/* install lisp exception_handling */
+	 _node(double_float_one) 	/* high half of 1.0d0 */
+	 _node(short_float_zero) 	/* low half of 1.0d0 */
+	 _node(objc2_end_catch)         /* objc_end_catch() */
+	 _node(metering_info) 		/* address of lisp_metering global */
+	 _node(in_gc) 			/* non-zero when GC active */
+	 _node(lexpr_return1v) 		/* simpler when &lexpr called for single value. */
+	 _node(lexpr_return) 		/* magic &lexpr return code. */
+	 _node(all_areas) 		/* doubly-linked list of all memory areas */
+	 _node(kernel_path) 		/* real executable name */
+	 _node(objc2_begin_catch) 	/* objc_begin_catch */
+	 _node(stack_size) 		/* from command-line */
+	 _node(statically_linked)	/* non-zero if -static */
+	 _node(heap_end)                /* end of lisp heap */
+	 _node(heap_start)              /* start of lisp heap */
+	 _node(gcable_pointers)         /* linked-list of weak macptrs. */
+	 _node(gc_num)                  /* fixnum: GC call count. */
+	 _node(fwdnum)                  /* fixnum: GC "forwarder" call count. */
+	 _node(altivec_present)         /* non-zero when AltiVec available */
+	 _node(oldspace_dnode_count) 	/* dynamic dnodes older than g0 start */
+	 _node(refbits) 		/* EGC refbits */
+	 _node(gc_inhibit_count)
+	 _node(intflag) 		/* sigint pending */
+	 _node(BAD_block_tag_counter) 	/* counter for (immediate) block tag */
+	 _node(deleted_static_pairs) 		
+	 _node(exception_lock)
+	 _node(area_lock)
+	 _node(tcr_key) 		/* tsd key for per-thread tcr */
+	 _node(ret1val_addr) 		/* address of "dynamic" subprims magic values return addr */
+	 _node(subprims_base) 		/* address of dynamic subprims jump table */
+	 _node(saveR13)			/* probably don't really need this */
+	 _node(saveTOC)                 /* where the 68K emulator stores the  emulated regs */
+	 _node(objc_2_personality)      /* exception "personality routine" address for ObjC 2.0 */ 
+	 _node(kernel_imports) 		/* some things we need imported for us */
+	 _node(interrupt_signal)	/* signal used by PROCESS-INTERRUPT */
+	 _node(tcr_count) 		/* tcr_id for next tcr */
+	 _node(get_tcr) 		/* address of get_tcr() */
+	_ends
+	
Index: /branches/arm/lisp-kernel/ppc-constants32.h
===================================================================
--- /branches/arm/lisp-kernel/ppc-constants32.h	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-constants32.h	(revision 13357)
@@ -0,0 +1,475 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __constants32__
+#define __constants32__ 1
+
+#define rcontext 13
+
+#define nbits_in_word 32
+#define log2_nbits_in_word 5
+#define nbits_in_byte 8
+#define ntagbits 3	/* But only 2 are significant to lisp */
+#define nlisptagbits 2
+#define nfixnumtagbits 2
+#define num_subtag_bits 8
+#define fixnumshift 2
+#define fixnum_shift 2
+#define fulltagmask 7
+#define tagmask	 3
+#define fixnummask 3
+#define subtagmask ((1<<num_subtag_bits)-1)
+#define ncharcodebits 24        /* Only the low 8 are used currently */
+#define charcode_shift (nbits_in_word-ncharcodebits)
+#define node_size 4
+#define node_shift 2
+
+/*  Tags. */
+/*  There are two-bit tags and three-bit tags. */
+/*  A FULLTAG is the value of the low three bits of a tagged object. */
+/*  A TAG is the value of the low two bits of a tagged object. */
+/*  A TYPECODE is either a TAG or the value of a "tag-misc" object's header-byte. */
+
+/*  There are 4 primary TAG values.  Any object which lisp can "see" can be classified  */
+/*  by its TAG.  (Some headers have FULLTAGS that are congruent modulo 4 with the */
+/*  TAGS of other objects, but lisp can't "see" headers.) */
+
+
+#define tag_fixnum 0	/*  All fixnums, whether odd or even */
+#define tag_list 1	/*  Conses and NIL */
+#define tag_misc 2	/*  Heap-consed objects other than lists: vectors, symbols, functions, floats ... */
+#define tag_imm	 3	/*  Immediate-objects: characters, UNBOUND, other markers. */
+
+/*  And there are 8 FULLTAG values.  Note that NIL has its own FULLTAG (congruent mod 4 to tag-list), */
+/*  that FULLTAG-MISC is > 4 (so that code-vector entry-points can be branched to, since the low */
+/*  two bits of the PC are ignored) and that both FULLTAG-MISC and FULLTAG-IMM have header fulltags */
+/*  that share the same TAG. */
+/*  Things that walk memory (and the stack) have to be careful to look at the FULLTAG of each */
+/*  object that they see. */
+
+#define fulltag_even_fixnum 0	/*  I suppose EVENP/ODDP might care; nothing else does. */
+#define fulltag_cons	 1	/*  a real (non_null) cons.  Shares TAG with fulltag_nil. */
+#define fulltag_nodeheader 2	/*  Header of heap_allocated object that contains lisp_object pointers */
+#define fulltag_imm	 3	/*  a "real" immediate object.  Shares TAG with fulltag_immheader. */
+#define fulltag_odd_fixnum 4	/*   */
+#define fulltag_nil	 5	/*  NIL and nothing but.  (Note that there's still a hidden NILSYM.) */
+#define fulltag_misc	 6	/*  Pointer "real" tag_misc object.  Shares TAG with fulltag_nodeheader. */
+#define fulltag_immheader 7	/*  Header of heap-allocated object that contains unboxed data. */
+
+
+
+/*  Order of CAR and CDR doesn't seem to matter much - there aren't */
+/*  too many tricks to be played with predecrement/preincrement addressing. */
+/*  Keep them in the confusing MCL 3.0 order, to avoid confusion. */
+
+typedef struct cons {
+  LispObj cdr;
+  LispObj car;
+} cons;
+
+
+#define misc_header_offset -fulltag_misc
+#define misc_subtag_offset misc_header_offset+3		/*  low byte of header */
+#define misc_data_offset misc_header_offset+4		/*  first word of data */
+#define misc_dfloat_offset misc_header_offset+8		/*  double-floats are doubleword-aligned */
+
+#define max_64_bit_constant_index ((0x7fff + misc_dfloat_offset)>>3)
+#define max_32_bit_constant_index ((0x7fff + misc_data_offset)>>2)
+#define max_16_bit_constant_index ((0x7fff + misc_data_offset)>>1)
+#define max_8_bit_constant_index (0x7fff + misc_data_offset)
+#define max_1_bit_constant_index ((0x7fff + misc_data_offset)<<5)
+
+/*  T is almost adjacent to NIL: since NIL is a misaligned CONS, it spans */
+/*  two doublewords.  The arithmetic difference between T and NIL is */
+/*  such that the least-significant bit and exactly one other bit is */
+/*  set in the result. */
+
+#define t_offset (8+(8-fulltag_nil)+fulltag_misc)
+#define t_value (lisp_nil+t_offset)
+
+/*  The order in which various header values are defined is significant in several ways: */
+/*  1) Numeric subtags precede non-numeric ones; there are further orderings among numeric subtags. */
+/*  2) All subtags which denote CL arrays are preceded by those that don't, */
+/*     with a further ordering which requires that (< header-arrayH header-vectorH ,@all-other-CL-vector-types) */
+/*  3) The element-size of ivectors is determined by the ordering of ivector subtags. */
+/*  4) All subtags are >= fulltag-immheader . */
+
+#define SUBTAG(tag,subtag) ((tag) | ((subtag) << ntagbits))
+#define IMM_SUBTAG(subtag) SUBTAG(fulltag_immheader,(subtag))
+#define NODE_SUBTAG(subtag) SUBTAG(fulltag_nodeheader,(subtag))
+
+	
+/* Numeric subtags. */
+
+#define subtag_bignum IMM_SUBTAG(0)
+#define min_numeric_subtag subtag_bignum
+
+#define subtag_ratio NODE_SUBTAG(1)
+#define max_rational_subtag subtag_ratio
+
+#define subtag_single_float IMM_SUBTAG(1)
+#define subtag_double_float IMM_SUBTAG(2)
+#define min_float_subtag subtag_single_float
+#define max_float_subtag subtag_double_float
+#define max_real_subtag subtag_double_float
+
+#define subtag_complex NODE_SUBTAG(3)
+#define max_numeric_subtag subtag_complex
+
+
+/*  CL array types.  There are more immediate types than node types; all CL array subtags must be > than */
+/*  all non-CL-array subtags.  So we start by defining the immediate subtags in decreasing order, starting */
+/*  with that subtag whose element size isn't an integral number of bits and ending with those whose */
+/*  element size - like all non-CL-array fulltag-immheader types - is 32 bits. */
+
+#define subtag_bit_vector IMM_SUBTAG(31)
+#define subtag_double_float_vector IMM_SUBTAG(30)
+#define subtag_s16_vector IMM_SUBTAG(29)
+#define subtag_u16_vector IMM_SUBTAG(28)
+#define min_16_bit_ivector_subtag subtag_u16_vector
+#define max_16_bit_ivector_subtag subtag_s16_vector
+
+#define subtag_s8_vector IMM_SUBTAG(26)
+#define subtag_u8_vector IMM_SUBTAG(25)
+#define min_8_bit_ivector_subtag subtag_u8_vector
+#define max_8_bit_ivector_subtag IMM_SUBTAG(27)
+
+#define subtag_simple_base_string IMM_SUBTAG(24)
+#define subtag_fixnum_vector IMM_SUBTAG(23)
+#define subtag_s32_vector IMM_SUBTAG(22)
+#define subtag_u32_vector IMM_SUBTAG(21)
+#define subtag_single_float_vector IMM_SUBTAG(20)
+#define max_32_bit_ivector_subtag IMM_SUBTAG(24)
+#define min_cl_ivector_subtag subtag_single_float_vector
+
+
+#define subtag_vectorH NODE_SUBTAG(20)
+#define subtag_arrayH NODE_SUBTAG(19)
+#define subtag_simple_vector NODE_SUBTAG(21)	/*  Only one such subtag) */
+#define min_vector_subtag subtag_vectorH
+#define min_array_subtag subtag_arrayH
+
+/*  So, we get the remaining subtags (n: (n > max-numeric-subtag) & (n < min-array-subtag)) */
+/*  for various immediate/node object types. */
+
+#define subtag_macptr IMM_SUBTAG(3)
+#define min_non_numeric_imm_subtag subtag_macptr
+
+#define subtag_dead_macptr IMM_SUBTAG(4)
+#define subtag_code_vector IMM_SUBTAG(5)
+#define subtag_creole IMM_SUBTAG(6)
+
+#define max_non_array_imm_subtag ((19<<ntagbits)|fulltag_immheader)
+
+#define subtag_catch_frame NODE_SUBTAG(4)
+#define subtag_function NODE_SUBTAG(5)
+#define subtag_basic_stream NODE_SUBTAG(6)
+#define subtag_symbol NODE_SUBTAG(7)
+#define subtag_lock NODE_SUBTAG(8)
+#define subtag_hash_vector NODE_SUBTAG(9)
+#define subtag_pool NODE_SUBTAG(10)
+#define subtag_weak NODE_SUBTAG(11)
+#define subtag_package NODE_SUBTAG(12)
+#define subtag_slot_vector NODE_SUBTAG(13)
+#define subtag_instance NODE_SUBTAG(14)
+#define subtag_struct NODE_SUBTAG(15)
+#define subtag_istruct NODE_SUBTAG(16)
+/* NOTE(review): this expansion is identical to
+   max_non_array_imm_subtag above; for a *node* subtag bound one
+   would expect fulltag_nodeheader in the low bits -- verify before
+   relying on this value. */
+#define max_non_array_node_subtag ((19<<ntagbits)|fulltag_immheader)
+	
+/*  The objects themselves look something like this: */
+
+typedef struct lispsymbol {
+  LispObj header;
+  LispObj pname;
+  LispObj vcell;
+  LispObj fcell;
+  LispObj package_predicate;
+  LispObj flags;
+  LispObj plist;
+  LispObj binding_index;
+} lispsymbol;
+
+typedef struct ratio {
+  LispObj header;
+  LispObj numer;
+  LispObj denom;
+} ratio;
+
+typedef struct double_float {
+  LispObj header;
+  LispObj pad;
+  LispObj value_high;
+  LispObj value_low;
+} double_float;
+
+typedef struct single_float {
+  LispObj header;
+  LispObj value;
+} single_float;
+
+typedef struct macptr {
+  LispObj header;
+  LispObj address;
+  LispObj class;
+  LispObj type;
+} macptr;
+
+typedef struct xmacptr {
+  LispObj header;
+  LispObj address;
+  LispObj class;
+  LispObj type;
+  LispObj flags;
+  LispObj link;
+} xmacptr;
+  
+
+typedef struct eabi_c_frame {
+  struct eabi_c_frame *backlink;
+  unsigned savelr;
+  unsigned params[8];
+} eabi_c_frame;
+
+/* PowerOpen ABI C frame */
+
+typedef struct c_frame {
+  struct c_frame *backlink;
+  unsigned crsave;
+  unsigned savelr;
+  unsigned unused[2];
+  unsigned savetoc;		/* Used with CFM */
+  unsigned params[8];		/* Space for callee to save r3-r10 */
+} c_frame;
+
+typedef struct lisp_frame {
+  struct lisp_frame *backlink;
+  LispObj savefn;
+  LispObj savelr;
+  LispObj savevsp;
+} lisp_frame;
+
+typedef struct special_binding {
+  struct special_binding *link;
+  struct lispsymbol *sym;
+  LispObj value;
+} special_binding;
+
+/* The GC (at least) needs to know what a
+   package looks like, so that it can do GCTWA. */
+typedef struct package {
+  LispObj header;
+  LispObj itab;			/* itab and etab look like (vector (fixnum . fixnum) */
+  LispObj etab;
+  LispObj used;
+  LispObj used_by;
+  LispObj names;
+  LispObj shadowed;
+} package;
+
+/*
+  The GC also needs to know what a catch_frame looks like.
+*/
+
+typedef struct catch_frame {
+  LispObj header;
+  LispObj catch_tag;
+  LispObj link;
+  LispObj mvflag;
+  LispObj csp;
+  LispObj db_link;
+  LispObj regs[8];
+  LispObj xframe;
+  LispObj tsp_segment;
+} catch_frame;
+
+#define catch_frame_element_count ((sizeof(catch_frame)/sizeof(LispObj))-1)
+#define catch_frame_header make_header(subtag_catch_frame,catch_frame_element_count)
+
+#define unbound SUBTAG(fulltag_imm, 6)
+#define undefined unbound
+#define unbound_marker unbound
+#define subtag_character SUBTAG(fulltag_imm, 9)
+#define slot_unbound SUBTAG(fulltag_imm, 10)
+#define slot_unbound_marker slot_unbound
+#define no_thread_local_binding_marker SUBTAG(fulltag_imm,30)
+
+/* 
+  All exception frames in a thread are linked together 
+  */
+typedef struct xframe_list {
+  ExceptionInformation *curr;
+  struct xframe_list *prev;
+} xframe_list;
+
+#define fixnum_bitmask(n)  (1<<((n)+fixnumshift))
+
+/* 
+  The GC (at least) needs to know about hash-table-vectors and their flag bits.
+*/
+
+typedef struct hash_table_vector_header {
+  LispObj header;
+  LispObj link;                 /* If weak */
+  LispObj flags;                /* a fixnum; see below */
+  LispObj gc_count;             /* gc-count kernel global */
+  LispObj free_alist;           /* preallocated conses for finalization_alist */
+  LispObj finalization_alist;   /* key/value alist for finalization */
+  LispObj weak_deletions_count; /* incremented when GC deletes weak pair */
+  LispObj hash;                 /* backpointer to hash-table */
+  LispObj deleted_count;        /* number of deleted entries [not maintained if lock-free] */
+  LispObj count;                /* number of valid entries [not maintained if lock-free] */
+  LispObj cache_idx;            /* index of last cached pair */
+  LispObj cache_key;            /* value of last cached key */
+  LispObj cache_value;          /* last cached value */
+  LispObj size;                 /* number of entries in table */
+  LispObj size_reciprocal;      /* shifted reciprocal of size */
+} hash_table_vector_header;
+
+/*
+  Bits (masks)  in hash_table_vector.flags:
+*/
+
+/* GC should track keys when addresses change */ 
+#define nhash_track_keys_mask fixnum_bitmask(28) 
+
+/* GC should set when nhash_track_keys_bit & addresses change */
+#define nhash_key_moved_mask  fixnum_bitmask(27) 
+
+/* weak on key or value (need new "weak both" encoding.) */
+#define nhash_weak_mask       fixnum_bitmask(12)
+
+/* weak on value */
+#define nhash_weak_value_mask fixnum_bitmask(11)
+
+/* finalizable */
+#define nhash_finalizable_mask fixnum_bitmask(10)
+
+/* keys frozen, i.e. don't clobber keys, only values */
+#define nhash_keys_frozen_mask fixnum_bitmask(9)
+
+/* Lfun bits */
+
+#define lfbits_nonnullenv_mask fixnum_bitmask(0)
+#define lfbits_keys_mask fixnum_bitmask(1)
+#define lfbits_restv_mask fixnum_bitmask(7)
+#define lfbits_optinit_mask fixnum_bitmask(14)
+#define lfbits_rest_mask fixnum_bitmask(15)
+#define lfbits_aok_mask fixnum_bitmask(16)
+#define lfbits_lap_mask fixnum_bitmask(23)
+#define lfbits_trampoline_mask fixnum_bitmask(24)
+#define lfbits_evaluated_mask fixnum_bitmask(25)
+#define lfbits_cm_mask fixnum_bitmask(26)         /* combined_method */
+#define lfbits_nextmeth_mask fixnum_bitmask(26)   /* or call_next_method with method_mask */
+#define lfbits_gfn_mask fixnum_bitmask(27)        /* generic_function */
+#define lfbits_nextmeth_with_args_mask fixnum_bitmask(27)   /* or call_next_method_with_args with method_mask */
+#define lfbits_method_mask fixnum_bitmask(28)     /* method function */
+/* PPC only but want it defined for xcompile */
+#define lfbits_noname_mask fixnum_bitmask(29)
+
+
+/* Creole */
+
+#define doh_quantum 400
+#define doh_block_slots ((doh_quantum >> 2) - 3)
+
+typedef struct doh_block {
+  struct doh_block *link;
+  unsigned size;
+  unsigned free;
+  LispObj data[doh_block_slots];
+} doh_block, *doh_block_ptr;
+
+
+#define population_weak_list (0<<fixnum_shift)
+#define population_weak_alist (1<<fixnum_shift)
+#define population_termination_bit (16+fixnum_shift)
+#define population_type_mask ((1<<population_termination_bit)-1)
+
+#define gc_retain_pages_bit fixnum_bitmask(0)
+#define gc_integrity_check_bit fixnum_bitmask(2)
+#define egc_verbose_bit fixnum_bitmask(3)
+#define gc_verbose_bit fixnum_bitmask(4)
+#define gc_allow_stack_overflows_bit fixnum_bitmask(5)
+#define gc_postgc_pending fixnum_bitmask(26)
+
+#include "lisp-errors.h"
+
+
+
+
+#define nil_value (0x00003015+(LOWMEM_BIAS))
+
+#define TCR_BIAS (0)
+
+typedef struct tcr {
+  struct tcr *next;
+  struct tcr *prev;
+  union {
+    double d;
+    struct {unsigned h, l;} words;
+  } lisp_fpscr;			/* lisp thread's fpscr (in low word) */
+  special_binding *db_link;	/* special binding chain head */
+  LispObj catch_top;		/* top catch frame */
+  LispObj *save_vsp;		/* VSP when in foreign code */
+  LispObj *save_tsp;		/* TSP when in foreign code */
+  struct area *cs_area;		/* cstack area pointer */
+  struct area *vs_area;		/* vstack area pointer */
+  struct area *ts_area;		/* tstack area pointer */
+  LispObj cs_limit;		/* stack overflow limit */
+  unsigned long long bytes_allocated;
+  natural log2_allocation_quantum;  /* for per-thread consing */
+  int interrupt_pending;	/* deferred-interrupt pending */
+  xframe_list *xframe;		/* exception-frame linked list */
+  int *errno_loc;		/* per-thread (?) errno location */
+  LispObj ffi_exception;	/* fpscr bits from ff-call */
+  LispObj osid;			/* OS thread id */
+  int valence;			/* odd when in foreign code */
+  int foreign_exception_status;	/* non-zero -> call lisp_exit_hook */
+  void *native_thread_info;	/* platform-dependent */
+  void *native_thread_id;	/* mach_thread_t, pid_t, etc. */
+  void *last_allocptr;
+  void *save_allocptr;
+  void *save_allocbase;
+  void *reset_completion;
+  void *activate;
+  int suspend_count;
+  ExceptionInformation *suspend_context;
+  ExceptionInformation *pending_exception_context;
+  void *suspend;		/* suspension semaphore */
+  void *resume;			/* resumption semaphore */
+  natural flags;
+  ExceptionInformation *gc_context;
+  void *termination_semaphore;
+  int unwinding;
+  unsigned tlb_limit;
+  LispObj *tlb_pointer;
+  unsigned shutdown_count;
+  void *safe_ref_address;
+} TCR;
+
+/* 
+  These were previously global variables.  There are lots of implicit
+  assumptions about the size of a heap segment, so they might as well
+  be constants.
+*/
+
+#define heap_segment_size 0x00010000
+#define log2_heap_segment_size 16
+
+
+#endif
+
Index: /branches/arm/lisp-kernel/ppc-constants32.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-constants32.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-constants32.s	(revision 13357)
@@ -0,0 +1,687 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+define(`rcontext',`r13')
+        
+nbits_in_word = 32
+nbits_in_byte = 8
+ntagbits = 3	/* But only 2 are significant to lisp */
+nlisptagbits = 2
+nfixnumtagbits = 2
+num_subtag_bits = 8
+fixnumshift = 2
+fixnum_shift = 2
+fulltagmask = 7
+tagmask = 3
+fixnummask = 3
+ncharcodebits = 24              /* arguably, we're only using the low 8 */
+charcode_shift = nbits_in_word-ncharcodebits
+word_shift = 2
+node_size = 4
+dnode_size = 8
+dnode_align_bits = 3
+dnode_shift = dnode_align_bits
+bitmap_shift = 5
+
+
+fixnumone = (1<<fixnumshift)
+fixnum_one = fixnumone
+fixnum1 = fixnumone
+
+
+/* Tags. */
+/* There are two-bit tags and three-bit tags. */
+/* A FULLTAG is the value of the low three bits of a tagged object. */
+/* A TAG is the value of the low two bits of a tagged object. */
+/* A TYPECODE is either a TAG or the value of a "tag-misc" object's header-byte. */
+
+/* There are 4 primary TAG values.  Any object which lisp can "see" can be classified */
+/* by its TAG.  (Some headers have FULLTAGS that are congruent modulo 4 with the */
+/* TAGS of other objects, but lisp can't "see" headers.) */
+
+
+tag_fixnum = 0	/* All fixnums, whether odd or even */
+tag_list = 1	/* Conses and NIL */
+tag_misc = 2	/* Heap-consed objects other than lists: vectors, symbols, functions, floats ... */
+tag_imm = 3	/* Immediate-objects: characters, UNBOUND, other markers. */
+
+
+/*  And there are 8 FULLTAG values.  Note that NIL has its own FULLTAG (congruent mod 4 to tag-list), */
+/*  that FULLTAG-MISC is > 4 (so that code-vector entry-points can be branched to, since the low */
+/*  two bits of the PC are ignored) and that both FULLTAG-MISC and FULLTAG-IMM have header fulltags */
+/*  that share the same TAG. */
+/*  Things that walk memory (and the stack) have to be careful to look at the FULLTAG of each */
+/*  object that they see. */
+
+
+fulltag_even_fixnum = 0	/* I suppose EVENP/ODDP might care; nothing else does. */
+fulltag_cons = 1	/* a real (non_null) cons.  Shares TAG with fulltag_nil. */
+fulltag_nodeheader = 2	/* Header of heap_allocated object that contains lisp_object pointers */
+fulltag_imm = 3	/* a "real" immediate object.  Shares TAG with fulltag_immheader. */
+fulltag_odd_fixnum = 4	/* */
+fulltag_nil = 5	/* NIL and nothing but.  (Note that there's still a hidden NILSYM.) */
+fulltag_misc = 6	/* Pointer "real" tag_misc object.  Shares TAG with fulltag_nodeheader. */
+fulltag_immheader = 7	/* Header of heap-allocated object that contains unboxed data. */
+
+nil_value = 0x00003015+LOWMEM_BIAS
+misc_bias = fulltag_misc
+cons_bias = tag_list        
+
+/* Functions are of (conceptually) unlimited size. */
+	_struct(_function,-misc_bias)
+	 _node(header)
+	 _node(codevector)
+	_ends
+
+	_struct(tsp_frame,0)
+	 _node(backlink)
+	 _node(type)
+	 _struct_label(fixed_overhead)
+	 _struct_label(data_offset)
+	_ends
+
+/* Order of CAR and CDR doesn't seem to matter much - there aren't */
+/* too many tricks to be played with predecrement/preincrement addressing. */
+/* Keep them in the confusing MCL 3.0 order, to avoid confusion. */
+	_struct(cons,-cons_bias)
+	 _node(cdr)
+	 _node(car)
+	_ends
+	
+misc_header_offset = -fulltag_misc
+misc_subtag_offset = misc_header_offset+3		/* low byte of header */
+misc_data_offset = misc_header_offset+4		/* first word of data */
+misc_dfloat_offset = misc_header_offset+8		/* double-floats are doubleword-aligned */
+
+max_64_bit_constant_index = ((0x7fff + misc_dfloat_offset)>>3)
+max_32_bit_constant_index = ((0x7fff + misc_data_offset)>>2)
+max_16_bit_constant_index = ((0x7fff + misc_data_offset)>>1)
+max_8_bit_constant_index = (0x7fff + misc_data_offset)
+max_1_bit_constant_index = ((0x7fff + misc_data_offset)<<5)
+
+/* T is almost adjacent to NIL: since NIL is a misaligned CONS, it spans */
+/* two doublewords.  The arithmetic difference between T and NIL is */
+/* such that the least-significant bit and exactly one other bit is */
+/* set in the result. */
+
+t_offset = (8+(8-fulltag_nil)+fulltag_misc)
+t_value = nil_value+t_offset
+
+/* The order in which various header values are defined is significant in several ways: */
+/* 1) Numeric subtags precede non-numeric ones; there are further orderings among numeric subtags. */
+/* 2) All subtags which denote CL arrays are preceded by those that don't, */
+/*    with a further ordering which requires that (< header-arrayH header-vectorH ,@all-other-CL-vector-types) */
+/* 3) The element-size of ivectors is determined by the ordering of ivector subtags. */
+/* 4) All subtags are >= fulltag-immheader . */
+
+define(`define_subtag',`
+subtag_$1 = $2|($3<<ntagbits)')
+	
+define(`define_imm_subtag',`
+	define_subtag($1,fulltag_immheader,$2)')
+
+	
+define(`define_node_subtag',`
+	define_subtag($1,fulltag_nodeheader,$2)')
+
+		
+/*Immediate subtags. */
+	define_subtag(character,fulltag_imm,9)
+	define_subtag(unbound,fulltag_imm,6)
+        define_subtag(illegal,fulltag_imm,10)
+	define_subtag(go_tag,fulltag_imm,12)
+	define_subtag(block_tag,fulltag_imm,24)
+	define_subtag(vsp_protect,fulltag_imm,7)
+        define_subtag(no_thread_local_binding,fulltag_imm,30)
+unbound_marker = subtag_unbound
+undefined = unbound_marker
+illegal_marker = subtag_illegal
+no_thread_local_binding_marker = subtag_no_thread_local_binding
+/*Numeric subtags. */
+
+	define_imm_subtag(bignum,0)
+min_numeric_subtag = subtag_bignum
+
+	define_node_subtag(ratio,1)
+max_rational_subtag = subtag_ratio
+
+	define_imm_subtag(single_float,1)
+	define_imm_subtag(double_float,2)
+min_float_subtag = subtag_single_float
+max_float_subtag = subtag_double_float
+max_real_subtag = subtag_double_float
+
+	define_node_subtag(complex,3)
+max_numeric_subtag = subtag_complex
+
+
+/* CL array types.  There are more immediate types than node types; all CL array subtags must be > than */
+/* all non-CL-array subtags.  So we start by defining the immediate subtags in decreasing order, starting */
+/* with that subtag whose element size isn't an integral number of bits and ending with those whose */
+/* element size - like all non-CL-array fulltag-immheader types - is 32 bits. */
+
+	define_imm_subtag(bit_vector,31)
+	define_imm_subtag(double_float_vector,30)
+	define_imm_subtag(s16_vector,29)
+	define_imm_subtag(u16_vector,28)
+min_16_bit_ivector_subtag = subtag_u16_vector
+max_16_bit_ivector_subtag = subtag_s16_vector
+	define_imm_subtag(s8_vector,26)
+	define_imm_subtag(u8_vector,25)
+min_8_bit_ivector_subtag = subtag_u8_vector
+max_8_bit_ivector_subtag = fulltag_immheader|(27<<ntagbits)
+        define_imm_subtag(simple_base_string,24)
+        define_imm_subtag(fixnum_vector,23)
+	define_imm_subtag(s32_vector,22)
+	define_imm_subtag(u32_vector,21)
+	define_imm_subtag(single_float_vector,20)
+max_32_bit_ivector_subtag = fulltag_immheader|(24<<ntagbits)
+min_cl_ivector_subtag = subtag_single_float_vector
+
+
+	define_node_subtag(vectorH,20)
+	define_node_subtag(arrayH,19)
+	define_node_subtag(simple_vector,21)
+min_vector_subtag = subtag_vectorH
+min_array_subtag = subtag_arrayH
+
+/* So, we get the remaining subtags (n: (n > max-numeric-subtag) & (n < min-array-subtag)) */
+/* for various immediate/node object types. */
+
+	define_imm_subtag(macptr,3)
+min_non_numeric_imm_subtag = subtag_macptr
+
+	define_imm_subtag(dead_macptr,4)
+	define_imm_subtag(code_vector,5)
+	define_imm_subtag(creole,6)
+
+max_non_array_imm_subtag = (18<<ntagbits)|fulltag_immheader
+
+	define_node_subtag(catch_frame,4)
+	define_node_subtag(function,5)
+	define_node_subtag(basic_stream,6)
+	define_node_subtag(symbol,7)
+	define_node_subtag(lock,8)
+	define_node_subtag(hash_vector,9)
+	define_node_subtag(pool,10)
+	define_node_subtag(weak,11)
+	define_node_subtag(package,12)
+	define_node_subtag(slot_vector,13)
+	define_node_subtag(instance,14)
+	define_node_subtag(struct,15)
+	define_node_subtag(istruct,16)
+	define_node_subtag(value_cell,17)
+        define_node_subtag(xfunction,18)
+max_non_array_node_subtag = (18<<ntagbits)|fulltag_immheader
+	
+/* The objects themselves look something like this: */
+	_structf(ratio)
+	 _node(numer)
+	 _node(denom)
+	_endstructf
+
+	_structf(single_float)
+	 _word(value)
+	_endstructf
+
+	_structf(double_float)
+	 _word(pad)
+	 _dword(value)
+	_endstructf
+
+	_structf(symbol)
+	 _node(pname)
+	 _node(vcell)
+	 _node(fcell)
+	 _node(package_predicate)
+	 _node(flags)
+         _node(plist)
+         _node(binding_index)
+	_endstructf
+
+	_structf(catch_frame)
+	 _node(catch_tag)	/* #<unbound> -> unwind-protect, else catch */
+	 _node(link)		/* backpointer to previous catch frame */
+	 _node(mvflag)		/* 0 if single-valued catch, fixnum 1 otherwise */
+	 _node(csp)		/* pointer to lisp_frame on csp */
+	 _node(db_link)		/* head of special-binding chain */
+	 _field(regs,8*node_size)	/* save7-save0 */
+	 _node(xframe)		/* exception frame chain */
+	 _node(tsp_segment)	/* maybe someday; padding for now */
+	_endstructf
+
+	_structf(macptr)
+	 _node(address)
+         _node(domain)
+         _node(type)
+	_endstructf
+
+	_structf(vectorH)
+	 _node(logsize)
+	 _node(physsize)
+	 _node(data_vector)
+	 _node(displacement)
+	 _node(flags)
+	_endstructf
+
+        _structf(arrayH)
+         _node(rank)
+         _node(physsize)
+         _node(data_vector)
+         _node(displacement)
+         _node(flags)
+         _struct_label(dim0)
+        _endstructf
+        
+	
+        
+	_struct(c_frame,0)	/* PowerOpen ABI C stack frame */
+	 _node(backlink)
+	 _node(crsave)
+	 _node(savelr)
+	 _field(unused, 8)
+	 _node(savetoc)
+	 _struct_label(params)
+         _node(param0)
+         _node(param1)
+         _node(param2)
+         _node(param3)
+         _node(param4)
+         _node(param5)
+         _node(param6)
+         _node(param7)
+	 _struct_label(minsiz)
+	_ends
+
+
+	_struct(eabi_c_frame,0)
+	 _word(backlink) 
+	 _word(savelr)
+	 _word(param0)
+	 _word(param1)
+	 _word(param2)
+	 _word(param3)
+	 _word(param4)
+	 _word(param5)
+	 _word(param6)
+	 _word(param7)
+	 _struct_label(minsiz)
+	_ends
+
+	/* For entry to variable-argument-list functions */
+/*	  (e.g., via callback) */
+	_struct(varargs_eabi_c_frame,0)
+	 _word(backlink)
+	 _word(savelr)
+	 _struct_label(va_list)
+	 _word(flags)		/* gpr count byte, fpr count byte, padding */
+	 _word(overflow_arg_area)
+	 _word(reg_save_area)
+	 _field(padding,4)
+	 _struct_label(regsave)
+	 _field(gp_save,8*node_size)
+	 _field(fp_save,8*8)
+	 _word(old_backlink)
+	 _word(old_savelr)
+	 _struct_label(incoming_stack_args)
+	_ends
+        	
+	_struct(lisp_frame,0)
+	 _node(backlink) 
+	 _node(savefn)	
+	 _node(savelr)	
+	 _node(savevsp)	
+	_ends
+
+	_struct(vector,-fulltag_misc)
+	 _node(header)
+	 _struct_label(data)
+	_ends
+
+        _struct(binding,0)
+         _node(link)
+         _node(sym)
+         _node(val)
+        _ends
+
+
+
+symbol_extra = symbol.size-fulltag_misc
+	
+	_struct(nrs,nil_value-fulltag_nil)
+	 _struct_pad(fulltag_nil)
+	 _field(nilptr,16-fulltag_nil)
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(tsym)
+	 _struct_pad(symbol_extra)	/* t */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(nilsym)
+	 _struct_pad(symbol_extra)	/* nil */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(errdisp)
+	 _struct_pad(symbol_extra)	/* %err-disp */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(cmain)
+	 _struct_pad(symbol_extra)	/* cmain */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(eval)
+	 _struct_pad(symbol_extra)	/* eval */
+ 
+	 _struct_pad(fulltag_misc)
+	 _struct_label(appevalfn)
+	 _struct_pad(symbol_extra)	/* apply-evaluated-function */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(error)
+	 _struct_pad(symbol_extra)	/* error */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(defun)
+	 _struct_pad(symbol_extra)	/* %defun */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(defvar)
+	 _struct_pad(symbol_extra)	/* %defvar */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(defconstant)
+	 _struct_pad(symbol_extra)	/* %defconstant */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(macrosym)
+	 _struct_pad(symbol_extra)	/* %macro */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(kernelrestart)
+	 _struct_pad(symbol_extra)	/* %kernel-restart */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(package)
+	 _struct_pad(symbol_extra)	/* *package* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(total_bytes_freed)		/* *total-bytes-freed* */
+	 _struct_pad(symbol_extra)
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(kallowotherkeys)
+	 _struct_pad(symbol_extra)	/* allow-other-keys */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(toplcatch)
+	 _struct_pad(symbol_extra)	/* %toplevel-catch% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(toplfunc)
+	 _struct_pad(symbol_extra)	/* %toplevel-function% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(callbacks)
+	 _struct_pad(symbol_extra)	/* %pascal-functions% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(allmeteredfuns)
+	 _struct_pad(symbol_extra)	/* *all-metered-functions* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(total_gc_microseconds)		/* *total-gc-microseconds* */
+	 _struct_pad(symbol_extra)
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(builtin_functions)		/* %builtin-functions% */
+	 _struct_pad(symbol_extra)                
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(udf)
+	 _struct_pad(symbol_extra)	/* %unbound-function% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(init_misc)
+	 _struct_pad(symbol_extra)	/* %init-misc */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(macro_code)
+	 _struct_pad(symbol_extra)	/* %macro-code% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(closure_code)
+	 _struct_pad(symbol_extra)      /* %closure-code% */
+
+       	 _struct_pad(fulltag_misc)
+	 _struct_label(new_gcable_ptr) /* %new-gcable-ptr */
+	 _struct_pad(symbol_extra)
+	
+       	 _struct_pad(fulltag_misc)
+	 _struct_label(gc_event_status_bits)
+	 _struct_pad(symbol_extra)	/* *gc-event-status-bits* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(post_gc_hook)
+	 _struct_pad(symbol_extra)	/* *post-gc-hook* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(handlers)
+	 _struct_pad(symbol_extra)	/* %handlers% */
+
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(all_packages)
+	 _struct_pad(symbol_extra)	/* %all-packages% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(keyword_package)
+	 _struct_pad(symbol_extra)	/* *keyword-package* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(finalization_alist)
+	 _struct_pad(symbol_extra)	/* %finalization-alist% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(foreign_thread_control)
+	 _struct_pad(symbol_extra)	/* %foreign-thread-control */
+
+	_ends
+
+define(`def_header',`
+$1 = ($2<<num_subtag_bits)|$3')
+
+	def_header(single_float_header,single_float.element_count,subtag_single_float)
+	def_header(double_float_header,double_float.element_count,subtag_double_float)
+	def_header(one_digit_bignum_header,1,subtag_bignum)
+	def_header(two_digit_bignum_header,2,subtag_bignum)
+	def_header(three_digit_bignum_header,3,subtag_bignum)
+	def_header(symbol_header,symbol.element_count,subtag_symbol)
+	def_header(value_cell_header,1,subtag_value_cell	)
+	def_header(macptr_header,macptr.element_count,subtag_macptr)
+	def_header(vectorH_header,vectorH.element_count,subtag_vectorH)
+
+	include(errors.s)
+
+/* Symbol bits that we care about */
+sym_vbit_bound = (0+fixnum_shift)
+sym_vbit_bound_mask = (1<<sym_vbit_bound)
+sym_vbit_const = (1+fixnum_shift)
+sym_vbit_const_mask = (1<<sym_vbit_const)
+
+	_struct(area,0)
+	 _node(pred) 
+	 _node(succ) 
+	 _node(low) 
+	 _node(high) 
+	 _node(active) 
+	 _node(softlimit) 
+	 _node(hardlimit) 
+	 _node(code) 
+	 _node(markbits) 
+	 _node(ndwords) 
+	 _node(older) 
+	 _node(younger) 
+	 _node(h) 
+	 _node(sofprot) 
+	 _node(hardprot) 
+	 _node(owner) 
+	 _node(refbits) 
+	 _node(nextref) 
+	_ends
+
+
+/* This is only referenced by c->lisp code that needs to save/restore C NVRs in a TSP frame. */
+	_struct(c_reg_save,0)
+	 _node(tsp_link)	/* backpointer */
+	 _node(tsp_mark)	/* frame type */
+	 _node(save_fpscr)	/* for C's FPSCR */
+	 _field(save_gprs,19*4) /* r13-r31 */
+	 _dword(save_fp_zero)	/* for fp_zero */
+	 _dword(save_fps32conv)
+         _field(save_fprs,13*8)
+	_ends
+
+
+TCR_BIAS = 0
+/* TCR_BIAS = 0x7000 */
+        
+/*  Thread context record. */
+
+	_struct(tcr,-TCR_BIAS)
+	 _node(prev)		/* in doubly-linked list */
+	 _node(next)		/* in doubly-linked list */
+	 _node(lisp_fpscr)	/* lisp thread's fpscr (in low word) */
+	 _node(lisp_fpscr_low)
+	 _node(db_link)		/* special binding chain head */
+	 _node(catch_top)	/* top catch frame */
+	 _node(save_vsp)	/* VSP when in foreign code */
+	 _node(save_tsp)	/* TSP when in foreign code */
+	 _node(cs_area)		/* cstack area pointer */
+	 _node(vs_area)		/* vstack area pointer */
+	 _node(ts_area)		/* tstack area pointer */
+	 _node(cs_limit)	/* cstack overflow limit */
+	 _node(bytes_consed_high)
+	 _node(bytes_consed_low)
+	 _node(log2_allocation_quantum)
+	 _node(interrupt_pending)
+	 _node(xframe)		/* per-thread exception frame list */
+	 _node(errno_loc)	/* per-thread  errno location */
+	 _node(ffi_exception)	/* fpscr exception bits from ff-call */
+	 _node(osid)		/* OS thread id */
+         _node(valence)		/* odd when in foreign code */
+	 _node(foreign_exception_status)
+	 _node(native_thread_info)
+	 _node(native_thread_id)
+	 _node(last_allocptr)
+	 _node(save_allocptr)
+	 _node(save_allocbase)
+	 _node(reset_completion)
+	 _node(activate)
+         _node(suspend_count)
+         _node(suspend_context)
+	 _node(pending_exception_context)
+	 _node(suspend)		/* semaphore for suspension notify */
+	 _node(resume)		/* semaphore for resumption notify */
+	 _node(flags)      
+	 _node(gc_context)
+         _node(termination_semaphore)
+         _node(unwinding)
+         _node(tlb_limit)
+         _node(tlb_pointer)     /* Consider using tcr+N as tlb_pointer */
+	 _node(shutdown_count)
+         _node(safe_ref_address)
+	_ends
+
+TCR_FLAG_BIT_FOREIGN = fixnum_shift
+TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)
+TCR_FLAG_BIT_ALT_SUSPEND = (fixnumshift+2)
+TCR_FLAG_BIT_PROPAGATE_EXCEPTION = (fixnumshift+3)
+TCR_FLAG_BIT_SUSPEND_ACK_PENDING = (fixnumshift+4)
+TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
+TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
+TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)        
+	
+r0 = 0
+r1 = 1
+r2 = 2
+r3 = 3
+r4 = 4
+r5 = 5
+r6 = 6
+r7 = 7
+r8 = 8
+r9 = 9
+r10 = 10
+r11 = 11
+r12 = 12
+r13 = 13
+r14 = 14
+r15 = 15
+r16 = 16
+r17 = 17
+r18 = 18
+r19 = 19
+r20 = 20
+r21 = 21
+r22 = 22
+r23 = 23
+r24 = 24
+r25 = 25
+r26 = 26
+r27 = 27
+r28 = 28
+r29 = 29
+r30 = 30
+r31 = 31
+
+/* Lisp code keeps 0.0 in fp_zero */
+define(`fp_zero',`f31')   /* a non-volatile reg as far as FFI is concerned. */
+define(`fp_s32conv',`f30')   /* for s32->fp conversion */
+	
+/* registers, as used in destructuring-bind/macro-bind */
+
+define(`whole_reg',`temp1')
+define(`arg_reg',`temp3')
+define(`keyvect_reg',`temp2')
+define(`mask_req_start',`24')
+define(`mask_req_width',`8')
+define(`mask_opt_start',`16')
+define(`mask_opt_width',`8')
+define(`mask_key_start',`8')
+define(`mask_key_width',`8')
+define(`mask_initopt',`7')
+define(`mask_keyp',`6') /*  note that keyp can be true even when 0 keys. */
+define(`mask_aok',`5')
+define(`mask_restp',`4')
+
+ifdef(`DARWIN',`
+	define(`STACK_ALIGN',16)
+	define(`STACK_ALIGN_MASK',15)
+',`
+	define(`STACK_ALIGN',8)
+	define(`STACK_ALIGN_MASK',7)
+')
+
+define(`TCR_STATE_FOREIGN',1)
+define(`TCR_STATE_LISP',0)
+define(`TCR_STATE_EXCEPTION_WAIT',2)
+define(`TCR_STATE_EXCEPTION_RETURN',4)
+
+define(`RESERVATION_DISCHARGE',0x2004)
+
+lisp_globals_limit = (0x3010+(LOWMEM_BIAS))
+        
+INTERRUPT_LEVEL_BINDING_INDEX = fixnumone
Index: /branches/arm/lisp-kernel/ppc-constants64.h
===================================================================
--- /branches/arm/lisp-kernel/ppc-constants64.h	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-constants64.h	(revision 13357)
@@ -0,0 +1,456 @@
+/*
+   Copyright (C) 2003-2009, Clozure Associates.
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __constants64__
+#define __constants64__ 1
+
+#define rcontext 2
+
+#define nbits_in_word 64L
+#define log2_nbits_in_word 6L
+#define nbits_in_byte 8L
+#define ntagbits 4L
+#define nlisptagbits 3L
+#define nfixnumtagbits 2L
+#define num_subtag_bits 8L
+#define fixnumshift 3L
+#define fixnum_shift 3L
+#define fulltagmask 15L
+#define tagmask	 7L
+#define fixnummask 3
+#define subtagmask ((1L<<num_subtag_bits)-1L)
+#define ncharcodebits 8L
+#define charcode_shift 8L
+#define node_size 8L
+#define node_shift 3L
+
+#define lowtagmask 3L
+#define lowtag_mask lowtagmask
+
+#define lowtag_primary 0L
+#define lowtag_imm 1L
+#define lowtag_immheader 2L
+#define lowtag_nodeheader 3L
+
+#define tag_fixnum 0L
+
+#define fulltag_even_fixnum 0L
+#define fulltag_imm_0 1L
+#define fulltag_immheader_0 2L
+#define fulltag_nodeheader_0 3L
+#define fulltag_cons 4L
+#define fulltag_imm_1 5L
+#define fulltag_immheader_1 6L
+#define fulltag_nodeheader_1 7L
+#define fulltag_odd_fixnum 8L
+#define fulltag_imm_2 9L
+#define fulltag_immheader_2 10L
+#define fulltag_nodeheader_2 11L
+#define fulltag_misc 12L
+#define fulltag_imm_3 13L
+#define fulltag_immheader_3 14L
+#define fulltag_nodeheader_3 15L
+
+#define SUBTAG(tag,subtag) ((tag) | ((subtag) << ntagbits))
+#define cl_array_subtag_mask 0x80L
+#define CL_ARRAY_SUBTAG(tag,subtag) (cl_array_subtag_mask | (SUBTAG(tag,subtag)))
+
+#define subtag_arrayH CL_ARRAY_SUBTAG(fulltag_nodeheader_1,0L)
+#define subtag_vectorH CL_ARRAY_SUBTAG(fulltag_nodeheader_2,0L)
+#define subtag_simple_vector CL_ARRAY_SUBTAG(fulltag_nodeheader_3,0L)
+#define min_vector_subtag subtag_vectorH	
+
+#define ivector_class_64_bit fulltag_immheader_3
+#define ivector_class_32_bit fulltag_immheader_2
+#define ivector_class_other_bit fulltag_immheader_1
+#define ivector_class_8_bit fulltag_immheader_0
+
+#define subtag_s64_vector CL_ARRAY_SUBTAG(ivector_class_64_bit,1)
+#define subtag_u64_vector CL_ARRAY_SUBTAG(ivector_class_64_bit,2)
+#define subtag_fixnum_vector CL_ARRAY_SUBTAG(ivector_class_64_bit,3)
+#define subtag_double_float_vector CL_ARRAY_SUBTAG(ivector_class_64_bit,4)
+#define subtag_s32_vector CL_ARRAY_SUBTAG(ivector_class_32_bit,1)
+#define subtag_u32_vector CL_ARRAY_SUBTAG(ivector_class_32_bit,2)
+#define subtag_single_float_vector CL_ARRAY_SUBTAG(ivector_class_32_bit,3)
+#define subtag_simple_base_string CL_ARRAY_SUBTAG(ivector_class_32_bit,5)
+#define subtag_s16_vector CL_ARRAY_SUBTAG(ivector_class_other_bit,1)
+#define subtag_u16_vector CL_ARRAY_SUBTAG(ivector_class_other_bit,2)
+#define subtag_bit_vector CL_ARRAY_SUBTAG(ivector_class_other_bit,7)
+#define subtag_s8_vector CL_ARRAY_SUBTAG(ivector_class_8_bit,1)
+#define subtag_u8_vector CL_ARRAY_SUBTAG(ivector_class_8_bit,2)
+
+/* There's some room for expansion in non-array ivector space. */
+#define subtag_macptr SUBTAG(ivector_class_64_bit,1)
+#define subtag_dead_macptr SUBTAG(ivector_class_64_bit,2)
+#define subtag_code_vector SUBTAG(ivector_class_32_bit,0)
+#define subtag_xcode_vector SUBTAG(ivector_class_32_bit,1)
+#define subtag_bignum SUBTAG(ivector_class_32_bit,2)
+#define subtag_double_float SUBTAG(ivector_class_32_bit,3)
+
+
+/*
+ Size doesn't matter for non-CL-array gvectors; I can't think of a good
+ reason to classify them in any particular way.  Let's put funcallable
+ things in the first slice by themselves, though it's not clear that
+ that helps FUNCALL much.
+*/
+#define gvector_funcallable fulltag_nodeheader_0
+	
+#define subtag_function SUBTAG(gvector_funcallable,0)
+#define subtag_symbol SUBTAG(gvector_funcallable,1)
+#define subtag_catch_frame SUBTAG(fulltag_nodeheader_1,0)
+#define subtag_basic_stream SUBTAG(fulltag_nodeheader_1,1)
+#define subtag_lock SUBTAG(fulltag_nodeheader_1,2)
+#define subtag_hash_vector SUBTAG(fulltag_nodeheader_1,3)
+#define subtag_pool SUBTAG(fulltag_nodeheader_1,4)
+#define subtag_weak SUBTAG(fulltag_nodeheader_1,5)
+#define subtag_package SUBTAG(fulltag_nodeheader_1,6)
+
+#define subtag_slot_vector SUBTAG(fulltag_nodeheader_2,0)
+#define subtag_instance SUBTAG(fulltag_nodeheader_2,1)
+#define subtag_struct SUBTAG(fulltag_nodeheader_2,2)
+#define subtag_istruct SUBTAG(fulltag_nodeheader_2,3)
+#define subtag_value_cell SUBTAG(fulltag_nodeheader_2,4)
+#define subtag_xfunction SUBTAG(fulltag_nodeheader_2,5)
+
+#define subtag_ratio SUBTAG(fulltag_nodeheader_3,0)
+#define subtag_complex SUBTAG(fulltag_nodeheader_3,1)
+
+
+
+#define nil_value (0x3000+fulltag_misc+sizeof(struct lispsymbol)+(LOWMEM_BIAS))
+#define t_value (0x3000+fulltag_misc+(LOWMEM_BIAS))	
+#define misc_bias fulltag_misc
+#define cons_bias fulltag_cons
+
+	
+#define misc_header_offset -fulltag_misc
+#define misc_subtag_offset misc_header_offset+7       /* low byte of header */
+#define misc_data_offset misc_header_offset+8		/* first word of data */
+#define misc_dfloat_offset misc_header_offset		/* double-floats are doubleword-aligned */
+
+#define subtag_single_float SUBTAG(fulltag_imm_0,0)
+
+#define subtag_go_tag SUBTAG(fulltag_imm_1,2) /* deprecated */
+#define subtag_block_tag SUBTAG(fulltag_imm_1,3) /* deprecated */
+
+#define subtag_character SUBTAG(fulltag_imm_1,0)
+
+#define subtag_unbound SUBTAG(fulltag_imm_3,0)
+#define unbound_marker subtag_unbound
+#define undefined unbound_marker
+#define unbound unbound_marker
+#define subtag_slot_unbound SUBTAG(fulltag_imm_3,1)
+#define slot_unbound_marker subtag_slot_unbound
+#define slot_unbound slot_unbound_marker
+#define subtag_illegal SUBTAG(fulltag_imm_3,2)
+#define illegal_marker subtag_illegal
+#define subtag_no_thread_local_binding SUBTAG(fulltag_imm_3,3)
+#define no_thread_local_binding_marker subtag_no_thread_local_binding        
+#define subtag_forward_marker SUBTAG(fulltag_imm_3,7)
+	
+#define max_64_bit_constant_index ((0x7fff + misc_dfloat_offset)>>3)
+#define max_32_bit_constant_index ((0x7fff + misc_data_offset)>>2)
+#define max_16_bit_constant_index ((0x7fff + misc_data_offset)>>1)
+#define max_8_bit_constant_index (0x7fff + misc_data_offset)
+#define max_1_bit_constant_index ((0x7fff + misc_data_offset)<<5)
+
+
+/* The objects themselves look something like this: */
+
+/*  Order of CAR and CDR doesn't seem to matter much - there aren't */
+/*  too many tricks to be played with predecrement/preincrement addressing. */
+/*  Keep them in the confusing MCL 3.0 order, to avoid confusion. */
+
+typedef struct cons {
+  LispObj cdr;
+  LispObj car;
+} cons;
+
+
+
+typedef struct lispsymbol {
+  LispObj header;
+  LispObj pname;
+  LispObj vcell;
+  LispObj fcell;
+  LispObj package_predicate;
+  LispObj flags;
+  LispObj plist;
+  LispObj binding_index;
+} lispsymbol;
+
+typedef struct ratio {
+  LispObj header;
+  LispObj numer;
+  LispObj denom;
+} ratio;
+
+typedef struct double_float {
+  LispObj header;
+  LispObj value;
+} double_float;
+
+
+typedef struct macptr {
+  LispObj header;
+  LispObj address;
+  LispObj class;
+  LispObj type;
+} macptr;
+
+typedef struct xmacptr {
+  LispObj header;
+  LispObj address;
+  LispObj class;
+  LispObj type;
+  LispObj flags;
+  LispObj link;
+} xmacptr;
+  
+
+typedef struct eabi_c_frame {
+  struct eabi_c_frame *backlink;
+  unsigned savelr;
+  LispObj params[8];
+} eabi_c_frame;
+
+/* PowerOpen ABI C frame */
+
+typedef struct c_frame {
+  struct c_frame *backlink;
+  natural crsave;
+  natural savelr;
+  natural unused[2];
+  natural savetoc;		/* Used with CFM (and on Linux.) */
+  natural params[8];		/* Space for callee to save r3-r10 */
+} c_frame;
+
+typedef struct lisp_frame {
+  struct lisp_frame *backlink;
+  LispObj savefn;
+  LispObj savelr;
+  LispObj savevsp;
+} lisp_frame;
+
+typedef struct special_binding {
+  struct special_binding *link;
+  struct lispsymbol *sym;
+  LispObj value;
+} special_binding;
+
+/* The GC (at least) needs to know what a
+   package looks like, so that it can do GCTWA. */
+typedef struct package {
+  LispObj header;
+  LispObj itab;			/* itab and etab look like (vector (fixnum . fixnum) */
+  LispObj etab;
+  LispObj used;
+  LispObj used_by;
+  LispObj names;
+  LispObj shadowed;
+} package;
+
+/*
+  The GC also needs to know what a catch_frame looks like.
+*/
+
+typedef struct catch_frame {
+  LispObj header;
+  LispObj catch_tag;
+  LispObj link;
+  LispObj mvflag;
+  LispObj csp;
+  LispObj db_link;
+  LispObj regs[8];
+  LispObj xframe;
+  LispObj tsp_segment;
+} catch_frame;
+
+#define catch_frame_element_count ((sizeof(catch_frame)/sizeof(LispObj))-1)
+#define catch_frame_header make_header(subtag_catch_frame,catch_frame_element_count)
+
+
+/* 
+  All exception frames in a thread are linked together 
+  */
+typedef struct xframe_list {
+  ExceptionInformation *curr;
+  struct xframe_list *prev;
+} xframe_list;
+
+#define fixnum_bitmask(n)  (1LL<<((n)+fixnumshift))
+
+/* 
+  The GC (at least) needs to know about hash-table-vectors and their flag bits.
+*/
+
+typedef struct hash_table_vector_header {
+  LispObj header;
+  LispObj link;                 /* If weak */
+  LispObj flags;                /* a fixnum; see below */
+  LispObj gc_count;             /* gc-count kernel global */
+  LispObj free_alist;           /* preallocated conses for finalization_alist */
+  LispObj finalization_alist;   /* key/value alist for finalization */
+  LispObj weak_deletions_count; /* incremented when GC deletes weak pair */
+  LispObj hash;                 /* backpointer to hash-table */
+  LispObj deleted_count;        /* number of deleted entries [not maintained if lock-free] */
+  LispObj count;                /* number of valid entries [not maintained if lock-free] */
+  LispObj cache_idx;            /* index of last cached pair */
+  LispObj cache_key;            /* value of last cached key */
+  LispObj cache_value;          /* last cached value */
+  LispObj size;                 /* number of entries in table */
+  LispObj size_reciprocal;      /* shifted reciprocal of size */
+} hash_table_vector_header;
+
+/*
+  Bits (masks)  in hash_table_vector.flags:
+*/
+
+/* GC should track keys when addresses change */ 
+#define nhash_track_keys_mask fixnum_bitmask(28) 
+
+/* GC should set when nhash_track_keys_bit & addresses change */
+#define nhash_key_moved_mask  fixnum_bitmask(27) 
+
+/* weak on key or value (need new "weak both" encoding.) */
+#define nhash_weak_mask       fixnum_bitmask(12)
+
+/* weak on value */
+#define nhash_weak_value_mask fixnum_bitmask(11)
+
+/* finalizable */
+#define nhash_finalizable_mask fixnum_bitmask(10)
+
+/* keys frozen, i.e. don't clobber keys, only values */
+#define nhash_keys_frozen_mask fixnum_bitmask(9)
+
+/* Lfun bits */
+
+#define lfbits_nonnullenv_mask fixnum_bitmask(0)
+#define lfbits_keys_mask fixnum_bitmask(1)
+#define lfbits_restv_mask fixnum_bitmask(7)
+#define lfbits_optinit_mask fixnum_bitmask(14)
+#define lfbits_rest_mask fixnum_bitmask(15)
+
+#define lfbits_aok_mask fixnum_bitmask(16)
+#define lfbits_lap_mask fixnum_bitmask(23)
+#define lfbits_trampoline_mask fixnum_bitmask(24)
+#define lfbits_evaluated_mask fixnum_bitmask(25)
+#define lfbits_cm_mask fixnum_bitmask(26)         /* combined_method */
+#define lfbits_nextmeth_mask fixnum_bitmask(26)   /* or call_next_method with method_mask */
+#define lfbits_gfn_mask fixnum_bitmask(27)        /* generic_function */
+#define lfbits_nextmeth_with_args_mask fixnum_bitmask(27)   /* or call_next_method_with_args with method_mask */
+#define lfbits_method_mask fixnum_bitmask(28)     /* method function */
+/* PPC only but want it defined for xcompile */
+#define lfbits_noname_mask fixnum_bitmask(29)
+
+
+/* Creole */
+
+#define doh_quantum 400
+#define doh_block_slots ((doh_quantum >> 2) - 3)
+
+typedef struct doh_block {
+  struct doh_block *link;
+  unsigned size;
+  unsigned free;
+  LispObj data[doh_block_slots];
+} doh_block, *doh_block_ptr;
+
+
+#define population_weak_list (0<<fixnum_shift)
+#define population_weak_alist (1<<fixnum_shift)
+#define population_termination_bit (16+fixnum_shift)
+#define population_type_mask ((1<<population_termination_bit)-1)
+
+#define gc_retain_pages_bit fixnum_bitmask(0)
+#define gc_integrity_check_bit fixnum_bitmask(2)
+#define egc_verbose_bit fixnum_bitmask(3)
+#define gc_verbose_bit fixnum_bitmask(4)
+#define gc_allow_stack_overflows_bit fixnum_bitmask(5)
+#define gc_postgc_pending fixnum_bitmask(26)
+
+#include "lisp-errors.h"
+
+
+
+#define TCR_BIAS (0x0)
+
+typedef struct tcr {
+  struct tcr* next;
+  struct tcr* prev;
+  struct {
+    float f;
+    u_int32_t tag;
+  } single_float_convert;
+  union {
+    double d;
+    struct {u_int32_t h, l;} words;
+  } lisp_fpscr;			/* lisp thread's fpscr (in low word) */
+  special_binding* db_link;	/* special binding chain head */
+  LispObj catch_top;		/* top catch frame */
+  LispObj* save_vsp;  /* VSP when in foreign code */
+  LispObj* save_tsp;  /* TSP when in foreign code */
+  struct area* cs_area; /* cstack area pointer */
+  struct area* vs_area; /* vstack area pointer */
+  struct area* ts_area; /* tstack area pointer */
+  LispObj cs_limit;		/* stack overflow limit */
+  natural bytes_allocated;
+  natural log2_allocation_quantum;      /* for per-thread consing */
+  signed_natural interrupt_pending;	/* pending interrupt flag */
+  xframe_list* xframe; /* exception-frame linked list */
+  int* errno_loc;		/* per-thread (?) errno location */
+  LispObj ffi_exception;	/* fpscr bits from ff-call */
+  LispObj osid;			/* OS thread id */
+  signed_natural valence;			/* odd when in foreign code */
+  signed_natural foreign_exception_status;	/* non-zero -> call lisp_exit_hook */
+  void* native_thread_info;	/* platform-dependent */
+  void* native_thread_id;	/* mach_thread_t, pid_t, etc. */
+  void* last_allocptr;
+  void* save_allocptr;
+  void* save_allocbase;
+  void* reset_completion;
+  void* activate;
+  signed_natural suspend_count;
+  ExceptionInformation* suspend_context;
+  ExceptionInformation* pending_exception_context;
+  void* suspend;		/* suspension semaphore */
+  void* resume;			/* resumption semaphore */
+  natural flags;
+  ExceptionInformation* gc_context;
+  void* termination_semaphore;
+  signed_natural unwinding;
+  natural tlb_limit;
+  LispObj* tlb_pointer;
+  natural shutdown_count;
+  void *safe_ref_address;
+} TCR;
+
+#define t_offset -(sizeof(lispsymbol))
+
+/* 
+  These were previously global variables.  There are lots of implicit
+  assumptions about the size of a heap segment, so they might as well
+  be constants.
+*/
+
+#define heap_segment_size 0x00020000L
+#define log2_heap_segment_size 17L
+
+#endif
+
Index: /branches/arm/lisp-kernel/ppc-constants64.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-constants64.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-constants64.s	(revision 13357)
@@ -0,0 +1,596 @@
+/*   Copyright (C) 2003-2009, Clozure Associates. */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+define(`rcontext',`r2')
+        
+nbits_in_word = 64
+nbits_in_byte = 8
+ntagbits = 4
+nlisptagbits = 3
+nfixnumtagbits = 3
+nlowtagbits = 2        
+num_subtag_bits = 8
+fixnumshift = 3
+fixnum_shift = 3
+fulltagmask = 15
+tagmask = 7
+fixnummask = 7
+ncharcodebits = 8
+charcode_shift = 8
+word_shift = 3
+node_size = 8
+dnode_size = 16
+dnode_align_bits = 4
+dnode_shift = dnode_align_bits        
+bitmap_shift = 6
+        
+fixnumone = (1<<fixnumshift)
+fixnum_one = fixnumone
+fixnum1 = fixnumone
+
+
+lowtagmask = ((1<<nlowtagbits)-1)
+lowtag_mask = lowtagmask
+
+lowtag_primary = 0
+lowtag_imm = 1
+lowtag_immheader = 2
+lowtag_nodeheader = 3
+
+tag_fixnum = 0
+
+fulltag_even_fixnum = 0
+fulltag_imm_0 = 1
+fulltag_immheader_0 = 2
+fulltag_nodeheader_0 = 3
+fulltag_cons = 4
+fulltag_imm_1 = 5
+fulltag_immheader_1 = 6
+fulltag_nodeheader_1 = 7
+fulltag_odd_fixnum = 8
+fulltag_imm_2 = 9
+fulltag_immheader_2 = 10
+fulltag_nodeheader_2 = 11
+fulltag_misc = 12
+fulltag_imm_3 = 13
+fulltag_immheader_3 = 14
+fulltag_nodeheader_3 = 15
+
+define(`define_subtag',`
+subtag_$1 = ($2 | ($3 << ntagbits))
+')
+			
+cl_array_subtag_mask = 0x80
+define(`define_cl_array_subtag',`
+define_subtag($1,(cl_array_subtag_mask|$2),$3)
+')
+
+define_cl_array_subtag(arrayH,fulltag_nodeheader_1,0)
+define_cl_array_subtag(vectorH,fulltag_nodeheader_2,0)
+define_cl_array_subtag(simple_vector,fulltag_nodeheader_3,0)
+min_vector_subtag = subtag_vectorH
+min_array_subtag = subtag_arrayH
+        
+	
+ivector_class_64_bit = fulltag_immheader_3
+ivector_class_32_bit = fulltag_immheader_2
+ivector_class_other_bit = fulltag_immheader_1
+ivector_class_8_bit = fulltag_immheader_0
+
+define_cl_array_subtag(s64_vector,ivector_class_64_bit,1)
+define_cl_array_subtag(u64_vector,ivector_class_64_bit,2)
+define_cl_array_subtag(fixnum_vector,ivector_class_64_bit,3)        
+define_cl_array_subtag(double_float_vector,ivector_class_64_bit,4)
+define_cl_array_subtag(s32_vector,ivector_class_32_bit,1)
+define_cl_array_subtag(u32_vector,ivector_class_32_bit,2)
+define_cl_array_subtag(single_float_vector,ivector_class_32_bit,3)
+define_cl_array_subtag(simple_base_string,ivector_class_32_bit,5)
+define_cl_array_subtag(s16_vector,ivector_class_other_bit,1)
+define_cl_array_subtag(u16_vector,ivector_class_other_bit,2)
+define_cl_array_subtag(bit_vector,ivector_class_other_bit,7)
+define_cl_array_subtag(s8_vector,ivector_class_8_bit,1)
+define_cl_array_subtag(u8_vector,ivector_class_8_bit,2)
+/* There's some room for expansion in non-array ivector space. */
+define_subtag(macptr,ivector_class_64_bit,1)
+define_subtag(dead_macptr,ivector_class_64_bit,2)
+define_subtag(code_vector,ivector_class_32_bit,0)
+define_subtag(xcode_vector,ivector_class_32_bit,1)
+define_subtag(bignum,ivector_class_32_bit,2)
+define_subtag(double_float,ivector_class_32_bit,3)
+
+
+
+        
+/* Size doesn't matter for non-CL-array gvectors; I can't think of a good */
+/* reason to classify them in any particular way.  Let's put funcallable */
+/* things in the first slice by themselves, though it's not clear that */
+/* that helps FUNCALL much. */
+        
+gvector_funcallable = fulltag_nodeheader_0
+	
+define_subtag(function,gvector_funcallable,0)
+define_subtag(symbol,gvector_funcallable,1)
+define_subtag(catch_frame,fulltag_nodeheader_1,0)
+define_subtag(basic_stream,fulltag_nodeheader_1,1)
+define_subtag(lock,fulltag_nodeheader_1,2)
+define_subtag(hash_vector,fulltag_nodeheader_1,3)
+define_subtag(pool,fulltag_nodeheader_1,4)
+define_subtag(weak,fulltag_nodeheader_1,5)
+define_subtag(package,fulltag_nodeheader_1,6)
+        
+define_subtag(slot_vector,fulltag_nodeheader_2,0)
+define_subtag(instance,fulltag_nodeheader_2,1)
+define_subtag(struct,fulltag_nodeheader_2,2)
+define_subtag(istruct,fulltag_nodeheader_2,3)
+define_subtag(value_cell,fulltag_nodeheader_2,4)
+define_subtag(xfunction,fulltag_nodeheader_2,5)
+	
+define_subtag(ratio,fulltag_nodeheader_3,0)
+define_subtag(complex,fulltag_nodeheader_3,1)
+			
+t_value = (0x3000+fulltag_misc)	
+misc_bias = fulltag_misc
+cons_bias = fulltag_cons
+define(`t_offset',-symbol.size)
+	
+misc_header_offset = -fulltag_misc
+misc_data_offset = misc_header_offset+node_size /* first word of data */
+misc_subtag_offset = misc_data_offset-1       /* low byte of header */
+misc_dfloat_offset = misc_data_offset		/* double-floats are doubleword-aligned */
+
+define_subtag(single_float,fulltag_imm_0,0)
+
+define_subtag(go_tag,fulltag_imm_1,0)
+define_subtag(block_tag,fulltag_imm_1,1)
+
+define_subtag(character,fulltag_imm_1,0)
+                	
+define_subtag(unbound,fulltag_imm_3,0)
+unbound_marker = subtag_unbound
+undefined = unbound_marker
+define_subtag(slot_unbound,fulltag_imm_3,1)
+slot_unbound_marker = subtag_slot_unbound
+define_subtag(illegal,fulltag_imm_3,2)
+illegal_marker = subtag_illegal
+define_subtag(no_thread_local_binding,fulltag_imm_3,3)
+no_thread_local_binding_marker = subtag_no_thread_local_binding        
+
+	
+max_64_bit_constant_index = ((0x7fff + misc_dfloat_offset)>>3)
+max_32_bit_constant_index = ((0x7fff + misc_data_offset)>>2)
+max_16_bit_constant_index = ((0x7fff + misc_data_offset)>>1)
+max_8_bit_constant_index = (0x7fff + misc_data_offset)
+max_1_bit_constant_index = ((0x7fff + misc_data_offset)<<5)
+
+
+	
+/* The objects themselves look something like this: */
+	
+/* Order of CAR and CDR doesn't seem to matter much - there aren't */
+/* too many tricks to be played with predecrement/preincrement addressing. */
+/* Keep them in the confusing MCL 3.0 order, to avoid confusion. */
+	_struct(cons,-cons_bias)
+	 _node(cdr)
+	 _node(car)
+	_ends
+	
+	_structf(ratio)
+	 _node(numer)
+	 _node(denom)
+	_endstructf
+	
+	_structf(double_float)
+	 _word(value)
+         _word(val_low)
+	_endstructf
+	
+	_structf(macptr)
+	 _node(address)
+         _node(domain)
+         _node(type)
+	_endstructf
+	
+/* Functions are of (conceptually) unlimited size. */
+	_struct(_function,-misc_bias)
+	 _node(header)
+	 _node(codevector)
+	_ends
+
+	_struct(tsp_frame,0)
+	 _node(backlink)
+	 _node(type)
+	 _struct_label(fixed_overhead)
+	 _struct_label(data_offset)
+	_ends
+
+
+
+	_structf(symbol)
+	 _node(pname)
+	 _node(vcell)
+	 _node(fcell)
+	 _node(package_predicate)
+	 _node(flags)
+         _node(plist)
+         _node(binding_index)
+	_endstructf
+
+	_structf(catch_frame)
+	 _node(catch_tag)	/* #<unbound> -> unwind-protect, else catch */
+	 _node(link)		/* backpointer to previous catch frame */
+	 _node(mvflag)		/* 0 if single-valued catch, fixnum 1 otherwise */
+	 _node(csp)		/* pointer to lisp_frame on csp */
+	 _node(db_link)		/* head of special-binding chain */
+	 _field(regs,8*node_size)	/* save7-save0 */
+	 _node(xframe)		/* exception frame chain */
+	 _node(tsp_segment)	/* maybe someday; padding for now */
+	_endstructf
+
+
+	_structf(vectorH)
+	 _node(logsize)
+	 _node(physsize)
+	 _node(data_vector)
+	 _node(displacement)
+	 _node(flags)
+	_endstructf	
+	
+        _structf(arrayH)
+         _node(rank)
+         _node(physsize)
+         _node(data_vector)
+         _node(displacement)
+         _node(flags)
+         _struct_label(dim0)
+        _endstructf
+        
+	_struct(c_frame,0)	/* PowerOpen ABI C stack frame */
+	 _node(backlink)
+	 _node(crsave)
+	 _node(savelr)
+	 _field(unused, 16)
+	 _node(savetoc)
+	 _struct_label(params)
+         _node(param0)
+         _node(param1)
+         _node(param2)
+         _node(param3)
+         _node(param4)
+         _node(param5)
+         _node(param6)
+         _node(param7)
+	 _struct_label(minsiz)
+	_ends
+
+
+	_struct(eabi_c_frame,0)
+	 _word(backlink) 
+	 _word(savelr)
+	 _word(param0)
+	 _word(param1)
+	 _word(param2)
+	 _word(param3)
+	 _word(param4)
+	 _word(param5)
+	 _word(param6)
+	 _word(param7)
+	 _struct_label(minsiz)
+	_ends
+
+        /* For entry to variable-argument-list functions */
+	/* (e.g., via callback) */
+	_struct(varargs_eabi_c_frame,0)
+	 _word(backlink)
+	 _word(savelr)
+	 _struct_label(va_list)
+	 _word(flags)		/* gpr count byte, fpr count byte, padding */
+	 _word(overflow_arg_area)
+	 _word(reg_save_area)
+	 _field(padding,4)
+	 _struct_label(regsave)
+	 _field(gp_save,8*4)
+	 _field(fp_save,8*8)
+	 _word(old_backlink)
+	 _word(old_savelr)
+	 _struct_label(incoming_stack_args)
+	_ends
+        	
+	_struct(lisp_frame,0)
+	 _node(backlink) 
+	 _node(savefn)	
+	 _node(savelr)	
+	 _node(savevsp)	
+	_ends
+
+	_struct(vector,-fulltag_misc)
+	 _node(header)
+	 _struct_label(data)
+	_ends
+
+        _struct(binding,0)
+         _node(link)
+         _node(sym)
+         _node(val)
+        _ends
+
+
+/* Nilreg-relative globals.  Talking the assembler into doing something reasonable here */
+/* is surprisingly hard. */
+
+symbol_extra = symbol.size-fulltag_misc
+
+	
+	_struct(nrs,(0x3000+(LOWMEM_BIAS)))
+	 _struct_pad(fulltag_misc)
+	 _struct_label(tsym)
+	 _struct_pad(symbol_extra)	/* t */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(nil)
+	 _struct_pad(symbol_extra)	/* nil */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(errdisp)
+	 _struct_pad(symbol_extra)	/* %err-disp */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(cmain)
+	 _struct_pad(symbol_extra)	/* cmain */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(eval)
+	 _struct_pad(symbol_extra)	/* eval */
+ 
+	 _struct_pad(fulltag_misc)
+	 _struct_label(appevalfn)
+	 _struct_pad(symbol_extra)	/* apply-evaluated-function */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(error)
+	 _struct_pad(symbol_extra)	/* error */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(defun)
+	 _struct_pad(symbol_extra)	/* %defun */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(defvar)
+	 _struct_pad(symbol_extra)	/* %defvar */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(defconstant)
+	 _struct_pad(symbol_extra)	/* %defconstant */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(macrosym)
+	 _struct_pad(symbol_extra)	/* %macro */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(kernelrestart)
+	 _struct_pad(symbol_extra)	/* %kernel-restart */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(package)
+	 _struct_pad(symbol_extra)	/* *package* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(total_bytes_freed)		/* *total-bytes-freed* */
+	 _struct_pad(symbol_extra)
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(kallowotherkeys)
+	 _struct_pad(symbol_extra)	/* allow-other-keys */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(toplcatch)
+	 _struct_pad(symbol_extra)	/* %toplevel-catch% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(toplfunc)
+	 _struct_pad(symbol_extra)	/* %toplevel-function% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(callbacks)
+	 _struct_pad(symbol_extra)	/* %pascal-functions% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(allmeteredfuns)
+	 _struct_pad(symbol_extra)	/* *all-metered-functions* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(total_gc_microseconds)		/* *total-gc-microseconds* */
+	 _struct_pad(symbol_extra)
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(builtin_functions)		/* %builtin-functions% */
+	 _struct_pad(symbol_extra)                
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(udf)
+	 _struct_pad(symbol_extra)	/* %unbound-function% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(init_misc)
+	 _struct_pad(symbol_extra)	/* %init-misc */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(macro_code)
+	 _struct_pad(symbol_extra)	/* %macro-code% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(closure_code)
+	 _struct_pad(symbol_extra)      /* %closure-code% */
+
+       	 _struct_pad(fulltag_misc)
+	 _struct_label(new_gcable_ptr) /* %new-gcable-ptr */
+	 _struct_pad(symbol_extra)
+	
+       	 _struct_pad(fulltag_misc)
+	 _struct_label(gc_event_status_bits)
+	 _struct_pad(symbol_extra)	/* *gc-event-status-bits* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(post_gc_hook)
+	 _struct_pad(symbol_extra)	/* *post-gc-hook* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(handlers)
+	 _struct_pad(symbol_extra)	/* %handlers% */
+
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(all_packages)
+	 _struct_pad(symbol_extra)	/* %all-packages% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(keyword_package)
+	 _struct_pad(symbol_extra)	/* *keyword-package* */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(finalization_alist)
+	 _struct_pad(symbol_extra)	/* %finalization-alist% */
+
+	 _struct_pad(fulltag_misc)
+	 _struct_label(foreign_thread_control)
+	 _struct_pad(symbol_extra)	/* %foreign-thread-control */
+
+	_ends
+
+define(`def_header',`
+$1 = ($2<<num_subtag_bits)|$3')
+
+	def_header(double_float_header,2,subtag_double_float)
+	def_header(two_digit_bignum_header,2,subtag_bignum)
+	def_header(three_digit_bignum_header,3,subtag_bignum)
+	def_header(four_digit_bignum_header,4,subtag_bignum)
+	def_header(five_digit_bignum_header,5,subtag_bignum)        
+	def_header(symbol_header,symbol.element_count,subtag_symbol)
+	def_header(value_cell_header,1,subtag_value_cell	)
+	def_header(macptr_header,macptr.element_count,subtag_macptr)
+	def_header(vectorH_header,vectorH.element_count,subtag_vectorH)
+
+	include(errors.s)
+
+/* Symbol bits that we care about */
+sym_vbit_bound = (0+fixnum_shift)
+sym_vbit_bound_mask = (1<<sym_vbit_bound)
+sym_vbit_const = (1+fixnum_shift)
+sym_vbit_const_mask = (1<<sym_vbit_const)
+
+	_struct(area,0)
+	 _node(pred) 
+	 _node(succ) 
+	 _node(low) 
+	 _node(high) 
+	 _node(active) 
+	 _node(softlimit) 
+	 _node(hardlimit) 
+	 _node(code) 
+	 _node(markbits) 
+	 _node(ndwords) 
+	 _node(older) 
+	 _node(younger) 
+	 _node(h) 
+	 _node(sofprot) 
+	 _node(hardprot) 
+	 _node(owner) 
+	 _node(refbits) 
+	 _node(nextref) 
+	_ends
+
+
+/* This is only referenced by c->lisp code that needs to save/restore C NVRs in a TSP frame. */
+	_struct(c_reg_save,0)
+	 _node(tsp_link)	/* backpointer */
+	 _node(tsp_mark)	/* frame type */
+	 _node(save_fpscr)	/* for C's FPSCR */
+	 _field(save_gprs,19*node_size) /* r13-r31 */
+	 _dword(save_fp_zero)	/* for fp_zero */
+	 _dword(save_fps32conv)
+         _field(save_fprs,13*8)
+	_ends
+
+
+TCR_BIAS = 0
+	
+/*  Thread context record. */
+
+	_struct(tcr,-TCR_BIAS)
+	 _node(prev)		/* in doubly-linked list */
+	 _node(next)		/* in doubly-linked list */
+         _node(single_float_convert) /* xxxf0 */
+	 _word(lisp_fpscr)	/* lisp thread's fpscr (in low word) */
+	 _word(lisp_fpscr_low)
+	 _node(db_link)		/* special binding chain head */
+	 _node(catch_top)	/* top catch frame */
+	 _node(save_vsp)	/* VSP when in foreign code */
+	 _node(save_tsp)	/* TSP when in foreign code */
+	 _node(cs_area)		/* cstack area pointer */
+	 _node(vs_area)		/* vstack area pointer */
+	 _node(ts_area)		/* tstack area pointer */
+	 _node(cs_limit)	/* cstack overflow limit */
+	 _word(bytes_consed_high)
+	 _word(bytes_consed_low)
+	 _node(log2_allocation_quantum)
+	 _node(interrupt_pending)
+	 _node(xframe)		/* per-thread exception frame list */
+	 _node(errno_loc)	/* per-thread  errno location */
+	 _node(ffi_exception)	/* fpscr exception bits from ff-call */
+	 _node(osid)		/* OS thread id */
+         _node(valence)		/* odd when in foreign code */
+	 _node(foreign_exception_status)
+	 _node(native_thread_info)
+	 _node(native_thread_id)
+	 _node(last_allocptr)
+	 _node(save_allocptr)
+	 _node(save_allocbase)
+	 _node(reset_completion)
+	 _node(activate)
+         _node(suspend_count)
+         _node(suspend_context)
+	 _node(pending_exception_context)
+	 _node(suspend)		/* semaphore for suspension notify */
+	 _node(resume)			/* semaphore for resumption notify */
+         _word(flags_pad)
+	 _word(flags)      
+	 _node(gc_context)
+         _node(termination_semaphore)
+         _node(unwinding)
+         _node(tlb_limit)
+         _node(tlb_pointer)     /* Consider using tcr+N as tlb_pointer */
+	 _node(shutdown_count)
+         _node(safe_ref_address)
+	_ends
+
+TCR_FLAG_BIT_FOREIGN = fixnum_shift
+TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)
+TCR_FLAG_BIT_ALT_SUSPEND = (fixnumshift+2)
+TCR_FLAG_BIT_PROPAGATE_EXCEPTION = (fixnumshift+3)
+TCR_FLAG_BIT_SUSPEND_ACK_PENDING = (fixnumshift+4)
+TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
+TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
+TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)        
+
+
+nil_value = (0x3000+symbol.size+fulltag_misc+(LOWMEM_BIAS))
+        	
+define(`RESERVATION_DISCHARGE',(0x2008+(LOWMEM_BIAS)))
+
+lisp_globals_limit = (0x3000+(LOWMEM_BIAS))
+        
+INTERRUPT_LEVEL_BINDING_INDEX = fixnumone
+        
+                
Index: /branches/arm/lisp-kernel/ppc-exceptions.c
===================================================================
--- /branches/arm/lisp-kernel/ppc-exceptions.c	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-exceptions.c	(revision 13357)
@@ -0,0 +1,3232 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#ifdef LINUX
+#include <strings.h>
+#include <sys/mman.h>
+#include <fpu_control.h>
+#include <linux/prctl.h>
+#endif
+
+#ifdef DARWIN
+#include <sys/mman.h>
+#define _FPU_RESERVED 0xffffff00
+#ifndef SA_NODEFER
+#define SA_NODEFER 0
+#endif
+#include <sysexits.h>
+
+/* a distinguished UUO at a distinguished address */
+extern void pseudo_sigreturn(ExceptionInformation *);
+#endif
+
+
+#include "Threads.h"
+
+#define MSR_FE0_MASK (((unsigned)0x80000000)>>20)
+#define MSR_FE1_MASK (((unsigned)0x80000000)>>23)
+#define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK)
+extern void enable_fp_exceptions(void);
+extern void disable_fp_exceptions(void);
+
+#ifdef LINUX
+/* Some relatively recent kernels support this interface.
+   If this prctl isn't supported, assume that we're always
+   running with exceptions enabled and "precise". 
+*/
+#ifndef PR_SET_FPEXC
+#define PR_SET_FPEXC 12
+#endif
+#ifndef PR_FP_EXC_DISABLED
+#define PR_FP_EXC_DISABLED 0
+#endif
+#ifndef PR_FP_EXC_PRECISE
+#define PR_FP_EXC_PRECISE 3
+#endif
+
+void
+enable_fp_exceptions()
+{
+  prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
+}
+
+void
+disable_fp_exceptions()
+{
+  prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);
+}
+
+#endif
+
+/*
+  Handle exceptions.
+
+*/
+
+extern LispObj lisp_nil;
+
+extern natural lisp_heap_gc_threshold;
+extern Boolean grow_dynamic_area(natural);
+
+
+
+
+
+
+int
+page_size = 4096;
+
+int
+log2_page_size = 12;
+
+
+
+
+
+/*
+  If the PC is pointing to an allocation trap, the previous instruction
+  must have decremented allocptr.  Return the non-zero amount by which
+  allocptr was decremented.
+*/
+signed_natural
+allocptr_displacement(ExceptionInformation *xp)
+{
+  pc program_counter = xpPC(xp);
+  opcode instr = *program_counter, prev_instr = *(program_counter-1);
+
+  if (instr == ALLOC_TRAP_INSTRUCTION) {
+    if (match_instr(prev_instr, 
+                    XO_MASK | RT_MASK | RB_MASK,
+                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
+                    RT(allocptr) |
+                    RB(allocptr))) {
+      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
+    }
+    if (match_instr(prev_instr,
+                    OP_MASK | RT_MASK | RA_MASK,
+                    OP(major_opcode_ADDI) | 
+                    RT(allocptr) |
+                    RA(allocptr))) {
+      return (signed_natural) -((short) prev_instr);
+    }
+    Bug(xp, "Can't determine allocation displacement");
+  }
+  return 0;
+}
+
+
+/*
+  A cons cell's been successfully allocated, but the allocptr's
+  still tagged (as fulltag_cons, of course.)  Emulate any instructions
+  that might follow the allocation (stores to the car or cdr, an
+  assignment to the "result" gpr) that take place while the allocptr's
+  tag is non-zero, advancing over each such instruction.  When we're
+  done, the cons cell will be allocated and initialized, the result
+  register will point to it, the allocptr will be untagged, and
+  the PC will point past the instruction that clears the allocptr's
+  tag.
+*/
+void
+finish_allocating_cons(ExceptionInformation *xp)
+{
+  pc program_counter = xpPC(xp);
+  opcode instr;
+  LispObj cur_allocptr = xpGPR(xp, allocptr);
+  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
+  int target_reg;
+
+  while (1) {
+    instr = *program_counter++;
+
+    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
+      xpGPR(xp, allocptr) = untag(cur_allocptr);
+      xpPC(xp) = program_counter;
+      return;
+    }
+    
+    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
+    case STORE_CAR_ALLOCPTR_INSTRUCTION:
+      c->car = xpGPR(xp,RT_field(instr));
+      break;
+    case STORE_CDR_ALLOCPTR_INSTRUCTION:
+      c->cdr = xpGPR(xp,RT_field(instr));
+      break;
+    default:
+      /* Assume that this is an assignment: {rt/ra} <- allocptr.
+         There are several equivalent instruction forms
+         that might have that effect; just assign to target here.
+      */
+      if (major_opcode_p(instr,major_opcode_X31)) {
+	target_reg = RA_field(instr);
+      } else {
+	target_reg = RT_field(instr);
+      }
+      xpGPR(xp,target_reg) = cur_allocptr;
+      break;
+    }
+  }
+}
+
+/*
+  We were interrupted in the process of allocating a uvector; we
+  survived the allocation trap, and allocptr is tagged as fulltag_misc.
+  Emulate any instructions which store a header into the uvector,
+  assign the value of allocptr to some other register, and clear
+  allocptr's tag.  Don't expect/allow any other instructions in
+  this environment.
+*/
+void
+finish_allocating_uvector(ExceptionInformation *xp)
+{
+  pc program_counter = xpPC(xp);
+  opcode instr;
+  LispObj cur_allocptr = xpGPR(xp, allocptr);
+  int target_reg;
+
+  while (1) {
+    instr = *program_counter++;
+    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
+      xpGPR(xp, allocptr) = untag(cur_allocptr);
+      xpPC(xp) = program_counter;
+      return;
+    }
+    if ((instr &  STORE_HEADER_ALLOCPTR_MASK) == 
+        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
+      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
+    } else {
+      /* assume that this is an assignment */
+
+      if (major_opcode_p(instr,major_opcode_X31)) {
+	target_reg = RA_field(instr);
+      } else {
+	target_reg = RT_field(instr);
+      }
+      xpGPR(xp,target_reg) = cur_allocptr;
+    }
+  }
+}
+
+
+Boolean
+allocate_object(ExceptionInformation *xp,
+                natural bytes_needed, 
+                signed_natural disp_from_allocptr,
+		TCR *tcr)
+{
+  area *a = active_dynamic_area;
+
+  /* Maybe do an EGC */
+  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
+    if (((a->active)-(a->low)) >= a->threshold) {
+      gc_from_xp(xp, 0L);
+    }
+  }
+
+  /* Life is pretty simple if we can simply grab a segment
+     without extending the heap.
+  */
+  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
+    xpGPR(xp, allocptr) += disp_from_allocptr;
+#ifdef DEBUG
+    fprintf(dbgout, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n",
+            tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp));
+#endif
+    return true;
+  }
+  
+  /* It doesn't make sense to try a full GC if the object
+     we're trying to allocate is larger than everything
+     allocated so far.
+  */
+  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
+    untenure_from_area(tenured_area); /* force a full GC */
+    gc_from_xp(xp, 0L);
+  }
+  
+  /* Try again, growing the heap if necessary */
+  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
+    xpGPR(xp, allocptr) += disp_from_allocptr;
+#ifdef DEBUG
+    fprintf(dbgout, "New heap segment for #x%x after GC: #x%x/#x%x\n",
+            tcr,xpGPR(xp,allocbase),tcr->last_allocptr);
+#endif
+    return true;
+  }
+  
+  return false;
+}
+
+#ifndef XNOMEM
+#define XNOMEM 10
+#endif
+
+void
+update_bytes_allocated(TCR* tcr, void *cur_allocptr)
+{
+  BytePtr 
+    last = (BytePtr) tcr->last_allocptr, 
+    current = (BytePtr) cur_allocptr;
+  if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) {
+    tcr->bytes_allocated += last-current;
+  }
+  tcr->last_allocptr = 0;
+}
+
+void
+lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed)
+{
+  /* Couldn't allocate the object.  If it's smaller than some arbitrary
+     size (say 128K bytes), signal a "chronically out-of-memory" condition;
+     else signal a "allocation request failed" condition.
+  */
+  xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
+  handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0,  xpPC(xp));
+}
+
+/*
+  Allocate a large list, where "large" means "large enough to
+  possibly trigger the EGC several times if this was done
+  by individually allocating each CONS."  The number of 
+  conses in question is in arg_z; on successful return,
+  the list will be in arg_z 
+*/
+
+Boolean
+allocate_list(ExceptionInformation *xp, TCR *tcr)
+{
+  natural 
+    nconses = (unbox_fixnum(xpGPR(xp,arg_z))),
+    bytes_needed = (nconses << dnode_shift);
+  LispObj
+    prev = lisp_nil,
+    current,
+    initial = xpGPR(xp,arg_y);
+
+  if (nconses == 0) {
+    /* Silly case */
+    xpGPR(xp,arg_z) = lisp_nil;
+    xpGPR(xp,allocptr) = lisp_nil;
+    return true;
+  }
+  update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr);
+  if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) {
+    for (current = xpGPR(xp,allocptr);
+         nconses;
+         prev = current, current+= dnode_size, nconses--) {
+      deref(current,0) = prev;
+      deref(current,1) = initial;
+    }
+    xpGPR(xp,arg_z) = prev;
+    xpGPR(xp,arg_y) = xpGPR(xp,allocptr);
+    xpGPR(xp,allocptr)-=fulltag_cons;
+  } else {
+    lisp_allocation_failure(xp,tcr,bytes_needed);
+  }
+  return true;
+}
+
+OSStatus
+handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
+{
+  pc program_counter;
+  natural cur_allocptr, bytes_needed = 0;
+  opcode prev_instr;
+  signed_natural disp = 0;
+  unsigned allocptr_tag;
+
+  cur_allocptr = xpGPR(xp,allocptr);
+  program_counter = xpPC(xp);
+  prev_instr = *(program_counter-1);
+  allocptr_tag = fulltag_of(cur_allocptr);
+
+  switch (allocptr_tag) {
+  case fulltag_cons:
+    bytes_needed = sizeof(cons);
+    disp = -sizeof(cons) + fulltag_cons;
+    break;
+
+  case fulltag_even_fixnum:
+  case fulltag_odd_fixnum:
+    break;
+
+  case fulltag_misc:
+    if (match_instr(prev_instr, 
+                    XO_MASK | RT_MASK | RB_MASK,
+                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
+                    RT(allocptr) |
+                    RB(allocptr))) {
+      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
+    } else if (match_instr(prev_instr,
+                           OP_MASK | RT_MASK | RA_MASK,
+                           OP(major_opcode_ADDI) | 
+                           RT(allocptr) |
+                           RA(allocptr))) {
+      disp = (signed_natural) ((short) prev_instr);
+    }
+    if (disp) {
+      bytes_needed = (-disp) + fulltag_misc;
+      break;
+    }
+    /* else fall thru */
+  default:
+    return -1;
+  }
+
+  if (bytes_needed) {
+    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
+    if (allocate_object(xp, bytes_needed, disp, tcr)) {
+#if 0
+      fprintf(dbgout, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
+              tcr, xpGPR(xp, allocptr));
+#endif
+      adjust_exception_pc(xp,4);
+      return 0;
+    }
+    lisp_allocation_failure(xp,tcr,bytes_needed);
+    return -1;
+  }
+  return -1;
+}
+
+natural gc_deferred = 0, full_gc_deferred = 0;
+
+signed_natural
+flash_freeze(TCR *tcr, signed_natural param)
+{
+  return 0;
+}
+
+OSStatus
+handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
+{
+  LispObj 
+    selector = xpGPR(xp,imm0), 
+    arg = xpGPR(xp,imm1);
+  area *a = active_dynamic_area;
+  Boolean egc_was_enabled = (a->older != NULL);
+  natural gc_previously_deferred = gc_deferred;
+
+
+  switch (selector) {
+  case GC_TRAP_FUNCTION_EGC_CONTROL:
+    egc_control(arg != 0, a->active);
+    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
+    break;
+
+  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
+    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
+    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
+    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
+    xpGPR(xp,arg_z) = lisp_nil+t_offset;
+    break;
+
+  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
+    if (((signed_natural) arg) > 0) {
+      lisp_heap_gc_threshold = 
+        align_to_power_of_2((arg-1) +
+                            (heap_segment_size - 1),
+                            log2_heap_segment_size);
+    }
+    /* fall through */
+  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
+    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
+    break;
+
+  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
+    /*  Try to put the current threshold in effect.  This may
+        need to disable/reenable the EGC. */
+    untenure_from_area(tenured_area);
+    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
+    if (egc_was_enabled) {
+      if ((a->high - a->active) >= a->threshold) {
+        tenure_to_area(tenured_area);
+      }
+    }
+    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
+    break;
+
+  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
+    ensure_static_conses(xp,tcr,32768);
+    break;
+
+  case GC_TRAP_FUNCTION_FLASH_FREEZE:
+    untenure_from_area(tenured_area);
+    gc_like_from_xp(xp,flash_freeze,0);
+    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
+    tenured_area->static_dnodes = area_dnode(a->active, a->low);
+    if (egc_was_enabled) {
+      tenure_to_area(tenured_area);
+    }
+    xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
+    break;
+
+  default:
+    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));
+
+    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
+      if (!full_gc_deferred) {
+        gc_from_xp(xp, 0L);
+        break;
+      }
+      /* Tried to do a full GC when gc was disabled.  That failed,
+         so try full GC now */
+      selector = GC_TRAP_FUNCTION_GC;
+    }
+    
+    if (egc_was_enabled) {
+      egc_control(false, (BytePtr) a->active);
+    }
+    gc_from_xp(xp, 0L);
+    if (gc_deferred > gc_previously_deferred) {
+      full_gc_deferred = 1;
+    } else {
+      full_gc_deferred = 0;
+    }
+    if (selector > GC_TRAP_FUNCTION_GC) {
+      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
+        impurify_from_xp(xp, 0L);
+        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
+        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
+        gc_from_xp(xp, 0L);
+      }
+      if (selector & GC_TRAP_FUNCTION_PURIFY) {
+        purify_from_xp(xp, 0L);
+        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
+        gc_from_xp(xp, 0L);
+      }
+      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
+        OSErr err;
+        extern OSErr save_application(unsigned, Boolean);
+        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
+        area *vsarea = tcr->vs_area;
+	
+        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
+        err = save_application(arg, egc_was_enabled);
+        if (err == noErr) {
+          _exit(0);
+        }
+        fatal_oserr(": save_application", err);
+      }
+      switch (selector) {
+
+
+      case GC_TRAP_FUNCTION_FREEZE:
+        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
+        tenured_area->static_dnodes = area_dnode(a->active, a->low);
+        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
+        break;
+      default:
+        break;
+      }
+    }
+    
+    if (egc_was_enabled) {
+      egc_control(true, NULL);
+    }
+    break;
+    
+  }
+
+  adjust_exception_pc(xp,4);
+  return 0;
+}
+
+
+
+void
+signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
+{
+  /* The cstack just overflowed.  Signal a stack-overflow error for the
+     current thread; the overflow limit presumably stays forced until all
+     stacks are well under their overflow limits.
+  */
+
+#if 0
+  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
+#endif
+  handle_error(xp, error_stack_overflow, reg, 0,  xpPC(xp));
+}
+
+/*
+  Lower (move toward 0) the "end" of the soft protected area associated
+  with a by a page, if we can.
+*/
+
+void
+adjust_soft_protection_limit(area *a)
+{
+  char *proposed_new_soft_limit = a->softlimit - 4096;
+  protected_area_ptr p = a->softprot;
+  
+  if (proposed_new_soft_limit >= (p->start+16384)) {
+    p->end = proposed_new_soft_limit;
+    p->protsize = p->end-p->start;
+    a->softlimit = proposed_new_soft_limit;
+  }
+  protect_area(p);
+}
+
+void
+restore_soft_stack_limit(unsigned stkreg)
+{
+  area *a;
+  TCR *tcr = get_tcr(true);
+
+  switch (stkreg) {
+  case sp:
+    a = tcr->cs_area;
+    if ((a->softlimit - 4096) > (a->hardlimit + 16384)) {
+      a->softlimit -= 4096;
+    }
+    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
+    break;
+  case vsp:
+    a = tcr->vs_area;
+    adjust_soft_protection_limit(a);
+    break;
+  case tsp:
+    a = tcr->ts_area;
+    adjust_soft_protection_limit(a);
+  }
+}
+
+/* Maybe this'll work someday.  We may have to do something to
+   make the thread look like it's not handling an exception */
+void
+reset_lisp_process(ExceptionInformation *xp)
+{
+  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
+  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));
+
+  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
+  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));
+
+  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
+  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */
+
+  start_lisp(tcr, 1);
+}
+
+/*
+  This doesn't GC; it returns true if it made enough room, false
+  otherwise.
+  If "extend" is true, it can try to extend the dynamic area to
+  satisfy the request.
+*/
+
+Boolean
+new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
+{
+  area *a;
+  natural newlimit, oldlimit;
+  natural log2_allocation_quantum = tcr->log2_allocation_quantum;
+
+  a  = active_dynamic_area;
+  oldlimit = (natural) a->active;
+  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
+	      align_to_power_of_2(need, log2_allocation_quantum));
+  if (newlimit > (natural) (a->high)) {
+    if (extend) {
+      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
+      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
+      do {
+        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
+          break;
+        }
+        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
+        if (extend_by < 4<<20) {
+          return false;
+        }
+      } while (1);
+    } else {
+      return false;
+    }
+  }
+  a->active = (BytePtr) newlimit;
+  tcr->last_allocptr = (void *)newlimit;
+  xpGPR(xp,allocptr) = (LispObj) newlimit;
+  xpGPR(xp,allocbase) = (LispObj) oldlimit;
+
+  return true;
+}
+
+ 
+void
+update_area_active (area **aptr, BytePtr value)
+{
+  area *a = *aptr;
+  for (; a; a = a->older) {
+    if ((a->low <= value) && (a->high >= value)) break;
+  };
+  if (a == NULL) Bug(NULL, "Can't find active area");
+  a->active = value;
+  *aptr = a;
+
+  for (a = a->younger; a; a = a->younger) {
+    a->active = a->high;
+  }
+}
+
+LispObj *
+tcr_frame_ptr(TCR *tcr)
+{
+  ExceptionInformation *xp;
+  LispObj *bp = NULL;
+
+  if (tcr->pending_exception_context)
+    xp = tcr->pending_exception_context;
+  else {
+    xp = tcr->suspend_context;
+  }
+  if (xp) {
+    bp = (LispObj *) xpGPR(xp, sp);
+  }
+  return bp;
+}
+
+void
+normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
+{
+  void *cur_allocptr = NULL;
+  LispObj freeptr = 0;
+
+  if (xp) {
+    if (is_other_tcr) {
+      pc_luser_xp(xp, tcr, NULL);
+      freeptr = xpGPR(xp, allocptr);
+      if (fulltag_of(freeptr) == 0){
+	cur_allocptr = (void *) ptr_from_lispobj(freeptr);
+      }
+    }
+    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
+    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
+    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
+#ifdef DEBUG
+    fprintf(dbgout, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
+            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
+    fprintf(dbgout, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
+            tcr,
+            xpGPR(xp, allocbase),
+            xpGPR(xp, allocptr),
+            xpPC(xp));
+    fprintf(dbgout, "TCR 0x%x, exception context = 0x%x\n",
+            tcr,
+            tcr->pending_exception_context);
+#endif
+  } else {
+    /* In ff-call.  No need to update cs_area */
+    cur_allocptr = (void *) (tcr->save_allocptr);
+#ifdef DEBUG
+    fprintf(dbgout, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
+            tcr, tcr->save_vsp, tcr->save_tsp);
+    fprintf(dbgout, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
+            tcr,
+            tcr->save_allocbase,
+            tcr->save_allocptr,
+            xpPC(xp));
+
+#endif
+    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
+    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
+  }
+
+
+  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
+  if (cur_allocptr) {
+    update_bytes_allocated(tcr, cur_allocptr);
+    if (freeptr) {
+      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
+      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
+    }
+  }
+}
+
+TCR *gc_tcr = NULL;
+
+/* Suspend and "normalize" other tcrs, then call a gc-like function
+   in that context.  Resume the other tcrs, then return what the
+   function returned */
+
+signed_natural
+gc_like_from_xp(ExceptionInformation *xp, 
+                signed_natural(*fun)(TCR *, signed_natural), 
+                signed_natural param)
+{
+  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
+  int result;
+  signed_natural inhibit;
+
+  suspend_other_threads(true);
+  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
+  if (inhibit != 0) {
+    if (inhibit > 0) {
+      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
+    }
+    resume_other_threads(true);
+    gc_deferred++;
+    return 0;
+  }
+  gc_deferred = 0;
+
+  gc_tcr = tcr;
+
+  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
+  xpGPR(xp, allocbase) = VOID_ALLOCPTR;
+
+  normalize_tcr(xp, tcr, false);
+
+
+  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
+    if (other_tcr->pending_exception_context) {
+      other_tcr->gc_context = other_tcr->pending_exception_context;
+    } else if (other_tcr->valence == TCR_STATE_LISP) {
+      other_tcr->gc_context = other_tcr->suspend_context;
+    } else {
+      /* no pending exception, didn't suspend in lisp state:
+	 must have executed a synchronous ff-call. 
+      */
+      other_tcr->gc_context = NULL;
+    }
+    normalize_tcr(other_tcr->gc_context, other_tcr, true);
+  }
+    
+
+
+  result = fun(tcr, param);
+
+  other_tcr = tcr;
+  do {
+    other_tcr->gc_context = NULL;
+    other_tcr = other_tcr->next;
+  } while (other_tcr != tcr);
+
+  gc_tcr = NULL;
+
+  resume_other_threads(true);
+
+  return result;
+
+}
+
+
+
+/* Returns #bytes freed by invoking GC */
+
+signed_natural
+gc_from_tcr(TCR *tcr, signed_natural param)
+{
+  area *a;
+  BytePtr oldfree, newfree;
+  BytePtr oldend, newend;
+
+#ifdef DEBUG
+  fprintf(dbgout, "Start GC  in 0x%lx\n", tcr);
+#endif
+  a = active_dynamic_area;
+  oldend = a->high;
+  oldfree = a->active;
+  gc(tcr, param);
+  newfree = a->active;
+  newend = a->high;
+#if 0
+  fprintf(dbgout, "End GC  in 0x%lx\n", tcr);
+#endif
+  return ((oldfree-newfree)+(newend-oldend));
+}
+
+signed_natural
+gc_from_xp(ExceptionInformation *xp, signed_natural param)
+{
+  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
+
+  freeGCptrs();
+  return status;
+}
+
+signed_natural
+purify_from_xp(ExceptionInformation *xp, signed_natural param)
+{
+  return gc_like_from_xp(xp, purify, param);
+}
+
+signed_natural
+impurify_from_xp(ExceptionInformation *xp, signed_natural param)
+{
+  return gc_like_from_xp(xp, impurify, param);
+}
+
+
+
+
+
+
+protection_handler
+ * protection_handlers[] = {
+   do_spurious_wp_fault,
+   do_soft_stack_overflow,
+   do_soft_stack_overflow,
+   do_soft_stack_overflow,
+   do_hard_stack_overflow,    
+   do_hard_stack_overflow,
+   do_hard_stack_overflow
+   };
+
+
+Boolean
+is_write_fault(ExceptionInformation *xp, siginfo_t *info)
+{
+  /* use the siginfo if it's available.  Some versions of Linux
+     don't propagate the DSISR and TRAP fields correctly from
+     64- to 32-bit handlers.
+  */
+  if (info) {
+    /* 
+       To confuse matters still further, the value of SEGV_ACCERR
+       varies quite a bit among LinuxPPC variants (the value defined
+       in the header files varies, and the value actually set by
+       the kernel also varies.)  So far, we're only looking at the
+       siginfo under Linux and Linux always seems to generate
+       SIGSEGV, so check for SIGSEGV and check the low 16 bits
+       of the si_code.
+    */
+    return ((info->si_signo == SIGSEGV) &&
+	    ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
+  }
+  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
+	 (xpTRAP(xp) == 
+#ifdef LINUX
+0x0300
+#endif
+#ifdef DARWIN
+0x0300/0x100
+#endif
+)
+	 );
+#if 0 
+  /* Maybe worth keeping around; not sure if it's an exhaustive
+     list of PPC instructions that could cause a WP fault */
+  /* Some OSes lose track of the DSISR and DSR SPRs, or don't provide
+     valid values of those SPRs in the context they provide to
+     exception handlers.  Look at the opcode of the offending
+     instruction & recognize 32-bit store operations */
+  opcode instr = *(xpPC(xp));
+
+  if (xp->regs->trap != 0x300) {
+    return 0;
+  }
+  switch (instr >> 26) {
+  case 47:			/* STMW */
+  case 36:			/* STW */
+  case 37:			/* STWU */
+    return 1;
+  case 31:
+    switch ((instr >> 1) & 1023) {
+    case 151:			/* STWX */
+    case 183:			/* STWUX */
+      return 1;
+    default:
+      return 0;
+    }
+  default:
+    return 0;
+  }
+#endif
+}
+
+OSStatus
+handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
+{
+  BytePtr addr;
+  protected_area_ptr area;
+  protection_handler *handler;
+  extern Boolean touch_page(void *);
+  extern void touch_page_end(void);
+
+  if (info) {
+    addr = (BytePtr)(info->si_addr);
+  } else {
+    addr = (BytePtr) ((natural) (xpDAR(xp)));
+  }
+
+  if (addr && (addr == tcr->safe_ref_address)) {
+    adjust_exception_pc(xp,4);
+
+    xpGPR(xp,imm0) = 0;
+    return 0;
+  }
+
+  if (xpPC(xp) == (pc)touch_page) {
+    xpGPR(xp,imm0) = 0;
+    xpPC(xp) = (pc)touch_page_end;
+    return 0;
+  }
+
+
+  if (is_write_fault(xp,info)) {
+    area = find_protected_area(addr);
+    if (area != NULL) {
+      handler = protection_handlers[area->why];
+      return handler(xp, area, addr);
+    } else {
+      if ((addr >= readonly_area->low) &&
+	  (addr < readonly_area->active)) {
+        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
+                        page_size);
+	return 0;
+      }
+    }
+  }
+  if (old_valence == TCR_STATE_LISP) {
+    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
+  }
+  return -1;
+}
+
+
+
+
+
+OSStatus
+do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
+{
+#ifdef SUPPORT_PRAGMA_UNUSED
+#pragma unused(area,addr)
+#endif
+  reset_lisp_process(xp);
+  return -1;
+}
+
+extern area*
+allocate_vstack(natural useable);       /* This is in "pmcl-kernel.c" */
+
+extern area*
+allocate_tstack(natural useable);       /* This is in "pmcl-kernel.c" */
+
+#ifdef EXTEND_VSTACK
+Boolean
+catch_frame_p(lisp_frame *spPtr)
+{
+  catch_frame* catch = (catch_frame *) untag(lisp_global(CATCH_TOP));
+
+  for (; catch; catch = (catch_frame *) untag(catch->link)) {
+    if (spPtr == ((lisp_frame *) catch->csp)) {
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+Boolean
+unwind_protect_cleanup_frame_p(lisp_frame *spPtr)
+{
+  if ((spPtr->savevsp == (LispObj)NULL) ||  /* The frame to where the unwind-protect will return */
+      (((spPtr->backlink)->savevsp) == (LispObj)NULL)) {  /* The frame that returns to the kernel  from the cleanup form */
+    return true;
+  } else {
+    return false;
+  }
+}
+
+Boolean
+lexpr_entry_frame_p(lisp_frame *spPtr)
+{
+  LispObj savelr = spPtr->savelr;
+  LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN);
+  LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V);
+  LispObj ret1valn = (LispObj) lisp_global(RET1VALN);
+
+  return
+    (savelr == lexpr_return1v) ||
+    (savelr == lexpr_return) ||
+    ((savelr == ret1valn) &&
+     (((spPtr->backlink)->savelr) == lexpr_return));
+}
+
+Boolean
+lisp_frame_p(lisp_frame *spPtr)
+{
+  LispObj savefn;
+  /* We can't just look at the size of the stack frame under the EABI
+     calling sequence, but that's the first thing to check. */
+  if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) {
+    return false;
+  }
+  savefn = spPtr->savefn;
+  return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc);
+  
+}
+
+
+int ffcall_overflow_count = 0;
+
+/* Find a frame that is neither a catch frame nor one of the
+   lexpr_entry frames We don't check for non-lisp frames here because
+   we'll always stop before we get there due to a dummy lisp frame
+   pushed by .SPcallback that masks out the foreign frames.  The one
+   exception is that there is a non-lisp frame without a valid VSP
+   while in the process of ppc-ff-call. We recognize that because its
+   savelr is NIL.  If the saved VSP itself is 0 or the savevsp in the
+   next frame is 0, then we're executing an unwind-protect cleanup
+   form, and the top stack frame belongs to its (no longer extant)
+   catch frame.  */
+
+#ifdef EXTEND_VSTACK
+lisp_frame *
+find_non_catch_frame_from_xp (ExceptionInformation *xp)
+{
+  lisp_frame *spPtr = (lisp_frame *) xpGPR(xp, sp);
+  if ((((natural) spPtr) + sizeof(lisp_frame)) != ((natural) (spPtr->backlink))) {
+    ffcall_overflow_count++;          /* This is mostly so I can breakpoint here */
+  }
+  for (; !lisp_frame_p(spPtr)  || /* In the process of ppc-ff-call */
+         unwind_protect_cleanup_frame_p(spPtr) ||
+         catch_frame_p(spPtr) ||
+         lexpr_entry_frame_p(spPtr) ; ) {
+     spPtr = spPtr->backlink;
+     };
+  return spPtr;
+}
+#endif
+
#ifdef EXTEND_VSTACK
/* Return true iff any link of the dynamic-binding chain (DB_LINK)
   lies within the bounds of area "a". */
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *lo = (LispObj *) a->low;
  LispObj *hi = (LispObj *) a->high;
  LispObj *link;

  for (link = (LispObj *) lisp_global(DB_LINK);
       link;
       link = (LispObj *) *link) {
    if ((link >= lo) && (link < hi)) {
      return true;
    }
  }
  return false;
}
#endif
+
+
+
+
+/* Note: CURRENT_VS (CURRENT_TS) is always either the area containing
+  the current value of VSP (TSP) or an older area.  */
+
+OSStatus
+do_vsp_overflow (ExceptionInformation *xp, BytePtr addr)
+{
+  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
+  area *a = tcr->vs_area;
+  protected_area_ptr vsp_soft = a->softprot;
+  unprotect_area(vsp_soft);
+  signal_stack_soft_overflow(xp,vsp);
+  return 0;
+}
+
+
+OSStatus
+do_tsp_overflow (ExceptionInformation *xp, BytePtr addr)
+{
+  TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
+  area *a = tcr->ts_area;
+  protected_area_ptr tsp_soft = a->softprot;
+  unprotect_area(tsp_soft);
+  signal_stack_soft_overflow(xp,tsp);
+  return 0;
+}
+
+OSStatus
+do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
+{
+  /* Trying to write into a guard page on the vstack or tstack.
+     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
+     signal an error_stack_overflow condition.
+      */
+  lisp_protection_kind which = prot_area->why;
+  Boolean on_TSP = (which == kTSPsoftguard);
+
+  if (on_TSP) {
+    return do_tsp_overflow(xp, addr);
+   } else {
+    return do_vsp_overflow(xp, addr);
+   }
+}
+
/* Fallback for a write-protect fault in a protected area that has no
   specific handler; returns -1 so the caller treats the fault as
   unhandled. */
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
+
+
+/*
+  We have a couple of choices here.  We can simply unprotect the page
+  and let the store happen on return, or we can try to emulate writes
+  that we know will involve an intergenerational reference.  Both are
+  correct as far as EGC constraints go, but the latter approach is
+  probably more efficient.  (This only matters in the case where the
+  GC runs after this exception handler returns but before the write
+  actually happens.  If we didn't emulate node stores here, the EGC
  would scan the newly-written page, find nothing interesting, and
  run to completion.  This thread will try the write again after it
  resumes, the page'll be re-protected, and we'll have taken this
+  fault twice.  The whole scenario shouldn't happen very often, but
+  (having already taken a fault and committed to an mprotect syscall)
+  we might as well emulate stores involving intergenerational references,
+  since they're pretty easy to identify.
+
+  Note that cases involving two or more threads writing to the same
+  page (before either of them can run this handler) is benign: one
+  invocation of the handler will just unprotect an unprotected page in
+  that case.
+
+  If there are GCs (or any other suspensions of the thread between
+  the time that the write fault was detected and the time that the
+  exception lock is obtained) none of this stuff happens.
+*/
+
+/*
+  Return true (and emulate the instruction) iff:
  a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb"
+     instruction.
+  b) RS is a node register (>= fn)
+  c) RS is tagged as a cons or vector
+  d) RS is in some ephemeral generation.
+  This is slightly conservative, since RS may be no younger than the
+  EA being written to.
+*/
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  /* Only consider word-aligned effective addresses. */
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);
    
    /* The faulting instruction must be stwx (X-form) or stw (D-form). */
    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj 
        rs = RS_field(instr), 
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);
      
      /* RS must be a node register (>= fn) ... */
      if (rs >= fn) {
        /* ... holding a cons or misc (uvector) pointer ... */
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          /* ... that points into an ephemeral generation: above the
             tenured area but below the active dynamic area's high end. */
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            /* Emulate the store here, so the EGC sees the reference even
               if it runs before the faulting thread retries the write. */
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
+
+      
+
+
+
+
+
/* Handle SIGFPE raised by lisp code: clear the hardware FPSCR,
   re-enable FP exceptions, save the (masked) FPSCR image from the
   exception frame in the TCR, and report the offending FPU operation
   to lisp via handle_fpux_binop. */
OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  /* Keep everything but the reserved bits of the saved FPSCR. */
  tcr->lisp_fpscr.words.l =  xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}
+
+    
/* Statically assumed non-zero here; NOTE(review): nothing in this
   chunk ever clears it — confirm whether startup code probes for
   AltiVec and updates this. */
int 
altivec_present = 1;
+
+
+/* This only tries to implement the "optional" fsqrt and fsqrts
+   instructions, which were generally implemented on IBM hardware
+   but generally not available on Motorola/Freescale systems.
+*/		  
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  Clozure CL doesn't generate
     fsqrt. or fsqrts.
  */
  /* Match fsqrt/fsqrts: an FPU major opcode (double or single) whose
     extended-opcode field equals 22, ignoring the rc bit. */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) || 
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    /* Preserve the reserved FPSCR bits from the live FPU state. */
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}
+
+OSStatus
+PMCL_exception_handler(int xnum, 
+                       ExceptionInformation *xp, 
+                       TCR *tcr, 
+                       siginfo_t *info,
+                       int old_valence)
+{
+  OSStatus status = -1;
+  pc program_counter;
+  opcode instruction = 0;
+
+
+  program_counter = xpPC(xp);
+  
+  if ((xnum == SIGILL) | (xnum == SIGTRAP)) {
+    instruction = *program_counter;
+  }
+
+  if (instruction == ALLOC_TRAP_INSTRUCTION) {
+    status = handle_alloc_trap(xp, tcr);
+  } else if ((xnum == SIGSEGV) ||
+	     (xnum == SIGBUS)) {
+    status = handle_protection_violation(xp, info, tcr, old_valence);
+  } else if (xnum == SIGFPE) {
+    status = handle_sigfpe(xp, tcr);
+  } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) {
+    if (instruction == GC_TRAP_INSTRUCTION) {
+      status = handle_gc_trap(xp, tcr);
+    } else if (IS_UUO(instruction)) {
+      status = handle_uuo(xp, instruction, program_counter);
+    } else if (is_conditional_trap(instruction)) {
+      status = handle_trap(xp, instruction, program_counter, info);
+    } else {
+      status = handle_unimplemented_instruction(xp,instruction,tcr);
+    }
+  } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) {
+    tcr->interrupt_pending = 0;
+    callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0);
+    status = 0;
+  }
+
+  return status;
+}
+
/* Advance (or back up, for negative delta) the saved PC in the
   exception frame by "delta" bytes.  xpPC() is an instruction-word
   pointer, so the byte delta is converted to words with delta >> 2. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
+
+
+/* 
+  This wants to scan backwards until "where" points to an instruction
+   whose major opcode is either 63 (double-float) or 59 (single-float)
+*/
+
OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;   /* delta: bytes scanned backwards */
  
  while (i--) {
    instr = *--there;
    delta -= 4;
    /* Hitting the code vector's header means we scanned off the start
       of the function without finding an FPU op: give up. */
    if (codevec_hdr_p(instr)) {
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }
  
  err = handle_error(xp, errnum, rcontext, 0,  there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */
  
  /* Back the PC up to the FPU instruction found above, and clear the
     exception bits of the saved FPSCR. */
  adjust_exception_pc(xp, delta);
  xpFPSCR(xp)  &=  0x03fff;
  
  return err;

}
+
/* Dispatch a UUO (trap pseudo-instruction).  The minor opcode selects
   the operation; rb and errnum are decoded from fixed bit fields of
   the instruction.  On success (status == 0) the PC is advanced past
   the UUO unless the handled case set bump to 0. */
OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where) 
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  unsigned 
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;                 /* bytes to advance the PC on success */

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    /* Non-continuable error, or one of the thread-control service
       requests encoded as error numbers. */
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
	break;
      case error_interrupt:
	xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
	break;
      case error_suspend:
	xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
	break;
      case error_suspend_all:
	lisp_suspend_other_threads();
	break;
      case error_resume:
	xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
	break;
      case error_resume_all:
	lisp_resume_other_threads();
	break;
      case error_kill:
	xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
	break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
	status = handle_error(xp, errnum, rb, 0,  where);
	break;
      }
    }
    break;

  case UUO_INTCERR:
    /* Continuable error. */
    status = handle_error(xp, errnum, rb, 1,  where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
	 it's put a code vector in the xp's PC.  Don't advance the
	 PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    /* handle_fpux_binop adjusts the PC itself. */
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }
  
  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}
+
/* If lisp_function is a function object whose code vector contains the
   address "where", return the (tagged) code vector; otherwise return
   0. */
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    /* element count is in words; convert to bytes */
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) && 
	((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
+
+/* Callback to lisp to handle a trap. Need to translate the
+   PC (where) into one of two forms of pairs:
+
+   1. If PC is in fn or nfn's code vector, use the register number
+      of fn or nfn and the index into that function's code vector.
+   2. Otherwise use 0 and the pc itself
+*/
+void
+callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
+                   natural arg1, natural arg2, natural arg3)
+{
+  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
+  unsigned register_number = fn;
+  natural index = (natural)where;
+
+  if (code_vector == 0) {
+    register_number = nfn;
+    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
+  }
+  if (code_vector == 0)
+    register_number = 0;
+  else
+    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
+  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
+}
+
/* Call back into lisp through the macptr "callback_macptr" with five
   arguments.  Before the call, the active control-stack pointer and
   the GC'd register images (allocptr, allocbase, vsp, tsp) are copied
   from the exception frame into the TCR, where .SPcallback expects
   them; after the call, allocptr/allocbase are copied back into the
   exception frame.  The global exception lock is released for the
   duration of the callback. */
void
callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp,
                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
{
  natural  callback_ptr;
  area *a;

  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* Put the active stack pointer where .SPcallback expects it */
  a = tcr->cs_area;
  a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp));

  /* Copy globals from the exception frame to tcr */
  tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase));
  tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp));
  tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp));



  /* Call back.
     Lisp will handle trampolining through some code that
     will push lr/fn & pc/nfn stack frames for backtrace.
  */
  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock for callback\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
  ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x acquired exception lock after callback\n", tcr);
#endif



  /* Copy GC registers back into exception frame */
  xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase);
  xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr);
}
+
+area *
+allocate_no_stack (natural size)
+{
+#ifdef SUPPORT_PRAGMA_UNUSED
+#pragma unused(size)
+#endif
+
+  return (area *) NULL;
+}
+
+
+
+
+
+
+/* callback to (symbol-value cmain) if it is a macptr, 
+   otherwise report cause and function name to console.
+   Returns noErr if exception handled OK */
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj   cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed 
     control stack overflow check.  In that case:
     
     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack. If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.
     
     b) If we're already in "yellow zone" mode, then:
     
     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible. If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.
     
     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.
     
     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
     */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area 
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;
      
    natural 
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP  < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
	   and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
	    (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
	  /* Re-arm the value stack's soft guard page if necessary. */
	  protected_area_ptr vs_soft = VS_area->softprot;
	  if (vs_soft->nprot == 0) {
	    protect_area(vs_soft);
	  }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
	/* Enter yellow zone mode and tell lisp about it. */
	tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);	  
	signal_stack_soft_overflow(xp, sp);
      }
    }
    
    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      /* Breakpoint with a message string in r3; resume at the saved LR. */
      char *message =  (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
	message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) && 
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    /* Otherwise, report the trap to lisp through the CMAIN callback,
       if one is installed. */
    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(dbgout, "About to do trap callback in 0x%x\n",tcr);
#endif
      callback_for_trap(cmain, xp,  where, (natural) the_trap,  0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}
+
+
+/* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern.
+   Stop if subtag_code_vector is encountered. */
+unsigned
+scan_for_instr( unsigned target, unsigned mask, pc where )
+{
+  int i = TRAP_LOOKUP_TRIES;
+
+  while( i-- ) {
+    unsigned instr = *(--where);
+    if ( codevec_hdr_p(instr) ) {
+      return 0;
+    } else if ( match_instr(instr, mask, target) ) {
+      return instr;
+    }
+  }
+  return 0;
+}
+
+
+void non_fatal_error( char *msg )
+{
+  fprintf( dbgout, "Non-fatal error: %s.\n", msg );
+  fflush( dbgout );
+}
+
+/* The main opcode.  */
+
/* Classify a trap instruction: returns 1 if the trap is conditional
   (or is a lisp breakpoint, which lisp handles itself), 0 if it's
   unconditional or unrecognized — in which case the debugger gets
   it. */
int 
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  /* Lisp breakpoints are routed to handle_trap, not the debugger. */
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;			/* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;			/* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) || 
	((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;			/* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;			/* must be conditional */
  }
  return 0;			/* Not "tw/td" or "twi/tdi".  Let
                                   debugger have it */
}
+
+OSStatus
+handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where)
+{
+  LispObj   errdisp = nrs_ERRDISP.vcell;
+
+  if ((fulltag_of(errdisp) == fulltag_misc) &&
+      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
+    /* errdisp is a macptr, we can call back to lisp */
+    callback_for_trap(errdisp, xp, where, errnum, rb, continuable);
+    return(0);
+    }
+
+  return(-1);
+}
+	       
+
+/* 
+   Current thread has all signals masked.  Before unmasking them,
+   make it appear that the current thread has been suspended.
+   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
+*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  /* Record the exception context and mark the thread as waiting for
     the exception lock BEFORE unblocking signals, so that a GC in
     another thread treats this thread as suspended. */
  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  ALLOW_EXCEPTIONS(context);
  /* Caller restores this valence via exit_signal_handler. */
  return old_valence;
}  
+
/* Acquire the global exception lock and push "xf" onto the TCR's
   xframe list so the GC can find this exception context; the thread
   then presents itself as "foreign" until the handler finishes. */
void
wait_for_exception_lock_in_handler(TCR *tcr, 
				   ExceptionInformation *context,
				   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(dbgout, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
+
/* Undo wait_for_exception_lock_in_handler: pop the top xframe back
   into pending_exception_context, mark the thread as returning from an
   exception, and release the global exception lock. */
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(dbgout, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
+
+/* 
+   If an interrupt is pending on exception exit, try to ensure
+   that the thread sees it as soon as it's able to run.
+*/
+void
+raise_pending_interrupt(TCR *tcr)
+{
+  if (TCR_INTERRUPT_LEVEL(tcr) > 0) {
+    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
+  }
+}
+
/* Leave the signal handler: block all signals, restore the thread's
   pre-exception valence, and clear its pending exception context. */
void
exit_signal_handler(TCR *tcr, int old_valence)
{
  sigset_t mask;
  sigfillset(&mask);
  
  pthread_sigmask(SIG_SETMASK,&mask, NULL);
  tcr->valence = old_valence;
  tcr->pending_exception_context = NULL;
}
+
+
/* Common signal-handler body: serializes with the GC via the global
   exception lock, dispatches to PMCL_exception_handler, and enters the
   kernel debugger if the exception wasn't handled.  When Mach
   exception handling is in use, tcr and old_valence come from the
   caller; otherwise they're computed here. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

  if (!use_mach_exception_handling) {
    
    tcr = (TCR *) get_interrupt_tcr(false);
  
    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */
    
    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  /* A suspend request arrived while we couldn't honor it; do so now. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }

  
  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}
+
+/*
+  If it looks like we're in the middle of an atomic operation, make
+  it seem as if that operation is either complete or hasn't started
+  yet.
+
+  The cases handled include:
+
+  a) storing into a newly-allocated lisp frame on the stack.
+  b) marking a newly-allocated TSP frame as containing "raw" data.
+  c) consing: the GC has its own ideas about how this should be
+     handled, but other callers would be best advised to back
+     up or move forward, according to whether we're in the middle
+     of allocating a cons cell or allocating a uvector.
+  d) a STMW to the vsp
+  e) EGC write-barrier subprims.
+*/
+
+extern opcode
+  egc_write_barrier_start,
+  egc_write_barrier_end, 
+  egc_store_node_conditional, 
+  egc_store_node_conditional_test,
+  egc_set_hash_key,
+  egc_gvset,
+  egc_rplaca,
+  egc_rplacd,
+  egc_set_hash_key_conditional,
+  egc_set_hash_key_conditional_test;
+
+
+extern opcode ffcall_return_window, ffcall_return_window_end;
+
/* Called when another thread has been suspended (for GC or to deliver
   an interrupt): if that thread was in the middle of one of the atomic
   sequences listed in the comment block above (EGC write barrier,
   lisp-frame initialization, TSP-frame marking, consing, catch-frame
   initialization, stmw to the vsp), adjust its state so the operation
   appears either fully complete or not yet started. */
void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter;
  lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp));
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int allocptr_tag = fulltag_of(cur_allocptr);
  


  /* Case (e): inside one of the EGC write-barrier subprims.  Finish
     the store (if it hadn't happened yet), do any needed memoization,
     and return to the subprim's caller. */
  if ((program_counter < &egc_write_barrier_end) && 
      (program_counter >= &egc_write_barrier_start)) {
    LispObj *ea = 0, val = 0, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      if ((program_counter < &egc_set_hash_key_conditional_test) ||
	  ((program_counter == &egc_set_hash_key_conditional_test) &&
	   (! (xpCCR(xp) & 0x20000000)))) {
	/* The conditional store either hasn't been attempted yet, or
	   has failed; nothing to do. */
	return;
      }
      need_store = false;
      root = xpGPR(xp,arg_x);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_store_node_conditional) {
      if ((program_counter < &egc_store_node_conditional_test) ||
	  ((program_counter == &egc_store_node_conditional_test) &&
	   (! (xpCCR(xp) & 0x20000000)))) {
	/* The conditional store either hasn't been attempted yet, or
	   has failed.  No need to adjust the PC, or do memoization. */
	return;
      }
      ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4));
      xpGPR(xp,arg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
      root = xpGPR(xp,arg_x);
      val = xpGPR(xp,arg_z);
      ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
      ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset);
      val = xpGPR(xp,arg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,arg_y));
      val = xpGPR(xp,arg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,arg_y)))+1;
      val = xpGPR(xp,arg_z);
    }
    if (need_store) {
      *ea = val;
    }
    if (need_check_memo) {
      /* Memoize an intergenerational reference (older cell, younger
         value) in the refbits, and the root object too if required. */
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    /* Resume at the subprim's return address. */
    set_xpPC(xp, xpLR(xp));
    return;
  }


  /* Case (b): about to mark a newly-allocated TSP frame as "raw";
     complete that store and step past the instruction. */
  if (instr == MARK_TSP_FRAME_INSTRUCTION) {
    LispObj tsp_val = xpGPR(xp,tsp);
    
    ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val;
    adjust_exception_pc(xp, 4);
    return;
  }
  
  /* Case (a): storing into a newly-allocated lisp frame atop the
     stack; zero the slots that haven't been initialized yet so the GC
     never sees garbage in them. */
  if (frame->backlink == (frame+1)) {
    if (
#ifdef PPC64
        (major_opcode_p(instr, major_opcode_DS_STORE64)) &&
        (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) &&
#else
        (major_opcode_p(instr, major_opcode_STW)) && 
#endif
	(RA_field(instr) == sp) &&
	/* There are a few places in the runtime that store into
	   a previously-allocated frame atop the stack when
	   throwing values around.  We only care about the case
	   where the frame was newly allocated, in which case
	   there must have been a CREATE_LISP_FRAME_INSTRUCTION
	   a few instructions before the current program counter.
	   (The whole point here is that a newly allocated frame
	   might contain random values that we don't want the
	   GC to see; a previously allocated frame should already
	   be completely initialized.)
	*/
	((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) ||
	 (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) ||
	 (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION)))  {
#ifdef PPC64
      int disp = DS_field(instr);
#else      
      int disp = D_field(instr);
#endif


      /* Slots at or above the one being stored haven't been written
         yet; clear them (savevsp, then savelr, then savefn). */
      if (disp < (4*node_size)) {
#if 0
        fprintf(dbgout, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp);
#endif
	frame->savevsp = 0;
	if (disp < (3*node_size)) {
	  frame->savelr = 0;
	  if (disp == node_size) {
	    frame->savefn = 0;
	  }
	}
      }
      return;
    }
  }

  /* Case (c): consing.  A non-fixnum tag on allocptr means an
     allocation is in progress. */
  if (allocptr_tag != tag_fixnum) {
    signed_natural disp = allocptr_displacement(xp);

    if (disp) {
      /* Being architecturally "at" the alloc trap doesn't tell
         us much (in particular, it doesn't tell us whether
         or not the thread has committed to taking the trap
         and is waiting for the exception lock (or waiting
         for the Mach exception thread to tell it how bad
         things are) or is about to execute a conditional
         trap.
         Regardless of which case applies, we want the
         other thread to take (or finish taking) the
         trap, and we don't want it to consider its
         current allocptr to be valid.
         The difference between this case (suspend other
         thread for GC) and the previous case (suspend
         current thread for interrupt) is solely a
         matter of what happens after we leave this
         function: some non-current thread will stay
         suspended until the GC finishes, then take
         (or start processing) the alloc trap.   The
         current thread will go off and do PROCESS-INTERRUPT
         or something, and may return from the interrupt
         and need to finish the allocation that got interrupted.
      */

      if (alloc_disp) {
        *alloc_disp = disp;
        xpGPR(xp,allocptr) += disp;
        /* Leave the PC at the alloc trap.  When the interrupt
           handler returns, it'll decrement allocptr by disp
           and the trap may or may not be taken.
        */
      } else {
        update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp));
        xpGPR(xp, allocbase) = VOID_ALLOCPTR;
        xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp;
      }
    } else {
#ifdef DEBUG
      fprintf(dbgout, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr));
#endif
      /* If we're already past the alloc_trap, finish allocating
         the object. */
      if (allocptr_tag == fulltag_cons) {
        finish_allocating_cons(xp);
#ifdef DEBUG
          fprintf(dbgout, "finish allocating cons in TCR = #x%x\n",
                  tcr);
#endif
      } else {
        if (allocptr_tag == fulltag_misc) {
#ifdef DEBUG
          fprintf(dbgout, "finish allocating uvector in TCR = #x%x\n",
                  tcr);
#endif
          finish_allocating_uvector(xp);
        } else {
          Bug(xp, "what's being allocated here ?");
        }
      }
      /* Whatever we finished allocating, reset allocptr/allocbase to
         VOID_ALLOCPTR */
      xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR;
    }
    return;
  }

  /* Catch-frame initialization: zero the not-yet-written slots of the
     new catch frame so the GC sees no garbage. */
  if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) {
    LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs)));
    int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift;
#if 0
        fprintf(dbgout, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx);
#endif

    for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) {
      deref(frame,idx) = 0;
    }
    ((LispObj *)(xpGPR(xp, tsp)))[1] = 0;
    return;
  }

#ifndef PC64
  /* Case (d): an stmw to the vsp; emulate the multi-register store and
     step past it. */
  if ((major_opcode_p(instr, 47)) && /* 47 = stmw */
      (RA_field(instr) == vsp)) {
    int r;
    LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp));
    
    for (r = RS_field(instr); r <= 31; r++) {
      *vspptr++ = xpGPR(xp,r);
    }
    adjust_exception_pc(xp, 4);
  }
#endif
}
+
+void
+interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
+{
+  TCR *tcr = get_interrupt_tcr(false);
+  if (tcr) {
+    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
+      tcr->interrupt_pending = 1 << fixnumshift;
+    } else {
+      LispObj cmain = nrs_CMAIN.vcell;
+
+      if ((fulltag_of(cmain) == fulltag_misc) &&
+	  (header_subtag(header_of(cmain)) == subtag_macptr)) {
+	/* 
+	   This thread can (allegedly) take an interrupt now.
+	   It's tricky to do that if we're executing
+	   foreign code (especially Linuxthreads code, much
+	   of which isn't reentrant.)
+           If we're unwinding the stack, we also want to defer
+           the interrupt.
+	*/
+	if ((tcr->valence != TCR_STATE_LISP) ||
+            (tcr->unwinding != 0)) {
+	  TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
+	} else {
+	  xframe_list xframe_link;
+	  int old_valence;
+          signed_natural disp=0;
+	  
+	  pc_luser_xp(context, tcr, &disp);
+	  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
+	  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
+#ifdef DEBUG
+          fprintf(dbgout, "[0x%x acquired exception lock for interrupt]\n",tcr);
+#endif
+	  PMCL_exception_handler(signum, context, tcr, info, old_valence);
+          if (disp) {
+            xpGPR(context,allocptr) -= disp;
+          }
+	  unlock_exception_lock_in_handler(tcr);
+#ifdef DEBUG
+          fprintf(dbgout, "[0x%x released exception lock for interrupt]\n",tcr);
+#endif
+	  exit_signal_handler(tcr, old_valence);
+	}
+      }
+    }
+  }
+#ifdef DARWIN
+    DarwinSigReturn(context);
+#endif
+}
+
+
+
+void
+install_signal_handler(int signo, void *handler)
+{
+  struct sigaction sa;
+  
+  sa.sa_sigaction = (void *)handler;
+  sigfillset(&sa.sa_mask);
+  sa.sa_flags = 
+    0 /* SA_RESTART */
+    | SA_SIGINFO
+#ifdef DARWIN
+#ifdef PPC64
+    | SA_64REGSET
+#endif
+#endif
+    ;
+
+  sigaction(signo, &sa, NULL);
+}
+
+void
+install_pmcl_exception_handlers()
+{
+#ifdef DARWIN
+  extern Boolean use_mach_exception_handling;
+#endif
+
+  Boolean install_signal_handlers_for_exceptions =
+#ifdef DARWIN
+    !use_mach_exception_handling
+#else
+    true
+#endif
+    ;
+  if (install_signal_handlers_for_exceptions) {
+    extern int no_sigtrap;
+    install_signal_handler(SIGILL, (void *)signal_handler);
+    if (no_sigtrap != 1) {
+      install_signal_handler(SIGTRAP, (void *)signal_handler);
+    }
+    install_signal_handler(SIGBUS,  (void *)signal_handler);
+    install_signal_handler(SIGSEGV, (void *)signal_handler);
+    install_signal_handler(SIGFPE, (void *)signal_handler);
+  }
+  
+  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
+			 (void *)interrupt_handler);
+  signal(SIGPIPE, SIG_IGN);
+}
+
+void
+thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp)
+{
+  TCR *tcr = get_tcr(false);
+  area *a;
+  sigset_t mask;
+  
+  sigemptyset(&mask);
+
+  if (tcr) {
+    tcr->valence = TCR_STATE_FOREIGN;
+    a = tcr->vs_area;
+    if (a) {
+      a->active = a->high;
+    }
+    a = tcr->ts_area;
+    if (a) {
+      a->active = a->high;
+    }
+    a = tcr->cs_area;
+    if (a) {
+      a->active = a->high;
+    }
+  }
+  
+  pthread_sigmask(SIG_SETMASK,&mask,NULL);
+  pthread_exit(NULL);
+}
+
+void
+thread_signal_setup()
+{
+  thread_suspend_signal = SIG_SUSPEND_THREAD;
+  thread_kill_signal = SIG_KILL_THREAD;
+
+  install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler);
+  install_signal_handler(thread_kill_signal, (void *)thread_kill_handler);
+}
+
+
+
+void
+unprotect_all_areas()
+{
+  protected_area_ptr p;
+
+  for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) {
+    unprotect_area(p);
+  }
+}
+
+/*
+  A binding subprim has just done "twlle limit_regno,idx_regno" and
+  the trap's been taken.  Extend the tcr's tlb so that the index will
+  be in bounds and the new limit will be on a page boundary, filling
+  in the new page(s) with 'no_thread_local_binding_marker'.  Update
+  the tcr fields and the registers in the xp and return true if this
+  all works, false otherwise.
+
+  Note that the tlb was allocated via malloc, so realloc can do some
+  of the hard work.
+*/
+Boolean
+extend_tcr_tlb(TCR *tcr, 
+               ExceptionInformation *xp, 
+               unsigned limit_regno,
+               unsigned idx_regno)
+{
+  unsigned
+    index = (unsigned) (xpGPR(xp,idx_regno)),
+    old_limit = tcr->tlb_limit,
+    new_limit = align_to_power_of_2(index+1,12),
+    new_bytes = new_limit-old_limit;
+  LispObj 
+    *old_tlb = tcr->tlb_pointer,
+    *new_tlb = realloc(old_tlb, new_limit),
+    *work;
+
+  if (new_tlb == NULL) {
+    return false;
+  }
+  
+  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
+
+  while (new_bytes) {
+    *work++ = no_thread_local_binding_marker;
+    new_bytes -= sizeof(LispObj);
+  }
+  tcr->tlb_pointer = new_tlb;
+  tcr->tlb_limit = new_limit;
+  xpGPR(xp, limit_regno) = new_limit;
+  return true;
+}
+
+
+
+void
+exception_init()
+{
+  install_pmcl_exception_handlers();
+}
+
+
+
+
+
+#ifdef DARWIN
+
+
+#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
+#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
+
+
+
+#define LISP_EXCEPTIONS_HANDLED_MASK \
+ (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
+
+/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
+#define NUM_LISP_EXCEPTIONS_HANDLED 4 
+
+typedef struct {
+  int foreign_exception_port_count;
+  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
+  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
+  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
+  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
+} MACH_foreign_exception_state;
+
+
+
+
+/*
+  Mach's exception mechanism works a little better than its signal
+  mechanism (and, not incidentally, it gets along with GDB a lot
+  better.)
+
+  Initially, we install an exception handler to handle each native
+  thread's exceptions.  This process involves creating a distinguished
+  thread which listens for kernel exception messages on a set of
+  0 or more thread exception ports.  As threads are created, they're
+  added to that port set; a thread's exception port is destroyed
+  (and therefore removed from the port set) when the thread exits.
+
+  A few exceptions can be handled directly in the handler thread;
+  others require that we resume the user thread (and that the
+  exception thread resumes listening for exceptions.)  The user
+  thread might eventually want to return to the original context
+  (possibly modified somewhat.)
+
+  As it turns out, the simplest way to force the faulting user
+  thread to handle its own exceptions is to do pretty much what
+  signal() does: the exception handling thread sets up a sigcontext
+  on the user thread's stack and forces the user thread to resume
+  execution as if a signal handler had been called with that
+  context as an argument.  We can use a distinguished UUO at a
+  distinguished address to do something like sigreturn(); that'll
+  have the effect of resuming the user thread's execution in
+  the (pseudo-) signal context.
+
+  Since:
+    a) we have miles of code in C and in Lisp that knows how to
+    deal with Linux sigcontexts
+    b) Linux sigcontexts contain a little more useful information
+    (the DAR, DSISR, etc.) than their Darwin counterparts
+    c) we have to create a sigcontext ourselves when calling out
+    to the user thread: we aren't really generating a signal, just
+    leveraging existing signal-handling code.
+
+  we create a Linux sigcontext struct.
+
+  Simple ?  Hopefully from the outside it is ...
+
+  We want the process of passing a thread's own context to it to
+  appear to be atomic: in particular, we don't want the GC to suspend
+  a thread that's had an exception but has not yet had its user-level
+  exception handler called, and we don't want the thread's exception
+  context to be modified by a GC while the Mach handler thread is
+  copying it around.  On Linux (and on Jaguar), we avoid this issue
+  because (a) the kernel sets up the user-level signal handler and
+  (b) the signal handler blocks signals (including the signal used
+  by the GC to suspend threads) until tcr->xframe is set up.
+
+  The GC and the Mach server thread therefore contend for the lock
+  "mach_exception_lock".  The Mach server thread holds the lock
+  when copying exception information between the kernel and the
+  user thread; the GC holds this lock during most of its execution
+  (delaying exception processing until it can be done without
+  GC interference.)
+
+*/
+
+#ifdef PPC64
+#define	C_REDZONE_LEN		320
+#define	C_STK_ALIGN             32
+#else
+#define	C_REDZONE_LEN		224
+#define	C_STK_ALIGN		16
+#endif
+#define C_PARAMSAVE_LEN		64
+#define	C_LINKAGE_LEN		48
+
+#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
+
+void
+fatal_mach_error(char *format, ...);
+
+#define MACH_CHECK_ERROR(context,x) if (x != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);}
+
+
+void
+restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
+{
+  kern_return_t kret;
+  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
+
+  /* Set the thread's FP state from the pseudosigcontext */
+  kret = thread_set_state(thread,
+                          PPC_FLOAT_STATE,
+                          (thread_state_t)&(mc->__fs),
+                          PPC_FLOAT_STATE_COUNT);
+
+  MACH_CHECK_ERROR("setting thread FP state", kret);
+
+  /* The thread'll be as good as new ... */
+#ifdef PPC64
+  kret = thread_set_state(thread,
+                          PPC_THREAD_STATE64,
+                          (thread_state_t)&(mc->__ss),
+                          PPC_THREAD_STATE64_COUNT);
+#else
+  kret = thread_set_state(thread, 
+                          MACHINE_THREAD_STATE,
+                          (thread_state_t)&(mc->__ss),
+                          MACHINE_THREAD_STATE_COUNT);
+#endif
+  MACH_CHECK_ERROR("setting thread state", kret);
+}  
+
+/* This code runs in the exception handling thread, in response
+   to an attempt to execute the UUO at "pseudo_sigreturn" (e.g.,
+   in response to a call to pseudo_sigreturn() from the specified
+   user thread.)
+   Find that context (the user thread's R3 points to it), then
+   use that context to set the user thread's state.  When this
+   function's caller returns, the Mach kernel will resume the
+   user thread.
+*/
+
+kern_return_t
+do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
+{
+  ExceptionInformation *xp;
+
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
+#endif
+  xp = tcr->pending_exception_context;
+  if (xp) {
+    tcr->pending_exception_context = NULL;
+    tcr->valence = TCR_STATE_LISP;
+    restore_mach_thread_state(thread, xp);
+    raise_pending_interrupt(tcr);
+  } else {
+    Bug(NULL, "no xp here!\n");
+  }
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
+#endif
+  return KERN_SUCCESS;
+}  
+
+ExceptionInformation *
+create_thread_context_frame(mach_port_t thread, 
+			    natural *new_stack_top)
+{
+#ifdef PPC64
+  ppc_thread_state64_t ts;
+#else
+  ppc_thread_state_t ts;
+#endif
+  mach_msg_type_number_t thread_state_count;
+  kern_return_t result;
+  ExceptionInformation *pseudosigcontext;
+  MCONTEXT_T mc;
+  natural stackp, backlink;
+
+#ifdef PPC64
+  thread_state_count = PPC_THREAD_STATE64_COUNT;
+  result = thread_get_state(thread,
+                            PPC_THREAD_STATE64,
+                            (thread_state_t)&ts,
+                            &thread_state_count);
+#else
+  thread_state_count = MACHINE_THREAD_STATE_COUNT;
+  result = thread_get_state(thread, 
+                            PPC_THREAD_STATE,	/* GPRs, some SPRs  */
+                            (thread_state_t)&ts,
+                            &thread_state_count);
+#endif
+  
+  if (result != KERN_SUCCESS) {
+    get_tcr(true);
+    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
+  }
+  stackp = ts.__r1;
+  backlink = stackp;
+  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
+  stackp -= sizeof(*pseudosigcontext);
+  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);
+
+  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
+  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
+  memmove(&(mc->__ss),&ts,sizeof(ts));
+
+  thread_state_count = PPC_FLOAT_STATE_COUNT;
+  thread_get_state(thread,
+		   PPC_FLOAT_STATE,
+		   (thread_state_t)&(mc->__fs),
+		   &thread_state_count);
+
+
+#ifdef PPC64
+  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
+#else
+  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
+#endif
+  thread_get_state(thread,
+#ifdef PPC64
+                   PPC_EXCEPTION_STATE64,
+#else
+		   PPC_EXCEPTION_STATE,
+#endif
+		   (thread_state_t)&(mc->__es),
+		   &thread_state_count);
+
+
+  UC_MCONTEXT(pseudosigcontext) = mc;
+  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
+  stackp -= C_LINKAGE_LEN;
+  *(natural *)ptr_from_lispobj(stackp) = backlink;
+  if (new_stack_top) {
+    *new_stack_top = stackp;
+  }
+  return pseudosigcontext;
+}
+
+/*
+  This code sets up the user thread so that it executes a "pseudo-signal
+  handler" function when it resumes.  Create a linux sigcontext struct
+  on the thread's stack and pass it as an argument to the pseudo-signal
+  handler.
+
+  Things are set up so that the handler "returns to" pseudo_sigreturn(),
+  which will restore the thread's context.
+
+  If the handler invokes code that throws (or otherwise never sigreturn()'s
+  to the context), that's fine.
+
+  Actually, check that: throw (and variants) may need to be careful and
+  pop the tcr's xframe list until it's younger than any frame being
+  entered.
+*/
+
+int
+setup_signal_frame(mach_port_t thread,
+		   void *handler_address,
+		   int signum,
+                   int code,
+		   TCR *tcr)
+{
+#ifdef PPC64
+  ppc_thread_state64_t ts;
+#else
+  ppc_thread_state_t ts;
+#endif
+  ExceptionInformation *pseudosigcontext;
+  int old_valence = tcr->valence;
+  natural stackp;
+
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
+#endif
+  pseudosigcontext = create_thread_context_frame(thread, &stackp);
+  pseudosigcontext->uc_onstack = 0;
+  pseudosigcontext->uc_sigmask = (sigset_t) 0;
+  tcr->pending_exception_context = pseudosigcontext;
+  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
+  
+
+  /* 
+     It seems like we've created a sigcontext on the thread's
+     stack.  Set things up so that we call the handler (with appropriate
+     args) when the thread's resumed.
+  */
+
+  ts.__srr0 = (natural) handler_address;
+  ts.__srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
+  ts.__r1 = stackp;
+  ts.__r3 = signum;
+  ts.__r4 = (natural)pseudosigcontext;
+  ts.__r5 = (natural)tcr;
+  ts.__r6 = (natural)old_valence;
+  ts.__lr = (natural)pseudo_sigreturn;
+
+
+#ifdef PPC64
+  ts.__r13 = xpGPR(pseudosigcontext,13);
+  thread_set_state(thread,
+                   PPC_THREAD_STATE64,
+                   (thread_state_t)&ts,
+                   PPC_THREAD_STATE64_COUNT);
+#else
+  thread_set_state(thread, 
+		   MACHINE_THREAD_STATE,
+		   (thread_state_t)&ts,
+		   MACHINE_THREAD_STATE_COUNT);
+#endif
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
+#endif
+  return 0;
+}
+
+
+void
+pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
+{
+  signal_handler(signum, NULL, context, tcr, old_valence);
+} 
+
+
+int
+thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
+{
+#ifdef PPC64
+  ppc_thread_state64_t ts;
+#else
+  ppc_thread_state_t ts;
+#endif
+  mach_msg_type_number_t thread_state_count;
+
+#ifdef PPC64
+  thread_state_count = PPC_THREAD_STATE64_COUNT;
+#else
+  thread_state_count = PPC_THREAD_STATE_COUNT;
+#endif
+  thread_get_state(thread, 
+#ifdef PPC64
+		   PPC_THREAD_STATE64,	/* GPRs, some SPRs  */
+#else
+		   PPC_THREAD_STATE,	/* GPRs, some SPRs  */
+#endif
+		   (thread_state_t)&ts,
+		   &thread_state_count);
+  if (enabled) {
+    ts.__srr1 |= MSR_FE0_FE1_MASK;
+  } else {
+    ts.__srr1 &= ~MSR_FE0_FE1_MASK;
+  }
+  /* 
+     Hack-o-rama warning (isn't it about time for such a warning?):
+     pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
+     Our handler for lisp's use of pthread_kill() pushes a phony
+     lisp frame on the stack and forces the context to resume at
+     the UUO in enable_fp_exceptions(); the "saveLR" field of that
+     lisp frame contains the -real- address that process_interrupt
+     should have returned to, and the fact that it's in a lisp
+     frame should convince the GC to notice that address if it
+     runs in the tiny time window between returning from our
+     interrupt handler and ... here.
+     If the top frame on the stack is a lisp frame, discard it
+     and set ts.srr0 to the saveLR field in that frame.  Otherwise,
+     just adjust ts.srr0 to skip over the UUO.
+  */
+  {
+    lisp_frame *tos = (lisp_frame *)ts.__r1,
+      *next_frame = tos->backlink;
+    
+    if (tos == (next_frame -1)) {
+      ts.__srr0 = tos->savelr;
+      ts.__r1 = (LispObj) next_frame;
+    } else {
+      ts.__srr0 += 4;
+    }
+  }
+  thread_set_state(thread, 
+#ifdef PPC64
+		   PPC_THREAD_STATE64,	/* GPRs, some SPRs  */
+#else
+		   PPC_THREAD_STATE,	/* GPRs, some SPRs  */
+#endif
+		   (thread_state_t)&ts,
+#ifdef PPC64
+                   PPC_THREAD_STATE64_COUNT
+#else
+		   PPC_THREAD_STATE_COUNT
+#endif
+                   );
+
+  return 0;
+}
+
+/*
+  This function runs in the exception handling thread.  It's
+  called (by this precise name) from the library function "exc_server()"
+  when the thread's exception ports are set up.  (exc_server() is called
+  via mach_msg_server(), which is a function that waits for and dispatches
+  on exception messages from the Mach kernel.)
+
+  This checks to see if the exception was caused by a pseudo_sigreturn()
+  UUO; if so, it arranges for the thread to have its state restored
+  from the specified context.
+
+  Otherwise, it tries to map the exception to a signal number and
+  arranges that the thread run a "pseudo signal handler" to handle
+  the exception.
+
+  Some exceptions could and should be handled here directly.
+*/
+
+kern_return_t
+catch_exception_raise(mach_port_t exception_port,
+		      mach_port_t thread,
+		      mach_port_t task, 
+		      exception_type_t exception,
+		      exception_data_t code_vector,
+		      mach_msg_type_number_t code_count)
+{
+  int signum = 0, code = *code_vector, code1;
+  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
+  kern_return_t kret;
+
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
+#endif
+
+  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
+    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
+  } 
+  if ((exception == EXC_BAD_INSTRUCTION) &&
+      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
+      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
+       (code1 == (int)enable_fp_exceptions) ||
+       (code1 == (int)disable_fp_exceptions))) {
+    if (code1 == (int)pseudo_sigreturn) {
+      kret = do_pseudo_sigreturn(thread, tcr);
+#if 0
+      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
+#endif
+        
+    } else if (code1 == (int)enable_fp_exceptions) {
+      kret = thread_set_fp_exceptions_enabled(thread, true);
+    } else kret =  thread_set_fp_exceptions_enabled(thread, false);
+  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
+    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
+    kret = 17;
+  } else {
+    switch (exception) {
+    case EXC_BAD_ACCESS:
+      signum = SIGSEGV;
+      break;
+        
+    case EXC_BAD_INSTRUCTION:
+      signum = SIGILL;
+      break;
+      
+    case EXC_SOFTWARE:
+      if (code == EXC_PPC_TRAP) {
+        signum = SIGTRAP;
+      }
+      break;
+      
+    case EXC_ARITHMETIC:
+      signum = SIGFPE;
+      break;
+
+    default:
+      break;
+    }
+    if (signum) {
+      kret = setup_signal_frame(thread,
+                                (void *)pseudo_signal_handler,
+                                signum,
+                                code,
+                                tcr);
+#if 0
+      fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
+#endif
+
+    } else {
+      kret = 17;
+    }
+  }
+
+  return kret;
+}
+
+
+
+typedef struct {
+  mach_msg_header_t Head;
+  /* start of the kernel processed data */
+  mach_msg_body_t msgh_body;
+  mach_msg_port_descriptor_t thread;
+  mach_msg_port_descriptor_t task;
+  /* end of the kernel processed data */
+  NDR_record_t NDR;
+  exception_type_t exception;
+  mach_msg_type_number_t codeCnt;
+  integer_t code[2];
+  mach_msg_trailer_t trailer;
+} exceptionRequest;
+
+
+boolean_t
+openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
+{
+  static NDR_record_t _NDR = {0};
+  kern_return_t handled;
+  mig_reply_error_t *reply = (mig_reply_error_t *) out;
+  exceptionRequest *req = (exceptionRequest *) in;
+
+  reply->NDR = _NDR;
+
+  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
+  out->msgh_remote_port = in->msgh_remote_port;
+  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
+  out->msgh_local_port = MACH_PORT_NULL;
+  out->msgh_id = in->msgh_id+100;
+
+  /* Could handle other exception flavors in the range 2401-2403 */
+
+
+  if (in->msgh_id != 2401) {
+    reply->RetCode = MIG_BAD_ID;
+    return FALSE;
+  }
+  handled = catch_exception_raise(req->Head.msgh_local_port,
+                                  req->thread.name,
+                                  req->task.name,
+                                  req->exception,
+                                  req->code,
+                                  req->codeCnt);
+  reply->RetCode = handled;
+  return TRUE;
+}
+
+/*
+  The initial function for an exception-handling thread.
+*/
+
+void *
+exception_handler_proc(void *arg)
+{
+  extern boolean_t exc_server();
+  mach_port_t p = TCR_TO_EXCEPTION_PORT(arg);
+
+  mach_msg_server(openmcl_exc_server, 2048, p, 0);
+  /* Should never return. */
+  abort();
+}
+
+
+
+mach_port_t
+mach_exception_port_set()
+{
+  static mach_port_t __exception_port_set = MACH_PORT_NULL;
+  kern_return_t kret;  
+  if (__exception_port_set == MACH_PORT_NULL) {
+    kret = mach_port_allocate(mach_task_self(),
+			      MACH_PORT_RIGHT_PORT_SET,
+			      &__exception_port_set);
+    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
+    create_system_thread(0,
+                         NULL,
+                         exception_handler_proc, 
+                         (void *)((natural)__exception_port_set));
+  }
+  return __exception_port_set;
+}
+
+/*
+  Setup a new thread to handle those exceptions specified by
+  the mask "which".  This involves creating a special Mach
+  message port, telling the Mach kernel to send exception
+  messages for the calling thread to that port, and setting
+  up a handler thread which listens for and responds to
+  those messages.
+
+*/
+
+/*
+  Establish the lisp thread's TCR as its exception port, and determine
+  whether any other ports have been established by foreign code for
+  exceptions that lisp cares about.
+
+  If this happens at all, it should happen on return from foreign
+  code and on entry to lisp code via a callback.
+
+  This is a lot of trouble (and overhead) to support Java, or other
+  embeddable systems that clobber their caller's thread exception ports.
+  
+*/
+kern_return_t
+tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
+{
+  kern_return_t kret;
+  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
+  int i;
+  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
+  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
+  exception_mask_t mask = 0;
+
+  kret = thread_swap_exception_ports(thread,
+				     LISP_EXCEPTIONS_HANDLED_MASK,
+				     lisp_port,
+				     EXCEPTION_DEFAULT,
+				     THREAD_STATE_NONE,
+				     fxs->masks,
+				     &n,
+				     fxs->ports,
+				     fxs->behaviors,
+				     fxs->flavors);
+  if (kret == KERN_SUCCESS) {
+    fxs->foreign_exception_port_count = n;
+    for (i = 0; i < n; i ++) {
+      foreign_port = fxs->ports[i];
+
+      if ((foreign_port != lisp_port) &&
+	  (foreign_port != MACH_PORT_NULL)) {
+	mask |= fxs->masks[i];
+      }
+    }
+    tcr->foreign_exception_status = (int) mask;
+  }
+  return kret;
+}
+
+kern_return_t
+tcr_establish_lisp_exception_port(TCR *tcr)
+{
+  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
+}
+
+/*
+  Do this when calling out to or returning from foreign code, if
+  any conflicting foreign exception ports were established when we
+  last entered lisp code.
+*/
+kern_return_t
+restore_foreign_exception_ports(TCR *tcr)
+{
+  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
+  
+  if (m) {
+    MACH_foreign_exception_state *fxs  = 
+      (MACH_foreign_exception_state *) tcr->native_thread_info;
+    int i, n = fxs->foreign_exception_port_count;
+    exception_mask_t tm;
+
+    for (i = 0; i < n; i++) {
+      if ((tm = fxs->masks[i]) & m) {
+	thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
+				   tm,
+				   fxs->ports[i],
+				   fxs->behaviors[i],
+				   fxs->flavors[i]);
+      }
+    }
+  }
+}
+				   
+
+/*
+  This assumes that a Mach port (to be used as the thread's exception port) whose
+  "name" matches the TCR's 32-bit address has already been allocated.
+*/
+
+kern_return_t
+setup_mach_exception_handling(TCR *tcr)
+{
+  mach_port_t 
+    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
+    task_self = mach_task_self();
+  kern_return_t kret;
+
+  kret = mach_port_insert_right(task_self,
+				thread_exception_port,
+				thread_exception_port,
+				MACH_MSG_TYPE_MAKE_SEND);
+  MACH_CHECK_ERROR("adding send right to exception_port",kret);
+
+  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
+  if (kret == KERN_SUCCESS) {
+    mach_port_t exception_port_set = mach_exception_port_set();
+
+    kret = mach_port_move_member(task_self,
+				 thread_exception_port,
+				 exception_port_set);
+  }
+  return kret;
+}
+
+void
+darwin_exception_init(TCR *tcr)
+{
+  void tcr_monitor_exception_handling(TCR*, Boolean);
+  kern_return_t kret;
+  MACH_foreign_exception_state *fxs = 
+    calloc(1, sizeof(MACH_foreign_exception_state));
+  
+  tcr->native_thread_info = (void *) fxs;
+
+  if ((kret = setup_mach_exception_handling(tcr))
+      != KERN_SUCCESS) {
+    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
+    terminate_lisp();
+  }
+}
+
+/*
+  The tcr is the "name" of the corresponding thread's exception port.
+  Destroying the port should remove it from all port sets of which it's
+  a member (notably, the exception port set.)
+*/
+void
+darwin_exception_cleanup(TCR *tcr)
+{
+  void *fxs = tcr->native_thread_info;
+  extern Boolean use_mach_exception_handling;
+
+  if (fxs) {
+    tcr->native_thread_info = NULL;
+    free(fxs);
+  }
+  if (use_mach_exception_handling) {
+    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
+    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
+  }
+}
+
+
+Boolean
+suspend_mach_thread(mach_port_t mach_thread)
+{
+  kern_return_t status;
+  Boolean aborted = false;
+  
+  do {
+    aborted = false;
+    status = thread_suspend(mach_thread);
+    if (status == KERN_SUCCESS) {
+      status = thread_abort_safely(mach_thread);
+      if (status == KERN_SUCCESS) {
+        aborted = true;
+      } else {
+        fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
+        thread_resume(mach_thread);
+      }
+    } else {
+      return false;
+    }
+  } while (! aborted);
+  return true;
+}
+
+/*
+  Only do this if pthread_kill indicated that the pthread isn't
+  listening to signals anymore, as can happen as soon as pthread_exit()
+  is called on Darwin.  The thread could still call out to lisp as it
+  is exiting, so we need another way to suspend it in this case.
+*/
+Boolean
+mach_suspend_tcr(TCR *tcr)
+{
+  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
+  ExceptionInformation *pseudosigcontext;
+  Boolean result = false;
+  
+  result = suspend_mach_thread(mach_thread);
+  if (result) {
+    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
+    pseudosigcontext->uc_onstack = 0;
+    pseudosigcontext->uc_sigmask = (sigset_t) 0;
+    tcr->suspend_context = pseudosigcontext;
+  }
+  return result;
+}
+
+void
+mach_resume_tcr(TCR *tcr)
+{
+  ExceptionInformation *xp;
+  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
+  
+  xp = tcr->suspend_context;
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
+          tcr, tcr->pending_exception_context);
+#endif
+  tcr->suspend_context = NULL;
+  restore_mach_thread_state(mach_thread, xp);
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
+          tcr, tcr->pending_exception_context);
+#endif
+  thread_resume(mach_thread);
+}
+
+void
+fatal_mach_error(char *format, ...)
+{
+  va_list args;
+  char s[512];
+ 
+
+  va_start(args, format);
+  vsnprintf(s, sizeof(s),format, args);
+  va_end(args);
+
+  Fatal("Mach error", s);
+}
+
+void
+pseudo_interrupt_handler(int signum, ExceptionInformation *context)
+{
+  interrupt_handler(signum, NULL, context);
+}
+
+int
+mach_raise_thread_interrupt(TCR *target)
+{
+  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
+  kern_return_t kret;
+  Boolean result = false;
+  TCR *current = get_tcr(false);
+  thread_basic_info_data_t info; 
+  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
+
+  LOCK(lisp_global(TCR_AREA_LOCK), current);
+
+  if (suspend_mach_thread(mach_thread)) {
+    if (thread_info(mach_thread,
+                    THREAD_BASIC_INFO,
+                    (thread_info_t)&info,
+                    &info_count) == KERN_SUCCESS) {
+      if (info.suspend_count == 1) {
+        if ((target->valence == TCR_STATE_LISP) &&
+            (!target->unwinding) &&
+            (TCR_INTERRUPT_LEVEL(target) >= 0)) {
+          kret = setup_signal_frame(mach_thread,
+                                    (void *)pseudo_interrupt_handler,
+                                    SIGNAL_FOR_PROCESS_INTERRUPT,
+                                    0,
+                                    target);
+          if (kret == KERN_SUCCESS) {
+            result = true;
+          }
+        }
+      }
+    }
+    if (! result) {
+      target->interrupt_pending = 1 << fixnumshift;
+    }
+    thread_resume(mach_thread);
+    
+  }
+  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
+  return 0;
+}
+
+#endif
Index: /branches/arm/lisp-kernel/ppc-exceptions.h
===================================================================
--- /branches/arm/lisp-kernel/ppc-exceptions.h	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-exceptions.h	(revision 13357)
@@ -0,0 +1,440 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+#define UUO_MASK 0xfc00000f
+
+#define IS_UUO(i) (((i) & UUO_MASK) == 0xb)
+/* If an instruction is a UUO, the minor opcode is in bits 21:27 */
+#define UUO_MINOR(u) (((u) >> 4) & 0x7f)
+
+typedef u_int32_t opcode, *pc;
+
+OSStatus
+handle_uuo(ExceptionInformation *, opcode, pc);
+
+
+
+#ifdef LINUX
+/*
+  Different (recent) versions of glibc disagree about how
+  a ucontext is laid out (and about what an mcontext is.)
+  There's something like a pointer to a pt_regs structure
+  in the 12th word in both cases.  (Yes, this is an extremely
+  ugly hack; it would be better to conditionalize on the values
+  of GLIBC_VERSION/GLIBC_MINOR , but the discrepancy exists
+  in various flavors of glibc 2.3.)
+*/
+#ifdef PPC64
+#define XP_PTREGS(x) ((x)->uc_mcontext.regs)
+#define xpGPRvector(x) ((natural *)(XP_PTREGS(x)))
+#else
+#define XP_PTREGS(x) (((struct pt_regs **)(x))[12])
+#define xpGPRvector(x) (XP_PTREGS(x)->gpr)
+#endif
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define set_xpGPR(x,gpr,new) xpGPR((x),(gpr)) = (natural)(new)
+#define xpPC(x) (*((pc*)(&(XP_PTREGS(x)->nip))))
+#define set_xpPC(x,new) (xpPC(x) = (pc)(new))
+#define xpLR(x) (*((pc*)(&(XP_PTREGS(x)->link))))
+#define xpCTR(x) (*(pc*)(&(XP_PTREGS(x)->ctr)))
+#define xpXER(x) (XP_PTREGS(x)->xer)
+#define xpCCR(x) (XP_PTREGS(x)->ccr)
+#define xpMSR(x) (XP_PTREGS(x)->msr)
+#define xpDSISR(x) (XP_PTREGS(x)->dsisr)
+#define xpDAR(x) (XP_PTREGS(x)->dar)
+#define xpTRAP(x) (XP_PTREGS(x)->trap)
+#define xpFPSCR(x) (XP_PTREGS(x)->gpr[PT_FPSCR])
+#define xpFPRvector(x) ((double *)(&(XP_PTREGS(x)->gpr[PT_FPR0])))
+#define xpFPR(x,fprno) (xpFPRvector(x)[fprno])
+
+/* 
+   Work around a Darwin G5 bug (present in OSX 10.2.7, 10.2.8, and later
+   versions).  See below for details.
+*/
+#define DarwinSigReturn(context)
+#define SIGRETURN(context)
+#endif
+
+#ifdef DARWIN
+#define xpGPRvector(x) (&(UC_MCONTEXT(x)->__ss.__r0))
+#define xpGPR(x,gprno) ((xpGPRvector(x))[gprno])
+#define set_xpGPR(x,gpr,new) xpGPR((x),(gpr)) = (UInt32)(new)
+#define xpPC(x) (*((pc*) &(UC_MCONTEXT(x)->__ss.__srr0)))
+#define set_xpPC(x,new) (xpPC(x) = (pc)(new))
+#define xpLR(x) (*((pc*)(&(UC_MCONTEXT(x)->__ss.__lr))))
+#define xpCTR(x) (*(pc*)(&(UC_MCONTEXT(x)->__ss.__ctr)))
+#define xpXER(x) (UC_MCONTEXT(x)->__ss.__xer)
+#define xpCCR(x) (UC_MCONTEXT(x)->__ss.__cr)
+#define xpMSR(x) (UC_MCONTEXT(x)->__ss.__srr1)
+#define xpDSISR(x) (UC_MCONTEXT(x)->__es.__dsisr)
+#define xpDAR(x) (UC_MCONTEXT(x)->__es.__dar)
+#define xpTRAP(x) (UC_MCONTEXT(x)->__es.__exception)
+#define xpFPSCR(x) (UC_MCONTEXT(x)->__fs.__fpscr)
+#define xpFPRvector(x) (UC_MCONTEXT(x)->__fs.__fpregs)
+#define xpFPR(x,fprno) (xpFPRvector(x)[fprno])
+/* There's a bug in some versions of Darwin on G5 processors: FP context
+   isn't restored correctly on exit from a signal handler if the integer
+   context appears to be unmodified (the 64-bit context isn't set up
+   correctly by the kernel: only the first N bytes are copied out of
+   the kernel, where N = size of 32-bit context.)
+
+   If the kernel pushed both a 32-bit and 64-bit context, the C
+   runtime "signal trampoline" code tries to determine if the 32-bit
+   GPRs and user-visible SPRs in the 32-bit context contain the same
+   values as their 64-bit counterparts on exit; if so, it tries to
+   call sigreturn with an extra argument that indicates that the
+   thread's state should be restored from the 64-bit context.
+   (Apparently that's more efficient; it'd be surprising if it'd be
+   more efficient when the cost of comparing values in the two contexts
+   is factored in ...).  On some OS releases, the 64-bit context can't
+   be reliably restored (FPRs get trashed.)
+
+   One way to work around this is to use a deprecated, 32-bit-context-only
+   version of the sigreturn syscall.  There seems to be reason to
+   believe that the old sigreturn syscall will disappear
+   on OS releases >10.3.
+
+   Another way to work around this is to make a "harmless" change to
+   an SPR or GPR value in the 32-bit context.  There are lots of
+   "reserved" bits in the XER that make good candidates: 1's written
+   to reserved XER bits can't be reliably read anyway, so this may
+   or may not actually change the value in the XER in a way that
+   can be reliably detected.
+
+   Note that both the old, deprecated version of sigreturn and the
+   new version take a first argument of type "struct ucontext *",
+   not "struct sigcontext *" as the man page and header files claim.
+   The new version takes a second argument, which is a small integer
+   which defines what "flavor" of context should be restored from.
+   The meaningful values that can be used here aren't defined in
+   a header file; the kernel (and the libc _sigtramp() function)
+   have (hopefully) matching, redundant hardwired definitions in
+   the source.
+*/
+#ifdef PPC64
+#define DarwinSigReturn(x)
+#else
+#define DarwinSigReturn(x) (UC_MCONTEXT(x)->__ss.__xer)^=0x80
+#endif
+#define SIGRETURN(context) DarwinSigReturn(context)
+#endif
+
+
+
+
+
+
+/* 
+  Unconditional traps (tw, twi instructions) are used by the
+  operating system.  We use conditional traps.
+  */
+
+int
+is_conditional_trap(opcode);
+
+#define kNameBufLen 256
+#define TRAP_LOOKUP_TRIES 5   /* # instrs to scan before trap instr */
+
+void
+callback_for_trap (LispObj, ExceptionInformation *, pc, natural, natural, natural);
+
+natural
+register_codevector_contains_pc (natural, pc);
+
+void
+callback_to_lisp (LispObj, ExceptionInformation *, natural, natural, natural, natural, natural);
+
+OSStatus
+handle_trap(ExceptionInformation *, opcode, pc, siginfo_t *);
+
+unsigned
+scan_for_instr( unsigned, unsigned, pc );
+
+
+
+#define UUO_INTERR (11)
+#define UUO_INTCERR (12)
+#define UUO_INTERR2 (13)
+#define UUO_INTCERR2 (14)
+
+#define UUO_FPUX_BINOP (22)
+#define UUO_ZERO_FPSCR (25)
+
+
+/* PPC instructions */
+#define match_instr(instr, mask, target)   (((instr) & (mask)) == (target))
+#define RS_field(instr)  (((instr) >> 21) & 0x1f)
+#define RT_field(instr)  (RS_field(instr))
+#define TO_field(instr)  (RT_field(instr))
+#define RA_field(instr)  (((instr) >> 16) & 0x1f)
+#define RB_field(instr)  (((instr) >> 11) & 0x1f)
+#define D_field(instr)   ((instr) & 0xffff)
+#define DS_field(instr)  ((instr) & 0xfffc)
+#define DS_VARIANT_FIELD(instr) ((instr) & 3)
+
+#define RT(val) ((val & 0x1f) << 21)
+#define RS(val) (RT(val))
+#define RA(val) ((val & 0x1f) << 16)
+#define RB(val) ((val & 0x1f) << 11)
+#define D(val) (val & 0xffff)
+
+#define RS_MASK RS(-1)
+#define RT_MASK RS_MASK
+#define TO_MASK RS_MASK
+#define RA_MASK RA(-1)
+#define RB_MASK RB(-1)
+#define D_MASK  D(-1)
+
+
+
+#define OP(x) (((x) & 0x3f) << 26)
+#define OP_MASK OP (0x3f)
+
+/* Main opcode + TO field of a D form instruction */
+#define OPTO(x,to) (OP(x) | (((to) & 0x1f) << 21))
+#define OPTO_MASK (OP_MASK | TO_MASK)
+#define OPTORA(x,to,ra) (OPTO(x,to) | RA(ra))
+#define OPTORA_MASK (OPTO_MASK | RA_MASK)  /* was OP_TO_MASK, which is defined nowhere */
+
+
+
+
+/* An X form instruction.  */
+#define X(op, xop) (OP (op) | (((xop) & 0x3ff) << 1))
+
+/* An X form instruction with the RC bit specified.  */
+#define XRC(op, xop, rc) (X ((op), (xop)) | ((rc) & 1))
+
+/* The mask for an X form instruction.  */
+#define X_MASK XRC(0x3f, 0x3ff, 1)
+
+/* An XO form instruction */
+#define XO(op, xop, oe, rc) \
+  (OP (op) | ((((unsigned long)(xop)) & 0x1ff) << 1) | ((((unsigned long)(oe)) & 1) << 10) | (((unsigned long)(rc)) & 1))
+#define XO_MASK XO (0x3f, 0x1ff, 1, 1)
+
+
+
+/* The bits in the TO field of a TW or TWI instruction */
+#define TO_LT (1<<4)		/* signed < */
+#define TO_GT (1<<3)		/* signed > */
+#define TO_EQ (1<<2)		/* = */
+#define TO_LO (1<<1)		/* unsigned < */
+#define TO_HI (1<<0)		/* unsigned > */
+#define TO_NE (TO_LT|TO_GT)
+
+/* True if major opcode of "instr" is "op" */
+#define major_opcode_p(instr, op) match_instr((instr),OP_MASK,OP(op))
+
+/* True if "instr" is an X form instruction with major opcode "major"
+   and minor opcode "minor" */
+#define X_opcode_p(instr,major,minor) match_instr((instr),X_MASK,X(major,minor))
+
+#define major_opcode_TDI 2
+#define major_opcode_TWI 3
+#ifdef PPC64
+#define major_opcode_TRI major_opcode_TDI
+#else
+#define major_opcode_TRI major_opcode_TWI
+#endif
+#define major_opcode_ADDI 14
+#define major_opcode_RLWINM 21
+#define major_opcode_X31 31		/* an "X" form instruction; see minor opcode */
+#define major_opcode_LWZ 32
+#define major_opcode_LBZ 34
+#define major_opcode_STW 36
+#define major_opcode_STWU 37
+#define major_opcode_LD_LDU_LWA 58
+#define major_opcode_FPU_SINGLE 59
+#define major_opcode_FPU_DOUBLE 63
+
+#define minor_opcode_TW 4
+#define minor_opcode_TD 68
+#ifdef PPC64
+#define minor_opcode_TR minor_opcode_TD
+#else
+#define minor_opcode_TR minor_opcode_TW
+#endif
+#define minor_opcode_SUBF 40
+#define minor_opcode_STWX 151
+#define minor_opcode_STWUX 183
+
+#define major_opcode_DS_LOAD64 58
+#define DS_LOAD64_VARIANT_LD 0
+
+#define major_opcode_DS_STORE64 62
+#define DS_STORE64_VARIANT_STD 0
+
+
+
+#define D_instruction(major,rt,ra,imm) (OP(major)|((rt)<<21)|((ra)<<16)|((imm)&D_MASK))
+#define DS_instruction(major,rt,ra,imm,minor) (OP(major)|((rt)<<21)|((ra)<<16)|(((imm)&D_MASK)&~3)|((minor)&3))
+#define TRI_instruction(rt,ra,imm)     D_instruction(major_opcode_TRI,rt,ra,imm)
+#define LBZ_instruction(rt,ra,imm)     D_instruction(major_opcode_LBZ,rt,ra,imm)
+#define LWZ_instruction(rt,ra,imm)     D_instruction(major_opcode_LWZ,rt,ra,imm)
+#define LD_instruction(rt,ra,imm)      DS_instruction(58,rt,ra,imm,0)
+
+#define D_RT_IMM_MASK                  (OP_MASK|RT_MASK|D_MASK)
+#define D_RA_IMM_MASK                  (OP_MASK|RA_MASK|D_MASK)
+
+#define X_instruction(major,minor,rt,ra,rb) (X(major,minor)|((rt)<<21)|((ra)<<16)|((rb)<<11))
+
+#define unmasked_register              0
+
+#define LISP_BREAK_INSTRUCTION 0x7f810808
+#define QUIET_LISP_BREAK_INSTRUCTION 0x7c800008
+
+#ifdef PPC64
+/* Have to use signed comparisons on PPC64; if we decrement
+   allocptr and it "wraps around" address 0, that's an 
+   attempt to allocate a large object.  Note that this
+   means that valid heap addresses can't have the high
+   bit set. */
+/* tdlt allocptr,allocbase */
+#define ALLOC_TRAP_INSTRUCTION 0x7e095088
+#else
+/* On PPC32, we can use an unsigned comparison, as long
+   as  HEAP_IMAGE_BASE+PURESPACE_RESERVE is greater than
+   the maximum possible allocation (around 27 bits).
+   Decrementing allocptr may cause it to wrap around
+   #x80000000, but it should never wrap around 0. */
+/* twllt allocptr,allocbase */
+#define ALLOC_TRAP_INSTRUCTION 0x7c495008
+#endif
+
+#ifdef PPC64
+/* tdlgei allocptr,0 */
+#define GC_TRAP_INSTRUCTION 0x08a90000
+#else
+/* twlgei allocptr,0 */
+#define GC_TRAP_INSTRUCTION 0x0ca90000
+#endif
+
+#ifdef PPC64
+/* clrrdi allocptr,allocptr,4 */
+#define UNTAG_ALLOCPTR_INSTRUCTION 0x792906e4
+#else
+/* clrrwi allocptr,allocptr,3 */
+#define UNTAG_ALLOCPTR_INSTRUCTION 0x55290038
+#endif
+
+#ifdef PPC64
+/* std rX,misc_header_offset(allocptr) */
+#define STORE_HEADER_ALLOCPTR_INSTRUCTION 0xf809fff4
+#else
+/* stw rX,misc_header_offset(allocptr) */
+#define STORE_HEADER_ALLOCPTR_INSTRUCTION 0x9009fffa
+#endif
+#define STORE_HEADER_ALLOCPTR_MASK D_RA_IMM_MASK
+
+#ifdef PPC64
+/* std rX,cons.cXr(allocptr) */
+#define STORE_CAR_ALLOCPTR_INSTRUCTION 0xf8090004
+#define STORE_CDR_ALLOCPTR_INSTRUCTION 0xf809fffc
+#else
+/* stw rX,cons.cXr(allocptr) */
+#define STORE_CAR_ALLOCPTR_INSTRUCTION 0x90090003
+#define STORE_CDR_ALLOCPTR_INSTRUCTION 0x9009ffff
+#endif
+#define STORE_CXR_ALLOCPTR_MASK D_RA_IMM_MASK
+
+
+#ifdef PPC64
+/* stdu sp,-32(sp) */
+#define CREATE_LISP_FRAME_INSTRUCTION 0xf821ffe1
+#else
+/* stwu sp,-16(sp) */
+#define CREATE_LISP_FRAME_INSTRUCTION 0x9421fff0
+#endif
+
+#ifdef PPC64
+/* std tsp,tsp_frame.type(tsp) */
+#define MARK_TSP_FRAME_INSTRUCTION 0xf98c0008
+#else
+/* stw tsp,tsp_frame.type(tsp) */
+#define MARK_TSP_FRAME_INSTRUCTION 0x918c0004
+#endif
+
+#ifdef PPC64
+#define INIT_CATCH_FRAME_INSTRUCTION (0xf8000000 | RA(nargs))
+#define INIT_CATCH_FRAME_MASK (OP_MASK | RA_MASK)
+#else
+#define INIT_CATCH_FRAME_INSTRUCTION (0x90000000 | RA(nargs))
+#define INIT_CATCH_FRAME_MASK (OP_MASK | RA_MASK)
+#endif
+
+OSStatus
+handle_error(ExceptionInformation *, unsigned, unsigned, unsigned, pc);
+
+typedef char* vector_buf;
+
+void put_altivec_registers(vector_buf);
+void get_altivec_registers(vector_buf);
+
+
+int altivec_available;
+
+#ifdef DARWIN
+#include <mach/mach.h>
+#include <mach/mach_error.h>
+#include <mach/machine/thread_state.h>
+#include <mach/machine/thread_status.h>
+
+#endif
+
+/* Yet another way to look at a branch instruction ... */
+typedef union {
+  struct {unsigned op:6, li:24, aa:1, lk:1;} b;
+  unsigned opcode;
+} branch_instruction;
+
+
+
+  /* Enable exceptions (at least, enable another thread's attempts to
+     suspend this one) by restoring the signal mask.
+  */
+
+
+
+#ifdef DARWIN
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGUSR1
+#endif
+#ifdef LINUX
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGPWR
+#endif
+
+
+#ifdef LINUX
+register void *current_r2 __asm__("r2");
+#endif
+
+Boolean
+extend_tcr_tlb(TCR *, ExceptionInformation *, unsigned, unsigned);
+
+void 
+pc_luser_xp(ExceptionInformation *, TCR *, signed_natural *);
+
+
+#ifdef PPC64
+#define codevec_hdr_p(value) ((value) == (('C'<<24)|('O'<<16)|('D'<<8)|'E'))
+#else
+/* top 6 bits will be zero, subtag will be subtag_code_vector */
+#define CV_HDR_MASK     (OP_MASK | subtagmask)
+#define CV_HDR_VALUE    subtag_code_vector
+#define codevec_hdr_p(value)	(((value) & CV_HDR_MASK) == CV_HDR_VALUE)
+#endif
+
+
Index: /branches/arm/lisp-kernel/ppc-gc.c
===================================================================
--- /branches/arm/lisp-kernel/ppc-gc.c	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-gc.c	(revision 13357)
@@ -0,0 +1,2410 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp_globals.h"
+#include "bits.h"
+#include "gc.h"
+#include "area.h"
+#include "Threads.h"
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+
+/* Heap sanity checking. */
+
+void
+check_node(LispObj n)
+{
+  int tag = fulltag_of(n), header_tag;
+  area *a;
+  LispObj header;
+
+  switch (tag) {
+  case fulltag_even_fixnum:
+  case fulltag_odd_fixnum:
+
+
+#ifdef PPC64
+  case fulltag_imm_0:
+  case fulltag_imm_1:
+  case fulltag_imm_2:
+  case fulltag_imm_3:
+#else
+  case fulltag_imm:
+#endif
+
+
+    return;
+
+#ifndef PPC64
+  case fulltag_nil:
+    if (n != lisp_nil) {
+      Bug(NULL,"Object tagged as nil, not nil : 0x%08x", n);
+    }
+    return;
+#endif
+
+
+#ifdef PPC64
+  case fulltag_nodeheader_0: 
+  case fulltag_nodeheader_1: 
+  case fulltag_nodeheader_2: 
+  case fulltag_nodeheader_3: 
+  case fulltag_immheader_0: 
+  case fulltag_immheader_1: 
+  case fulltag_immheader_2: 
+  case fulltag_immheader_3: 
+#else
+  case fulltag_nodeheader:
+  case fulltag_immheader:
+#endif
+
+
+    Bug(NULL, "Header not expected : 0x%lx", n);
+    return;
+
+  case fulltag_misc:
+  case fulltag_cons:
+    a = heap_area_containing((BytePtr)ptr_from_lispobj(n));
+    
+    if (a == NULL) {
+      /* Can't do as much sanity checking as we'd like to
+         if object is a defunct stack-consed object.
+         If a dangling reference to the heap, that's
+         bad .. */
+      a = active_dynamic_area;
+      if ((n > (ptr_to_lispobj(a->active))) &&
+          (n < (ptr_to_lispobj(a->high)))) {
+        Bug(NULL, "Node points to heap free space: 0x%lx", n);
+      }
+      return;
+    }
+    break;
+  }
+  /* Node points to heap area, so check header/lack thereof. */
+  header = header_of(n);
+  header_tag = fulltag_of(header);
+  if (tag == fulltag_cons) {
+    if ((nodeheader_tag_p(header_tag)) ||
+        (immheader_tag_p(header_tag))) {
+      Bug(NULL, "Cons cell at 0x%lx has bogus header : 0x%lx", n, header);
+    }
+    return;
+  }
+
+  if ((!nodeheader_tag_p(header_tag)) &&
+      (!immheader_tag_p(header_tag))) {
+    Bug(NULL,"Vector at 0x%lx has bogus header : 0x%lx", n, header);
+  }
+  return;
+}
+
+
+
+
+void
+check_range(LispObj *start, LispObj *end, Boolean header_allowed)
+{
+  LispObj node, *current = start, *prev = NULL;
+  int tag;
+  natural elements;
+
+  while (current < end) {
+    prev = current;
+    node = *current++;
+    tag = fulltag_of(node);
+    if (immheader_tag_p(tag)) {
+      if (! header_allowed) {
+        Bug(NULL, "Header not expected at 0x%lx\n", prev);
+      }
+      current = (LispObj *)skip_over_ivector((natural)prev, node);
+    } else if (nodeheader_tag_p(tag)) {
+      if (! header_allowed) {
+        Bug(NULL, "Header not expected at 0x%lx\n", prev);
+      }
+      elements = header_element_count(node) | 1;
+      while (elements--) {
+        check_node(*current++);
+      }
+    } else {
+      check_node(node);
+      check_node(*current++);
+    }
+  }
+
+  if (current != end) {
+    Bug(NULL, "Overran end of memory range: start = 0x%08x, end = 0x%08x, prev = 0x%08x, current = 0x%08x",
+        start, end, prev, current);
+  }
+}
+
+void
+check_all_areas(TCR *tcr)
+{
+  area *a = active_dynamic_area;
+  area_code code = a->code;
+
+  while (code != AREA_VOID) {
+    switch (code) {
+    case AREA_DYNAMIC:
+    case AREA_STATIC:
+    case AREA_MANAGED_STATIC:
+      check_range((LispObj *)a->low, (LispObj *)a->active, true);
+      break;
+
+    case AREA_VSTACK:
+      {
+        LispObj* low = (LispObj *)a->active;
+        LispObj* high = (LispObj *)a->high;
+        
+        if (((natural)low) & node_size) {
+          check_node(*low++);
+        }
+        check_range(low, high, false);
+      }
+      break;
+
+    case AREA_TSTACK:
+      {
+        LispObj *current, *next,
+                *start = (LispObj *) a->active,
+                *end = start,
+                *limit = (LispObj *) a->high;
+                 
+        for (current = start;
+             end != limit;
+             current = next) {
+          next = ptr_from_lispobj(*current);
+          end = ((next >= start) && (next < limit)) ? next : limit;
+          if (current[1] == 0) {
+            check_range(current+2, end, true);
+          }
+        }
+      }
+      break;
+    }
+    a = a->succ;
+    code = (a->code);
+  }
+}
+
+
+
+
+
+
+
+
+
+
+
+/* Sooner or later, this probably wants to be in assembler */
+void
+mark_root(LispObj n)
+{
+  int tag_n = fulltag_of(n);
+  natural dnode, bits, *bitsp, mask;
+
+  if (!is_node_fulltag(tag_n)) {
+    return;
+  }
+
+  dnode = gc_area_dnode(n);
+  if (dnode >= GCndnodes_in_area) {
+    return;
+  }
+  set_bits_vars(GCmarkbits,dnode,bitsp,bits,mask);
+  if (bits & mask) {
+    return;
+  }
+  *bitsp = (bits | mask);
+
+  if (tag_n == fulltag_cons) {
+    cons *c = (cons *) ptr_from_lispobj(untag(n));
+    rmark(c->car);
+    rmark(c->cdr);
+    return;
+  }
+  {
+    LispObj *base = (LispObj *) ptr_from_lispobj(untag(n));
+    natural
+      header = *((natural *) base),
+      subtag = header_subtag(header),
+      element_count = header_element_count(header),
+      total_size_in_bytes,      /* including 4/8-byte header */
+      suffix_dnodes;
+    tag_n = fulltag_of(header);
+
+
+#ifdef PPC64
+    if ((nodeheader_tag_p(tag_n)) ||
+        (tag_n == ivector_class_64_bit)) {
+      total_size_in_bytes = 8 + (element_count<<3);
+    } else if (tag_n == ivector_class_8_bit) {
+      total_size_in_bytes = 8 + element_count;
+    } else if (tag_n == ivector_class_32_bit) {
+      total_size_in_bytes = 8 + (element_count<<2);
+    } else {
+      /* ivector_class_other_bit contains 16-bit arrays & bitvector */
+      if (subtag == subtag_bit_vector) {
+        total_size_in_bytes = 8 + ((element_count+7)>>3);
+      } else {
+        total_size_in_bytes = 8 + (element_count<<1);
+      }
+    }
+#else
+    if ((tag_n == fulltag_nodeheader) ||
+        (subtag <= max_32_bit_ivector_subtag)) {
+      total_size_in_bytes = 4 + (element_count<<2);
+    } else if (subtag <= max_8_bit_ivector_subtag) {
+      total_size_in_bytes = 4 + element_count;
+    } else if (subtag <= max_16_bit_ivector_subtag) {
+      total_size_in_bytes = 4 + (element_count<<1);
+    } else if (subtag == subtag_double_float_vector) {
+      total_size_in_bytes = 8 + (element_count<<3);
+    } else {
+      total_size_in_bytes = 4 + ((element_count+7)>>3);
+    }
+#endif
+
+
+
+    suffix_dnodes = ((total_size_in_bytes+(dnode_size-1))>>dnode_shift) -1;
+
+    if (suffix_dnodes) {
+      set_n_bits(GCmarkbits, dnode+1, suffix_dnodes);
+    }
+
+    if (nodeheader_tag_p(tag_n)) {
+      if (subtag == subtag_hash_vector) {
+        /* Don't invalidate the cache here.  It should get
+           invalidated on the lisp side, if/when we know
+           that rehashing is necessary. */
+        LispObj flags = ((hash_table_vector_header *) base)->flags;
+
+        if (flags & nhash_weak_mask) {
+          ((hash_table_vector_header *) base)->cache_key = undefined;
+          ((hash_table_vector_header *) base)->cache_value = lisp_nil;
+          mark_weak_htabv(n);
+	  return;
+	}
+      }
+
+      if (subtag == subtag_pool) {
+        deref(n, 1) = lisp_nil;
+      }
+      
+      if (subtag == subtag_weak) {
+        natural weak_type = (natural) base[2];
+        if (weak_type >> population_termination_bit) {
+          element_count -= 2;
+        } else {
+          element_count -= 1;
+        }
+      }
+
+      base += (1+element_count);
+
+
+      while(element_count--) {
+        rmark(*--base);
+      }
+      if (subtag == subtag_weak) {
+        deref(n, 1) = GCweakvll;
+        GCweakvll = untag(n);
+      }
+    }
+  }
+}
+
+
+/* 
+  This marks the node if it needs to; it returns true if the node
+  is either a hash table vector header or a cons/misc-tagged pointer
+  to ephemeral space.
+  Note that it  might be a pointer to ephemeral space even if it's
+  not pointing to the current generation.
+*/
+
+Boolean
+mark_ephemeral_root(LispObj n)
+{
+  int tag_n = fulltag_of(n);
+  natural eph_dnode;
+
+  if (nodeheader_tag_p(tag_n)) {
+    return (header_subtag(n) == subtag_hash_vector);
+  }
+ 
+  if ((tag_n == fulltag_cons) ||
+      (tag_n == fulltag_misc)) {
+    eph_dnode = area_dnode(n, GCephemeral_low);
+    if (eph_dnode < GCn_ephemeral_dnodes) {
+      mark_root(n);             /* May or may not mark it */
+      return true;              /* but return true 'cause it's an ephemeral node */
+    }
+  }
+  return false;                 /* Not a heap pointer or not ephemeral */
+}
+  
+
+#ifdef PPC64
+/* Any register (srr0, the lr or ctr) or stack location that
+   we're calling this on should have its low 2 bits clear; it'll
+   be tagged as a "primary" object, but the pc/lr/ctr should
+   never point to a tagged object or contain a fixnum.
+   
+   If the "pc" appears to be pointing into a heap-allocated
+   code vector that's not yet marked, back up until we find
+   the code-vector's prefix (the 32-bit word containing the
+   value 'CODE' which precedes the code-vector's first instruction)
+   and mark the entire code-vector.
+*/
+void
+mark_pc_root(LispObj xpc)
+{
+  if ((xpc & 3) != 0) {
+    Bug(NULL, "Bad PC locative!");
+  } else {
+    natural dnode = gc_area_dnode(xpc);
+    if ((dnode < GCndnodes_in_area) &&
+        !ref_bit(GCmarkbits,dnode)) {
+      LispObj
+        *headerP,
+        header;
+      opcode *program_counter;
+
+      for(program_counter=(opcode *)ptr_from_lispobj(xpc & ~7);
+	  (LispObj)program_counter >= GCarealow;
+          program_counter-=2) {
+        if (*program_counter == PPC64_CODE_VECTOR_PREFIX) {
+          headerP = ((LispObj *)program_counter)-1;
+          header = *headerP;
+	  dnode = gc_area_dnode(headerP);
+          set_n_bits(GCmarkbits, dnode, (8+(header_element_count(header)<<2)+(dnode_size-1))>>dnode_shift);
+          return;
+        }
+      }
+      /*
+        Expected to have found a header by now, but didn't.
+        That's a bug.
+        */
+      Bug(NULL, "code_vector header not found!");
+    }
+  }
+}
+#else /* PPC64 */
+/*
+  Some objects (saved LRs on the control stack, the LR, PC, and CTR
+  in exception frames) may be tagged as fixnums but are really
+  locatives into code_vectors.
+
+  If "pc" is not tagged as a fixnum, mark it as a "normal" root.
+  If "pc" doesn't point at an unmarked doubleword in the area
+  being GCed, return.
+  Else back up until the code_vector's header is found and mark
+  all doublewords in the code_vector.
+*/
+void
+mark_pc_root(LispObj pc)
+{
+  if (tag_of(pc) != tag_fixnum) {
+    mark_root(pc);
+  } else {
+    natural dnode = gc_area_dnode(pc);
+    if ((dnode < GCndnodes_in_area) &&
+        !ref_bit(GCmarkbits,dnode)) {
+      LispObj
+        *headerP,
+        header;
+
+      for(headerP = (LispObj*)ptr_from_lispobj(untag(pc));
+          dnode < GCndnodes_in_area;
+          headerP-=2, --dnode) {
+        header = *headerP;
+
+        if ((header & code_header_mask) == subtag_code_vector) {
+          set_n_bits(GCmarkbits, dnode, (2+header_element_count(header))>>1);
+          return;
+        }
+      }
+      /*
+        Expected to have found a header by now, but didn't.
+        That's a bug.
+        */
+      Bug(NULL, "code_vector header not found!");
+    }
+  }
+}
+#endif /* PPC64 */
+
+
+
+#ifdef PPC64
+#define RMARK_PREV_ROOT fulltag_imm_3
+#define RMARK_PREV_CAR fulltag_misc
+#else
+#define RMARK_PREV_ROOT fulltag_imm
+#define RMARK_PREV_CAR fulltag_nil
+#endif
+
+
+
+
+
+/*
+  This wants to be in assembler even more than "mark_root" does.
+  For now, it does link-inversion: hard as that is to express in C,
+  reliable stack-overflow detection may be even harder ...
+*/
+void
+rmark(LispObj n)
+{
+  int tag_n = fulltag_of(n);
+  bitvector markbits = GCmarkbits;
+  natural dnode, bits, *bitsp, mask;
+
+  if (!is_node_fulltag(tag_n)) {
+    return;
+  }
+
+  dnode = gc_area_dnode(n);
+  if (dnode >= GCndnodes_in_area) {
+    return;
+  }
+  set_bits_vars(markbits,dnode,bitsp,bits,mask);
+  if (bits & mask) {
+    return;
+  }
+  *bitsp = (bits | mask);
+
+  if (current_stack_pointer() > GCstack_limit) {
+    if (tag_n == fulltag_cons) {
+      rmark(deref(n,1));
+      rmark(deref(n,0));
+    } else {
+      LispObj *base = (LispObj *) ptr_from_lispobj(untag(n));
+      natural
+        header = *((natural *) base),
+        subtag = header_subtag(header),
+        element_count = header_element_count(header),
+        total_size_in_bytes,
+        suffix_dnodes;
+      tag_n = fulltag_of(header);
+#ifdef PPC64
+      if ((nodeheader_tag_p(tag_n)) ||
+          (tag_n == ivector_class_64_bit)) {
+        total_size_in_bytes = 8 + (element_count<<3);
+      } else if (tag_n == ivector_class_8_bit) {
+        total_size_in_bytes = 8 + element_count;
+      } else if (tag_n == ivector_class_32_bit) {
+        total_size_in_bytes = 8 + (element_count<<2);
+      } else {
+        /* ivector_class_other_bit contains 16-bit arrays & bitvector */
+        if (subtag == subtag_bit_vector) {
+          total_size_in_bytes = 8 + ((element_count+7)>>3);
+        } else {
+          total_size_in_bytes = 8 + (element_count<<1);
+        }
+      }
+#else
+      if ((tag_n == fulltag_nodeheader) ||
+          (subtag <= max_32_bit_ivector_subtag)) {
+        total_size_in_bytes = 4 + (element_count<<2);
+      } else if (subtag <= max_8_bit_ivector_subtag) {
+        total_size_in_bytes = 4 + element_count;
+      } else if (subtag <= max_16_bit_ivector_subtag) {
+        total_size_in_bytes = 4 + (element_count<<1);
+      } else if (subtag == subtag_double_float_vector) {
+        total_size_in_bytes = 8 + (element_count<<3);
+      } else {
+        total_size_in_bytes = 4 + ((element_count+7)>>3);
+      }
+#endif
+
+
+      suffix_dnodes = ((total_size_in_bytes+(dnode_size-1))>>dnode_shift)-1;
+
+      if (suffix_dnodes) {
+        set_n_bits(GCmarkbits, dnode+1, suffix_dnodes);
+      }
+
+      if (!nodeheader_tag_p(tag_n)) return;
+
+      if (subtag == subtag_hash_vector) {
+        /* Splice onto weakvll, then return */
+        /* In general, there's no reason to invalidate the cached
+           key/value pair here.  However, if the hash table's weak,
+           we don't want to retain an otherwise unreferenced key
+           or value simply because they're referenced from the
+           cache.  Clear the cached entries iff the hash table's
+           weak in some sense.
+        */
+        LispObj flags = ((hash_table_vector_header *) base)->flags;
+
+        if ((flags & nhash_keys_frozen_mask) &&
+            (((hash_table_vector_header *) base)->deleted_count > 0)) {
+          /* We're responsible for clearing out any deleted keys, since
+             lisp side can't do it without breaking the state machine
+          */
+          LispObj *pairp = base + hash_table_vector_header_count;
+          natural
+            npairs = (element_count - (hash_table_vector_header_count - 1)) >> 1;
+
+          while (npairs--) {
+            if ((pairp[1] == unbound) && (pairp[0] != unbound)) {
+              pairp[0] = slot_unbound;
+            }
+            pairp +=2;
+          }
+          ((hash_table_vector_header *) base)->deleted_count = 0;
+        }
+
+
+        if (flags & nhash_weak_mask) {
+          ((hash_table_vector_header *) base)->cache_key = undefined;
+          ((hash_table_vector_header *) base)->cache_value = lisp_nil;
+	  mark_weak_htabv(n);
+	  return;
+	}
+      }
+
+      if (subtag == subtag_pool) {
+        deref(n, 1) = lisp_nil;
+      }
+
+      if (subtag == subtag_weak) {
+        natural weak_type = (natural) base[2];
+        if (weak_type >> population_termination_bit)
+          element_count -= 2;
+        else
+          element_count -= 1;
+      }
+      while (element_count) {
+        rmark(deref(n,element_count));
+        element_count--;
+      }
+
+      if (subtag == subtag_weak) {
+        deref(n, 1) = GCweakvll;
+        GCweakvll = untag(n);
+      }
+
+    }
+  } else {
+    LispObj prev = undefined;
+    LispObj this = n, next;
+    /*
+      This is an FSM.  The basic states are:
+      (0) Just marked the cdr of a cons; mark the car next;
+      (1) Just marked the car of a cons; back up.
+      (2) Hit a gvector header.  Back up.
+      (3) Marked a gvector element; mark the preceding one.
+      (4) Backed all the way up to the object that got us here.
+      
+      This is all encoded in the fulltag of the "prev" pointer.
+    */
+
+    if (tag_n == fulltag_cons) goto MarkCons;
+    goto MarkVector;
+
+  ClimbCdr:
+    prev = deref(this,0);
+    deref(this,0) = next;
+
+  Climb:
+    next = this;
+    this = prev;
+    tag_n = fulltag_of(prev);
+    switch(tag_n) {
+    case fulltag_odd_fixnum:
+    case fulltag_even_fixnum:
+      goto ClimbVector;
+
+    case RMARK_PREV_ROOT:
+      return;
+
+    case fulltag_cons:
+      goto ClimbCdr;
+
+    case RMARK_PREV_CAR:
+      goto ClimbCar;
+
+      /* default: abort() */
+    }
+
+  DescendCons:
+    prev = this;
+    this = next;
+
+  MarkCons:
+    next = deref(this,1);
+    this += node_size;
+    tag_n = fulltag_of(next);
+    if (!is_node_fulltag(tag_n)) goto MarkCdr;
+    dnode = gc_area_dnode(next);
+    if (dnode >= GCndnodes_in_area) goto MarkCdr;
+    set_bits_vars(markbits,dnode,bitsp,bits,mask);
+    if (bits & mask) goto MarkCdr;
+    *bitsp = (bits | mask);
+    deref(this,1) = prev;
+    if (tag_n == fulltag_cons) goto DescendCons;
+    goto DescendVector;
+
+  ClimbCar:
+    prev = deref(this,1);
+    deref(this,1) = next;
+
+  MarkCdr:
+    next = deref(this, 0);
+    this -= node_size;
+    tag_n = fulltag_of(next);
+    if (!is_node_fulltag(tag_n)) goto Climb;
+    dnode = gc_area_dnode(next);
+    if (dnode >= GCndnodes_in_area) goto Climb;
+    set_bits_vars(markbits,dnode,bitsp,bits,mask);
+    if (bits & mask) goto Climb;
+    *bitsp = (bits | mask);
+    deref(this, 0) = prev;
+    if (tag_n == fulltag_cons) goto DescendCons;
+    /* goto DescendVector; */
+
+  DescendVector:
+    prev = this;
+    this = next;
+
+  MarkVector:
+    {
+      LispObj *base = (LispObj *) ptr_from_lispobj(untag(this));
+      natural
+        header = *((natural *) base),
+        subtag = header_subtag(header),
+        element_count = header_element_count(header),
+        total_size_in_bytes,
+        suffix_dnodes;
+
+      tag_n = fulltag_of(header);
+
+#ifdef PPC64
+      if ((nodeheader_tag_p(tag_n)) ||
+          (tag_n == ivector_class_64_bit)) {
+        total_size_in_bytes = 8 + (element_count<<3);
+      } else if (tag_n == ivector_class_8_bit) {
+        total_size_in_bytes = 8 + element_count;
+      } else if (tag_n == ivector_class_32_bit) {
+        total_size_in_bytes = 8 + (element_count<<2);
+      } else {
+        /* ivector_class_other_bit contains 16-bit arrays & bitvector */
+        if (subtag == subtag_bit_vector) {
+          total_size_in_bytes = 8 + ((element_count+7)>>3);
+        } else {
+          total_size_in_bytes = 8 + (element_count<<1);
+        }
+      }
+#else
+      if ((tag_n == fulltag_nodeheader) ||
+          (subtag <= max_32_bit_ivector_subtag)) {
+        total_size_in_bytes = 4 + (element_count<<2);
+      } else if (subtag <= max_8_bit_ivector_subtag) {
+        total_size_in_bytes = 4 + element_count;
+      } else if (subtag <= max_16_bit_ivector_subtag) {
+        total_size_in_bytes = 4 + (element_count<<1);
+      } else if (subtag == subtag_double_float_vector) {
+        total_size_in_bytes = 8 + (element_count<<3);
+      } else {
+        total_size_in_bytes = 4 + ((element_count+7)>>3);
+      }
+#endif
+
+
+      suffix_dnodes = ((total_size_in_bytes+(dnode_size-1))>>dnode_shift)-1;
+
+      if (suffix_dnodes) {
+        set_n_bits(GCmarkbits, dnode+1, suffix_dnodes);
+      }
+
+      if (!nodeheader_tag_p(tag_n)) goto Climb;
+
+      if (subtag == subtag_hash_vector) {
+        /* Splice onto weakvll, then climb */
+        LispObj flags = ((hash_table_vector_header *) base)->flags;
+
+        if (flags & nhash_weak_mask) {
+          ((hash_table_vector_header *) base)->cache_key = undefined;
+          ((hash_table_vector_header *) base)->cache_value = lisp_nil;
+	  dws_mark_weak_htabv(this);
+	  element_count = hash_table_vector_header_count;
+	}
+      }
+
+      if (subtag == subtag_pool) {
+        deref(this, 1) = lisp_nil;
+      }
+
+      if (subtag == subtag_weak) {
+        natural weak_type = (natural) base[2];
+        if (weak_type >> population_termination_bit)
+          element_count -= 2;
+        else
+          element_count -= 1;
+      }
+
+      this = untag(this) + ((element_count+1) << node_shift);
+      goto MarkVectorLoop;
+    }
+
+  ClimbVector:
+    prev = *((LispObj *) ptr_from_lispobj(this));
+    *((LispObj *) ptr_from_lispobj(this)) = next;
+
+  MarkVectorLoop:
+    this -= node_size;
+    next = *((LispObj *) ptr_from_lispobj(this));
+    tag_n = fulltag_of(next);
+    if (nodeheader_tag_p(tag_n)) goto MarkVectorDone;
+    if (!is_node_fulltag(tag_n)) goto MarkVectorLoop;
+    dnode = gc_area_dnode(next);
+    if (dnode >= GCndnodes_in_area) goto MarkVectorLoop;
+    set_bits_vars(markbits,dnode,bitsp,bits,mask);
+    if (bits & mask) goto MarkVectorLoop;
+    *bitsp = (bits | mask);
+    *(ptr_from_lispobj(this)) = prev;
+    if (tag_n == fulltag_cons) goto DescendCons;
+    goto DescendVector;
+
+  MarkVectorDone:
+    /* "next" is vector header; "this" is fixnum-aligned.
+       If  header subtag = subtag_weak_header, put it on weakvll */
+    this += fulltag_misc;
+
+    if (header_subtag(next) == subtag_weak) {
+      deref(this, 1) = GCweakvll;
+      GCweakvll = untag(this);
+    }
+    goto Climb;
+  }
+}
+
+/* Return the address of the first object following the ivector whose
+   (untagged) base address is "start" and whose header word is "header".
+   The result accounts for the header word itself, the payload bytes,
+   and padding up to the next dnode boundary (16 bytes on PPC64,
+   8 bytes otherwise). */
+LispObj *
+skip_over_ivector(natural start, LispObj header)
+{
+  natural 
+    element_count = header_element_count(header),
+    subtag = header_subtag(header),
+    nbytes;
+
+#ifdef PPC64
+  /* On PPC64 the element width is encoded in the fulltag of the
+     header itself. */
+  switch (fulltag_of(header)) {
+  case ivector_class_64_bit:
+    nbytes = element_count << 3;
+    break;
+  case ivector_class_32_bit:
+    nbytes = element_count << 2;
+    break;
+  case ivector_class_8_bit:
+    nbytes = element_count;
+    break;
+  case ivector_class_other_bit:
+  default:
+    /* "other" covers 16-bit vectors and bit vectors. */
+    if (subtag == subtag_bit_vector) {
+      nbytes = (element_count+7)>>3;
+    } else {
+      nbytes = element_count << 1;
+    }
+  }
+  /* +8 for the header word, then round up to a 16-byte boundary. */
+  return ptr_from_lispobj(start+(~15 & (nbytes + 8 + 15)));
+#else
+  /* 32-bit: element width is determined by ordered subtag ranges;
+     the order of these comparisons is significant. */
+  if (subtag <= max_32_bit_ivector_subtag) {
+    nbytes = element_count << 2;
+  } else if (subtag <= max_8_bit_ivector_subtag) {
+    nbytes = element_count;
+  } else if (subtag <= max_16_bit_ivector_subtag) {
+    nbytes = element_count << 1;
+  } else if (subtag == subtag_double_float_vector) {
+    /* double-float vectors carry an extra alignment word. */
+    nbytes = 4 + (element_count << 3);
+  } else {
+    nbytes = (element_count+7) >> 3;
+  }
+  /* +4 for the header word, then round up to an 8-byte boundary. */
+  return ptr_from_lispobj(start+(~7 & (nbytes + 4 + 7)));
+#endif
+
+
+
+}
+
+
+/* Debugging aid: walk the dnodes in [start,end) and verify that every
+   doublenode containing an intergenerational reference (a pointer into
+   the area being GCed) has its bit set in "refbits".  A missing bit is
+   reported via Bug() and then repaired so the walk can continue. */
+void
+check_refmap_consistency(LispObj *start, LispObj *end, bitvector refbits)
+{
+  LispObj x1, *base = start;
+  int tag;
+  natural ref_dnode, node_dnode;
+  Boolean intergen_ref;
+
+  while (start < end) {
+    x1 = *start;
+    tag = fulltag_of(x1);
+    if (immheader_tag_p(tag)) {
+      /* ivector contents contain no node references; skip them. */
+      start = skip_over_ivector(ptr_to_lispobj(start), x1);
+    } else {
+      intergen_ref = false;
+      /* Check the first word of the dnode ... */
+      if ((tag == fulltag_misc) || (tag == fulltag_cons)) {        
+        node_dnode = gc_area_dnode(x1);
+        if (node_dnode < GCndnodes_in_area) {
+          intergen_ref = true;
+        }
+      }
+      /* ... and, if that wasn't intergenerational, the second word. */
+      if (intergen_ref == false) {        
+        x1 = start[1];
+        tag = fulltag_of(x1);
+        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
+          node_dnode = gc_area_dnode(x1);
+          if (node_dnode < GCndnodes_in_area) {
+            intergen_ref = true;
+          }
+        }
+      }
+      if (intergen_ref) {
+        ref_dnode = area_dnode(start, base);
+        if (!ref_bit(refbits, ref_dnode)) {
+          /* Repair the bit after reporting, so later phases see it. */
+          Bug(NULL, "Missing memoization in doublenode at 0x" LISP "\n", start);
+          set_bit(refbits, ref_dnode);
+        }
+      }
+      start += 2;
+    }
+  }
+}
+
+
+
+/* Mark ephemeral roots recorded in the memoized area "a": scan the
+   first "num_memo_dnodes" dnodes under control of the area's refbits,
+   marking the two words of each memoized dnode via
+   mark_ephemeral_root().  Refbits whose dnode turns out to hold only
+   uninteresting values are cleared ("outbits").  Weak hash vectors get
+   special treatment: depending on WEAK_GC_METHOD and the table's
+   weakness flags, one or both words of each key/value pair may be
+   skipped (mark_method bit 0 = mark first word, bit 1 = mark second). */
+void
+mark_memoized_area(area *a, natural num_memo_dnodes)
+{
+  bitvector refbits = a->refbits;
+  LispObj *p = (LispObj *) a->low, x1, x2;
+  natural inbits, outbits, bits, bitidx, *bitsp, nextbit, diff, memo_dnode = 0;
+  Boolean keep_x1, keep_x2;
+  natural hash_dnode_limit = 0;
+  hash_table_vector_header *hashp = NULL;
+  int mark_method = 3;
+
+  if (GCDebug) {
+    check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
+  }
+
+  /* The distinction between "inbits" and "outbits" is supposed to help us
+     detect cases where "uninteresting" setfs have been memoized.  Storing
+     NIL, fixnums, immediates (characters, etc.) or node pointers to static
+     or readonly areas is definitely uninteresting, but other cases are
+     more complicated (and some of these cases are hard to detect.)
+
+     Some headers are "interesting", to the forwarder if not to us. 
+
+     */
+
+  /*
+    We need to ensure that there are no bits set at or beyond
+    "num_memo_dnodes" in the bitvector.  (This can happen as the EGC
+    tenures/untenures things.)  We find bits by grabbing a fullword at
+    a time and doing a cntlzw instruction; and don't want to have to
+    check for (< memo_dnode num_memo_dnodes) in the loop.
+    */
+
+  {
+    natural 
+      bits_in_last_word = (num_memo_dnodes & bitmap_shift_count_mask),
+      index_of_last_word = (num_memo_dnodes >> bitmap_shift);
+
+    if (bits_in_last_word != 0) {
+      natural mask = ~((1L<<(nbits_in_word-bits_in_last_word))-1L);
+      refbits[index_of_last_word] &= mask;
+    }
+  }
+        
+  set_bitidx_vars(refbits, 0, bitsp, bits, bitidx);
+  inbits = outbits = bits;
+  while (memo_dnode < num_memo_dnodes) {
+    if (bits == 0) {
+      /* No more set bits in this word: write back any cleared bits,
+         then advance to the next bitmap word. */
+      int remain = nbits_in_word - bitidx;
+      memo_dnode += remain;
+      p += (remain+remain);
+      if (outbits != inbits) {
+        *bitsp = outbits;
+      }
+      bits = *++bitsp;
+      inbits = outbits = bits;
+      bitidx = 0;
+    } else {
+      /* Skip ahead to the next set bit; there are at least "bitidx"
+         leading zeros in "bits". */
+      nextbit = count_leading_zeros(bits);
+      if ((diff = (nextbit - bitidx)) != 0) {
+        memo_dnode += diff;
+        bitidx = nextbit;
+        p += (diff+diff);
+      }
+      x1 = *p++;
+      x2 = *p++;
+      bits &= ~(BIT0_MASK >> bitidx);
+
+      if (hashp) {
+        /* We're inside a weak hash vector spotted earlier. */
+        Boolean force_x1 = false;
+        if ((memo_dnode >= hash_dnode_limit) && (mark_method == 3)) {
+          /* if vector_header_count is odd, x1 might be the last word of the header */
+          force_x1 = (hash_table_vector_header_count & 1) && (memo_dnode == hash_dnode_limit);
+          /* was marking header, switch to data */
+          hash_dnode_limit = area_dnode(((LispObj *)hashp)
+                                        + 1
+                                        + header_element_count(hashp->header),
+                                        a->low);
+          /* In traditional weak method, don't mark vector entries at all. */
+          /* Otherwise mark the non-weak elements only */
+          mark_method = ((lisp_global(WEAK_GC_METHOD) == 0) ? 0 :
+                         ((hashp->flags & nhash_weak_value_mask)
+                          ? (1 + (hash_table_vector_header_count & 1))
+                          : (2 - (hash_table_vector_header_count & 1))));
+        }
+
+        if (memo_dnode < hash_dnode_limit) {
+          /* perhaps ignore one or both of the elements */
+          if (!force_x1 && !(mark_method & 1)) x1 = 0;
+          if (!(mark_method & 2)) x2 = 0;
+        } else {
+          hashp = NULL;
+        }
+      }
+
+      if (header_subtag(x1) == subtag_hash_vector) {
+        if (hashp) Bug(NULL, "header inside hash vector?");
+        hash_table_vector_header *hp = (hash_table_vector_header *)(p - 2);
+        if (hp->flags & nhash_weak_mask) {
+          /* If header_count is odd, this cuts off the last header field */
+          /* That case is handled specially above */
+          hash_dnode_limit = memo_dnode + ((hash_table_vector_header_count) >>1);
+          hashp = hp;
+          mark_method = 3;
+        }
+      }
+
+      keep_x1 = mark_ephemeral_root(x1);
+      keep_x2 = mark_ephemeral_root(x2);
+      /* If neither word needs the memoization (and we're not in the
+         middle of a weak hash vector), the refbit can be cleared. */
+      if ((keep_x1 == false) && 
+          (keep_x2 == false) &&
+          (hashp == NULL)) {
+        outbits &= ~(BIT0_MASK >> bitidx);
+      }
+      memo_dnode++;
+      bitidx++;
+    }
+  }
+  if (GCDebug) {
+    p = (LispObj *) a->low;
+    check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
+  }
+}
+
+
+
+/* Mark every node in [start,end): skip ivectors, mark both words of
+   cons/value dnodes via mark_root, and handle gvectors specially --
+   weak hash vectors are deferred to mark_weak_htabv, pools have their
+   contents dropped, and weak populations are spliced onto GCweakvll
+   with their unscanned tail elements excluded. */
+void
+mark_simple_area_range(LispObj *start, LispObj *end)
+{
+  LispObj x1, *base;
+  int tag;
+
+  while (start < end) {
+    x1 = *start;
+    tag = fulltag_of(x1);
+    if (immheader_tag_p(tag)) {
+      /* ivector: no node references inside; skip its whole extent. */
+      start = (LispObj *)ptr_from_lispobj(skip_over_ivector(ptr_to_lispobj(start), x1));
+    } else if (!nodeheader_tag_p(tag)) {
+      /* ordinary dnode: mark both words. */
+      ++start;
+      mark_root(x1);
+      mark_root(*start++);
+    } else {
+      int subtag = header_subtag(x1);
+      natural element_count = header_element_count(x1);
+      /* total size in words (header + elements), rounded to a dnode. */
+      natural size = (element_count+1 + 1) & ~1;
+
+      if (subtag == subtag_hash_vector) {
+        LispObj flags = ((hash_table_vector_header *) start)->flags;
+
+        if (flags & nhash_weak_mask) {
+          /* Weak table: invalidate the cached pair so it doesn't
+             retain otherwise-unreferenced keys/values, and let
+             mark_weak_htabv do the marking (element_count = 0 skips
+             the generic element loop below). */
+          ((hash_table_vector_header *) start)->cache_key = undefined;
+          ((hash_table_vector_header *) start)->cache_value = lisp_nil;
+	  mark_weak_htabv((LispObj)start);
+	  element_count = 0;
+	}
+      }
+      if (subtag == subtag_pool) {
+	start[1] = lisp_nil;
+      }
+
+      if (subtag == subtag_weak) {
+	natural weak_type = (natural) start[2];
+	if (weak_type >> population_termination_bit)
+	  element_count -= 2;
+	else
+	  element_count -= 1; 
+	start[1] = GCweakvll;
+	GCweakvll = ptr_to_lispobj(start);
+      }
+
+      /* Mark the gvector's elements, last to first. */
+      base = start + element_count + 1;
+      while(element_count--) {
+	mark_root(*--base);
+      }   
+      start += size;
+    }
+  }
+}
+
+
+/* Mark a tstack area.  Each tstack frame begins with a pointer to the
+   next (older) frame; current[1] == 0 distinguishes frames whose
+   payload (starting at current+2) contains boxed lisp data that must
+   be marked.  A back-pointer outside [start,limit) terminates the walk
+   at "limit". */
+void
+mark_tstack_area(area *a)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) (a->active),
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = (LispObj *) ptr_from_lispobj(*current);
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    if (current[1] == 0) {
+      /* boxed frame: mark everything between the frame header and
+         the start of the next frame. */
+      mark_simple_area_range(current+2, end);
+    }
+  }
+}
+
+/*
+  It's really important that headers never wind up in tagged registers.
+  Those registers would (possibly) get pushed on the vstack and confuse
+  the hell out of this routine.
+
+  vstacks are just treated as a "simple area range", possibly with
+  an extra word at the top (where the area's active pointer points.)
+  */
+
+void
+mark_vstack_area(area *a)
+{
+  LispObj
+    *start = (LispObj *) a->active,
+    *end = (LispObj *) a->high;
+
+#if 0
+  fprintf(dbgout, "mark VSP range: 0x%lx:0x%lx\n", start, end);
+#endif
+  /* If the active pointer isn't dnode-aligned there's an odd leading
+     word; it can't be a header (see comment above), so mark it as a
+     root and advance to an even boundary. */
+  if (((natural)start) & (sizeof(natural))) {
+    /* Odd number of words.  Mark the first (can't be a header) */
+    mark_root(*start);
+    ++start;
+  }
+  mark_simple_area_range(start, end);
+}
+
+
+/*
+  Mark lisp frames on the control stack.
+  Ignore emulator frames (odd backpointer) and C frames (size != 4).
+*/
+
+void
+mark_cstack_area(area *a)
+{
+  BytePtr
+    current,
+    next,
+    limit = a->high,
+    low = a->low;
+
+  for (current = a->active; (current >= low) && (current < limit); current = next) {
+    next = *((BytePtr *)current);
+#if 0
+    if (next < current) {
+      Bug(NULL, "Child stack frame older than parent");
+    }
+#endif
+    if (next == NULL) break;
+    if (((next - current) == sizeof(lisp_frame)) &&
+	(((((lisp_frame *)current)->savefn) == 0) ||
+	 (fulltag_of(((lisp_frame *)current)->savefn) == fulltag_misc))) {
+      /* mark fn, then saved lr */
+      mark_root(((lisp_frame *)current)->savefn);
+      mark_pc_root(((lisp_frame *)current)->savelr);
+    } else {
+      /* Clear low 2 bits of "next", just in case */
+      next = (BytePtr) (((natural)next) & ~3);
+    }
+  }
+}
+
+
+
+/* Mark the lisp objects in an exception frame.  Only implemented for
+   PPC in this file; on other targets the frame's registers aren't
+   touched here. */
+void
+mark_xp(ExceptionInformation *xp)
+{
+  natural *regs = (natural *) xpGPRvector(xp);
+
+#ifdef PPC
+  int r;
+  /* registers >= fn should be tagged and marked as roots.
+     the PC, LR, loc_pc, and CTR should be treated as "pc_locatives".
+
+     In general, marking a locative is more expensive than marking
+     a node is, since it may be neccessary to back up and find the
+     containing object's header.  Since exception frames contain
+     many locatives, it'd be wise to mark them *after* marking the
+     stacks, nilreg-relative globals, etc.
+     */
+
+  for (r = fn; r < 32; r++) {
+    mark_root((regs[r]));
+  }
+
+
+
+  mark_pc_root((regs[loc_pc]));
+  mark_pc_root(ptr_to_lispobj(xpPC(xp)));
+  mark_pc_root(ptr_to_lispobj(xpLR(xp)));
+  mark_pc_root(ptr_to_lispobj(xpCTR(xp)));
+#endif /* PPC */
+
+}
+
+/* A "pagelet" contains 32 doublewords.  The relocation table contains
+   a word for each pagelet which defines the lowest address to which
+   dnodes on that pagelet will be relocated.
+
+   The relocation address of a given pagelet is the sum of the relocation
+   address for the preceding pagelet and the number of bytes occupied by
+   marked objects on the preceding pagelet.
+*/
+
+/* Fill in GCrelocptr: one relocation address per pagelet (32 dnodes),
+   computed by accumulating the byte count of marked dnodes per pagelet.
+   Returns the address of the first unmarked dnode (the point from which
+   compaction must actually move anything), or the end of the area if
+   everything below it is marked. */
+LispObj
+calculate_relocation()
+{
+  LispObj *relocptr = GCrelocptr;
+  LispObj current = GCareadynamiclow;
+  bitvector 
+    markbits = GCdynamic_markbits;
+  qnode *q = (qnode *) markbits;
+  natural npagelets = ((GCndynamic_dnodes_in_area+(nbits_in_word-1))>>bitmap_shift);
+  natural thesebits;
+  LispObj first = 0;
+
+  do {
+    *relocptr++ = current;
+    thesebits = *markbits++;
+    if (thesebits == ALL_ONES) {
+      /* Fully-marked pagelet: everything survives. */
+      current += nbits_in_word*dnode_size;
+      q += 4; /* sic */
+    } else {
+      if (!first) {
+        /* Note the first unmarked dnode we encounter: scan past the
+           leading run of marked bits. */
+        first = current;
+        while (thesebits & BIT0_MASK) {
+          first += dnode_size;
+          thesebits += thesebits;
+        }
+      }
+      /* Sum the marked dnodes quarter-word at a time via the
+         one_bits() popcount table. */
+      current += one_bits(*q++);
+      current += one_bits(*q++);
+      current += one_bits(*q++);
+      current += one_bits(*q++);
+    }
+  } while(--npagelets);
+  *relocptr++ = current;
+  return first ? first : current;
+}
+
+#ifdef PPC64
+/* PPC64: compute the post-compaction address of the marked object in
+   "dnode", preserving its fulltag "tag_n".  The pagelet's base
+   relocation comes from GCrelocptr; the offset within the pagelet is
+   16 bytes per marked dnode preceding it.  When the dnode is in the
+   upper half of its 64-bit markbits word, count backwards from the
+   NEXT pagelet's relocation address instead, so only the enclosing
+   32-bit word of markbits ("near_bits") ever needs to be examined. */
+LispObj
+dnode_forwarding_address(natural dnode, int tag_n)
+{
+  natural pagelet, nbits;
+  unsigned int near_bits;
+  LispObj new;
+
+  if (GCDebug) {
+    if (! ref_bit(GCdynamic_markbits, dnode)) {
+      Bug(NULL, "unmarked object being forwarded!\n");
+    }
+  }
+
+  pagelet = dnode >> bitmap_shift;
+  nbits = dnode & bitmap_shift_count_mask;
+  near_bits = ((unsigned int *)GCdynamic_markbits)[dnode>>(dnode_shift+1)];
+
+  if (nbits < 32) {
+    new = GCrelocptr[pagelet] + tag_n;
+    /* Increment "new" by the count of 1 bits which precede the dnode */
+    if (near_bits == 0xffffffff) {
+      return (new + (nbits << 4));
+    } else {
+      near_bits &= (0xffffffff00000000 >> nbits);
+      if (nbits > 15) {
+        new += one_bits(near_bits & 0xffff);
+      }
+      return (new + (one_bits(near_bits >> 16))); 
+    }
+  } else {
+    /* Work backwards from the next pagelet's relocation address,
+       subtracting the marked dnodes at or after this one. */
+    new = GCrelocptr[pagelet+1] + tag_n;
+    nbits = 64-nbits;
+
+    if (near_bits == 0xffffffff) {
+      return (new - (nbits << 4));
+    } else {
+      near_bits &= (1<<nbits)-1;
+      if (nbits > 15) {
+        new -= one_bits(near_bits >> 16);
+      }
+      return (new -  one_bits(near_bits & 0xffff));
+    }
+  }
+}
+#else
+/* 32-bit: compute the post-compaction address of the marked object in
+   "dnode", preserving its fulltag "tag_n".  A pagelet is 32 dnodes;
+   its base relocation address comes from GCrelocptr and the offset is
+   8 bytes per marked dnode preceding this one.  If the dnode sits in
+   the upper half of its markbits word, count backwards from the NEXT
+   pagelet's relocation address, so only the enclosing 16-bit halfword
+   of markbits ("near_bits") ever needs to be examined. */
+LispObj
+dnode_forwarding_address(natural dnode, int tag_n)
+{
+  natural pagelet, nbits;
+  unsigned short near_bits;
+  LispObj new;
+
+  if (GCDebug) {
+    if (! ref_bit(GCdynamic_markbits, dnode)) {
+      Bug(NULL, "unmarked object being forwarded!\n");
+    }
+  }
+
+  pagelet = dnode >> 5;
+  nbits = dnode & 0x1f;
+  near_bits = ((unsigned short *)GCdynamic_markbits)[dnode>>4];
+
+  if (nbits < 16) {
+    new = GCrelocptr[pagelet] + tag_n;
+    /* Increment "new" by the count of 1 bits which precede the dnode */
+    if (near_bits == 0xffff) {
+      return (new + (nbits << 3));
+    } else {
+      near_bits &= (0xffff0000 >> nbits);
+      if (nbits > 7) {
+        new += one_bits(near_bits & 0xff);
+      }
+      return (new + (one_bits(near_bits >> 8))); 
+    }
+  } else {
+    /* Work backwards from the next pagelet's relocation address,
+       subtracting the marked dnodes at or after this one. */
+    new = GCrelocptr[pagelet+1] + tag_n;
+    nbits = 32-nbits;
+
+    if (near_bits == 0xffff) {
+      return (new - (nbits << 3));
+    } else {
+      near_bits &= (1<<nbits)-1;
+      if (nbits > 7) {
+        new -= one_bits(near_bits >> 8);
+      }
+      return (new -  one_bits(near_bits & 0xff));
+    }
+  }
+}
+#endif
+
+
+/* Return the forwarding address of "obj" viewed as a locative: objects
+   that aren't node pointers (immediates, headers, nil), that lie
+   outside the dynamic area, or that sit below GCfirstunmarked aren't
+   moving and are returned unchanged. */
+LispObj
+locative_forwarding_address(LispObj obj)
+{
+  int tag_n = fulltag_of(obj);
+  natural dnode;
+
+
+#ifdef PPC
+  /* Locatives can be tagged as conses, "fulltag_misc"
+     objects, or as fixnums.  Immediates, headers, and nil
+     shouldn't be "forwarded".  Nil never will be, but it
+     doesn't hurt to check ... */
+#ifdef PPC64
+  if ((tag_n & lowtag_mask) != lowtag_primary) {
+    return obj;
+  }
+#else
+  if ((1<<tag_n) & ((1<<fulltag_immheader) |
+                    (1<<fulltag_nodeheader) |
+                    (1<<fulltag_imm) |
+                    (1<<fulltag_nil))) {
+    return obj;
+  }
+#endif
+#endif
+
+  dnode = gc_dynamic_area_dnode(obj);
+
+  /* Objects outside the dynamic area, or below the first unmarked
+     dnode, don't move. */
+  if ((dnode >= GCndynamic_dnodes_in_area) ||
+      (obj < GCfirstunmarked)) {
+    return obj;
+  }
+
+  return dnode_forwarding_address(dnode, tag_n);
+}
+
+
+
+
+/* Rewrite every node reference in [range_start,range_end) to its
+   forwarding address.  Ivectors are skipped wholesale.  Hash vectors
+   with key tracking enabled get special treatment: if any key moves,
+   nhash_key_moved_mask is set in the table's flags (once), and the
+   trailing alignment word is cleared. */
+void
+forward_range(LispObj *range_start, LispObj *range_end)
+{
+  LispObj *p = range_start, node, new;
+  int tag_n;
+  natural nwords;
+  hash_table_vector_header *hashp;
+
+  while (p < range_end) {
+    node = *p;
+    tag_n = fulltag_of(node);
+    if (immheader_tag_p(tag_n)) {
+      /* ivector: contains no node references. */
+      p = (LispObj *) skip_over_ivector((natural) p, node);
+    } else if (nodeheader_tag_p(tag_n)) {
+      /* gvector: round the element count up to fill whole dnodes. */
+      nwords = header_element_count(node);
+      nwords += (1 - (nwords&1));
+      if ((header_subtag(node) == subtag_hash_vector) &&
+          ((((hash_table_vector_header *)p)->flags) & nhash_track_keys_mask)) {
+        natural skip = (sizeof(hash_table_vector_header)/sizeof(LispObj))-1;
+        hashp = (hash_table_vector_header *) p;
+        p++;
+        nwords -= skip;
+        /* Forward the fixed header fields normally. */
+        while(skip--) {
+          update_noderef(p);
+          p++;
+        }
+        /* "nwords" is odd at this point: there are (floor nwords 2)
+           key/value pairs to look at, and then an extra word for
+           alignment.  Process them two at a time, then bump "p"
+           past the alignment word. */
+        nwords >>= 1;
+        while(nwords--) {
+          /* If a key moved, flag the table (only once) so lisp
+             knows to rehash. */
+          if (update_noderef(p) && hashp) {
+            hashp->flags |= nhash_key_moved_mask;
+            hashp = NULL;
+          }
+          p++;
+          update_noderef(p);
+          p++;
+        }
+        *p++ = 0;
+      } else {
+        p++;
+        while(nwords--) {
+          update_noderef(p);
+          p++;
+        }
+      }
+    } else {
+      /* ordinary dnode: forward both words. */
+      new = node_forwarding_address(node);
+      if (new != node) {
+        *p = new;
+      }
+      p++;
+      update_noderef(p);
+      p++;
+    }
+  }
+}
+
+
+
+
+/* Forward a tstack area: walk the frame chain exactly as
+   mark_tstack_area does, forwarding the boxed payload (current+2
+   through the next frame) of each frame whose current[1] == 0. */
+void
+forward_tstack_area(area *a)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) a->active,
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = ptr_from_lispobj(*current);
+    /* A back-pointer outside [start,limit) terminates at "limit". */
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    if (current[1] == 0) {
+      forward_range(current+2, end);
+    }
+  }
+}
+
+/* Forward a vstack area: if the active pointer isn't dnode-aligned,
+   forward the odd leading word (which can't be a header) by itself,
+   then forward the rest as a simple range. */
+void
+forward_vstack_area(area *a)
+{
+  LispObj
+    *p = (LispObj *) a->active,
+    *q = (LispObj *) a->high;
+
+#ifdef DEBUG
+  /* %p (not %x) for pointer-sized arguments. */
+  fprintf(dbgout,"Forward range %p/%p (owner %p)\n",(void *)p,(void *)q,(void *)a->owner);
+#endif
+  if (((natural)p) & sizeof(natural)) {
+    update_noderef(p);
+    p++;
+  }
+  forward_range(p, q);
+}
+
+/* Forward lisp frames on the control stack: for each frame that looks
+   like a lisp frame (size == sizeof(lisp_frame), savefn either 0 or
+   tagged fulltag_misc), forward savefn as a node and savelr as a
+   locative.  Other frames are skipped. */
+void
+forward_cstack_area(area *a)
+{
+  BytePtr
+    current,
+    next,
+    limit = a->high,
+    low = a->low;
+
+  for (current = a->active; (current >= low) && (current < limit); current = next) {
+    next = *((BytePtr *)current);
+    if (next == NULL) break;
+    if (((next - current) == sizeof(lisp_frame)) &&
+	(((((lisp_frame *)current)->savefn) == 0) ||
+	 (fulltag_of(((lisp_frame *)current)->savefn) == fulltag_misc))) {
+      update_noderef(&((lisp_frame *) current)->savefn);
+      update_locref(&((lisp_frame *) current)->savelr);
+    }
+  }
+}
+
+
+
+/* Forward the lisp objects in an exception frame: registers from fn
+   upward as node references, and PC-like registers (loc_pc, PC, LR,
+   CTR) as locatives. */
+void
+forward_xp(ExceptionInformation *xp)
+{
+  natural *regs = (natural *) xpGPRvector(xp);
+
+  int r;
+
+  /* registers >= fn should be tagged and forwarded as roots.
+     the PC, LR, loc_pc, and CTR should be treated as "locatives".
+     */
+
+  for (r = fn; r < 32; r++) {
+    update_noderef((LispObj*) (&(regs[r])));
+  }
+
+  update_locref((LispObj*) (&(regs[loc_pc])));
+
+  update_locref((LispObj*) (&(xpPC(xp))));
+  update_locref((LispObj*) (&(xpLR(xp))));
+  update_locref((LispObj*) (&(xpCTR(xp))));
+
+}
+
+
+/* Forward the exception frames associated with a thread: first the
+   TCR's current GC context (if any), then every entry on its xframe
+   list.  The gc_context must not also appear on that list; seeing it
+   there would mean forwarding the same frame twice. */
+void
+forward_tcr_xframes(TCR *tcr)
+{
+  ExceptionInformation *gc_xp = tcr->gc_context;
+  xframe_list *xf;
+
+  if (gc_xp != NULL) {
+    forward_xp(gc_xp);
+  }
+  xf = tcr->xframe;
+  while (xf != NULL) {
+    if (xf->curr == gc_xp) {
+      Bug(NULL, "forward xframe twice ???");
+    }
+    forward_xp(xf->curr);
+    xf = xf->prev;
+  }
+}
+
+
+
+/*
+  Compact the dynamic heap (from GCfirstunmarked through its end.)
+  Return the doublenode address of the new freeptr.
+  */
+
+LispObj
+compact_dynamic_heap()
+{
+  LispObj *src = ptr_from_lispobj(GCfirstunmarked), *dest = src, node, new;
+  natural 
+    elements, 
+    dnode = gc_area_dnode(GCfirstunmarked), 
+    node_dnodes = 0, 
+    imm_dnodes = 0, 
+    bitidx, 
+    *bitsp, 
+    bits, 
+    nextbit, 
+    diff;
+  int tag;
+  bitvector markbits = GCmarkbits;
+    /* keep track of whether or not we saw any
+       code_vector headers, and only flush cache if so. */
+  Boolean GCrelocated_code_vector = false;
+
+  if (dnode < GCndnodes_in_area) {
+    lisp_global(FWDNUM) += (1<<fixnum_shift);
+  
+    set_bitidx_vars(markbits,dnode,bitsp,bits,bitidx);
+    while (dnode < GCndnodes_in_area) {
+      if (bits == 0) {
+        int remain = nbits_in_word - bitidx;
+        dnode += remain;
+        src += (remain+remain);
+        bits = *++bitsp;
+        bitidx = 0;
+      } else {
+        /* Have a non-zero markbits word; all bits more significant
+           than "bitidx" are 0.  Count leading zeros in "bits"
+           (there'll be at least "bitidx" of them.)  If there are more
+           than "bitidx" leading zeros, bump "dnode", "bitidx", and
+           "src" by the difference. */
+        nextbit = count_leading_zeros(bits);
+        if ((diff = (nextbit - bitidx)) != 0) {
+          dnode += diff;
+          bitidx = nextbit;
+          src += (diff+diff);
+        }
+
+        if (GCDebug) {
+          if (dest != ptr_from_lispobj(locative_forwarding_address(ptr_to_lispobj(src)))) {
+            Bug(NULL, "Out of synch in heap compaction.  Forwarding from 0x%lx to 0x%lx,\n expected to go to 0x%lx\n", 
+                src, dest, locative_forwarding_address(ptr_to_lispobj(src)));
+          }
+        }
+
+        node = *src++;
+        tag = fulltag_of(node);
+        if (nodeheader_tag_p(tag)) {
+          elements = header_element_count(node);
+          node_dnodes = (elements+2)>>1;
+          dnode += node_dnodes;
+          if ((header_subtag(node) == subtag_hash_vector) &&
+              (((hash_table_vector_header *) (src-1))->flags & nhash_track_keys_mask)) {
+            hash_table_vector_header *hashp = (hash_table_vector_header *) dest;
+            int skip = (sizeof(hash_table_vector_header)/sizeof(LispObj))-1;
+          
+            *dest++ = node;
+            elements -= skip;
+            while(skip--) {
+              *dest++ = node_forwarding_address(*src++);
+            }
+            /* There should be an even number of (key/value) pairs in elements;
+               an extra alignment word follows. */
+            elements >>= 1;
+            while (elements--) {
+              if (hashp) {
+                node = *src++;
+                new = node_forwarding_address(node);
+                if (new != node) {
+                  hashp->flags |= nhash_key_moved_mask;
+                  hashp = NULL;
+                  *dest++ = new;
+                } else {
+                  *dest++ = node;
+                }
+              } else {
+                *dest++ = node_forwarding_address(*src++);
+              }
+              *dest++ = node_forwarding_address(*src++);
+            }
+            *dest++ = 0;
+            src++;
+          } else {
+            *dest++ = node;
+            *dest++ = node_forwarding_address(*src++);
+            while(--node_dnodes) {
+              *dest++ = node_forwarding_address(*src++);
+              *dest++ = node_forwarding_address(*src++);
+            }
+          }
+          set_bitidx_vars(markbits,dnode,bitsp,bits,bitidx);
+        } else if (immheader_tag_p(tag)) {
+          *dest++ = node;
+          *dest++ = *src++;
+          elements = header_element_count(node);
+          tag = header_subtag(node);
+
+#ifdef PPC
+#ifdef PPC64
+          switch(fulltag_of(tag)) {
+          case ivector_class_64_bit:
+            imm_dnodes = ((elements+1)+1)>>1;
+            break;
+          case ivector_class_32_bit:
+            if (tag == subtag_code_vector) {
+              GCrelocated_code_vector = true;
+            }
+            imm_dnodes = (((elements+2)+3)>>2);
+            break;
+          case ivector_class_8_bit:
+            imm_dnodes = (((elements+8)+15)>>4);
+            break;
+          case ivector_class_other_bit:
+            if (tag == subtag_bit_vector) {
+              imm_dnodes = (((elements+64)+127)>>7);
+            } else {
+              imm_dnodes = (((elements+4)+7)>>3);
+            }
+          }
+#else
+          if (tag <= max_32_bit_ivector_subtag) {
+            if (tag == subtag_code_vector) {
+              GCrelocated_code_vector = true;
+            }
+            imm_dnodes = (((elements+1)+1)>>1);
+          } else if (tag <= max_8_bit_ivector_subtag) {
+            imm_dnodes = (((elements+4)+7)>>3);
+          } else if (tag <= max_16_bit_ivector_subtag) {
+            imm_dnodes = (((elements+2)+3)>>2);
+          } else if (tag == subtag_bit_vector) {
+            imm_dnodes = (((elements+32)+63)>>6);
+          } else {
+            imm_dnodes = elements+1;
+          }
+#endif
+#endif
+
+          dnode += imm_dnodes;
+          while (--imm_dnodes) {
+            *dest++ = *src++;
+            *dest++ = *src++;
+          }
+          set_bitidx_vars(markbits,dnode,bitsp,bits,bitidx);
+        } else {
+          *dest++ = node_forwarding_address(node);
+          *dest++ = node_forwarding_address(*src++);
+          bits &= ~(BIT0_MASK >> bitidx);
+          dnode++;
+          bitidx++;
+        }
+      }
+  
+    }
+
+    {
+      natural nbytes = (natural)ptr_to_lispobj(dest) - (natural)GCfirstunmarked;
+      if ((nbytes != 0) && GCrelocated_code_vector) {
+        xMakeDataExecutable((LogicalAddress)ptr_from_lispobj(GCfirstunmarked), nbytes);
+      }
+    }
+  }
+  return ptr_to_lispobj(dest);
+}
+
+
+
+
+      
+    
+/*
+  Total the (physical) byte sizes of all ivectors in the indicated memory range
+*/
+
+/*
+  Walk the heap range [start, end) and return the total physical
+  (dnode-aligned) byte size of all ivectors headed in that range.
+  Gvectors and conses contribute nothing; they are simply skipped.
+*/
+natural
+unboxed_bytes_in_range(LispObj *start, LispObj *end)
+{
+    natural total=0, elements, tag, subtag, bytes;
+    LispObj header;
+
+    while (start < end) {
+      header = *start;
+      tag = fulltag_of(header);
+    
+      if ((nodeheader_tag_p(tag)) ||
+          (immheader_tag_p(tag))) {
+        elements = header_element_count(header);
+        if (nodeheader_tag_p(tag)) {
+          /* Gvector: skip header + elements, rounded up to a whole dnode. */
+          start += ((elements+2) & ~1);
+        } else {
+          subtag = header_subtag(header);
+
+          /* Ivector: compute header + payload size in bytes, by element width. */
+#ifdef PPC64
+          switch(fulltag_of(header)) {
+          case ivector_class_64_bit:
+            bytes = 8 + (elements<<3);
+            break;
+          case ivector_class_32_bit:
+            bytes = 8 + (elements<<2);
+            break;
+          case ivector_class_8_bit:
+            bytes = 8 + elements;
+            break;
+          case ivector_class_other_bit:
+          default:
+            if (subtag == subtag_bit_vector) {
+              bytes = 8 + ((elements+7)>>3);
+            } else {
+              bytes = 8 + (elements<<1);
+            }
+          }
+#else
+          if (subtag <= max_32_bit_ivector_subtag) {
+            bytes = 4 + (elements<<2);
+          } else if (subtag <= max_8_bit_ivector_subtag) {
+            bytes = 4 + elements;
+          } else if (subtag <= max_16_bit_ivector_subtag) {
+            bytes = 4 + (elements<<1);
+          } else if (subtag == subtag_double_float_vector) {
+            /* Double-float vectors pad the 32-bit header to 8 bytes so
+               the data is 8-byte aligned. */
+            bytes = 8 + (elements<<3);
+          } else {
+            /* Remaining case: bit vectors. */
+            bytes = 4 + ((elements+7)>>3);
+          }
+#endif
+
+
+          /* Round up to dnode (two-word) granularity. */
+          bytes = (bytes+dnode_size-1) & ~(dnode_size-1);
+          total += bytes;
+          start += (bytes >> node_shift);
+        }
+      } else {
+        /* A cons (or other non-header dnode): skip one dnode. */
+        start += 2;
+      }
+    }
+    return total;
+  }
+
+
+  /* 
+     This assumes that it's getting called with an ivector
+     argument and that there's room for the object in the
+     destination area.
+  */
+
+
+/*
+  Copy the ivector at (untagged) obj into the destination area and
+  return its new tagged address, formed by adding `disp` (normally the
+  object's fulltag) to the new untagged address.  The old object is
+  overwritten with {forward_marker, new-dnode-address} pairs at every
+  dnode so that interior locatives can be forwarded later.
+*/
+LispObj
+purify_displaced_object(LispObj obj, area *dest, natural disp)
+{
+  BytePtr 
+    free = dest->active,
+    *old = (BytePtr *) ptr_from_lispobj(untag(obj));
+  LispObj 
+    header = header_of(obj), 
+    new;
+  natural 
+    start = (natural)old,
+    physbytes;
+
+  /* Physical size = distance from the header to the next object. */
+  physbytes = ((natural)(skip_over_ivector(start,header))) - start;
+  dest->active += physbytes;
+
+  new = ptr_to_lispobj(free)+disp;
+
+  memcpy(free, (BytePtr)old, physbytes);
+  /* Leave a trail of breadcrumbs.  Or maybe just one breadcrumb. */
+  /* Actually, it's best to always leave a trail, for two reasons.
+     a) We may be walking the same heap that we're leaving forwaring
+     pointers in, so we don't want garbage that we leave behind to
+     look like a header.
+     b) We'd like to be able to forward code-vector locatives, and
+     it's easiest to do so if we leave a {forward_marker, dnode_locative}
+     pair at every doubleword in the old vector.
+  */
+  /* NOTE(review): this loop assumes physbytes is a multiple of
+     dnode_size (i.e. skip_over_ivector returns a dnode-aligned
+     address); otherwise it would not terminate — TODO confirm. */
+  while(physbytes) {
+    *old++ = (BytePtr) forward_marker;
+    *old++ = (BytePtr) free;
+    free += dnode_size;
+    physbytes -= dnode_size;
+  }
+  return new;
+}
+
+/* Purify an ivector in place, preserving its original fulltag as the
+   displacement into the copied object. */
+LispObj
+purify_object(LispObj obj, area *dest)
+{
+  return purify_displaced_object(obj, dest, fulltag_of(obj));
+}
+
+
+
+/*
+  If *ref is a misc-tagged pointer strictly inside (low, high), either
+  redirect it to the already-purified copy (when a forward_marker is
+  found) or purify the referenced object now — but only if it is an
+  ivector other than a macptr (macptrs stay in the dynamic heap).
+*/
+void
+copy_ivector_reference(LispObj *ref, BytePtr low, BytePtr high, area *dest)
+{
+  LispObj obj = *ref, header;
+  natural tag = fulltag_of(obj), header_tag;
+
+  if ((tag == fulltag_misc) &&
+      (((BytePtr)ptr_from_lispobj(obj)) > low) &&
+      (((BytePtr)ptr_from_lispobj(obj)) < high)) {
+    header = deref(obj, 0);
+    if (header == forward_marker) { /* already copied */
+      /* The word after the marker holds the new location; re-apply
+         the original tag. */
+      *ref = (untag(deref(obj,1)) + tag);
+    } else {
+      header_tag = fulltag_of(header);
+      if (immheader_tag_p(header_tag)) {
+        if (header_subtag(header) != subtag_macptr) {
+          *ref = purify_object(obj, dest);
+        }
+      }
+    }
+  }
+}
+
+/*
+  Purify a "locative": a fixnum-tagged interior pointer (e.g. a saved
+  LR or PC) into a code vector in (low, high).  If the target dnode has
+  already been forwarded, just redirect; otherwise scan backwards for
+  the code vector's header, purify the whole vector, and forward the
+  locative with the accumulated in-vector offset carried in `tag`.
+  PPC only.
+
+  NOTE(review): the do/while and switch braces below are closed
+  differently in the PPC64 and 32-bit paths via #ifdef — the brace
+  structure is intentional; do not "fix" the indentation mechanically.
+*/
+void
+purify_locref(LispObj *locaddr, BytePtr low, BytePtr high, area *to)
+{
+#ifdef PPC
+  LispObj
+    loc = *locaddr,
+    *headerP;
+  opcode
+    *p,
+    insn;
+  natural
+    tag = fulltag_of(loc);
+
+  if (((BytePtr)ptr_from_lispobj(loc) > low) &&
+      ((BytePtr)ptr_from_lispobj(loc) < high)) {
+
+    headerP = (LispObj *)ptr_from_lispobj(untag(loc));
+    switch (tag) {
+    case fulltag_even_fixnum:
+    case fulltag_odd_fixnum:
+#ifdef PPC64
+    case fulltag_cons:
+    case fulltag_misc:
+#endif
+      if (*headerP == forward_marker) {
+	*locaddr = (headerP[1]+tag);
+      } else {
+	/* Grovel backwards until the header's found; copy
+	   the code vector to to space, then treat it as if it 
+	   hasn't already been copied. */
+	p = (opcode *)headerP;
+	do {
+	  p -= 2;
+	  tag += 8;
+	  insn = *p;
+#ifdef PPC64
+	} while (insn != PPC64_CODE_VECTOR_PREFIX);
+	headerP = ((LispObj*)p)-1;
+	*locaddr = purify_displaced_object(((LispObj)headerP), to, tag);
+#else
+      } while ((insn & code_header_mask) != subtag_code_vector);
+      *locaddr = purify_displaced_object(ptr_to_lispobj(p), to, tag);
+#endif
+    }
+    break;
+
+#ifndef PPC64
+  case fulltag_misc:
+    /* An ordinary boxed reference: purify it as such. */
+    copy_ivector_reference(locaddr, low, high, to);
+    break;
+#endif
+  }
+}
+#endif
+}
+
+/*
+  Purify every boxed reference in [start, end): forwarded dnodes and
+  ivectors are skipped whole; for other dnodes both words are treated
+  as nodes, except that a node-header first word is left alone.
+*/
+void
+purify_range(LispObj *start, LispObj *end, BytePtr low, BytePtr high, area *to)
+{
+  LispObj header;
+  unsigned tag;
+
+  while (start < end) {
+    header = *start;
+    if (header == forward_marker) {
+      /* Already-forwarded dnode: skip marker and breadcrumb. */
+      start += 2;
+    } else {
+      tag = fulltag_of(header);
+      if (immheader_tag_p(tag)) {
+        /* Unboxed data: nothing inside can be a reference. */
+        start = (LispObj *)skip_over_ivector((natural)start, header);
+      } else {
+        if (!nodeheader_tag_p(tag)) {
+          copy_ivector_reference(start, low, high, to);
+        }
+        start++;
+        copy_ivector_reference(start, low, high, to);
+        start++;
+      }
+    }
+  }
+}
+        
+/* Purify references from tstack areas */
+/* Purify references from tstack areas */
+void
+purify_tstack_area(area *a, BytePtr low, BytePtr high, area *to)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) (a->active),
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  /* Walk the chain of tsp frames: the first word of each frame links
+     to the next (older) frame; clamp to `limit` when the link leaves
+     the area. */
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = (LispObj *) ptr_from_lispobj(*current);
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    /* NOTE(review): only frames whose second word is 0 are treated as
+       holding boxed data — confirm against the tsp frame layout. */
+    if (current[1] == 0) {
+      purify_range(current+2, end, low, high, to);
+    }
+  }
+}
+
+/* Purify a vstack area */
+void
+purify_vstack_area(area *a, BytePtr low, BytePtr high, area *to)
+{
+  LispObj
+    *p = (LispObj *) a->active,
+    *q = (LispObj *) a->high;
+
+  if (((natural)p) & sizeof(natural)) {
+    copy_ivector_reference(p, low, high, to);
+    p++;
+  }
+  purify_range(p, q, low, high, to);
+}
+
+
+/*
+  Walk a cstack area's frame chain (each frame's first word is the
+  back pointer).  Frames that look like lisp frames — exactly
+  sizeof(lisp_frame) with a null or misc-tagged savefn — get their
+  saved LR purified as a code locative; other frames are foreign.
+*/
+void
+purify_cstack_area(area *a, BytePtr low, BytePtr high, area *to)
+{
+  BytePtr
+    current,
+    next,
+    limit = a->high;
+
+  for (current = a->active; current != limit; current = next) {
+    next = *((BytePtr *)current);
+    if (next == NULL) break;
+    if (((next - current) == sizeof(lisp_frame)) && 
+	(((((lisp_frame *)current)->savefn) == 0) ||
+	 (fulltag_of(((lisp_frame *)current)->savefn) == fulltag_misc))) {
+      purify_locref(&((lisp_frame *) current)->savelr, low, high, to);
+    } else {
+      /* Clear low bits of "next", just in case */
+      next = (BytePtr) (((natural)next) & ~(sizeof(natural)-1));
+    }
+  }
+}
+
+/* Purify references held in a suspended thread's machine context. */
+void
+purify_xp(ExceptionInformation *xp, BytePtr low, BytePtr high, area *to)
+{
+  unsigned long *regs = (unsigned long *) xpGPRvector(xp);
+
+  int r;
+
+  /* registers >= fn should be treated as roots.
+     The PC, LR, loc_pc, and CTR should be treated as "locatives".
+   */
+
+  for (r = fn; r < 32; r++) {
+    copy_ivector_reference((LispObj*) (&(regs[r])), low, high, to);
+  };
+
+  purify_locref((LispObj*) (&(regs[loc_pc])), low, high, to);
+
+  purify_locref((LispObj*) (&(xpPC(xp))), low, high, to);
+  purify_locref((LispObj*) (&(xpLR(xp))), low, high, to);
+  purify_locref((LispObj*) (&(xpCTR(xp))), low, high, to);
+}
+
+/* Purify a thread's thread-local binding vector (tlb_limit is the
+   vector's size in bytes). */
+void
+purify_tcr_tlb(TCR *tcr, BytePtr low, BytePtr high, area *to)
+{
+  natural n = tcr->tlb_limit;
+  LispObj *start = tcr->tlb_pointer, *end = (LispObj *) ((BytePtr)start+n);
+
+  purify_range(start, end, low, high, to);
+}
+
+/* Purify a thread's GC context (if any) and every pending exception
+   frame on its xframe list. */
+void
+purify_tcr_xframes(TCR *tcr, BytePtr low, BytePtr high, area *to)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+  
+  xp = tcr->gc_context;
+  if (xp) {
+    purify_xp(xp, low, high, to);
+  }
+
+  for (xframes = tcr->xframe; xframes; xframes = xframes->prev) {
+    purify_xp(xframes->curr, low, high, to);
+  }
+}
+
+/* Purify each entry on the global chain of gcable pointers (xmacptrs
+   linked through their `link` field). */
+void
+purify_gcable_ptrs(BytePtr low, BytePtr high, area *to)
+{
+  LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next;
+
+  while ((*prev) != (LispObj)NULL) {
+    copy_ivector_reference(prev, low, high, to);
+    next = *prev;
+    prev = &(((xmacptr *)ptr_from_lispobj(untag(next)))->link);
+  }
+}
+
+
+/* Dispatch purification over every registered area, by area kind. */
+void
+purify_areas(BytePtr low, BytePtr high, area *target)
+{
+  area *next_area;
+  area_code code;
+      
+  for (next_area = active_dynamic_area; (code = next_area->code) != AREA_VOID; next_area = next_area->succ) {
+    switch (code) {
+    case AREA_TSTACK:
+      purify_tstack_area(next_area, low, high, target);
+      break;
+      
+    case AREA_VSTACK:
+      purify_vstack_area(next_area, low, high, target);
+      break;
+      
+    case AREA_CSTACK:
+      purify_cstack_area(next_area, low, high, target);
+      break;
+      
+    case AREA_STATIC:
+    case AREA_DYNAMIC:
+      purify_range((LispObj *) next_area->low, (LispObj *) next_area->active, low, high, target);
+      break;
+      
+    default:
+      break;
+    }
+  }
+}
+
+/*
+  So far, this is mostly for save_application's benefit.
+  We -should- be able to return to lisp code after doing this,
+  however.
+
+*/
+
+
+/*
+  Move all purifiable ivectors from the active dynamic area into a
+  newly-extended readonly ("pure") area, updating every reference and
+  locative in areas, thread contexts, TLBs, and gcable pointers.
+  Returns 0 on success, -1 if the readonly area couldn't be extended.
+*/
+signed_natural
+purify(TCR *tcr, signed_natural param)
+{
+  /* NOTE(review): this local prototype takes `unsigned`, but the
+     argument below is a `natural`; on 64-bit targets that truncates
+     the requested size — confirm against the real declaration. */
+  extern area *extend_readonly_area(unsigned);
+  area 
+    *a = active_dynamic_area,
+    *new_pure_area;
+
+  TCR  *other_tcr;
+  natural max_pure_size;
+  BytePtr new_pure_start;
+
+
+  /* Upper bound on pure space needed: every ivector past the static
+     dnodes could be purified. */
+  max_pure_size = unboxed_bytes_in_range((LispObj *)(a->low + (static_dnodes_for_area(a) << dnode_shift)), 
+                                         (LispObj *) a->active);
+  new_pure_area = extend_readonly_area(max_pure_size);
+  if (new_pure_area) {
+    new_pure_start = new_pure_area->active;
+    lisp_global(IN_GC) = (1<<fixnumshift);
+
+    
+    purify_areas(a->low, a->active, new_pure_area);
+    
+    /* The TCR list is circular; visit every thread exactly once. */
+    other_tcr = tcr;
+    do {
+      purify_tcr_xframes(other_tcr, a->low, a->active, new_pure_area);
+      purify_tcr_tlb(other_tcr, a->low, a->active, new_pure_area);
+      other_tcr = other_tcr->next;
+    } while (other_tcr != tcr);
+
+    purify_gcable_ptrs(a->low, a->active, new_pure_area);
+
+    {
+      /* Copied code vectors must be executable from their new home. */
+      natural puresize = (unsigned) (new_pure_area->active-new_pure_start);
+      if (puresize != 0) {
+        xMakeDataExecutable(new_pure_start, puresize);
+  
+      }
+    }
+    /* Make the pure area read-only, rounded up to whole pages. */
+    ProtectMemory(new_pure_area->low,
+		  align_to_power_of_2(new_pure_area->active-new_pure_area->low,
+				      log2_page_size));
+    lisp_global(IN_GC) = 0;
+    just_purified_p = true;
+    return 0;
+  }
+  return -1;
+}
+
+/* If *p is a (possibly interior, fixnum-tagged) reference into
+   [low, high), slide it by delta.  Inverse of purify_locref. */
+void
+impurify_locref(LispObj *p, LispObj low, LispObj high, int delta)
+{
+  LispObj q = *p;
+  
+  switch (fulltag_of(q)) {
+#ifdef PPC64
+  case fulltag_cons:
+#endif
+  case fulltag_misc:
+  case fulltag_even_fixnum:
+  case fulltag_odd_fixnum:
+    if ((q >= low) && (q < high)) {
+      *p = (q+delta);
+    }
+  }
+}
+
+  
+/* If *p is a misc-tagged reference into [low, high), slide it by
+   delta.  Inverse of copy_ivector_reference. */
+void
+impurify_noderef(LispObj *p, LispObj low, LispObj high, int delta)
+{
+  LispObj q = *p;
+  
+  if ((fulltag_of(q) == fulltag_misc) &&
+      (q >= low) && 
+      (q < high)) {
+    *p = (q+delta);
+  }
+}
+  
+
+#ifdef PPC
+/* Mirror of purify_cstack_area: walk the frame chain and slide the
+   saved LR of each lisp frame whose target lies in [low, high). */
+void
+impurify_cstack_area(area *a, LispObj low, LispObj high, int delta)
+{
+  BytePtr
+    current,
+    next,
+    limit = a->high;
+
+  for (current = a->active; current != limit; current = next) {
+    next = *((BytePtr *)current);
+    if (next == NULL) break;
+    if (((next - current) == sizeof(lisp_frame)) && 
+	(((((lisp_frame *)current)->savefn) == 0) ||
+	 (fulltag_of(((lisp_frame *)current)->savefn) == fulltag_misc))) {
+      impurify_locref(&((lisp_frame *) current)->savelr, low, high, delta);
+    } else {
+      /* Clear low bits of "next", just in case */
+      next = (BytePtr) (((natural)next) & ~(sizeof(natural)-1));
+    }
+  }
+}
+#endif
+
+/* Slide references/locatives in a suspended thread's machine context.
+   No-op (apart from the unused `regs`) on non-PPC targets. */
+void
+impurify_xp(ExceptionInformation *xp, LispObj low, LispObj high, int delta)
+{
+  natural *regs = (natural *) xpGPRvector(xp);
+
+#ifdef PPC
+  int r;
+  /* registers >= fn should be treated as roots.
+     The PC, LR, loc_pc, and CTR should be treated as "locatives".
+   */
+
+  for (r = fn; r < 32; r++) {
+    impurify_noderef((LispObj*) (&(regs[r])), low, high, delta);
+  };
+
+  impurify_locref((LispObj*) (&(regs[loc_pc])), low, high, delta);
+
+  impurify_locref((LispObj*) (&(xpPC(xp))), low, high, delta);
+  impurify_locref((LispObj*) (&(xpLR(xp))), low, high, delta);
+  impurify_locref((LispObj*) (&(xpCTR(xp))), low, high, delta);
+#endif
+
+}
+
+
+/* Mirror of purify_range: slide every boxed reference in [start, end)
+   that points into [low, high).  Ivectors are skipped whole; a
+   node-header first word of a dnode is left alone. */
+void
+impurify_range(LispObj *start, LispObj *end, LispObj low, LispObj high, int delta)
+{
+  LispObj header;
+  unsigned tag;
+
+  while (start < end) {
+    header = *start;
+    tag = fulltag_of(header);
+    if (immheader_tag_p(tag)) {
+      start = (LispObj *)skip_over_ivector((natural)start, header);
+    } else {
+      if (!nodeheader_tag_p(tag)) {
+        impurify_noderef(start, low, high, delta);
+        }
+      start++;
+      impurify_noderef(start, low, high, delta);
+      start++;
+    }
+  }
+}
+
+
+
+
+/* Slide references in a thread's thread-local binding vector.
+   NOTE(review): `n` is `unsigned` here but `natural` in the purify
+   counterpart — harmless unless tlb_limit exceeds 32 bits; confirm. */
+void
+impurify_tcr_tlb(TCR *tcr,  LispObj low, LispObj high, int delta)
+{
+  unsigned n = tcr->tlb_limit;
+  LispObj *start = tcr->tlb_pointer, *end = (LispObj *) ((BytePtr)start+n);
+  
+  impurify_range(start, end, low, high, delta);
+}
+
+/* Slide references in a thread's GC context (if any) and in every
+   pending exception frame on its xframe list. */
+void
+impurify_tcr_xframes(TCR *tcr, LispObj low, LispObj high, int delta)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+  
+  xp = tcr->gc_context;
+  if (xp) {
+    impurify_xp(xp, low, high, delta);
+  }
+
+  for (xframes = tcr->xframe; xframes; xframes = xframes->prev) {
+    impurify_xp(xframes->curr, low, high, delta);
+  }
+}
+
+/* Mirror of purify_tstack_area: walk the tsp frame chain and slide
+   references in each boxed frame (second word == 0). */
+void
+impurify_tstack_area(area *a, LispObj low, LispObj high, int delta)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) (a->active),
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = (LispObj *) ptr_from_lispobj(*current);
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    if (current[1] == 0) {
+      impurify_range(current+2, end, low, high, delta);
+    }
+  }
+}
+/* Mirror of purify_vstack_area: handle an odd-word-aligned stack top
+   singly, then slide the rest of the value stack. */
+void
+impurify_vstack_area(area *a, LispObj low, LispObj high, int delta)
+{
+  LispObj
+    *p = (LispObj *) a->active,
+    *q = (LispObj *) a->high;
+
+  if (((natural)p) & sizeof(natural)) {
+    impurify_noderef(p, low, high, delta);
+    p++;
+  }
+  impurify_range(p, q, low, high, delta);
+}
+
+
+/* Dispatch impurification over every registered area, by area kind. */
+void
+impurify_areas(LispObj low, LispObj high, int delta)
+{
+  area *next_area;
+  area_code code;
+      
+  for (next_area = active_dynamic_area; (code = next_area->code) != AREA_VOID; next_area = next_area->succ) {
+    switch (code) {
+    case AREA_TSTACK:
+      impurify_tstack_area(next_area, low, high, delta);
+      break;
+      
+    case AREA_VSTACK:
+      impurify_vstack_area(next_area, low, high, delta);
+      break;
+      
+    case AREA_CSTACK:
+#ifdef PPC
+      impurify_cstack_area(next_area, low, high, delta);
+#endif
+      break;
+      
+    case AREA_STATIC:
+    case AREA_DYNAMIC:
+      impurify_range((LispObj *) next_area->low, (LispObj *) next_area->active, low, high, delta);
+      break;
+      
+    default:
+      break;
+    }
+  }
+}
+
+/* Slide each entry on the global chain of gcable pointers (xmacptrs
+   linked through their `link` field). */
+void
+impurify_gcable_ptrs(LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next;
+
+  while ((*prev) != (LispObj)NULL) {
+    impurify_noderef(prev, low, high, delta);
+    next = *prev;
+    prev = &(((xmacptr *)ptr_from_lispobj(untag(next)))->link);
+  }
+}
+
+/*
+  Inverse of purify: copy the readonly ("pure") area's contents back to
+  the end of the active dynamic area (growing it if necessary), unmap
+  the pure pages, and slide every reference by the relocation delta.
+  Returns 0 on success, -1 if there is no readonly area.
+*/
+signed_natural
+impurify(TCR *tcr, signed_natural param)
+{
+  area *r = readonly_area;
+
+  if (r) {
+    area *a = active_dynamic_area;
+    BytePtr ro_base = r->low, ro_limit = r->active, oldfree = a->active,
+      oldhigh = a->high, newhigh; 
+    /* NOTE(review): `unsigned n` and `int delta` truncate on 64-bit
+       targets if the pure area or the relocation distance exceeds
+       32 bits — confirm intended limits. */
+    unsigned n = ro_limit - ro_base;
+    int delta = oldfree-ro_base;
+    TCR *other_tcr;
+
+    if (n) {
+      lisp_global(IN_GC) = 1;
+      newhigh = (BytePtr) (align_to_power_of_2(oldfree+n,
+                                               log2_heap_segment_size));
+      if (newhigh > oldhigh) {
+        grow_dynamic_area(newhigh-oldhigh);
+      }
+      a->active += n;
+      /* Copy pure data to the dynamic heap, then drop the pure pages. */
+      memmove(oldfree, ro_base, n);
+      munmap(ro_base, n);
+      a->ndnodes = area_dnode(a, a->active);
+      pure_space_active = r->active = r->low;
+      r->ndnodes = 0;
+
+      impurify_areas(ptr_to_lispobj(ro_base), ptr_to_lispobj(ro_limit), delta);
+
+      /* The TCR list is circular; visit every thread exactly once. */
+      other_tcr = tcr;
+      do {
+        impurify_tcr_xframes(other_tcr, ptr_to_lispobj(ro_base), ptr_to_lispobj(ro_limit), delta);
+        impurify_tcr_tlb(other_tcr, ptr_to_lispobj(ro_base), ptr_to_lispobj(ro_limit), delta);
+        other_tcr = other_tcr->next;
+      } while (other_tcr != tcr);
+
+      impurify_gcable_ptrs(ptr_to_lispobj(ro_base), ptr_to_lispobj(ro_limit), delta);
+      lisp_global(IN_GC) = 0;
+    }
+    return 0;
+  }
+  return -1;
+}
+
Index: /branches/arm/lisp-kernel/ppc-macros.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-macros.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-macros.s	(revision 13357)
@@ -0,0 +1,744 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL.  */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence.   */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+/* The assembler has to do the arithmetic here:	 the expression */
+/*   may not be evaluable by m4. */
+/* lwi(reg,imm): load a 32-bit immediate into reg, using a single li */
+/* when the value fits in a signed 16-bit field, else lis (+ ori when */
+/* the low halfword is nonzero).  Darwin and GNU as spell the */
+/* conditional-assembly directives differently, hence the two arms. */
+define(`lwi',`ifdef(`DARWIN',`
+	.if ((($2) & 0xffff8000) == 0xffff8000)
+	 li $1,($2)
+	.elseif ((($2) & 0xffff8000) == 0)
+	 li $1,$2
+	.else
+	 lis $1,(($2)>>16)
+	 .if (($2) & 0xffff) <> 0
+	  ori $1,$1,(($2) & 0xffff)
+	 .endif
+	.endif',`
+	.ifeq (($2) & 0xffff8000)-0xffff8000
+	 li $1,$2
+	.else
+	 .ifeq (($2) & 0xffff8000)
+	  li $1,$2
+	 .else
+	  lis $1,($2>>16)
+	  .ifne ($2 & 0xffff)
+	   ori $1,$1,$2 & 0xffff
+	  .endif
+	 .endif
+	.endif
+')')
+
+/* Width-generic mnemonics: each `xxr'/`xxri' macro expands to the */
+/* 64-bit (d-form) instruction under PPC64 and the 32-bit (w-form) */
+/* instruction otherwise, so the rest of the kernel assembly can be */
+/* written once for both word sizes. */
+ifdef(`PPC64',`
+        define(`clrrri',`clrrdi $@')       
+        define(`clrlri',`clrldi $@')
+        define(`clrlri_',`clrldi. $@')
+        define(`ldr',`ld $@')
+        define(`ldrx',`ldx $@')
+        define(`ldru',`ldu $@')
+        define(`str',`std $@')
+        define(`strx',`stdx $@')
+        define(`stru',`stdu $@')
+        define(`strux',`stdux $@')	
+        define(`cmpr',`cmpd $@')
+        define(`cmpri',`cmpdi $@')
+        define(`cmplr',`cmpld $@')
+        define(`cmplri',`cmpldi $@')
+        define(`trlge',`tdlge $@')
+        define(`trllt',`tdllt $@')
+        define(`trlt',`tdlt $@')
+	define(`trlle',`tdlle $@')
+        define(`treqi',`tdeqi $@')
+        define(`trnei',`tdnei $@')
+        define(`trgti',`tdgti $@')
+        define(`srari',`sradi $@')
+        define(`srri',`srdi $@')
+        define(`srr',`srd $@')
+        define(`slri',`sldi $@')
+        define(`lrarx',`ldarx $@')
+        define(`strcx',`stdcx. $@')
+        define(`load_highbit',`
+        __(lis $1,0x8000)
+        __(sldi $1,$1,32)
+        ')
+        define(`extract_bit_shift_count',`
+        __(clrldi $1,$2,64-bitmap_shift)
+        ')
+        define(`alloc_trap',`
+        __(tdlt allocptr,allocbase)
+        ')
+        define(`mullr',`mulld $@')
+',`
+        define(`clrrri',`clrrwi $@')
+        define(`clrlri',`clrlwi $@')
+        define(`clrlri_',`clrlwi. $@')
+        define(`ldr',`lwz $@')
+        define(`ldrx',`lwzx $@')
+        define(`ldru',`lwzu $@')
+        define(`str',`stw $@')
+        define(`strx',`stwx $@')
+        define(`stru',`stwu $@')
+        define(`strux',`stwux $@')
+        define(`cmpr',`cmpw $@')
+        define(`cmpri',`cmpwi $@')
+        define(`cmplr',`cmplw $@')
+        define(`cmplri',`cmplwi $@')
+        define(`trlge',`twlge $@')
+        define(`trllt',`twllt $@')
+        define(`trlt',`twlt $@')
+        define(`trlle',`twlle $@')       
+        define(`treqi',`tweqi $@')
+        define(`trnei',`twnei $@')
+        define(`trgti',`twgti $@')
+        define(`srari',`srawi $@')
+        define(`srri',`srwi $@')
+        define(`srr',`srw $@')
+        define(`slri',`slwi $@')
+        define(`lrarx',`lwarx $@')
+        define(`strcx',`stwcx. $@')
+        define(`load_highbit',`
+        __(lis $1,0x8000)
+        ')
+        define(`extract_bit_shift_count',`
+        __(clrlwi $1,$2,32-bitmap_shift)
+        ')
+        define(`alloc_trap',`
+        __(twllt allocptr,allocbase)
+        ')
+        define(`mullr',`mullw $@')
+')
+
+/* dnode_align(dest,src,delta) */
+        define(`dnode_align',`
+        __(la $1,($3+(dnode_size-1))($2))
+        __(clrrri($1,$1,dnode_align_bits))
+')
+
+/* Mask $2's fulltag / lisptag into $1 (the `_' variant sets cr0). */
+define(`extract_fulltag',`
+	__(clrlri($1,$2,nbits_in_word-ntagbits))
+        ')
+
+define(`extract_lisptag',`
+	__(clrlri($1,$2,nbits_in_word-nlisptagbits))
+        ')
+
+define(`extract_lisptag_',`
+	__(clrlri_($1,$2,nbits_in_word-nlisptagbits))
+        ')
+
+/* Load the subtag byte of the misc-tagged object in $2 into $1. */
+define(`extract_subtag',`
+	__(lbz $1,misc_subtag_offset($2))
+	')
+
+ifdef(`PPC64',`
+define(`extract_lowtag',`
+        __(clrldi $1,$2,nbits_in_word-nlowtagbits)
+')
+define(`trap_unless_lowtag_equal',`
+        __(clrldi $3,$1,nbits_in_word-nlowtagbits)
+        __(tdnei $3,$2)
+')                
+        ')
+                               
+define(`extract_lowbyte',`
+        __(clrlri($1,$2,nbits_in_word-num_subtag_bits))
+        ')
+
+/* Load the header word of the misc-tagged object in $2 into $1. */
+define(`extract_header',`
+	__(ldr($1,misc_header_offset($2)))
+	')
+
+
+/* extract_typecode(dest,src): dest gets src's lisptag, unless src is */
+/* misc-tagged, in which case dest gets the object's subtag byte. */
+ifdef(`PPC64',`
+define(`extract_typecode',`
+	new_macro_labels()
+	__(extract_fulltag($1,$2))
+	__(cmpdi cr0,$1,fulltag_misc)
+	__(extract_lisptag($1,$1))
+	__(bne cr0,macro_label(not_misc))
+	__(extract_subtag($1,$2))
+macro_label(not_misc):
+')',`	
+define(`extract_typecode',`
+	new_macro_labels()
+	__(extract_lisptag($1,$2))
+	__(cmpwi cr0,$1,tag_misc)
+	__(bne cr0,macro_label(not_misc))
+	__(extract_subtag($1,$2))
+macro_label(not_misc):
+')')
+
+/* Fixnum boxing/unboxing is a shift by fixnumshift. */
+define(`box_fixnum',`
+	__(slri($1,$2,fixnumshift))
+	')
+
+define(`unbox_fixnum',`	
+	__(srari($1,$2,fixnumshift))
+	')
+
+define(`loaddf',`
+	__(lfd $1,dfloat.value($2))')
+	
+define(`storedf',`
+	__(stfd $1,dfloat.value($2))
+	')
+
+/* push/pop a node on the stack whose pointer is $2. */
+define(`push',`
+	__(stru($1,-node_size($2)))
+	')
+	
+	/* Generally not a great idea. */
+define(`pop',`
+	__(ldr($1,0($2)))
+	__(la $2,node_size($2))
+	')
+	
+define(`vpush',`
+	__(push($1,vsp))
+	')
+	
+define(`vpop',`
+	__(pop($1,vsp))
+	')
+	
+		
+define(`unlink',`
+	__(ldr($1,0($1)))
+ ')
+
+	
+define(`set_nargs',`
+	__(lwi(nargs,($1)<<fixnumshift))
+	')
+	
+define(`bitclr',`
+	__(rlwinm $1,$2,0,0x1f&((31-($3))+1),0x1f&((31-($3))-1))
+	')
+	
+
+/* vrefNN(dest,vector,constant-index): load the index'th NN-bit */
+/* element; vrefr loads a node-sized element. */
+define(`vref32',`
+	__(lwz $1,misc_data_offset+(($3)<<2)($2))
+	')
+        
+define(`vref16',`/* dest,src,n*/
+	__(lhz $1,misc_data_offset+(($3)<<1)($2))
+	')
+	
+ifdef(`PPC64',`
+        define(`vref64',`
+        __(ld $1,misc_data_offset+(($3)<<3)($2))
+	')
+
+        define(`vrefr',`
+        __(vref64($1,$2,$3))
+	')
+',`
+        define(`vrefr',`
+        __(vref32($1,$2,$3))
+	')
+')
+        
+                	
+/* Vector header access: size is the raw element count, length the */
+/* fixnum-boxed count. */
+define(`getvheader',`
+	__(ldr($1,vector.header($2)))
+	')
+	
+	/* Size is unboxed element count */
+define(`header_size',`
+	__(srri($1,$2,num_subtag_bits))
+	')
+	
+	/* "Length" is fixnum element count */
+define(`header_length',`
+ifdef(`PPC64',`
+        __(rldicr $1,$2,nbits_in_word-(num_subtag_bits-nfixnumtagbits),63-nfixnumtagbits)
+        __(clrldi $1,$1,(num_subtag_bits-nfixnumtagbits))
+        ',`               
+	__(rlwinm $1,$2,nbits_in_word-(num_subtag_bits-nfixnumtagbits),(num_subtag_bits-nfixnumtagbits),31-nfixnumtagbits)
+        ')
+')        
+
+
+define(`vector_size',`
+	__(getvheader(ifelse($3.`',$1,$3),$2))
+	__(header_size($1,ifelse($3.`',$1,$3)))
+	')
+	
+define(`vector_length',`
+	__(getvheader($3,$2))
+	__(header_length($1,$3))
+	')
+
+	
+/* Kernel globals and nilreg-relative symbol values live at small */
+/* absolute offsets from register 0 (i.e. literal address 0). */
+define(`ref_global',`
+	__(ldr($1,lisp_globals.$2(0)))
+')
+
+define(`set_global',`
+	__(str($1,lisp_globals.$2(0)))
+')
+
+define(`ref_nrs_value',`
+	__(ldr($1,((nrs.$2)+(symbol.vcell))(0)))
+')
+	
+define(`set_nrs_value',`
+	__(str($1,((nrs.$2)+(symbol.vcell))(0)))
+')
+
+/* $1 := $2 if $2 is a fixnum in [0, 2^$3); else $1 gets nonzero junk */
+/* in the masked-out bits (the `_' variant sets cr0 for a test). */
+define(`extract_unsigned_byte_bits',`
+ifdef(`PPC64',`
+        __(rldicr $1,$2,64-fixnumshift,63-$3)
+',`                
+        __(rlwinm $1,$2,0,32-fixnumshift,31-($3+fixnumshift))
+')        
+')
+
+define(`extract_unsigned_byte_bits_',`
+ifdef(`PPC64',`
+        __(rldicr. $1,$2,64-fixnumshift,63-$3)
+',`                
+        __(rlwinm. $1,$2,0,32-fixnumshift,31-($3+fixnumshift))
+')        
+')
+
+	/* vpop argregs - nargs is known to be non-zero */
+define(`vpop_argregs_nz',`
+	new_macro_labels()
+	__(cmplri(cr1,nargs,node_size*2))
+	__(vpop(arg_z))
+	__(blt cr1,macro_label(l0))
+	__(vpop(arg_y))
+	__(bne cr1,macro_label(l0))
+	__(vpop(arg_x))
+macro_label(l0):')
+
+                
+	/* vpush argregs */
+define(`vpush_argregs',`
+	new_macro_labels()
+	__(cmplri(cr0,nargs,0))
+	__(cmplri(cr1,nargs,node_size*2))
+	__(beq cr0,macro_label(done))
+	__(blt cr1,macro_label(z))
+	__(beq cr1,macro_label(yz))
+	__(vpush(arg_x))
+macro_label(yz):
+	__(vpush(arg_y))
+macro_label(z):
+	__(vpush(arg_z))
+macro_label(done):
+')
+
+/* Lisp frame construction: a frame holds savefn/savelr/savevsp; */
+/* build_lisp_frame's optional args default to fn/loc_pc/vsp. */
+define(`create_lisp_frame',`
+	__(stru(sp,-lisp_frame.size(sp)))
+')
+
+                
+define(`build_lisp_frame',`
+	create_lisp_frame()
+	__(str(ifelse($1,`',fn,$1),lisp_frame.savefn(sp)))
+	__(str(ifelse($2,`',loc_pc,$2),lisp_frame.savelr(sp)))
+	__(str(ifelse($3,`',vsp,$3),lisp_frame.savevsp(sp)))
+')
+
+        	
+define(`discard_lisp_frame',`
+	__(la sp,lisp_frame.size(sp))
+	')
+	
+	
+define(`_car',`
+	__(ldr($1,cons.car($2)))
+')
+	
+define(`_cdr',`
+	__(ldr($1,cons.cdr($2)))
+	')
+	
+define(`_rplaca',`
+	__(str($2,cons.car($1)))
+	')
+	
+define(`_rplacd',`
+	__(str($2,cons.cdr($1)))
+	')
+
+/* Save/restore the eight non-volatile node registers on the vstack. */
+define(`vpush_saveregs',`
+	__(vpush(save7))
+	__(vpush(save6))
+	__(vpush(save5))
+	__(vpush(save4))
+	__(vpush(save3))
+	__(vpush(save2))
+	__(vpush(save1))
+	__(vpush(save0))
+	')
+	
+define(`restore_saveregs',`
+	__(ldr(save0,node_size*0($1)))
+	__(ldr(save1,node_size*1($1)))
+	__(ldr(save2,node_size*2($1)))
+	__(ldr(save3,node_size*3($1)))
+	__(ldr(save4,node_size*4($1)))
+	__(ldr(save5,node_size*5($1)))
+	__(ldr(save6,node_size*6($1)))
+	__(ldr(save7,node_size*7($1)))
+')
+
+define(`vpop_saveregs',`
+	__(restore_saveregs(vsp))
+	__(la vsp,node_size*8(vsp))
+')
+
+/* Type-check traps: trap (conditional trap instruction) unless $1 */
+/* has the given tag/fulltag/typecode; $3 (or $2) is a scratch reg. */
+define(`trap_unless_lisptag_equal',`
+	__(extract_lisptag($3,$1))
+	__(trnei($3,$2))
+')
+
+ifdef(`PPC64',`
+define(`trap_unless_list',`
+	new_macro_labels()
+	__(cmpdi ifelse($3,$3,cr0),$1,nil_value)
+	__(extract_fulltag($2,$1))
+	__(beq ifelse($3,$3,cr0),macro_label(is_list))
+	__(tdnei $2,fulltag_cons)
+macro_label(is_list):	
+
+')',`	
+define(`trap_unless_list',`
+	__(trap_unless_lisptag_equal($1,tag_list,$2))
+')
+')
+
+define(`trap_unless_fulltag_equal',`
+	__(extract_fulltag($3,$1))
+	__(trnei($3,$2))
+')
+	
+define(`trap_unless_typecode_equal',`
+        __(extract_typecode($3,$1))
+        __(trnei($3,$2))
+')
+        
+/* "jump" to the code-vector of the function in nfn. */
+define(`jump_nfn',`
+	__(ldr(temp0,_function.codevector(nfn)))
+	__(mtctr temp0)
+	__(bctr)
+')
+
+/* "call the code-vector of the function in nfn. */
+define(`call_nfn',`
+	__(ldr(temp0,_function.codevector(nfn)))
+	__(mtctr temp0)
+	__(bctrl)
+')
+	
+
+/* "jump" to the function in fnames function cell. */
+define(`jump_fname',`
+	__(ldr(nfn,symbol.fcell(fname)))
+	__(jump_nfn())
+')
+
+/* call the function in fnames function cell. */
+define(`call_fname',`
+	__(ldr(nfn,symbol.fcell(fname)))
+	__(call_nfn())
+')
+
+/* FUNCALL: temp0 holds the callee.  A function is jumped to directly; */
+/* a symbol is called through its function cell; anything else traps */
+/* with error_cant_call. */
+define(`do_funcall',`
+	new_macro_labels()
+	__(extract_fulltag(imm0,temp0))
+	__(cmpri(imm0,fulltag_misc))
+	__(mr nfn,temp0)
+	__(bne- macro_label(bad))
+	__(extract_subtag(imm0,temp0))
+	__(cmpri(imm0,subtag_function))
+	__(cmpri(cr1,imm0,subtag_symbol))
+        __(bne cr0,macro_label(_sym))
+        __(jump_nfn())
+macro_label(_sym):             
+	__(mr fname,temp0)
+	__(bne cr1,macro_label(bad))
+	__(jump_fname())
+macro_label(bad):
+	__(uuo_interr(error_cant_call,temp0))
+')	
+
+/* Push a catch frame on the temp stack and link it onto tcr.catch_top. */
+/* On entry: catch tag in arg_z, multiple-value flag in imm2; the word */
+/* at the return address is a forward branch to the cleanup code, whose */
+/* target is computed here and saved in the lisp frame. */
+define(`mkcatch',`
+	__(mflr loc_pc)
+	__(ldr(imm0,tcr.catch_top(rcontext)))
+	__(lwz imm1,0(loc_pc)) /* a forward branch to the catch/unwind cleanup */
+	__(rlwinm imm1,imm1,0,6,29)	/* extract LI */
+	__(add loc_pc,loc_pc,imm1)
+	__(build_lisp_frame(fn,loc_pc,vsp))
+	__(sub loc_pc,loc_pc,imm1)
+	__(la loc_pc,4(loc_pc))	/* skip over the forward branch */
+	__(mtlr loc_pc)
+	__(lwi(imm4,(catch_frame.element_count<<num_subtag_bits)|subtag_catch_frame))
+	__(ldr(imm3,tcr.xframe(rcontext)))
+	__(ldr(imm1,tcr.db_link(rcontext)))
+	__(TSP_Alloc_Fixed_Unboxed(catch_frame.size))
+	__(la nargs,tsp_frame.data_offset+fulltag_misc(tsp))	/* nargs = tagged frame pointer */
+        __(str(imm4,catch_frame.header(nargs)))
+	__(str(arg_z,catch_frame.catch_tag(nargs)))
+	__(str(imm0,catch_frame.link(nargs)))	/* previous catch_top */
+	__(str(imm2,catch_frame.mvflag(nargs)))
+	__(str(sp,catch_frame.csp(nargs)))
+	__(str(imm1,catch_frame.db_link(nargs)))
+        __(str(first_nvr,catch_frame.regs+0*node_size(nargs)))
+        __(str(second_nvr,catch_frame.regs+1*node_size(nargs)))
+        __(str(third_nvr,catch_frame.regs+2*node_size(nargs)))
+        __(str(fourth_nvr,catch_frame.regs+3*node_size(nargs)))
+        __(str(fifth_nvr,catch_frame.regs+4*node_size(nargs)))
+        __(str(sixth_nvr,catch_frame.regs+5*node_size(nargs)))
+        __(str(seventh_nvr,catch_frame.regs+6*node_size(nargs)))
+        __(str(eighth_nvr,catch_frame.regs+7*node_size(nargs)))
+	__(str(imm3,catch_frame.xframe(nargs)))
+	__(str(rzero,catch_frame.tsp_segment(nargs)))
+	__(Set_TSP_Frame_Boxed())
+	__(str(nargs,tcr.catch_top(rcontext)))
+        __(li nargs,0)
+
+')	
+
+/* Reload the eight non-volatile node registers saved in the catch */
+/* frame whose (tagged) address is in $1. */
+define(`restore_catch_nvrs',`
+        __(ldr(first_nvr,catch_frame.regs+(node_size*0)($1)))
+        __(ldr(second_nvr,catch_frame.regs+(node_size*1)($1)))
+        __(ldr(third_nvr,catch_frame.regs+(node_size*2)($1)))
+        __(ldr(fourth_nvr,catch_frame.regs+(node_size*3)($1)))
+        __(ldr(fifth_nvr,catch_frame.regs+(node_size*4)($1)))
+        __(ldr(sixth_nvr,catch_frame.regs+(node_size*5)($1)))
+        __(ldr(seventh_nvr,catch_frame.regs+(node_size*6)($1)))
+        __(ldr(eighth_nvr,catch_frame.regs+(node_size*7)($1)))
+')               
+
+/* Hand-assemble a dcbzl instruction (primary opcode 31, xo 1014, with */
+/* the L bit set) using RA=$1, RB=$2 -- presumably because some */
+/* assemblers do not accept the mnemonic; verify against the PPC ISA. */
+define(`DCBZL',`
+	__(.long (31<<26)+(1<<21)+($1<<16)+($2<<11)+(1014<<1))
+')
+	
+/* Die (via an illegal .long 0 instruction) if sp is not aligned to */
+/* the required stack alignment.  $1 is a scratch register. */
+define(`check_stack_alignment',`
+	new_macro_labels()
+	__(andi. $1,sp,STACK_ALIGN_MASK)
+	__(beq+ macro_label(stack_ok))
+	__(.long 0)
+macro_label(stack_ok):
+')
+
+/* Round $1 up to the next multiple of the stack alignment. */
+define(`stack_align',`((($1)+STACK_ALIGN_MASK)&~STACK_ALIGN_MASK)')
+
+/* Strip the low ntagbits tag bits from allocptr. */
+define(`clear_alloc_tag',`
+	__(clrrri(allocptr,allocptr,ntagbits))
+')
+
+/* If the GC interrupts the current thread (after the trap), it needs */
+/*   to ensure that the cons cell that's been "reserved" stays reserved */
+/*   (e.g. the tagged allocptr has to be treated as a node.)  If that */
+/*   reserved cons cell gets tenured, the car and cdr are of a generation */
+/*   that's at least as old (so memoization isn't an issue.) */
+
+/*   More generally, if the GC interrupts a thread when allocptr is */
+/*   tagged as a cons: */
+
+/*    a) if the trap hasn't been taken (yet), the GC should force the */
+/*       thread to resume in such a way that the trap will be taken ; */
+/*       the segment allocator should worry about allocating the object. */
+
+/*    b) If the trap has been taken, allocptr is treated as a node as */
+/*       described above.  Allocbase is made to point to the base of the */
+/*       cons cell, so that the thread's next allocation attempt will */
+/*       invoke the segment allocator. */
+	
+/* $1 := (cons $2 $3).  Bump allocptr down by cons.size leaving a */
+/* fulltag_cons tag, take the allocation trap, then initialize cdr */
+/* and car and untag allocptr (see the GC constraints above). */
+define(`Cons',`
+	__(la allocptr,(-cons.size+fulltag_cons)(allocptr))
+        __(alloc_trap())
+	__(str($3,cons.cdr(allocptr)))
+	__(str($2,cons.car(allocptr)))
+	__(mr $1,allocptr)
+	__(clear_alloc_tag())
+')
+
+
+/* This is probably only used once or twice in the entire kernel, but */
+/* I wanted a place to describe the constraints on the mechanism. */
+
+/* Those constraints are (not surprisingly) similar to those which apply */
+/* to cons cells, except for the fact that the header (and any length */
+/* field that might describe large arrays) has to have been stored in */
+/* the object if the trap has succeeded on entry to the GC.  It follows */
+/* that storing the register containing the header must immediately */
+/* follow the allocation trap (and an auxiliary length register must */
+/* be stored immediately after the header.)  Successfully falling */
+/* through the trap must emulate any header initialization: it would */
+/* be a bad idea to have allocptr pointing to a zero header ... */
+
+
+
+/* Parameters: */
+
+/* $1 = dest reg */
+/* $2 = header.  (For now, assume that this always encodes length ; */
+/* that may change with "large vector" support.) */
+/* $3 = register containing size in bytes.  (We're going to subtract */
+/* fulltag_misc from this; do it in the macro body, rather than force the */
+/* (1 ?) caller to do it. */
+
+
+define(`Misc_Alloc',`
+	/* untag the byte count, bump allocptr down, trap, then store */
+	/* the header immediately (the GC depends on that ordering). */
+	__(la $3,-fulltag_misc($3))
+	__(sub allocptr,allocptr,$3)
+        __(alloc_trap())
+	__(str($2,misc_header_offset(allocptr)))
+	__(mr $1,allocptr)
+	__(clear_alloc_tag())
+')
+
+/*  Parameters $1, $2 as above; $3 = physical size constant. */
+/*  Parameters $1, $2 as above; $3 = physical size constant. */
+define(`Misc_Alloc_Fixed',`
+	/* constant-size variant: fold the tag adjustment into the la. */
+	__(la allocptr,(-$3)+fulltag_misc(allocptr))
+        __(alloc_trap())
+	__(str($2,misc_header_offset(allocptr)))
+	__(mr $1,allocptr)
+	__(clear_alloc_tag())
+')
+
+
+/*  Zero $3 bytes worth of doublewords, starting at offset $2 relative */
+/* to the base register $1. */
+
+
+ifdef(`DARWIN',`
+	/* Recursive assembler macro; positional args are base, disp, */
+	/* nbytes.  Expands to one stfd per doubleword. */
+	.macro zero_doublewords
+	.if $2
+	stfd fp_zero,$1($0)
+	zero_doublewords $0,$1+8,$2-8
+	.endif
+	.endmacro
+')
+
+ifdef(`LINUX',`
+	/* Same as the Darwin version, in GNU as named-argument syntax. */
+	.macro zero_doublewords base,disp,nbytes
+	.if \nbytes
+	stfd fp_zero,\disp(\base)
+	zero_doublewords \base,\disp+8,\nbytes-8
+	.endif
+	.endm
+')	
+
+/* Mark the current TSP frame as raw: store a non-zero value (tsp */
+/* itself) in the frame type word. */
+define(`Set_TSP_Frame_Unboxed',`
+	__(str(tsp,tsp_frame.type(tsp)))
+')
+
+/* Mark the current TSP frame as containing boxed (tagged) data: */
+/* zero the frame type word. */
+define(`Set_TSP_Frame_Boxed',`
+	__(str(rzero,tsp_frame.type(tsp)))
+')
+		
+/* A newly allocated TSP frame is always "raw" (has non-zero type, indicating */
+/* that it doesn't contain tagged data.) */
+
+define(`TSP_Alloc_Fixed_Unboxed',`
+	/* stru pushes a frame of $1 data bytes and links the old tsp */
+	__(stru(tsp,-($1+tsp_frame.data_offset)(tsp)))
+	__(Set_TSP_Frame_Unboxed())
+')
+
+/* As TSP_Alloc_Fixed_Unboxed, but zero the $1 data bytes as well. */
+define(`TSP_Alloc_Fixed_Unboxed_Zeroed',`
+	__(TSP_Alloc_Fixed_Unboxed($1))
+	__(zero_doublewords tsp,tsp_frame.fixed_overhead,$1)
+')
+
+/* Allocate a zeroed fixed-size frame and mark it boxed. */
+define(`TSP_Alloc_Fixed_Boxed',`
+	__(TSP_Alloc_Fixed_Unboxed_Zeroed($1))
+	__(Set_TSP_Frame_Boxed())
+')
+
+
+        
+	
+
+/* This assumes that the backpointer points  to the first byte beyond */
+/* each frame.  If we allow segmented tstacks, that constraint might */
+/* complicate  their implementation. */
+/* We don't need to know the size of the frame (positive or negative, */
+/* with or without header).  $1 and $2 are temp registers, $3 is an */
+/* optional CR field. */
+
+
+/* Handle the general case, where the frame might be empty */
+/* Handle the general case, where the frame might be empty */
+define(`Zero_TSP_Frame',`
+	/* $1 scans up from the frame data; $2 stops at backlink-8 */
+	__(new_macro_labels())
+	__(la $1,tsp_frame.size-8(tsp))
+	__(ldr($2,tsp_frame.backlink(tsp)))
+	__(la $2,-8($2))
+	__(b macro_label(zero_tsp_test))
+macro_label(zero_tsp_loop):
+	__(stfdu fp_zero,8($1))
+macro_label(zero_tsp_test):	
+	__(cmpr(ifelse($3,`',`cr0',$3),$1,$2))
+	__(bne ifelse($3,`',`cr0',$3),macro_label(zero_tsp_loop))
+')
+
+/* Save some branching when we know that the frame can't be empty.*/
+define(`Zero_TSP_Frame_nz',`
+	/* same as Zero_TSP_Frame, minus the entry branch to the test */
+	new_macro_labels()
+	__(la $1,tsp_frame.size-8(tsp))
+	__(ldr($2,tsp_frame.backlink(tsp)))
+	__(la $2,-8($2))
+macro_label(zero_tsp_loop):
+	__(stfdu fp_zero,8($1))
+	__(cmpr(ifelse($3,`',`cr0',$3),$1,$2))
+	__(bne ifelse($3,`',`cr0',$3),macro_label(zero_tsp_loop))
+')
+	
+/* $1 = 8-byte-aligned size, positive.  $2 (optional) set */
+/* to negated size. */
+define(`TSP_Alloc_Var_Unboxed',`
+	/* strux pushes the frame and links the previous tsp; if $2 is */
+	/* given it receives -$1, otherwise $1 itself is negated. */
+	__(neg ifelse($2,`',$1,$2),$1)
+	__(strux(tsp,tsp,ifelse($2,`',$1,$2)))
+	__(Set_TSP_Frame_Unboxed())
+')
+
+/* Variable-size boxed frame: allocate, zero, then mark boxed. */
+define(`TSP_Alloc_Var_Boxed',`
+	__(TSP_Alloc_Var_Unboxed($1))
+	__(Zero_TSP_Frame($1,$2))
+	__(Set_TSP_Frame_Boxed())
+')		
+
+
+/* As TSP_Alloc_Var_Boxed, for frames known to be non-empty. */
+define(`TSP_Alloc_Var_Boxed_nz',`
+	__(TSP_Alloc_Var_Unboxed($1))
+	__(Zero_TSP_Frame_nz($1,$2))
+	__(Set_TSP_Frame_Boxed())
+')		
+
+/* Take a trap (trgti) if an interrupt is pending and the current */
+/* interrupt level -- the thread-local binding at */
+/* INTERRUPT_LEVEL_BINDING_INDEX -- permits it: negative level skips, */
+/* positive level traps unconditionally, zero traps only if */
+/* tcr.interrupt_pending is positive.  Clobbers nargs.  $1 is an */
+/* optional CR field (default cr0). */
+define(`check_pending_interrupt',`
+	new_macro_labels()
+        __(ldr(nargs,tcr.tlb_pointer(rcontext)))
+	__(ldr(nargs,INTERRUPT_LEVEL_BINDING_INDEX(nargs)))
+	__(cmpri(ifelse($1,`',`cr0',$1),nargs,0))
+	__(blt ifelse($1,`',`cr0',$1),macro_label(done))
+	__(bgt ifelse($1,`',`cr0',$1),macro_label(trap))
+	__(ldr(nargs,tcr.interrupt_pending(rcontext)))
+macro_label(trap):
+	__(trgti(nargs,0))
+macro_label(done):
+')
+
+/* $1 = ndigits.  Assumes 4-byte digits */        
+/* header word + 4*$1 digit bytes, rounded up to a whole dnode. */
+define(`aligned_bignum_size',`((~(dnode_size-1)&(node_size+(dnode_size-1)+(4*$1))))')
+
+/* Signal error_propagate_suspend via a UUO. */
+define(`suspend_now',`
+	__(uuo_interr(error_propagate_suspend,rzero))
+')
Index: /branches/arm/lisp-kernel/ppc-spentry.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-spentry.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-spentry.s	(revision 13357)
@@ -0,0 +1,7064 @@
+/* Copyright (C) 2009 Clozure Associates */
+/* Copyright (C) 1994-2001 Digitool, Inc */
+/* This file is part of Clozure CL.   */
+
+/* Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/* License , known as the LLGPL and distributed with Clozure CL as the */
+/* file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/* which is distributed with Clozure CL as the file "LGPL".  Where these */
+/* conflict, the preamble takes precedence.   */
+
+/* Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/* The LLGPL is also available online at */
+/* http://opensource.franz.com/preamble.html */
+
+
+	
+	include(lisp.s)
+	_beginfile
+        .align 2
+	
+local_label(start):	
+/* Begin subprimitive $1: close any function still open, then export */
+/* and begin _SP$1. */
+define(`_spentry',`ifdef(`__func_name',`_endfn',`')
+	_exportfn(_SP$1)
+	.line  __line__
+')
+
+             
+/* End subprimitive $1 (closes _SP$1). */
+define(`_endsubp',`
+	_endfn(_SP$1)
+# __line__
+')
+
+
+                	
+               
+/* Tail-call built-in function number $1 from the builtin_functions */
+/* vector, with $2 arguments. */
+define(`jump_builtin',`
+	ref_nrs_value(fname,builtin_functions)
+	set_nargs($2)
+	vrefr(fname,fname,$1)
+	jump_fname()
+')
+	
+_spentry(jmpsym)
+	/* Tail-call the function in the function cell of fname. */
+	__(jump_fname())
+        
+_spentry(jmpnfn)
+	/* Tail-call the function object in nfn. */
+	__(jump_nfn())
+        
+	/*  Call temp0 if it's either a symbol or function */
+_spentry(funcall)
+	__(do_funcall())	/* callee (function or symbol) is in temp0 */
+	
+/* Subprims for catch, throw, unwind_protect.  */
+
+/* Push a catch frame on the temp stack (and some of it on the cstack, as well.)  */
+/* The PC in question is 4 bytes past the caller's return address. ALWAYS.  */
+/* The catch tag is in arg_z, the multiple-value flag is in imm2.  */
+/* Bash some of the imm registers and loc_pc.  */
+
+_spentry(mkcatch1v)
+	__(li imm2,0)	/* mv-flag 0: catcher expects a single value */
+	__(mkcatch())
+        __(blr)
+        
+_spentry(mkunwind)
+	/* An unwind-protect frame is a catch frame whose tag is the */
+	/* unbound marker (tested against cr7 in the nthrow subprims). */
+	__(lwi(arg_z,unbound_marker))
+	__(li imm2,fixnum_one)
+	__(mkcatch())
+	__(blr)
+        
+_spentry(mkcatchmv)
+	__(li imm2,fixnum_one)	/* mv-flag 1: catcher accepts multiple values */
+	__(mkcatch())
+        __(blr)
+        
+/* Caller has pushed tag and 0 or more values; nargs = nvalues.  */
+/* Otherwise, process unwind-protects and throw to indicated catch frame.  */
+	
+_spentry(throw)
+	/* On entry: nargs values atop the vstack, the catch tag stored */
+	/* beneath them at vsp+nargs.  Find the target catch frame, let */
+	/* _SPnthrowvalues unwind to it, then return into the catcher. */
+	__(ldr(imm1,tcr.catch_top(rcontext)))
+	__(li imm0,0) /* count intervening catch/unwind-protect frames.  */
+	__(cmpri(cr0,imm1,0))
+	__(ldrx(temp0,vsp,nargs))	/* temp0 = the catch tag */
+	__(beq- cr0,local_label(_throw_tag_not_found))
+local_label(_throw_loop):
+	__(ldr(temp1,catch_frame.catch_tag(imm1)))
+	__(cmpr(cr0,temp0,temp1))
+	__(mr imm2,imm1)
+	__(ldr(imm1,catch_frame.link(imm1)))
+	__(cmpri(cr1,imm1,0))
+	__(beq cr0,local_label(_throw_found))
+	__(addi imm0,imm0,fixnum_one)
+	__(beq- cr1,local_label(_throw_tag_not_found))
+	__(b local_label(_throw_loop))
+/* imm2: (tstack-consed) target catch frame, imm0: count of intervening  */
+/* frames. If target isn't a multiple-value receiver, discard extra values */
+/* (less hair, maybe.)  */
+local_label(_throw_found):
+	__(ldr(imm1,catch_frame.mvflag(imm2)))
+	__(cmpri(cr0,imm1,0))
+	__(cmpri(cr1,nargs,0))
+	__(li fn,0)
+	__(add imm1,vsp,nargs)
+	__(la imm1,-node_size(imm1))
+	__(bne cr0,local_label(_throw_all_values))
+	__(set_nargs(1))
+	__(beq cr1,local_label(_throw_default_1_val))
+	__(mr vsp,imm1)
+	__(b local_label(_throw_all_values))
+local_label(_throw_default_1_val):
+	__(li imm4,nil_value)	/* no values thrown: default to NIL */
+	__(vpush(imm4))
+local_label(_throw_all_values):
+	__(bl _SPnthrowvalues)
+	/* Unwinding done; pop the target catch frame itself. */
+	__(ldr(imm3,tcr.catch_top(rcontext)))
+	__(ldr(imm1,tcr.db_link(rcontext)))
+	__(ldr(imm0,catch_frame.db_link(imm3)))
+	__(ldr(imm4,catch_frame.mvflag(imm3)))
+	__(cmpr(cr0,imm0,imm1))
+	__(cmpri(cr1,imm4,0))
+	__(la tsp,-((tsp_frame.fixed_overhead+fulltag_misc))(imm3))
+	__(beq cr0,local_label(_throw_dont_unbind))
+        __(bl _SPunbind_to)
+local_label(_throw_dont_unbind):
+	__(add imm0,vsp,nargs)
+	__(cmpri(cr0,nargs,0))
+	__(ldr(imm1,catch_frame.csp(imm3)))
+	__(ldr(imm1,lisp_frame.savevsp(imm1)))
+	__(bne cr1,local_label(_throw_multiple))
+        /* Catcher expects single value in arg_z  */
+	__(ldr(arg_z,-node_size(imm0)))
+	__(b local_label(_throw_pushed_values))
+local_label(_throw_multiple):
+	__(beq cr0,local_label(_throw_pushed_values))
+	__(mr imm2,nargs)
+local_label(_throw_mvloop):
+	/* copy the nargs values down to the saved vsp */
+	__(subi imm2,imm2,fixnum_one)
+	__(cmpri(imm2,0))
+	__(ldru(temp0,-node_size(imm0)))
+	__(push(temp0,imm1))
+	__(bgt local_label(_throw_mvloop))
+local_label(_throw_pushed_values):
+	__(mr vsp,imm1)
+	__(ldr(imm1,catch_frame.xframe(imm3)))
+	__(str(imm1,tcr.xframe(rcontext)))
+	__(ldr(sp,catch_frame.csp(imm3)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+        __(restore_catch_nvrs(imm3))
+	__(ldr(imm3,catch_frame.link(imm3)))
+	__(str(imm3,tcr.catch_top(rcontext)))
+	__(unlink(tsp))
+	__(blr)
+local_label(_throw_tag_not_found):
+	__(uuo_interr(error_throw_tag_missing,temp0))
+	/* if the error handler returns, store the tag back and retry */
+	__(strux(temp0,vsp,nargs))
+	__(b _SPthrow)
+
+
+/* This takes N multiple values atop the vstack.  */
+_spentry(nthrowvalues)
+	/* imm0 = fixnum count of catch/unwind-protect frames to unwind; */
+	/* nargs values are on the vstack.  tcr.unwinding is set non-zero */
+	/* for the duration (cleared around cleanup-form calls below). */
+        __(li imm1,1)
+	__(mr imm4,imm0)	/* imm4 = remaining frame count */
+        __(str(imm1,tcr.unwinding(rcontext)))
+local_label(_nthrowv_nextframe):
+	__(subi imm4,imm4,fixnum_one)
+	__(cmpri(cr1,imm4,0))
+	__(ldr(temp0,tcr.catch_top(rcontext)))
+	__(ldr(imm1,tcr.db_link(rcontext)))
+	__(blt cr1,local_label(_nthrowv_done))
+	__(ldr(imm0,catch_frame.db_link(temp0)))
+	__(ldr(imm3,catch_frame.link(temp0)))
+	__(cmpr(cr0,imm0,imm1))
+	__(str(imm3,tcr.catch_top(rcontext)))
+	__(ldr(temp1,catch_frame.catch_tag(temp0)))
+	__(cmpri(cr7,temp1,unbound_marker))		/* unwind-protect ?  */
+	__(ldr(first_nvr,catch_frame.xframe(temp0)))
+	__(str(first_nvr,tcr.xframe(rcontext)))
+	__(ldr(sp,catch_frame.csp(temp0)))
+	__(beq cr0,local_label(_nthrowv_dont_unbind))
+	__(mflr loc_pc)
+        __(bl _SPunbind_to)
+	__(mtlr loc_pc)
+local_label(_nthrowv_dont_unbind):
+	__(beq cr7,local_label(_nthrowv_do_unwind))
+/* A catch frame.  If the last one, restore context from there.  */
+	__(bne cr1,local_label(_nthrowv_skip))
+	__(ldr(imm0,lisp_frame.savevsp(sp)))
+	__(str(rzero,lisp_frame.savevsp(sp)))	/* marker for stack overflow code  */
+	__(add imm1,vsp,nargs)
+	__(mr imm2,nargs)
+	__(b local_label(_nthrowv_push_test))
+local_label(_nthrowv_push_loop):
+	/* copy the values down to the target frame saved vsp */
+	__(ldru(temp1,-node_size(imm1)))
+	__(push(temp1,imm0))
+local_label(_nthrowv_push_test):
+	__(cmpri(imm2,0))
+	__(subi imm2,imm2,fixnum_one)
+	__(bne local_label(_nthrowv_push_loop))
+	__(mr vsp,imm0)
+        __(restore_catch_nvrs(temp0))
+
+local_label(_nthrowv_skip):
+	__(la tsp,-(tsp_frame.fixed_overhead+fulltag_misc)(temp0))
+	__(unlink(tsp))
+	__(discard_lisp_frame())
+	__(b local_label(_nthrowv_nextframe))
+local_label(_nthrowv_do_unwind):
+        /* This is harder.  Call the cleanup code with the multiple */
+	/* values (and nargs, which is a fixnum.)  Remember the throw count  */
+        /* (also a fixnum) as well.  */
+        /* Save our caller's LR and FN in the csp frame created by the unwind-  */
+        /* protect.  (Clever, eh ?)  */
+	__(ldr(first_nvr,catch_frame.xframe(temp0)))
+	__(str(first_nvr,tcr.xframe(rcontext)))
+        __(restore_catch_nvrs(temp0))
+	__(la tsp,-(tsp_frame.fixed_overhead+fulltag_misc)(temp0))
+	__(unlink(tsp))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(nfn,lisp_frame.savefn(sp)))
+	__(mtctr loc_pc)	/* cleanup code address.  */
+	__(str(fn,lisp_frame.savefn(sp)))
+	__(mflr loc_pc)
+	__(mr fn,nfn)
+	__(str(loc_pc,lisp_frame.savelr(sp)))
+	__(dnode_align(imm0,nargs,tsp_frame.fixed_overhead+(2*node_size))) /* tsp overhead, nargs, throw count  */
+	__(TSP_Alloc_Var_Boxed_nz(imm0,imm1))
+	__(mr imm2,nargs)
+	__(add imm1,nargs,vsp)
+	__(la imm0,tsp_frame.data_offset(tsp))
+	__(str(nargs,0(imm0)))
+	__(b local_label(_nthrowv_tpushtest))
+local_label(_nthrowv_tpushloop):
+	/* save the thrown values in the tsp frame across the cleanup call */
+	__(ldru(temp0,-node_size(imm1)))
+	__(stru(temp0,node_size(imm0)))
+	__(subi imm2,imm2,fixnum_one)
+local_label(_nthrowv_tpushtest):
+	__(cmpri(imm2,0))
+	__(bne local_label(_nthrowv_tpushloop))
+	__(stru(imm4,node_size(imm0)))	/* remaining throw count */
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+        /* Interrupts should be disabled here (we're calling and returning */
+        /* from the cleanup form.  Clear the tcr.unwinding flag, so that */
+        /* interrupts can be taken if they're enabled in the cleanup form.  */
+        __(str(rzero,tcr.unwinding(rcontext)))        
+	__(bctrl)
+        __(li imm1,1)
+	__(la imm0,tsp_frame.data_offset(tsp))
+        __(str(imm1,tcr.unwinding(rcontext)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+	__(ldr(nargs,0(imm0)))
+	__(mr imm2,nargs)
+	__(b local_label(_nthrowv_tpoptest))
+local_label(_nthrowv_tpoploop):
+	/* restore the saved values to the vstack */
+	__(ldru(temp0,node_size(imm0)))
+	__(vpush(temp0))
+	__(subi imm2,imm2,fixnum_one)
+local_label(_nthrowv_tpoptest):
+	__(cmpri(imm2,0))
+	__(bne local_label(_nthrowv_tpoploop))
+	__(ldr(imm4,node_size(imm0)))	/* recover the throw count */
+	__(unlink(tsp))
+	__(b local_label(_nthrowv_nextframe))
+local_label(_nthrowv_done):
+        __(str(rzero,tcr.unwinding(rcontext)))
+        /* Poll for a deferred interrupt.  That clobbers nargs (which we've */
+        /* just expended a lot of effort to preserve), so expend a little */
+        /* more effort. */
+        __(mr imm4,nargs)
+        __(check_pending_interrupt())
+        __(mr nargs,imm4)
+        __(blr)
+
+/* This is a (slight) optimization.  When running an unwind-protect, */
+/* save the single value and the throw count in the tstack frame. */
+/* Note that this takes a single value in arg_z.  */
+_spentry(nthrow1value)
+	/* As nthrowvalues, but the single thrown value lives in arg_z, */
+	/* so only arg_z and the throw count need saving across cleanups. */
+        __(li imm1,1)
+	__(mr imm4,imm0)	/* imm4 = remaining frame count */
+        __(str(imm1,tcr.unwinding(rcontext)))
+local_label(_nthrow1v_nextframe):
+	__(subi imm4,imm4,fixnum_one)
+	__(cmpri(cr1,imm4,0))
+	__(ldr(temp0,tcr.catch_top(rcontext)))
+	__(ldr(imm1,tcr.db_link(rcontext)))
+	__(set_nargs(1))
+	__(blt cr1,local_label(_nthrow1v_done))
+	__(ldr(imm3,catch_frame.link(temp0)))
+	__(ldr(imm0,catch_frame.db_link(temp0)))
+	__(cmpr(cr0,imm0,imm1))
+	__(str(imm3,tcr.catch_top(rcontext)))
+        __(ldr(imm3,catch_frame.xframe(temp0)))
+	__(ldr(temp1,catch_frame.catch_tag(temp0)))
+	__(cmpri(cr7,temp1,unbound_marker))		/* unwind-protect ?  */
+        __(str(imm3,tcr.xframe(rcontext)))
+	__(ldr(sp,catch_frame.csp(temp0)))
+	__(beq cr0,local_label(_nthrow1v_dont_unbind))
+	 __(mflr loc_pc)
+         __(bl _SPunbind_to)
+	 __(mtlr loc_pc)
+local_label(_nthrow1v_dont_unbind):
+	__(beq cr7,local_label(_nthrow1v_do_unwind))
+        /* A catch frame.  If the last one, restore context from there.  */
+	__(bne cr1,local_label(_nthrow1v_skip))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+        __(restore_catch_nvrs(temp0))
+local_label(_nthrow1v_skip):
+	__(la tsp,-(tsp_frame.fixed_overhead+fulltag_misc)(temp0))
+	__(unlink(tsp))
+	__(discard_lisp_frame())
+	__(b local_label(_nthrow1v_nextframe))
+local_label(_nthrow1v_do_unwind):
+        /* This is harder, but not as hard (not as much BLTing) as the  */
+        /* multiple-value case.  */
+        /* Save our caller's LR and FN in the csp frame created by the unwind-  */
+        /* protect.  (Clever, eh ?)  */
+
+        __(restore_catch_nvrs(temp0))
+	__(la tsp,-(tsp_frame.fixed_overhead+fulltag_misc)(temp0))
+	__(unlink(tsp))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(nfn,lisp_frame.savefn(sp)))
+	__(mtctr loc_pc)		/* cleanup code address.  */
+	__(str(fn,lisp_frame.savefn(sp)))
+	__(mflr loc_pc)
+	__(mr fn,nfn)
+	__(str(loc_pc,lisp_frame.savelr(sp)))
+	__(TSP_Alloc_Fixed_Boxed(2*node_size)) /* tsp overhead, value, throw count  */
+	__(str(arg_z,tsp_frame.data_offset(tsp)))
+	__(str(imm4,tsp_frame.data_offset+node_size(tsp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+        __(str(rzero,tcr.unwinding(rcontext)))	/* allow interrupts in the cleanup form */
+	__(bctrl)
+        __(li imm1,1)
+	__(ldr(arg_z,tsp_frame.data_offset(tsp)))
+        __(str(imm1,tcr.unwinding(rcontext)))
+	__(ldr(imm4,tsp_frame.data_offset+node_size(tsp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+	__(unlink(tsp))
+	__(b local_label(_nthrow1v_nextframe))
+local_label(_nthrow1v_done):
+        __(str(rzero,tcr.unwinding(rcontext)))
+        /* nargs has an undefined value here, so we can clobber it while */
+        /* polling for a deferred interrupt  */
+        __(check_pending_interrupt())
+        __(blr)
+
+/* This never affects the symbol's vcell  */
+/* Non-null symbol in arg_y, new value in arg_z          */
+_spentry(bind)
+	/* imm3 = binding index of arg_y; 0 means the symbol has no */
+	/* thread-local binding slot -> XSYMNOBIND error. */
+        __(ldr(imm3,symbol.binding_index(arg_y)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(cmpri(imm3,0))
+        __(trlle(imm0,imm3))           /* tlb too small  */
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldrx(temp1,imm2,imm3))
+        __(beq 9f)
+	/* push old value, binding index and old db_link, then install */
+	/* the new value in the TLB and update db_link. */
+        __(vpush(temp1))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(strx(arg_z,imm2,imm3))
+        __(str(vsp,tcr.db_link(rcontext)))
+        __(blr)
+9:
+        __(mr arg_z,arg_y)
+        __(lwi(arg_y,XSYMNOBIND))
+        __(set_nargs(2))
+        __(b _SPksignalerr)
+
+/* arg_z = symbol: bind it to its current value          */
+_spentry(bind_self)
+        __(ldr(imm3,symbol.binding_index(arg_z)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(cmpri(imm3,0))
+        __(trlle(imm0,imm3))           /* tlb too small  */
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldrx(temp1,imm2,imm3))
+        __(cmpri(cr1,temp1,no_thread_local_binding_marker))
+        __(beq 9f)	/* binding index 0: no tlb slot */
+        __(mr temp0,temp1)
+        __(bne cr1,1f)
+	/* no thread-local binding yet: use the global vcell value */
+        __(ldr(temp0,symbol.vcell(arg_z)))
+1:              
+        __(vpush(temp1))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(strx(temp0,imm2,imm3))
+        __(str(vsp,tcr.db_link(rcontext)))
+        __(blr)
+9:      __(lwi(arg_y,XSYMNOBIND))
+        __(set_nargs(2))
+        __(b _SPksignalerr)
+
+/* Bind symbol in arg_z to NIL                 */
+_spentry(bind_nil)
+        __(ldr(imm3,symbol.binding_index(arg_z)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(cmpri(imm3,0))
+        __(beq- 9f)	/* no binding index: error */
+        __(trlle(imm0,imm3))           /* tlb too small  */
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(ldrx(temp1,imm2,imm3))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(li imm0,nil_value)
+	/* push old value, index, old db_link; store NIL in the tlb slot */
+        __(vpush(temp1))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(strx(imm0,imm2,imm3))
+        __(str(vsp,tcr.db_link(rcontext)))
+        __(blr)
+9:      __(lwi(arg_y,XSYMNOBIND))
+        __(set_nargs(2))
+        __(b _SPksignalerr)
+
+       
+/* Bind symbol in arg_z to its current value;  trap if symbol is unbound */
+_spentry(bind_self_boundp_check)
+        __(ldr(imm3,symbol.binding_index(arg_z)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(cmpri(imm3,0))
+        __(trlle(imm0,imm3))           /* tlb too small  */
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(ldrx(temp1,imm2,imm3))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(beq 9f)              /* no real tlb index  */
+        __(cmpri(temp1,no_thread_local_binding_marker))
+        __(mr temp0,temp1)
+        __(bne 1f)
+        __(ldr(temp0,symbol.vcell(arg_z)))	/* fall back to global value */
+1:      __(treqi(temp0,unbound_marker))	/* trap if the symbol is unbound */
+        __(vpush(temp1))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(strx(temp0,imm2,imm3))
+        __(str(vsp,tcr.db_link(rcontext)))
+        __(blr)
+9:      __(lwi(arg_y,XSYMNOBIND))
+        __(set_nargs(2))
+        __(b _SPksignalerr)
+
+
+/* The function pc_luser_xp() - which is used to ensure that suspended threads */
+/* are suspended in a GC-safe way - has to treat these subprims (which  */
+/* implement the EGC write-barrier) specially.  Specifically, a store that */
+/* might introduce an intergenerational reference (a young pointer stored  */
+/* in an old object) has to "memoize" that reference by setting a bit in  */
+/* the global "refbits" bitmap. */
+/* This has to happen atomically, and has to happen atomically wrt GC. */
+/* Note that updating a word in a bitmap is itself not atomic, unless we use */
+/* interlocked loads and stores. */
+
+
+/* For RPLACA and RPLACD, things are fairly simple: regardless of where we  */
+/* are in the function, we can do the store (even if it's already been done)  */
+/* and calculate whether or not we need to set the bit out-of-line.  (Actually */
+/* setting the bit needs to be done atomically, unless we're sure that other */
+/* threads are suspended.) */
+/* We can unconditionally set the suspended thread's PC to its LR. */
+	
+        .globl C(egc_write_barrier_start)
+_spentry(rplaca)
+C(egc_write_barrier_start):
+        __(cmplr(cr2,arg_z,arg_y))
+        __(_rplaca(arg_y,arg_z))
+        __(blelr cr2)	/* new value not above the cell: no memoization */
+        /* Compute the dnode index of arg_y relative to ref_base; if it */
+        /* is below oldspace_dnode_count, atomically set the matching */
+        /* bit in the global refbits bitmap (lrarx/strcx loop). */
+        __(ref_global(imm2,ref_base))
+        __(sub imm0,arg_y,imm2)
+        __(load_highbit(imm3))
+        __(srri(imm0,imm0,dnode_shift))       
+        __(ref_global(imm1,oldspace_dnode_count))
+        __(extract_bit_shift_count(imm4,imm0))
+        __(cmplr(imm0,imm1))
+        __(srr(imm3,imm3,imm4))
+        __(srri(imm0,imm0,bitmap_shift))       
+        __(ref_global(imm2,refbits))
+        __(bgelr)	/* not in oldspace: done */
+        __(slri(imm0,imm0,word_shift))
+        __(ldrx(imm1,imm2,imm0))
+        __(and. imm1,imm1,imm3)
+        __(bnelr)	/* bit already set: done */
+1:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx(imm1,imm2,imm0))
+        __(bne- 1b)	/* reservation lost: retry */
+        __(isync)
+        __(blr)
+
+        .globl C(egc_rplacd)
+_spentry(rplacd)
+C(egc_rplacd):
+        /* Same memoization scheme as rplaca, storing into the cdr. */
+        __(cmplr(cr2,arg_z,arg_y))
+	__(_rplacd(arg_y,arg_z))
+        __(blelr cr2)
+        __(ref_global(imm2,ref_base))
+        __(sub imm0,arg_y,imm2)
+        __(load_highbit(imm3))
+        __(srri(imm0,imm0,dnode_shift))       
+        __(ref_global(imm1,oldspace_dnode_count))
+        __(extract_bit_shift_count(imm4,imm0))
+        __(cmplr(imm0,imm1))
+        __(srr(imm3,imm3,imm4))
+        __(srri(imm0,imm0,bitmap_shift))       
+        __(ref_global(imm2,refbits))
+        __(bgelr)
+        __(slri(imm0,imm0,word_shift))
+        __(ldrx(imm1,imm2,imm0))
+        __(and. imm1,imm1,imm3)
+        __(bnelr)        
+1:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx(imm1,imm2,imm0))
+        __(bne- 1b)
+        __(isync)
+        __(blr)
+
+/* Storing into a gvector can be handled the same way as storing into a CONS. */
+
+        .globl C(egc_gvset)
+_spentry(gvset)
+C(egc_gvset):
+        /* Store arg_z at (boxed) element index arg_y of gvector arg_x, */
+        /* memoizing the stored-into dnode as in rplaca above. */
+        __(cmplr(cr2,arg_z,arg_x))
+        __(la imm0,misc_data_offset(arg_y))
+        __(strx(arg_z,arg_x,imm0))
+        __(blelr cr2)
+        __(add imm0,imm0,arg_x)	/* absolute address of the stored-into cell */
+        __(ref_global(imm2,ref_base))
+        __(load_highbit(imm3))
+        __(ref_global(imm1,oldspace_dnode_count))
+        __(sub imm0,imm0,imm2)
+        __(srri(imm0,imm0,dnode_shift))       
+        __(cmplr(imm0,imm1))
+        __(extract_bit_shift_count(imm4,imm0))
+        __(srri(imm0,imm0,bitmap_shift))       
+        __(srr(imm3,imm3,imm4))
+        __(ref_global(imm2,refbits))
+        __(bgelr)
+        __(slri(imm0,imm0,word_shift))
+        __(ldrx(imm1,imm2,imm0))
+        __(and. imm1,imm1,imm3)
+        __(bnelr)        
+1:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx(imm1,imm2,imm0))
+        __(bne- 1b)
+        __(isync)
+        __(blr)
+
+/* This is a special case of storing into a gvector: if we need to memoize  */
+/* the store, record the address of the hash-table vector in the refmap,  */
+/* as well. */
+        .globl C(egc_set_hash_key)        
+_spentry(set_hash_key)
+C(egc_set_hash_key):
+        /* Like gvset, but when the store is memoized, also set the */
+        /* refbits bit for the hash-table vector header itself. */
+        __(cmplr(cr2,arg_z,arg_x))
+        __(la imm0,misc_data_offset(arg_y))
+        __(strx(arg_z,arg_x,imm0))
+        __(blelr cr2)
+        __(add imm0,imm0,arg_x)
+        __(ref_global(imm2,ref_base))
+        __(load_highbit(imm3))
+        __(ref_global(imm1,oldspace_dnode_count))
+        __(sub imm0,imm0,imm2)
+        __(srri(imm0,imm0,dnode_shift))       
+        __(cmplr(imm0,imm1))
+        __(extract_bit_shift_count(imm4,imm0))
+        __(srri(imm0,imm0,bitmap_shift))       
+        __(srr(imm3,imm3,imm4))
+        __(ref_global(imm2,refbits))
+        __(bgelr)
+        __(slri(imm0,imm0,word_shift))
+        __(ldrx(imm1,imm2,imm0))
+        __(and. imm1,imm1,imm3)
+        __(bne 2f)        
+1:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx(imm1,imm2,imm0))
+        __(bne- 1b)
+        __(isync)
+2:              
+        /* Memoize the hash-vector header (dnode of arg_x itself). */
+        __(ref_global(imm1,ref_base))
+        __(sub imm0,arg_x,imm1)
+        __(srri(imm0,imm0,dnode_shift))
+        __(load_highbit(imm3))
+        __(extract_bit_shift_count(imm4,imm0))
+        __(srri(imm0,imm0,bitmap_shift))
+        __(srr(imm3,imm3,imm4))
+        __(slri(imm0,imm0,word_shift))
+        __(ldrx(imm1,imm2,imm0))
+        __(and. imm1,imm1,imm3)
+        __(bnelr)
+3:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx(imm1,imm2,imm0))
+        __(bne- 3b)
+        __(isync)
+        __(blr)
+        
+/*
+   Interrupt handling (in pc_luser_xp()) notes:	
+   If we are in this function and before the test which follows the
+   conditional (at egc_store_node_conditional), or at that test
+   and cr0`eq' is clear, pc_luser_xp() should just let this continue
+   (we either haven't done the store conditional yet, or got a
+   possibly transient failure.)  If we're at that test and the
+   cr0`EQ' bit is set, then the conditional store succeeded and
+   we have to atomically memoize the possible intergenerational
+   reference.  Note that the local labels 4 and 5 are in the
+   body of the next subprim (and at or beyond 'egc_write_barrier_end').
+
+   N.B:	it's not possible to really understand what's going on just
+   by the state of the cr0`eq' bit.  A transient failure in the
+   conditional stores that handle memoization might clear cr0`eq'
+   without having completed the memoization.
+*/
+
+        .globl C(egc_store_node_conditional)
+        .globl C(egc_write_barrier_end)
+_spentry(store_node_conditional)
+C(egc_store_node_conditional):
+        /* Compare-and-swap: vsp@0 holds a boxed byte offset; if the word */
+        /* at arg_x+offset equals arg_y, store arg_z there (lrarx/strcx) */
+        /* and memoize as in gvset.  Labels 4 and 5 are in the following */
+        /* subprim -- see the interrupt-handling comment block above. */
+        __(cmplr(cr2,arg_z,arg_x))
+        __(vpop(temp0))
+        __(unbox_fixnum(imm4,temp0))
+1:      __(lrarx(temp1,arg_x,imm4))
+        __(cmpr(cr1,temp1,arg_y))
+        __(bne cr1,5f)	/* old value does not match: fail */
+        __(strcx(arg_z,arg_x,imm4))
+	.globl C(egc_store_node_conditional_test)
+C(egc_store_node_conditional_test):	
+        __(bne 1b)	/* reservation lost: retry */
+        __(isync)
+        __(add imm0,imm4,arg_x)
+        __(ref_global(imm2,ref_base))
+        __(ref_global(imm1,oldspace_dnode_count))
+        __(sub imm0,imm0,imm2)
+        __(load_highbit(imm3))
+        __(srri(imm0,imm0,dnode_shift))       
+        __(cmplr(imm0,imm1))
+        __(extract_bit_shift_count(imm2,imm0))
+        __(srri(imm0,imm0,bitmap_shift))       
+        __(srr(imm3,imm3,imm2))
+        __(ref_global(imm2,refbits))
+        __(bge 4f)	/* not oldspace: success exit (next subprim) */
+        __(slri(imm0,imm0,word_shift))
+2:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx( imm1,imm2,imm0))
+        __(bne- 2b)
+        __(isync)
+        __(b 4f)
+
+/* arg_z = new value, arg_y = expected old value, arg_x = hash-vector,
+   vsp`0' = (boxed) byte-offset 
+   Interrupt-related issues are as in store_node_conditional, but
+   we have to do more work to actually do the memoization.*/
+_spentry(set_hash_key_conditional)
+	.globl C(egc_set_hash_key_conditional)
+C(egc_set_hash_key_conditional):
+	__(cmplr(cr2,arg_z,arg_x))
+	__(vpop(imm4))
+	__(unbox_fixnum(imm4,imm4))
+1:	__(lrarx(temp1,arg_x,imm4))
+	__(cmpr(cr1,temp1,arg_y))
+	__(bne cr1,5f)
+	__(strcx(arg_z,arg_x,imm4))
+	.globl C(egc_set_hash_key_conditional_test)
+C(egc_set_hash_key_conditional_test):	
+	__(bne 1b)
+	__(isync)
+	__(add imm0,imm4,arg_x)
+	__(ref_global(imm2,ref_base))
+	__(ref_global(imm1,oldspace_dnode_count))
+	__(sub imm0,imm0,imm2)
+	__(load_highbit(imm3))
+	__(srri(imm0,imm0,dnode_shift))
+	__(cmplr(imm0,imm1))
+	__(extract_bit_shift_count(imm2,imm0))
+	__(srri(imm0,imm0,bitmap_shift))
+	__(srr(imm3,imm3,imm2))
+	__(ref_global(imm2,refbits))
+	__(bge 4f)
+	__(slri(imm0,imm0,word_shift))
+2:	__(lrarx(imm1,imm2,imm0))
+	__(or imm1,imm1,imm3)
+	__(strcx(imm1,imm2,imm0))
+	__(bne- 2b)
+	__(isync)
+	/* Memoize hash table header */		
+        __(ref_global(imm1,ref_base))
+        __(sub imm0,arg_x,imm1)
+        __(srri(imm0,imm0,dnode_shift))
+        __(load_highbit(imm3))
+        __(extract_bit_shift_count(imm4,imm0))
+        __(srri(imm0,imm0,bitmap_shift))
+        __(srr(imm3,imm3,imm4))
+        __(slri(imm0,imm0,word_shift))
+        __(ldrx(imm1,imm2,imm0))
+        __(and. imm1,imm1,imm3)
+        __(bne 4f)
+3:      __(lrarx(imm1,imm2,imm0))
+        __(or imm1,imm1,imm3)
+        __(strcx(imm1,imm2,imm0))
+        __(bne- 3b)
+        __(isync)
+C(egc_write_barrier_end):
+4:	__(li arg_z,t_value)
+	__(blr)
+5:      __(li imm0,RESERVATION_DISCHARGE)
+        __(strcx(rzero,0,imm0))
+	__(li arg_z,nil_value)
+	__(blr)
+	
+	
+	       
+/* Cons a list of the nargs (fixnum count) values on the vstack,
+   one cell at a time; result (initially NIL) in arg_z. */
+_spentry(conslist)
+	__(li arg_z,nil_value)
+	__(cmpri(nargs,0))
+	__(b 2f)	
+1:
+	__(ldr(temp0,0(vsp)))
+	__(cmpri(nargs,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(Cons(arg_z,temp0,arg_z))
+	__(subi nargs,nargs,fixnum_one)
+2:
+	__(bne 1b)
+	__(blr)
+	
+/* do list*: last arg in arg_z, all others vpushed, nargs set to #args vpushed.  */
+/* Cons, one cons cell at a time.  Maybe optimize this later.  */
+/* Like conslist, but the initial tail is the last arg already in arg_z
+   (list*); the remaining nargs values are popped from the vstack. */
+_spentry(conslist_star)
+	__(cmpri(nargs,0))
+	__(b 2f)	
+1:
+	__(ldr(temp0,0(vsp)))
+	__(cmpri(nargs,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(Cons(arg_z,temp0,arg_z))
+	__(subi nargs,nargs,fixnum_one)
+2:
+	__(bne 1b)
+	__(blr)
+
+/* We always have to create a tsp frame (even if nargs is 0), so the compiler  */
+/* doesn't get confused.  */
+/* Cons a list of the nargs vstack values on the temp stack (one tsp
+   frame holds all the cells); result in arg_z.  cr1 tracks the count. */
+_spentry(stkconslist)
+	__(li arg_z,nil_value)
+	__(cmpri(cr1,nargs,0))
+	__(add imm1,nargs,nargs)
+	__(addi imm1,imm1,tsp_frame.fixed_overhead)
+	__(TSP_Alloc_Var_Boxed(imm1,imm2))
+	__(la imm1,tsp_frame.data_offset+fulltag_cons(tsp))
+	__(b 2f)
+/* fill each cons cell in the tsp frame, linking it onto arg_z */
+1:	__(ldr(temp0,0(vsp)))
+	__(cmpri(cr1,nargs,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(_rplaca(imm1,temp0))
+	__(_rplacd(imm1,arg_z))
+	__(mr arg_z,imm1)
+	__(la imm1,cons.size(imm1))
+	__(la nargs,-fixnum_one(nargs))
+2:
+	__(bne cr1,1b)
+	__(blr)
+
+/* do list*: last arg in arg_z, all others vpushed,  */
+/* nargs set to #args vpushed.  */
+/* list* on the temp stack: initial tail in arg_z, nargs values popped
+   from the vstack into cells allocated in one tsp frame. */
+_spentry(stkconslist_star)
+	__(cmpri(cr1,nargs,0))
+	__(add imm1,nargs,nargs)
+	__(addi imm1,imm1,tsp_frame.fixed_overhead)
+	__(TSP_Alloc_Var_Boxed(imm1,imm2))
+	__(la imm1,tsp_frame.data_offset+fulltag_cons(tsp))
+	__(b 2f)
+1:	__(ldr(temp0,0(vsp)))
+	__(cmpri(cr1,nargs,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(_rplaca(imm1,temp0))
+	__(_rplacd(imm1,arg_z))
+	__(mr arg_z,imm1)
+	__(la imm1,cons.size(imm1))
+	__(la nargs,-fixnum_one(nargs))
+2:
+	__(bne cr1,1b)
+	__(blr)
+
+
+/* Make a stack-consed simple-vector out of the NARGS objects  */
+/* on top of the vstack; return it in arg_z.  */
+/* Make a stack-consed simple-vector of the NARGS vstack values;
+   vector (tagged fulltag_misc) returned in arg_z.  Elements are
+   stored back-to-front (imm1 walks down from just past the data). */
+_spentry(mkstackv)
+	__(cmpri(cr1,nargs,0))
+	__(dnode_align(imm1,nargs,tsp_frame.fixed_overhead+node_size))
+	__(TSP_Alloc_Var_Boxed_nz(imm1,imm2))
+	__(slwi imm0,nargs,num_subtag_bits-fixnumshift)
+	__(ori imm0,imm0,subtag_simple_vector)
+	__(str(imm0,tsp_frame.data_offset(tsp)))
+	__(la arg_z,tsp_frame.data_offset+fulltag_misc(tsp))
+	__(beq- cr1,2f)
+	__(la imm0,misc_data_offset(arg_z))
+	__(add imm1,imm0,nargs)
+1:
+	__(la nargs,-node_size(nargs))
+	__(cmpri(cr1,nargs,0))
+	__(ldr(temp1,0(vsp)))
+	__(la vsp,node_size(vsp))
+	__(stru(temp1,-node_size(imm1)))
+	__(bne cr1,1b)
+2:
+	__(blr)
+
+	
+        
+
+/* SETQ of the symbol in arg_y to arg_z: if the symbol's constant bit
+   is clear, tail-call the ordinary special-set subprim; otherwise
+   signal an XCONST error with the symbol as operand. */
+_spentry(setqsym)
+	__(ldr(imm0,symbol.flags(arg_y)))
+	__(andi. imm0,imm0,sym_vbit_const_mask)
+	__(beq _SPspecset)
+	__(mr arg_z,arg_y)
+	__(lwi(arg_y,XCONST))
+	__(set_nargs(2))
+	__(b _SPksignalerr)
+
+
+	
+/* PROGV setup: arg_y = list of symbols, arg_z = list of values.
+   Verifies arg_z is a proper list (tortoise/hare), then pushes a
+   special-binding triplet (old value, binding index, db-link) for each
+   symbol onto a tsp frame, binding each symbol's thread-local value to
+   the corresponding value (or the unbound marker when values run out). */
+_spentry(progvsave)
+	/* Error if arg_z isn't a proper list.  That's unlikely, */
+	/* but it's better to check now than to crash later. */
+	
+	__(cmpri(arg_z,nil_value))
+	__(mr arg_x,arg_z)	/* fast  */
+	__(mr temp1,arg_z)	/* slow  */
+	__(beq 9f)		/* Null list is proper  */
+0:	
+	__(trap_unless_list(arg_x,imm0))
+	__(_cdr(temp2,arg_x))	/* (null (cdr fast)) ?  */
+	__(cmpri(cr3,temp2,nil_value))
+	__(trap_unless_list(temp2,imm0,cr0))
+	__(_cdr(arg_x,temp2))
+	__(beq cr3,9f)
+	__(_cdr(temp1,temp1))
+	__(cmpr(arg_x,temp1))
+	__(bne 0b)
+	__(lwi(arg_y,XIMPROPERLIST))
+	__(set_nargs(2))
+	__(b _SPksignalerr)
+9:	/* Whew 	 */
+	
+        /* Next, determine the length of arg_y.  We  */
+        /* know that it's a proper list.  */
+	__(li imm0,-node_size)
+	__(mr arg_x,arg_y)
+1:
+	__(cmpri(cr0,arg_x,nil_value))
+	__(la imm0,node_size(imm0))
+	__(_cdr(arg_x,arg_x))
+	__(bne 1b)
+	/* imm0 is now (boxed) triplet count.  */
+	/* Determine word count, add 1 (to align), and make room.  */
+	/* if count is 0, make an empty tsp frame and exit  */
+	__(cmpri(cr0,imm0,0))
+	__(add imm1,imm0,imm0)
+	__(add imm1,imm1,imm0)
+        __(dnode_align(imm1,imm1,node_size))
+	__(bne+ cr0,2f)
+	 __(TSP_Alloc_Fixed_Boxed(2*node_size))
+	 __(blr)
+2:
+	__(la imm1,tsp_frame.fixed_overhead(imm1))	/* tsp header  */
+	__(TSP_Alloc_Var_Boxed_nz(imm1,imm2))
+	__(str(imm0,tsp_frame.data_offset(tsp)))
+	__(ldr(imm2,tsp_frame.backlink(tsp)))
+	__(mr arg_x,arg_y)
+	__(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldr(imm3,tcr.tlb_limit(rcontext)))
+/* Per-symbol loop: look up the symbol's binding index, trap if it
+   exceeds the tlb limit, save the old tlb value and db-link, then
+   install the new value (unbound_marker once arg_z is exhausted). */
+3:
+        __(cmpri(cr1,arg_z,nil_value))
+	__(_car(temp0,arg_x))
+        __(ldr(imm0,symbol.binding_index(temp0)))
+	__(_cdr(arg_x,arg_x))
+        __(trlle(imm3,imm0))
+        __(ldr(imm4,tcr.tlb_pointer(rcontext))) /* Need to reload after trap  */
+        __(ldrx(temp3,imm4,imm0))
+	__(cmpri(cr0,arg_x,nil_value))
+        __(li temp2,unbound_marker)
+        __(beq cr1,4f)
+	__(_car(temp2,arg_z))
+	__(_cdr(arg_z,arg_z))
+4:      __(push(temp3,imm2))
+	__(push(imm0,imm2))
+	__(push(imm1,imm2))
+        __(strx(temp2,imm4,imm0))
+	__(mr imm1,imm2)
+	__(bne cr0,3b)
+	__(str(imm2,tcr.db_link(rcontext)))
+	__(blr)
+
+	
+/* Allocate a miscobj on the temp stack.  (Push a frame on the tsp and  */
+/* heap-cons the object if there's no room on the tstack.)  */
+/* Allocate a misc object on the temp stack: arg_y = (boxed) element
+   count, arg_z = (boxed) subtag.  Computes the byte size from the
+   subtag's element width, builds the header, and allocates a tsp frame;
+   falls through to heap allocation (_SPmisc_alloc) when the object
+   exceeds tstack_alloc_limit.  Returns the tagged object in arg_z. */
+_spentry(stack_misc_alloc)
+        __ifdef(`PPC64')
+         __(extract_unsigned_byte_bits_(imm2,arg_y,56))
+         __(unbox_fixnum(imm0,arg_z))
+         __(clrldi imm2,imm0,64-nlowtagbits)
+         __(extract_fulltag(imm1,imm0))
+         __(bne cr0,9f)
+         /* dispatch on ivector class to get bytes-per-element */
+         __(cmpdi cr2,imm2,lowtag_nodeheader)
+         __(cmpdi cr4,imm1,ivector_class_8_bit)
+         __(cmpdi cr1,imm1,ivector_class_64_bit)
+         __(cmpdi cr3,imm1,ivector_class_32_bit)
+         __(cmpdi cr5,imm1,ivector_class_other_bit)
+         __(sldi imm1,arg_y,num_subtag_bits-fixnumshift)
+         __(mr imm2,arg_y)
+         __(beq cr2,3f)
+         __(cmpdi cr2,imm0,subtag_bit_vector)
+         __(beq cr1,3f)
+         __(beq cr3,1f)
+         __(beq cr4,2f)
+         __(beq cr2,0f)
+         /* 2 bytes per element  */
+         __(srdi imm2,imm2,2)
+         __(b 3f)
+0:       /* bit-vector case  */
+         __(addi imm2,imm2,7<<fixnumshift)
+         __(srdi imm2,imm2,3+fixnumshift)
+         __(b 3f)        
+         /* 4 bytes per element  */
+1:       __(srdi imm2,imm2,1)
+         __(b 3f)
+2:       /* 1 byte per element  */
+         __(srdi imm2,imm2,3)
+3:       /* 8 bytes per element  */
+         __(or imm0,imm1,imm0)   /* imm0 = header, imm2 = byte count  */
+         __(dnode_align(imm3,imm2,tsp_frame.fixed_overhead+node_size))
+	 __(cmpldi cr0,imm3,tstack_alloc_limit) /* more than limit ?  */
+	 __(bgt- cr0,4f)
+	 __(TSP_Alloc_Var_Boxed_nz(imm3,imm4))
+        /* Slap the header on the vector, then return.  */
+	 __(str(imm0,tsp_frame.data_offset(tsp)))
+	 __(la arg_z,tsp_frame.data_offset+fulltag_misc(tsp))
+	__(blr)
+        /* Too large to safely fit on tstack.  Heap-cons the vector, but make  */
+        /* sure that there's an empty tsp frame to keep the compiler happy.  */
+4:       __(TSP_Alloc_Fixed_Unboxed(0))
+	 __(b _SPmisc_alloc)
+        __else
+	 __(rlwinm. imm2,arg_y,32-fixnumshift,0,(8+fixnumshift)-1)
+	 __(unbox_fixnum(imm0,arg_z))
+	 __(extract_fulltag(imm1,imm0))
+	 __(bne- cr0,9f)
+	 __(cmpri(cr0,imm1,fulltag_nodeheader))
+	 __(mr imm3,imm0)
+	 __(cmplri(cr1,imm0,max_32_bit_ivector_subtag))
+	 __(rlwimi imm0,arg_y,num_subtag_bits-fixnum_shift,0,31-num_subtag_bits) /* imm0 now = header  */
+	 __(mr imm2,arg_y)
+	 __(beq cr0,1f)	/* do probe if node object  */
+        		/* (fixnum element count = byte count).  */
+	 __(cmplri(cr0,imm3,max_16_bit_ivector_subtag))
+	 __(bng cr1,1f) /* do probe if 32-bit imm object  */
+	 __(cmplri(cr1,imm3,max_8_bit_ivector_subtag))
+	 __(srwi imm2,imm2,1)
+	 __(bgt cr0,3f)
+	 __(bgt cr1,1f)
+	 __(srwi imm2,imm2,1)
+/* imm2 now = byte count.  Add 4 for header, 7 to align, then  */
+/*	clear low three bits.  */
+1:
+         __(dnode_align(imm3,imm2,tsp_frame.fixed_overhead+node_size))
+	 __(cmplri(cr0,imm3,tstack_alloc_limit)) /* more than limit ?  */
+	 __(bgt- cr0,0f)
+	 __(TSP_Alloc_Var_Boxed_nz(imm3,imm4))
+
+/* Slap the header on the vector, then return.  */
+	 __(str(imm0,tsp_frame.data_offset(tsp)))
+	 __(la arg_z,tsp_frame.data_offset+fulltag_misc(tsp))
+	 __(blr)
+9: 
+
+
+
+/* Too large to safely fit on tstack.  Heap-cons the vector, but make  */
+/* sure that there's an empty tsp frame to keep the compiler happy.  */
+0:
+	 __(TSP_Alloc_Fixed_Unboxed(0))
+	 __(b _SPmisc_alloc)
+/* 64-bit-per-element cases: double-float-vector (8 bytes/element);
+   otherwise presumably a bit-count case — round bits up to bytes. */
+3:
+	 __(cmplri(imm3,subtag_double_float_vector))
+	 __(slwi imm2,arg_y,1)
+	 __(beq 1b)
+	 __(addi imm2,arg_y,7<<fixnumshift)
+	 __(srwi imm2,imm2,fixnumshift+3)
+	 __(b 1b)
+        __endif
+        
+/* subtype (boxed, of course) is vpushed, followed by nargs bytes worth of  */
+/* initial-contents.  Note that this can be used to cons any type of initialized  */
+/* node-header'ed misc object (symbols, closures, ...) as well as vector-like  */
+/* objects.  */
+/* Note that we're guaranteed to win (or force GC, or run out of memory)  */
+/* because nargs < 32K.  */
+/* Cons a gvector: subtag (boxed) is the deepest vpushed value, followed
+   by nargs bytes worth of initial contents.  Builds the header from the
+   subtag and element count, heap-allocates, then pops the initial
+   contents into the object back-to-front (the final vpop intentionally
+   discards the subtag).  Result in arg_z. */
+_spentry(gvector)
+        __(subi nargs,nargs,node_size)
+	__(ldrx(arg_z,vsp,nargs))
+	__(unbox_fixnum(imm0,arg_z))
+        __ifdef(`PPC64')
+         __(sldi imm1,nargs,num_subtag_bits-fixnum_shift)
+         __(or imm0,imm0,imm1)
+        __else
+	 __(rlwimi imm0,nargs,num_subtag_bits-fixnum_shift,0,31-num_subtag_bits)
+        __endif
+        __(dnode_align(imm1,nargs,node_size))
+	__(Misc_Alloc(arg_z,imm0,imm1))
+	__(mr imm1,nargs)
+	__(la imm2,misc_data_offset(imm1))
+	__(b 2f)
+1:
+	__(strx(temp0,arg_z,imm2))
+2:
+	__(subi imm1,imm1,node_size)
+	__(cmpri(cr0,imm1,0))
+	__(subi imm2,imm2,node_size)
+	__(vpop(temp0))         /* Note the intentional fencepost: */
+				/* discard the subtype as well.  */
+	__(bge cr0,1b)
+	__(blr)
+	
+	
+/* funcall temp0, returning multiple values if it does.  */
+/* funcall temp0, returning multiple values if it does: build a lisp
+   frame whose saved vsp accounts for any stacked (non-register) args,
+   and set the return address to ret1val_addr so a multiple-value
+   return is recognized. */
+_spentry(mvpass)
+	__(cmpri(cr0,nargs,node_size*nargregs))
+	__(mflr loc_pc)
+	__(mr imm0,vsp)
+	__(ble+ cr0,1f)
+	 __(subi imm0,imm0,node_size*nargregs)
+	 __(add imm0,imm0,nargs)
+1:
+	__(build_lisp_frame(fn,loc_pc,imm0))
+	__(ref_global(loc_pc,ret1val_addr))
+	__(li fn,0)
+	__(mtlr loc_pc)
+	__(do_funcall())
+	
+/* ret1valn returns "1 multiple value" when a called function does not  */
+/* return multiple values.  Its presence on the stack (as a return address)  */
+/* identifies the stack frame to code which returns multiple values.  */
+
+/* Single-value return through a multiple-value frame: restore the
+   caller's context from the lisp frame, push the one value (arg_z),
+   and return with nargs = 1. */
+_exportfn(C(ret1valn))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame())
+	__(vpush(arg_z))
+	__(set_nargs(1))
+	__(blr)
+	
+/* Adjust the current values to exactly imm0 (boxed count): if more
+   than imm0 were returned, drop the excess from the vstack; otherwise
+   pad with NILs until nargs == imm0. */
+_spentry(fitvals)
+	__(subf. imm0,nargs,imm0)
+	__(li imm1,nil_value)
+	__(bge 2f)
+	__(sub vsp,vsp,imm0)
+	__(blr)
+1:
+	__(subic. imm0,imm0,node_size)
+	__(vpush(imm1))
+	__(addi nargs,nargs,node_size)
+2:
+	__(bne 1b)
+	__(blr)
+
+
+/* Return the Nth of the nargs values on the vstack (the boxed index N
+   sits just above them); NIL if N is out of range.  The unsigned
+   compare makes a negative index fail as well.  Pops everything. */
+_spentry(nthvalue)
+	__(add imm0,vsp,nargs)
+	__(ldr(imm1,0(imm0)))
+	__(cmplr(imm1,nargs))	/*  do unsigned compare:	 if (n < 0) => nil.  */
+	__(li arg_z,nil_value)
+	__(neg imm1,imm1)
+	__(subi imm1,imm1,node_size)
+	__(bge 1f)
+	__(ldrx(arg_z,imm0,imm1))
+1:	
+	__(la vsp,node_size(imm0))
+	__(blr)
+        
+
+/* Come here to return multiple values when  */
+/* the caller's context isn't saved in a lisp_frame.  */
+/* lr, fn valid; temp0 = entry vsp  */
+
+/* Return nargs values (on the vstack above temp0 = entry vsp).  If the
+   return address is ret1val_addr, the caller expects multiple values:
+   copy them to the caller's frame.  Otherwise return the last value
+   (or NIL if none) as a single value.  Errors if too many values for
+   a tsp frame (see comment below). */
+_spentry(values)
+	__(mflr loc_pc)
+local_label(return_values):  
+	__(ref_global(imm0,ret1val_addr))
+	__(li arg_z,nil_value)
+	/* max tsp frame is 4K. 8+8 is overhead for save_values_to_tsp below  */
+	/* and @do_unwind in nthrowvalues in "sp_catch.s".  */
+	__(cmpri(cr2,nargs,4096-(dnode_size+dnode_size)))
+	__(cmpr(cr1,imm0,loc_pc))
+	__(cmpri(cr0,nargs,fixnum_one))
+	__(bge cr2,2f)
+	__(beq+ cr1,3f)
+	/* Single-value caller: return last value (or NIL) in arg_z. */
+	__(mtlr loc_pc)
+	__(add imm0,nargs,vsp)
+	__(blt- cr0,1f)
+	__(ldr(arg_z,-node_size(imm0)))
+1:
+	__(mr vsp,temp0)
+	__(blr)
+
+2:
+	__(uuo_interr(error_too_many_values,nargs))
+	__(b 2b)
+
+/* Return multiple values to real caller.  */
+3:
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(add imm1,nargs,vsp)
+	__(ldr(imm0,lisp_frame.savevsp(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(cmpr(cr0,imm1,imm0)) /* a fairly common case  */
+	__(mtlr loc_pc)
+	__(cmpri(cr1,nargs,fixnum_one)) /* sadly, a very common case  */
+	__(discard_lisp_frame())
+	__(beqlr cr0) /* already in the right place  */
+	__(bne cr1,4f)
+	 __(ldr(arg_z,0(vsp)))
+	 __(mr vsp,imm0)
+	 __(vpush(arg_z))
+	 __(blr)
+/* General case: copy the nargs values down to the caller's vsp. */
+4:
+	__(blt cr1,6f)
+	__(li imm2,fixnum_one)
+5:
+	__(cmpr(cr0,imm2,nargs))
+	__(addi imm2,imm2,fixnum_one)
+	__(ldru(arg_z,-node_size(imm1)))
+	__(push(arg_z,imm0))
+	__(bne cr0,5b)
+6:
+	__(mr vsp,imm0)
+	__(blr)
+
+	.globl C(nvalret)
+	
+/* Come here with saved context on top of stack: restore it, then
+   join the common return-values path in _SPvalues above. */
+_spentry(nvalret)
+C(nvalret):	
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(temp0,lisp_frame.savevsp(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame())
+        __(b local_label(return_values))
+        	
+/* Provide default (NIL) values for &optional arguments; imm0 is  */
+/* the (fixnum) upper limit on the total of required and &optional  */
+/* arguments.  nargs is preserved, all arguments wind up on the  */
+/* vstack.  */
+/* Push NIL for each unsupplied &optional: after vpushing the arg regs,
+   keep pushing NIL until the pushed count (imm1) reaches imm0 (the
+   fixnum total of required + &optional args).  nargs is preserved. */
+_spentry(default_optional_args)
+	__(cmplr( cr7,nargs,imm0))
+	__(li imm5,nil_value)
+	__(vpush_argregs())
+	__(mr imm1,nargs)
+	__(bgelr cr7)
+1:	
+	__(addi imm1,imm1,fixnum_one)
+	__(cmpr(cr0,imm1,imm0))
+	__(vpush(imm5))
+	__(bne cr0,1b)
+	__(blr)
+	
+/* Indicate whether &optional arguments were actually supplied.  nargs  */
+/* contains the actual arg count (minus the number of required args);  */
+/* imm0 contains the number of &optional args in the lambda list.  */
+/* Note that nargs may be > imm0 if &rest/&key is involved.  */
+/* vpush T or NIL for each of the imm0 &optional args, indicating
+   whether it was actually supplied (i.e. whether its index is below
+   nargs).  The branch-free sequence computes (< imm1 nargs) as a
+   0/1 mask and turns it into t_value/nil_value. */
+_spentry(opt_supplied_p)
+	__(li imm1,0)
+1:
+	/* (vpush (< imm1 nargs))  */
+        __ifdef(`PPC64')
+	 __(xor imm2,imm1,nargs)
+	 __(sradi imm2,imm2,63)
+	 __(or imm2,imm2,imm1)
+	 __(addi imm1,imm1,fixnumone)
+	 __(cmpr(cr0,imm1,imm0))
+	 __(subf imm2,nargs,imm2)
+	 __(srdi imm2,imm2,63)
+         __(mulli imm2,imm2,t_offset)
+	 __(addi imm2,imm2,nil_value)
+	 __(vpush(imm2))
+	 __(bne cr0,1b)
+	 __(blr)
+        __else
+	 __(xor imm2,imm1,nargs)
+	 __(srawi imm2,imm2,31)
+	 __(or imm2,imm2,imm1)
+	 __(addi imm1,imm1,fixnumone)
+	 __(cmpr(cr0,imm1,imm0))
+	 __(subf imm2,nargs,imm2)
+	 __(srwi imm2,imm2,31)
+	 __(insrwi imm2,imm2,1,27)
+	 __(addi imm2,imm2,nil_value)
+	 __(vpush(imm2))
+	 __(bne cr0,1b)
+	 __(blr)
+        __endif
+	
+
+
+/* If nargs is <= imm0, vpush a nil.  Otherwise, cons a list of length  */
+/* (- nargs imm0) and vpush it.  */
+/* Use this entry point to heap-cons a simple &rest arg.  */
+/* Heap-cons a simple &rest arg from ALL nargs arguments (imm0 = 0),
+   after vpushing the argument registers; vpush the resulting list. */
+_spentry(heap_rest_arg)
+	__(li imm0,0)
+	__(vpush_argregs())
+ 	__(sub imm1,nargs,imm0)
+	__(cmpri(imm1,0))
+	__(li arg_z,nil_value)
+	__(b 2f)
+1:
+	__(ldr(temp0,0(vsp)))
+	__(cmpri(imm1,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(Cons(arg_z,temp0,arg_z))
+	__(subi imm1,imm1,fixnum_one)
+2:
+	__(bgt 1b)
+	__(vpush(arg_z))
+	__(blr)
+
+	
+/* And this entry point when the argument registers haven't yet been  */
+/* vpushed (as is typically the case when required/&rest but no  */
+/* &optional/&key.)  */
+/* Heap-cons a list of the (- nargs imm0) &rest args after vpushing the
+   argument registers (required-args count already in imm0); vpush the
+   resulting list (NIL when there are no rest args). */
+_spentry(req_heap_rest_arg)
+	__(vpush_argregs())
+ 	__(sub imm1,nargs,imm0)
+	__(cmpri(imm1,0))
+	__(li arg_z,nil_value)
+	__(b 2f)
+1:
+	__(ldr(temp0,0(vsp)))
+	__(cmpri(imm1,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(Cons(arg_z,temp0,arg_z))
+	__(subi imm1,imm1,fixnum_one)
+2:
+	__(bgt 1b)
+	__(vpush(arg_z))
+	__(blr)
+
+
+/* As req_heap_rest_arg, but the argument registers have already been
+   vpushed by the caller: cons the (- nargs imm0) stacked rest args
+   into a heap list and vpush it. */
+_spentry(heap_cons_rest_arg)
+ 	__(sub imm1,nargs,imm0)
+	__(cmpri(imm1,0))
+	__(li arg_z,nil_value)
+	__(b 2f)
+1:
+	__(ldr(temp0,0(vsp)))
+	__(cmpri(imm1,fixnum_one))
+	__(la vsp,node_size(vsp))
+	__(Cons(arg_z,temp0,arg_z))
+	__(subi imm1,imm1,fixnum_one)
+2:
+	__(bgt 1b)
+	__(vpush(arg_z))
+	__(blr)
+
+	
+/* Keyword binding with no required/&optional args: imm0 (count of
+   non-keyword args) = 0, vpush the arg regs, then bind keywords. */
+_spentry(simple_keywords)
+	__(li imm0,0)
+        __(vpush_argregs())
+        __(b _SPkeyword_bind)
+                
+/* Keyword binding with imm0 already set by the caller: just vpush the
+   argument registers and fall into _SPkeyword_bind. */
+_spentry(keyword_args)
+	__(vpush_argregs())
+        __(b _SPkeyword_bind)
+
+/* Treat the last (- nargs imm0) values on the vstack as keyword/value  */
+/* pairs.  There'll be imm3 keyword arguments.  Imm2 contains flags  */
+/* that indicate whether &allow-other-keys was specified and whether  */
+/* or not to leave the keyword/value pairs on the vstack for an &rest  */
+/* argument.  Temp3 contains a vector of keyword specifiers which we  */
+/* must (in general) match.  */
+/* If the number of arguments is greater than imm0, the difference must  */
+/* be even.  */
+/* Note that the caller hasn't yet saved its caller's context and that  */
+/* the temp registers used to pass next_method_context  */
+/* (temp1) may still have "live" values in them, as does nfn (temp2).  */
+
+/* Register aliases used by keyword_bind (see the commentary above for
+   the register protocol: imm2 = flag bits, temp3 = keyword vector,
+   imm3 = keyword count; save0-2 walk the key/value pairs). */
+define(`keyword_flags',`imm2')
+define(`keyword_vector',`temp3')
+define(`keyword_count',`imm3')
+
+
+
+define(`varptr',`save0')
+define(`valptr',`save1')
+define(`limit',`save2')
+
+_spentry(keyword_bind)
+        /* Before we can really do anything, we have to  */
+        /* save the caller's context.  To do so, we need to know  */
+        /* how many args have actually been pushed.  Ordinarily, that'd  */
+        /* be "nargs", but we may have pushed more args than we received  */
+	/* if we had to default any &optionals.  */
+	/* So, the number of args pushed so far is the larger of nargs  */
+	/* and the (canonical) total of required/&optional args received.  */
+	__(cmpr(cr0,nargs,imm0))
+	__(add arg_z,vsp,nargs)
+	__(bge+ cr0,1f)
+	__(add arg_z,vsp,imm0)
+1:
+	__(build_lisp_frame(fn,loc_pc,arg_z))
+	__(mr fn,nfn)
+	/* If there are key/value pairs to consider, we slide them down  */
+	/* the vstack to make room for the value/supplied-p pairs.  */
+	/* The first step in that operation involves pushing imm3 pairs  */
+	/* of NILs.  */
+	/* If there aren't any such pairs, the first step is the last  */
+	/* step.  */
+	__(cmpri(cr0,imm3,0))
+	__(li arg_z,0)
+	__(sub imm1,nargs,imm0)
+	__(mr imm4,vsp)	/* in case odd keywords error  */
+	__(cmpri(cr1,imm1,0))
+	__(b 3f)
+2:
+	__(addi arg_z,arg_z,fixnum_one)
+	__(cmplr(cr0,arg_z,imm3))
+	__(li imm5,nil_value)
+	__(vpush(imm5))
+	__(vpush(imm5))
+3:
+	__(bne cr0,2b)
+	__(andi. arg_z,imm1,fixnum_one)
+	__(blelr cr1)	/* no keyword/value pairs to consider.  */
+	__(bne cr0,odd_keywords)
+	/* We have key/value pairs.  Move them to the top of the vstack,  */
+	/* then set the value/supplied-p vars to NIL.  */
+	/* Have to use some save regs to do this.  */
+	__(vpush(limit))
+	__(vpush(valptr))
+	__(vpush(varptr))
+	/* recompute ptr to user args in case stack overflowed  */
+	__(add imm4,vsp,imm3)
+	__(add imm4,imm4,imm3)
+	__(addi imm4,imm4,3*node_size)
+	/* error if odd number of keyword/value args  */
+	__(mr varptr,imm4)
+	__(la limit,3*node_size(vsp))
+	__(mr valptr,limit)
+	__(mr arg_z,imm1)
+/* slide loop: move each supplied key/value pair down to valptr,
+   replacing the originals with NIL (the value/supplied-p cells). */
+4:
+	__(li imm4,nil_value)
+	__(subi arg_z,arg_z,2<<fixnumshift)
+	__(cmplri(cr0,arg_z,0))
+	__(ldr(arg_x,node_size*0(varptr)))
+	__(ldr(arg_y,node_size*1(varptr)))
+	__(str(imm4,node_size*0(varptr)))
+	__(str(imm4,node_size*1(varptr)))
+	__(la varptr,node_size*2(varptr))
+	__(str(arg_x,node_size*0(valptr)))
+	__(str(arg_y,node_size*1(valptr)))
+	__(la valptr,node_size*2(valptr))
+	__(bne cr0,4b)
+
+
+        /* Now, iterate through each supplied keyword/value pair.  If  */
+        /* it's :allow-other-keys and the corresponding value is non-nil,  */
+        /* note that other keys will be allowed.  */
+        /* Find its position in the function's keywords vector.  If that's  */
+        /* nil, note that an unknown keyword was encountered.  */
+        /* Otherwise, if the keyword arg hasn't already had a value supplied,  */
+        /* supply it.  */
+        /* When done, complain if any unknown keywords were found and that  */
+        /* situation was unexpected.  */
+	__(mr imm4,valptr)
+5:
+        __(cmpri(cr0,keyword_flags,16<<fixnumshift)) /* seen :a-o-k yet ?  */
+	__(ldru(arg_z,-node_size(valptr)))
+	__(ldru(arg_y,-node_size(valptr)))
+	__(cmpri(cr1,arg_y,nil_value))
+	__(li arg_x,nrs.kallowotherkeys)
+        /* cr6_eq <- (eq current-keyword :allow-other-keys)  */
+	__(cmpr(cr6,arg_x,arg_z))
+	__(cmpr(cr7,valptr,limit))
+	__(bne cr6,6f)
+        __(bge cr0,6f) /* Already seen :allow-other-keys  */
+        __(ori keyword_flags,keyword_flags,16<<fixnumshift)
+	__(beq cr1,6f)
+	__(ori keyword_flags,keyword_flags,fixnum_one)
+6:
+	__(cmpri(cr1,imm3,0))
+	__(li imm1,misc_data_offset)
+	__(li imm0,0)
+	__(b 8f)
+/* scan the function's keyword vector for the current keyword */
+7:
+	__(addi imm0,imm0,fixnum_one)
+	__(cmpr(cr1,imm0,imm3))
+	__(ldrx(arg_x,keyword_vector,imm1))
+	__(cmpr(cr0,arg_x,arg_z))
+	__(addi imm1,imm1,fixnum_one)
+	__(bne cr0,8f)
+	/* Found it: store value/supplied-p unless already supplied. */
+	__(add imm0,imm0,imm0)
+	__(sub imm0,varptr,imm0)
+	__(ldr(arg_x,0(imm0)))
+	__(cmpri(cr0,arg_x,nil_value))
+	__(li arg_z,t_value)
+	__(bne cr0,9f)
+	__(str(arg_y,node_size(imm0)))
+	__(str(arg_z,0(imm0)))
+	__(b 9f)
+8:
+	__(bne cr1,7b)
+	/* Unknown keyword. If it was :allow-other-keys, cr6_eq will still */
+        /* be set.  */
+        __(beq cr6,9f)
+	__(ori keyword_flags,keyword_flags,2<<fixnumshift)
+9:
+	__(bne cr7,5b)
+	__(vpop(varptr))
+	__(vpop(valptr))
+	__(vpop(limit))
+	/* All keyword/value pairs have been processed.  */
+	/* If we saw an unknown keyword and didn't expect to, error.  */
+	/* Unless bit 2 is set in the fixnum in keyword_flags, discard the  */
+	/* keyword/value pairs from the vstack.  */
+	__(andi. imm0,keyword_flags,(fixnum_one)|(2<<fixnumshift))
+	__(cmpri(cr0,imm0,2<<fixnumshift))
+	__(beq- cr0,badkeys)
+	__(andi. imm2,keyword_flags,4<<fixnumshift)
+	__(bnelr cr0)
+	__(mr vsp,imm4)
+	__(blr)
+
+/* Signal an error.  We saved context on entry, so this thing doesn't  */
+/* have to.  */
+/* The "unknown keywords" error could be continuable (ignore them.)  */
+/* It might be hard to then cons an &rest arg.  */
+/* In the general case, it's hard to recover the set of args that were  */
+/* actually supplied to us ...  */
+/* For now, just cons a list out of the keyword/value pairs */
+/* that were actually provided, and signal an "invalid keywords" */
+/* error with that list as an operand.  */
+odd_keywords:
+	__(mr vsp,imm4)
+	__(mr nargs,imm1)
+	__(b 1f)
+badkeys:
+	__(sub nargs,imm4,vsp)
+1:
+	__(bl _SPconslist)
+	__(li arg_y,XBADKEYS)
+	__(set_nargs(2))
+	__(b _SPksignalerr)
+
+/*  A PowerOpen ff-call.  arg_z is either a fixnum (word-aligned entrypoint) */
+/*  or a macptr (whose address had better be word-aligned as well.)  A */
+/*  PowerOpen stack frame is on top of the stack; 4 additional words (to */
+/*  be used a a lisp frame) sit under the C frame. */
+
+/*  Since we probably can't deal with FP exceptions in foreign code, we */
+/*  disable them in the FPSCR, then check on return to see if any previously */
+/*  enabled FP exceptions occurred. */
+
+/*  As it turns out, we can share a lot of code with the eabi version of */
+/*  ff-call.  Some things that happen up to the point of call differ between */
+/*  the ABIs, but everything that happens after is the same. */
+
+        
+/* PowerOpen-ABI foreign function call; see the commentary above.
+   Saves all lisp state (save regs, allocptr/allocbase, tsp, vsp) into
+   the TCR and a lisp frame, switches the thread to the FOREIGN valence,
+   zeroes the foreign FPSCR, loads the C argument registers from the
+   C frame on sp, and calls through ctr.  On return, rebuilds lisp
+   state, records any FP exceptions that occurred in foreign code, and
+   (Darwin/PPC64 only) boxes and signals a caught ObjC exception. */
+_spentry(poweropen_ffcall)
+LocalLabelPrefix`'ffcall:                
+	__(mflr loc_pc)
+	__(vpush_saveregs())		/* Now we can use save0-save7 to point to stacks  */
+	__(mr save0,rcontext)	/* or address globals.  */
+	__(extract_typecode(imm0,arg_z))
+	__(cmpri(cr7,imm0,subtag_macptr))
+	__(ldr(save1,0(sp)))	/* bottom of reserved lisp frame  */
+	__(la save2,-lisp_frame.size(save1))	/* top of lisp frame */
+        __(zero_doublewords save2,0,lisp_frame.size)
+	__(str(save1,lisp_frame.backlink(save2)))
+	__(str(save2,c_frame.backlink(sp)))
+	__(str(fn,lisp_frame.savefn(save2)))
+	__(str(loc_pc,lisp_frame.savelr(save2)))
+	__(str(vsp,lisp_frame.savevsp(save2)))
+        __(mr nargs,arg_z)
+       	__(bne cr7,1f)
+	/* arg_z is a macptr: the entrypoint is its address. */
+	__(ldr(nargs,macptr.address(arg_z)))
+1:
+	__(ldr(save3,tcr.cs_area(rcontext)))
+	__(str(save2,area.active(save3)))
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(str(rzero,tcr.ffi_exception(rcontext)))
+	__(mffs f0)
+	__(stfd f0,tcr.lisp_fpscr(rcontext))	/* remember lisp's fpscr  */
+	__(mtfsf 0xff,fp_zero)	/* zero foreign fpscr  */
+	__(li r4,TCR_STATE_FOREIGN)
+	__(str(r4,tcr.valence(rcontext)))
+        __ifdef(`rTOC')
+         /* function descriptor: load TOC and real entrypoint */
+         __(ld rTOC,8(nargs))
+         __(ld nargs,0(nargs))
+        __else
+	 __(li rcontext,0)
+        __endif
+LocalLabelPrefix`'ffcall_setup: 
+	__(mtctr nargs)
+	__(ldr(r3,c_frame.param0(sp)))
+	__(ldr(r4,c_frame.param1(sp)))
+	__(ldr(r5,c_frame.param2(sp)))
+	__(ldr(r6,c_frame.param3(sp)))
+	__(ldr(r7,c_frame.param4(sp)))
+	__(ldr(r8,c_frame.param5(sp)))
+	__(ldr(r9,c_frame.param6(sp)))
+	__(ldr(r10,c_frame.param7(sp)))
+	/* Darwin is allegedly very picky about what register points */
+	/* to the function on entry.  */
+	__(mr r12,nargs)
+LocalLabelPrefix`'ffcall_setup_end: 
+LocalLabelPrefix`'ffcall_call:
+	__(bctrl)
+LocalLabelPrefix`'ffcall_call_end:
+	/* C should have preserved save0 (= rcontext) for us.  */
+	__(ldr(sp,0(sp)))
+	__(mr imm2,save0)
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(li rzero,0)
+	__(mr loc_pc,rzero)
+	/* Reset all boxed lisp registers to safe (NIL/0) values before
+	   re-entering the LISP valence. */
+	__(li arg_x,nil_value)
+	__(li arg_y,nil_value)
+	__(li arg_z,nil_value)
+	__(li temp0,nil_value)
+	__(li temp1,nil_value)
+	__(li temp2,nil_value)
+	__(li temp3,nil_value)
+	__(li fn,nil_value)
+	__(mr rcontext,imm2)
+	__(li imm2,TCR_STATE_LISP)
+	__(ldr(tsp,tcr.save_tsp(rcontext)))
+        __(li save0,0)
+        __(li save1,0)
+        __(li save2,0)
+        __(li save3,0)
+        __(li save4,0)
+        __(li save5,0)
+        __(li save6,0)
+        __(li save7,0)
+        __(li allocptr,-dnode_size)
+        __(li allocbase,-dnode_size)
+	__(str(imm2,tcr.valence(rcontext)))	
+	__(vpop_saveregs())
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	/* Capture the foreign FPSCR (exception bits) and restore lisp's. */
+	__(mffs f0)
+	__(stfd f0,8(sp))
+	__(lwz imm3,12(sp))	/* imm3 = FPSCR after call  */
+        __(clrrwi imm2,imm3,8)
+	__(discard_lisp_frame())
+	__(str(imm2,tcr.ffi_exception(rcontext)))
+	__(lfd f0,tcr.lisp_fpscr(rcontext))
+	__(mtfsf 0xff,f0)
+	__(check_pending_interrupt(`cr1'))
+        __(mtxer rzero)
+        __(mtctr rzero)
+        __ifdef(`PPC64')
+         __ifdef(`DARWIN')
+          __(li imm3,1<<TCR_FLAG_BIT_FOREIGN_EXCEPTION)
+          __(ld imm4,tcr.flags(rcontext))
+          __(and. imm3,imm3,imm4)
+          __(bne cr0,0f)
+         __endif
+        __endif
+	__(blr)
+        __ifdef(`PPC64')
+         __ifdef(`DARWIN')
+0:        /* Got here because TCR_FLAG_BIT_FOREIGN_EXCEPTION */
+          /* was set in tcr.flags.  Clear that bit. */
+          __(andc imm4,imm4,imm3)
+          __(std imm4,tcr.flags(rcontext))
+ 	  /* Unboxed foreign exception (likely an NSException) in %imm0. */
+	  /* Box it, then signal a lisp error. */
+          __(li imm1,macptr_header)
+          __(Misc_Alloc_Fixed(arg_z,imm1,macptr.size))
+          __(std imm0,macptr.address(arg_z))
+          __(li arg_y,XFOREIGNEXCEPTION)
+          __(set_nargs(2))
+          __(b _SPksignalerr)
+        /* Handle exceptions, for ObjC 2.0 */
+LocalLabelPrefix`'ffcallLandingPad:      
+          __(mr save1,r3)
+          __(cmpdi r4,1)
+          __(beq 1f)
+LocalLabelPrefix`'ffcallUnwindResume:
+          __(ref_global(r12,unwind_resume))
+          __(mtctr r12)
+          __(bctrl)
+LocalLabelPrefix`'ffcallUnwindResume_end:         
+1:        __(mr r3,save1)
+LocalLabelPrefix`'ffcallBeginCatch:
+          __(ref_global(r12,objc2_begin_catch))
+          __(mtctr r12)
+          __(bctrl)
+LocalLabelPrefix`'ffcallBeginCatch_end:          
+          __(ld save1,0(r3)) /* indirection is necessary because we don't provide type info in lsda */
+LocalLabelPrefix`'ffcallEndCatch:  
+          __(ref_global(r12,objc2_end_catch))
+          __(mtctr r12)
+          __(bctrl)              
+LocalLabelPrefix`'ffcallEndCatch_end:     
+          __(ref_global(r12,get_tcr))
+          __(mtctr r12)
+          __(li imm0,1)       
+	  __(bctrl)
+          /* Flag the foreign exception in tcr.flags; the normal return
+             path above notices the bit and signals a lisp error. */
+          __(ld imm2,tcr.flags(imm0))
+          __(ori imm2,imm2,1<<TCR_FLAG_BIT_FOREIGN_EXCEPTION)
+          __(std imm2,tcr.flags(imm0))
+          __(mr imm0,save1)
+	  __(b LocalLabelPrefix`'ffcall_call_end)
+LocalLabelPrefix`'ffcall_end:   
+
+        /* DWARF-style exception table (LSDA) describing the call-site
+           regions above, so the ObjC 2.0 unwinder can find the
+           ffcallLandingPad for exceptions raised during the bctrl. */
+        	.section __DATA,__gcc_except_tab
+	  .align 3
+LLSDA1:
+	  .byte	0xff	/* @LPStart format (omit) */
+	  .byte	0x0	/* @TType format (absolute) */
+	  .byte	0x4d	/* uleb128 0x4d; @TType base offset */
+	  .byte	0x3	/* call-site format (udata4) */
+	  .byte	0x41	/* uleb128 0x41; Call-site table length */
+	
+	  .long Lffcall_setup-Lffcall	/* region 0 start */
+	  .long Lffcall_setup_end-Lffcall_setup	/* length */
+	  .long	0x0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+        
+	  .long Lffcall_call-Lffcall	/* region 1 start */
+	  .long Lffcall_call_end-Lffcall_call	/* length */
+	  .long LffcallLandingPad-Lffcall	/* landing pad */
+	  .byte	0x1	/* uleb128 0x1; action */
+        
+	  .long LffcallUnwindResume-Lffcall	/* region 2 start */
+	  .long LffcallUnwindResume_end-LffcallUnwindResume	/* length */
+	  .long	0x0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+	
+	  .long LffcallBeginCatch-Lffcall	/* region 3 start */
+	  .long LffcallBeginCatch_end-LffcallBeginCatch	/* length */
+	  .long 0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+        
+	  .long LffcallEndCatch-Lffcall
+	  .long LffcallEndCatch_end-LffcallEndCatch	/* length */
+	  .long	0x0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+        
+	  .byte	0x1	/* Action record table */
+	  .byte	0x0
+	  .align 3
+	  .quad	0       /* _OBJC_EHTYPE_$_NSException */
+          .text
+         __endif
+        __endif
+
+/* Just like poweropen_ffcall, only we save all argument(result)
+   registers in a buffer passed in arg_y on entry before returning
+   to lisp.  (We have to do this in the ffcall glue here, because
+   r9 and r10 - at least - are overloaded as dedicated lisp registers */
+_spentry(poweropen_ffcall_return_registers)
+LocalLabelPrefix`'ffcall_return_registers:                
+	__(mflr loc_pc)
+	__(vpush_saveregs())		/* Now we can use save0-save7 to point to stacks  */
+        __(ldr(save7,macptr.address(arg_y)))	/* save7 = buffer for the result registers */
+	__(mr save0,rcontext)	/* or address globals.  */
+	__(extract_typecode(imm0,arg_z))
+	__(cmpri(cr7,imm0,subtag_macptr))
+	__(ldr(save1,0(sp)))	/* bottom of reserved lisp frame  */
+	__(la save2,-lisp_frame.size(save1))	/* top of lisp frame */
+        __(zero_doublewords save2,0,lisp_frame.size)
+	__(str(save1,lisp_frame.backlink(save2)))
+	__(str(save2,c_frame.backlink(sp)))
+	__(str(fn,lisp_frame.savefn(save2)))
+	__(str(loc_pc,lisp_frame.savelr(save2)))
+	__(str(vsp,lisp_frame.savevsp(save2)))
+        __(mr nargs,arg_z)
+       	__(bne cr7,1f)
+	__(ldr(nargs,macptr.address(arg_z)))	/* arg_z was a macptr: use its address as the entrypoint */
+1:
+	/* Save lisp thread state in the TCR before running foreign code. */
+	__(ldr(save3,tcr.cs_area(rcontext)))
+	__(str(save2,area.active(save3)))
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(str(rzero,tcr.ffi_exception(rcontext)))
+	__(mffs f0)
+	__(stfd f0,tcr.lisp_fpscr(rcontext))	/* remember lisp's fpscr  */
+	__(mtfsf 0xff,fp_zero)	/* zero foreign fpscr  */
+	__(li r4,TCR_STATE_FOREIGN)
+	__(str(r4,tcr.valence(rcontext)))
+        __ifdef(`rTOC')
+         __(ld rTOC,8(nargs))	/* function descriptor: load TOC and code address */
+         __(ld nargs,0(nargs))
+        __else
+	 __(li rcontext,0)
+        __endif
+LocalLabelPrefix`'ffcall_return_registers_setup: 
+	/* Load the (up to) 8 GPR arguments from the C frame and call via CTR. */
+	__(mtctr nargs)
+	__(ldr(r3,c_frame.param0(sp)))
+	__(ldr(r4,c_frame.param1(sp)))
+	__(ldr(r5,c_frame.param2(sp)))
+	__(ldr(r6,c_frame.param3(sp)))
+	__(ldr(r7,c_frame.param4(sp)))
+	__(ldr(r8,c_frame.param5(sp)))
+	__(ldr(r9,c_frame.param6(sp)))
+	__(ldr(r10,c_frame.param7(sp)))
+	/* Darwin is allegedly very picky about what register points */
+	/* to the function on entry.  */
+	__(mr r12,nargs)
+LocalLabelPrefix`'ffcall_return_registers_setup_end: 
+LocalLabelPrefix`'ffcall_return_registers_call:
+	__(bctrl)
+LocalLabelPrefix`'ffcall_return_registers_call_end:
+	/* Save all GPR (r3-r10) and FPR (f1-f13) result registers into */
+	/* the buffer that arg_y pointed to on entry. */
+        __(str(r3,0*node_size(save7)))        
+        __(str(r4,1*node_size(save7)))        
+        __(str(r5,2*node_size(save7)))        
+        __(str(r6,3*node_size(save7)))        
+        __(str(r7,4*node_size(save7)))        
+        __(str(r8,5*node_size(save7)))        
+        __(str(r9,6*node_size(save7)))        
+        __(str(r10,7*node_size(save7)))
+        __(stfd f1,((8*node_size)+(0*8))(save7))
+        __(stfd f2,((8*node_size)+(1*8))(save7))
+        __(stfd f3,((8*node_size)+(2*8))(save7))
+        __(stfd f4,((8*node_size)+(3*8))(save7))
+        __(stfd f5,((8*node_size)+(4*8))(save7))
+        __(stfd f6,((8*node_size)+(5*8))(save7))
+        __(stfd f7,((8*node_size)+(6*8))(save7))
+        __(stfd f8,((8*node_size)+(7*8))(save7))
+        __(stfd f9,((8*node_size)+(8*8))(save7))
+        __(stfd f10,((8*node_size)+(9*8))(save7))
+        __(stfd f11,((8*node_size)+(10*8))(save7))
+        __(stfd f12,((8*node_size)+(11*8))(save7))
+        __(stfd f13,((8*node_size)+(12*8))(save7))
+	/* C should have preserved save0 (= rcontext) for us.  */
+	/* Pop the C frame and restore lisp register conventions: */
+	/* NIL out the node registers, reload thread state from the TCR. */
+	__(ldr(sp,0(sp)))
+	__(mr imm2,save0)
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(li rzero,0)
+	__(mr loc_pc,rzero)
+	__(li arg_x,nil_value)
+	__(li arg_y,nil_value)
+	__(li arg_z,nil_value)
+	__(li temp0,nil_value)
+	__(li temp1,nil_value)
+	__(li temp2,nil_value)
+	__(li temp3,nil_value)
+	__(li fn,nil_value)
+	__(mr rcontext,imm2)
+	__(li imm2,TCR_STATE_LISP)
+	__(ldr(tsp,tcr.save_tsp(rcontext)))
+        __(li save0,0)
+        __(li save1,0)
+        __(li save2,0)
+        __(li save3,0)
+        __(li save4,0)
+        __(li save5,0)
+        __(li save6,0)
+        __(li save7,0)
+        __(li allocptr,-dnode_size)
+        __(li allocbase,-dnode_size)
+	__(str(imm2,tcr.valence(rcontext)))	
+	__(vpop_saveregs())
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	/* Record FP exception bits raised by the foreign call, then */
+	/* restore lisp's saved FPSCR. */
+	__(mffs f0)
+	__(stfd f0,8(sp))
+	__(lwz imm3,12(sp))	/* imm3 = FPSCR after call  */
+        __(clrrwi imm2,imm3,8)
+	__(discard_lisp_frame())
+	__(str(imm2,tcr.ffi_exception(rcontext)))
+	__(lfd f0,tcr.lisp_fpscr(rcontext))
+	__(mtfsf 0xff,f0)
+	__(check_pending_interrupt(`cr1'))
+        __(mtxer rzero)
+        __(mtctr rzero)
+        __ifdef(`DARWIN')
+         __ifdef(`PPC64')
+          /* Did foreign code throw an exception at us ? */
+          __(li imm3,1<<TCR_FLAG_BIT_FOREIGN_EXCEPTION)
+          __(ld imm4,tcr.flags(rcontext))
+          __(and. imm3,imm3,imm4)
+          __(bne 0f)
+         __endif
+        __endif
+	__(blr)
+
+        __ifdef(`DARWIN')
+         __ifdef(`PPC64')
+0:        /* Got here because TCR_FLAG_BIT_FOREIGN_EXCEPTION */
+          /* was set in tcr.flags.  Clear that bit. */
+          __(andc imm4,imm4,imm3)
+          __(std imm4,tcr.flags(rcontext))
+ 	  /* Unboxed foreign exception (likely an NSException) in %imm0. */
+	  /* Box it, then signal a lisp error. */
+          __(li imm1,macptr_header)
+          __(Misc_Alloc_Fixed(arg_z,imm1,macptr.size))
+          __(std imm0,macptr.address(arg_z))
+          __(li arg_y,XFOREIGNEXCEPTION)
+          __(set_nargs(2))
+          __(b _SPksignalerr)
+        /* Handle exceptions, for ObjC 2.0 */
+LocalLabelPrefix`'ffcall_return_registersLandingPad:      
+          __(mr save1,r3)
+          __(cmpdi r4,1)
+          __(beq 1f)
+LocalLabelPrefix`'ffcall_return_registersUnwindResume:
+          __(ref_global(r12,unwind_resume))
+          __(mtctr r12)
+          __(bctrl)
+LocalLabelPrefix`'ffcall_return_registersUnwindResume_end:         
+1:        __(mr r3,save1)
+LocalLabelPrefix`'ffcall_return_registersBeginCatch:
+          __(ref_global(r12,objc2_begin_catch))
+          __(mtctr r12)
+          __(bctrl)
+LocalLabelPrefix`'ffcall_return_registersBeginCatch_end:          
+          __(ld save1,0(r3)) /* indirection is necessary because we don't provide type info in lsda */
+LocalLabelPrefix`'ffcall_return_registersEndCatch:  
+          __(ref_global(r12,objc2_end_catch))
+          __(mtctr r12)
+          __(bctrl)              
+LocalLabelPrefix`'ffcall_return_registersEndCatch_end:     
+          __(ref_global(r12,get_tcr))
+          __(mtctr r12)
+          __(li imm0,1)       
+	  __(bctrl)
+          /* Mark the exception as foreign in tcr.flags, then resume */
+          /* at the post-call code with the exception object in imm0. */
+          __(ld imm2,tcr.flags(imm0))
+          __(ori imm2,imm2,1<<TCR_FLAG_BIT_FOREIGN_EXCEPTION)
+          __(std imm2,tcr.flags(imm0))
+          __(mr imm0,save1)
+	  __(b LocalLabelPrefix`'ffcall_return_registers_call_end)
+LocalLabelPrefix`'ffcall_return_registers_end:
+	  /* LSDA: EH call-site table describing the labeled regions above. */
+	  .section __DATA,__gcc_except_tab
+	  .align 3
+LLSDA2:
+	  .byte	0xff	/* @LPStart format (omit) */
+  	  .byte	0x0	/* @TType format (absolute) */
+	  .byte	0x4d	/* uleb128 0x4d; @TType base offset */
+	  .byte	0x3	/* call-site format (udata4) */
+	  .byte	0x41	/* uleb128 0x41; Call-site table length */
+	
+	  .long Lffcall_return_registers_setup-Lffcall_return_registers	/* region 0 start */
+	  .long Lffcall_return_registers_setup_end-Lffcall_return_registers_setup	/* length */
+	  .long	0x0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+        
+	  .long Lffcall_return_registers_call-Lffcall_return_registers	/* region 1 start */
+	  .long Lffcall_return_registers_call_end-Lffcall_return_registers_call	/* length */
+	  .long Lffcall_return_registersLandingPad-Lffcall_return_registers	/* landing pad */
+	  .byte	0x1	/* uleb128 0x1; action */
+        
+	  .long Lffcall_return_registersUnwindResume-Lffcall_return_registers	/* region 2 start */
+	  .long Lffcall_return_registersUnwindResume_end-Lffcall_return_registersUnwindResume	/* length */
+	  .long	0x0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+	
+	  .long Lffcall_return_registersBeginCatch-Lffcall_return_registers	/* region 3 start */
+	  .long Lffcall_return_registersBeginCatch_end-Lffcall_return_registersBeginCatch	/* length */
+	  .long 0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+        
+	  .long Lffcall_return_registersEndCatch-Lffcall_return_registers	/* region 4 start */
+	  .long Lffcall_return_registersEndCatch_end-Lffcall_return_registersEndCatch	/* length */
+	  .long	0x0	/* landing pad */
+	  .byte	0x0	/* uleb128 0x0; action */
+	  .byte	0x1	/* Action record table */
+	  .byte	0x0
+	  .align 3
+	  .quad	0       /* _OBJC_EHTYPE_$_NSException */
+          .text
+         __endif
+        __endif
+                      
+
+        	
+/* Signal an error synchronously, via %ERR-DISP.  */
+/* If %ERR-DISP isn't fbound, it'd be nice to print a message  */
+/* on the C runtime stderr.  */
+
+_spentry(ksignalerr)
+	__(li fname,nrs.errdisp)	/* fname = the %ERR-DISP symbol */
+	__(jump_fname)
+        
+/* As in the heap-consed cases, only stack-cons the &rest arg  */
+_spentry(stack_rest_arg)
+	__(li imm0,0)	/* no required/optional args precede the &rest arg */
+	__(vpush_argregs())
+        __(b _SPstack_cons_rest_arg)
+
+	
+/* Like stack_rest_arg, but imm0 (count of args preceding the &rest */
+/* arg) is presumably already set up by the caller - cf. stack_rest_arg. */
+_spentry(req_stack_rest_arg)
+	__(vpush_argregs())
+        __(b _SPstack_cons_rest_arg)
+	
+/* Cons the &rest arg on the temp stack.  imm0 = (boxed) count of args */
+/* that precede the &rest arg; the rest args have been vpushed. */
+_spentry(stack_cons_rest_arg)
+	__(sub imm1,nargs,imm0)	/* imm1 = (boxed) number of rest args */
+	__(cmpri(cr0,imm1,0))
+	__(cmpri(cr1,imm1,(4096-dnode_size)/2))
+	__(li arg_z,nil_value)
+	__(ble cr0,2f)		/* always temp-push something.  */
+	__(bge cr1,3f)
+	__(add imm1,imm1,imm1)	/* imm1 = size of cons cells needed */
+	__(dnode_align(imm2,imm1,tsp_frame.fixed_overhead))
+	__(TSP_Alloc_Var_Boxed(imm2,imm3))
+	__(la imm0,tsp_frame.data_offset+fulltag_cons(tsp))
+	/* Pop the vpushed args, consing them onto arg_z back-to-front. */
+1:
+	__(cmpri(cr0,imm1,cons.size))	/* last time through ?  */
+	__(subi imm1,imm1,cons.size)
+	__(vpop(arg_x))
+	__(_rplacd(imm0,arg_z))
+	__(_rplaca(imm0,arg_x))
+	__(mr arg_z,imm0)
+	__(la imm0,cons.size(imm0))
+	__(bne cr0,1b)
+	__(vpush(arg_z))
+	__(blr)
+	/* Empty &rest arg: push an empty temp frame, vpush NIL. */
+2:
+	__(TSP_Alloc_Fixed_Unboxed(0))
+	__(vpush(arg_z))
+	__(blr)
+	/* Too big for the temp stack: cons the &rest arg on the heap. */
+3:
+	__(TSP_Alloc_Fixed_Unboxed(0))
+	__(b _SPheap_cons_rest_arg)
+
+/* This was trying to swap exception ports to work around Darwin JNI lossage.
+   It's tended to bitrot, and we have another way to do that now.
+*/        
+_spentry(poweropen_callbackX)
+        .long 0x7c800008        /* debug trap; entering this entry point traps immediately */
+	
+/* Prepend all but the first two (closure code, fn) and last two  */
+/* (function name, lfbits) elements of nfn to the "arglist".  */
+/* Doing things this way (the same way that 68K MCL does) lets  */
+/* functions which take "inherited arguments" work consistently  */
+/* even in cases where no closure object is created.  */
+_spentry(call_closure)        
+	__(cmpri(cr0,nargs,nargregs<<fixnumshift))	/* cr0: were any args vpushed ? */
+	__(cmpri(cr1,nargs,fixnum_one))	/* cr1: exactly one register arg ? */
+	__(vector_length(imm0,nfn,imm0))
+	__(subi imm0,imm0,4<<fixnumshift) /* imm0 = inherited arg count  */
+	__(li imm1,misc_data_offset+(2<<fixnumshift)) /* point to 1st arg  */
+	__(li imm4,nil_value)
+	__(ble+ cr0,local_label(no_insert))
+	/* Some arguments have already been vpushed.  Vpush imm0's worth  */
+	/* of NILs, copy those arguments that have already been vpushed from  */
+	/* the old TOS to the new, then insert all of the inherited args  */
+	/* and go to the function.  */
+	__(li imm2,0)
+local_label(push_nil_loop):
+	__(addi imm2,imm2,fixnum_one)
+	__(cmpr(cr2,imm2,imm0))
+	__(vpush(imm4))
+	__(bne cr2,local_label(push_nil_loop))
+
+	/* Copy the already-vpushed args down over the NILs just pushed. */
+	__(mr imm3,vsp)
+	__(add imm4,vsp,imm0)
+	__(subi imm2,nargs,nargregs<<fixnumshift)
+local_label(copy_already_loop):
+	__(cmpri(cr2,imm2,fixnum_one))
+	__(subi imm2,imm2,fixnum_one)
+	__(ldr(fname,0(imm4)))
+	__(addi imm4,imm4,fixnum_one)
+	__(str(fname,0(imm3)))
+	__(addi imm3,imm3,fixnum_one)
+	__(bne cr2,local_label(copy_already_loop))
+
+	/* Push the inherited args (from nfn) into the hole just opened. */
+local_label(insert_loop):
+	__(cmpri(cr2,imm0,fixnum_one))
+	__(ldrx(fname,nfn,imm1))
+	__(addi imm1,imm1,fixnum_one)
+	__(addi nargs,nargs,fixnum_one)
+	__(subi imm0,imm0,fixnum_one)
+	__(push(fname,imm4))
+	__(bne cr2,local_label(insert_loop))
+	__(b local_label(go))
+local_label(no_insert):
+	/* nargregs or fewer args were already vpushed.  */
+	/* if exactly nargregs, vpush remaining inherited vars.  */
+	__(add imm2,imm1,imm0)	/* imm2 = offset just past last inherited arg */
+	__(bne cr0,local_label(set_regs))
+local_label(vpush_remaining):
+	__(cmpri(cr2,imm0,fixnum_one))
+	__(ldrx(fname,nfn,imm1))
+	__(addi imm1,imm1,fixnum_one)
+	__(vpush(fname))
+	__(subi imm0,imm0,fixnum_one)
+	__(addi nargs,nargs,fixnum_one)
+	__(bne cr2,local_label(vpush_remaining))
+	__(b local_label(go))
+local_label(set_regs):
+	/* if nargs was > 1 (and we know that it was < 3), it must have  */
+	/* been 2.  Set arg_x, then vpush the remaining args.  */
+	__(ble cr1,local_label(set_y_z))
+local_label(set_arg_x):
+	__(subi imm0,imm0,fixnum_one)
+	__(cmpri(cr0,imm0,0))
+	__(subi imm2,imm2,fixnum_one)
+	__(ldrx(arg_x,nfn,imm2))
+	__(addi nargs,nargs,fixnum_one)
+	__(bne cr0,local_label(vpush_remaining))
+	__(b local_label(go))
+	/* Maybe set arg_y or arg_z, preceding args  */
+local_label(set_y_z):
+	__(bne cr1,local_label(set_arg_z))
+	/* Set arg_y, maybe arg_x, preceding args  */
+local_label(set_arg_y):
+	__(subi imm0,imm0,fixnum_one)
+	__(cmpri(cr0,imm0,0))
+	__(subi imm2,imm2,fixnum_one)
+	__(ldrx(arg_y,nfn,imm2))
+	__(addi nargs,nargs,fixnum_one)
+	__(bne cr0,local_label(set_arg_x))
+	__(b local_label(go))
+local_label(set_arg_z):
+	__(subi imm0,imm0,fixnum_one)
+	__(cmpri(cr0,imm0,0))
+	__(subi imm2,imm2,fixnum_one)
+	__(ldrx(arg_z,nfn,imm2))
+	__(addi nargs,nargs,fixnum_one)
+	__(bne cr0,local_label(set_arg_y))
+
+local_label(go):
+	/* Jump to the actual function (element 1 of the closure vector). */
+	__(vrefr(nfn,nfn,1))
+	__(ldr(loc_pc,_function.codevector(nfn)))
+	__(mtctr loc_pc)
+	__(bctr)
+        
+/* This  treats anything that's either */
+/* #+ppc32 (signed-byte 32), (unsigned-byte 32) */
+/* #+ppc64 (signed-byte 64), (unsigned-byte 64) */
+/* as if it denoted a "natural-sized" value.  */
+/* Argument in arg_z, result in imm0.  May use temp0.  */
+_spentry(getxlong)
+        __ifdef(`PPC64')
+        /* No PPC64 version of this entry point (empty). */
+        __else
+        __(extract_typecode(imm0,arg_z))
+	__(cmpri(cr0,imm0,tag_fixnum))
+	__(cmpri(cr1,imm0,subtag_bignum))
+	__(unbox_fixnum(imm0,arg_z))
+	__(beqlr cr0)	/* fixnum: the unboxed value is the answer */
+	__(mr temp0,arg_z)
+	__(bne- cr1,local_label(error))	/* neither fixnum nor bignum */
+	__(getvheader(imm0,temp0))
+	__(cmpri(cr1,imm0,one_digit_bignum_header))
+	__(cmpri(cr7,imm0,two_digit_bignum_header))
+	__(beq cr1,local_label(big1))
+        __(beq cr7,local_label(big2))
+local_label(error):
+	__(uuo_interr(error_object_not_integer,arg_z)) /* not quite right but what 68K MCL said  */
+
+
+
+local_label(big2):
+	__(vrefr(imm0,temp0,1)) /* sign digit must be 0  */
+	__(cmpri(imm0,0))
+	__(bne local_label(error))
+local_label(big1):
+	__(vrefr(imm0,temp0,0))	/* result = low digit of the bignum */
+	__(blr)
+
+
+        __endif
+                
+/* Everything up to the last arg has been vpushed, nargs is set to  */
+/* the (boxed) count of things already pushed.  */
+/* On exit, arg_x, arg_y, arg_z, and nargs are set as per a normal  */
+/* function call (this may require vpopping a few things.)  */
+/* ppc2-invoke-fn assumes that temp1 is preserved here.  */
+_spentry(spreadargz)
+        __ifdef(`PPC64')
+	 __(extract_fulltag(imm1,arg_z))
+	 __(cmpri(cr1,imm1,fulltag_cons))
+        __else
+	 __(extract_lisptag(imm1,arg_z))
+	 __(cmpri(cr1,imm1,tag_list))
+        __endif
+	__(cmpri(cr0,arg_z,nil_value))
+	__(li imm0,0)	/* imm0 = (boxed) count of args pushed from the list */
+	__(mr arg_y,arg_z)		/*  save in case of error  */
+	__(beq cr0,2f)
+1:
+	__(bne- cr1,3f)	/* not a proper list: complain */
+	__(_car(arg_x,arg_z))
+	__(_cdr(arg_z,arg_z))
+	__(cmpri(cr0,arg_z,nil_value))
+        __ifdef(`PPC64')
+	 __(extract_fulltag(imm1,arg_z))
+	 __(cmpri(cr1,imm1,fulltag_cons))
+        __else
+	 __(extract_lisptag(imm1,arg_z))
+	 __(cmpri(cr1,imm1,tag_list))
+        __endif
+	__(vpush(arg_x))
+	__(addi imm0,imm0,fixnum_one)
+	__(bne cr0,1b)
+2:
+	/* Pop up to three args back into the argument registers. */
+	__(add. nargs,nargs,imm0)
+	__(cmpri(cr2,nargs,2<<fixnumshift))
+	__(beqlr- cr0)	/* zero args total: done */
+	__(vpop(arg_z))
+	__(bltlr cr2)
+	__(vpop(arg_y))
+	__(beqlr cr2)
+	__(vpop(arg_x))
+	__(blr)
+        /*  Discard whatever's been vpushed already, complain.  */
+3:	
+	__(add vsp,vsp,imm0)
+	__(mr arg_z,arg_y)		/* recover original arg_z  */
+	__(li arg_y,XNOSPREAD)
+	__(set_nargs(2))
+	__(b _SPksignalerr)
+        
+/* Tail-recursively funcall temp0.  */
+/* Pretty much the same as the tcallsym* cases above.  */
+_spentry(tfuncallgen)
+	__(cmpri(cr0,nargs,nargregs<<fixnumshift))	/* any args vpushed ? */
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(mtlr loc_pc)
+	__(ble cr0,2f)
+	__(ldr(imm0,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	/* can use nfn (= temp2) as a temporary  */
+	/* Slide the vpushed args down to the base of the caller's frame. */
+	__(subi imm1,nargs,nargregs<<fixnumshift)
+	__(add imm1,imm1,vsp)
+1:
+	__(ldru(temp2,-node_size(imm1)))
+	__(cmpr(cr0,imm1,vsp))
+	__(push(temp2,imm0))
+	__(bne cr0,1b)
+	__(mr vsp,imm0)
+	__(do_funcall())
+2:
+	/* No vpushed args: just restore vsp and funcall. */
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	__(do_funcall())
+
+
+/* Some args were vpushed.  Slide them down to the base of  */
+/* the current frame, then do funcall.  */
+_spentry(tfuncallslide)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(imm0,lisp_frame.savevsp(sp)))	/* imm0 = caller's saved vsp */
+	__(discard_lisp_frame())
+	/* can use nfn (= temp2) as a temporary  */
+	__(subi imm1,nargs,nargregs<<fixnumshift)
+	__(add imm1,imm1,vsp)	/* imm1 = source just past the vpushed args */
+	__(mtlr loc_pc)
+1:
+	__(ldru(temp2,-node_size(imm1)))
+	__(cmpr(cr0,imm1,vsp))
+	__(push(temp2,imm0))
+	__(bne cr0,1b)
+	__(mr vsp,imm0)
+	__(do_funcall())
+
+/* No args were vpushed; recover saved context & do funcall  */
+_spentry(tfuncallvsp)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(mtlr loc_pc)
+	__(discard_lisp_frame())
+	__(do_funcall())
+        
+/* Tail-recursively call the (known symbol) in fname.  */
+/* In the general case, we don't know if any args were  */
+/* vpushed or not.  If so, we have to "slide" them down  */
+/* to the base of the frame.  If not, we can just restore  */
+/* vsp, lr, fn from the saved lisp frame on the control stack.  */
+_spentry(tcallsymgen)
+	__(cmpri(cr0,nargs,nargregs<<fixnumshift))	/* any args vpushed ? */
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(mtlr loc_pc)
+	__(ble cr0,2f)
+
+	__(ldr(imm0,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	/* can use nfn (= temp2) as a temporary  */
+	/* Slide the vpushed args down to the base of the caller's frame. */
+	__(subi imm1,nargs,nargregs<<fixnumshift)
+	__(add imm1,imm1,vsp)
+1:
+	__(ldru(temp2,-node_size(imm1)))
+	__(cmpr(cr0,imm1,vsp))
+	__(push(temp2,imm0))
+	__(bne cr0,1b)
+	__(mr vsp,imm0)
+	__(jump_fname)
+	
+2:		
+	/* No vpushed args: just restore vsp and jump. */
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	__(jump_fname)
+	
+	
+/* Some args were vpushed.  Slide them down to the base of  */
+/* the current frame, then do funcall.  */
+_spentry(tcallsymslide)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(imm0,lisp_frame.savevsp(sp)))	/* imm0 = caller's saved vsp */
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+	/* can use nfn (= temp2) as a temporary  */
+	__(subi imm1,nargs,nargregs<<fixnumshift)
+	__(add imm1,imm1,vsp)	/* imm1 = source just past the vpushed args */
+1:
+	__(ldru(temp2,-node_size(imm1)))
+	__(cmpr(cr0,imm1,vsp))
+	__(push(temp2,imm0))
+	__(bne cr0,1b)
+	__(mr vsp,imm0)
+	__(jump_fname)
+
+/* No args were vpushed; recover saved context & call symbol  */
+_spentry(tcallsymvsp)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+	__(jump_fname)
+	
+/* Tail-recursively call the function in nfn.  */
+/* Pretty much the same as the tcallsym* cases above.  */
+_spentry(tcallnfngen)
+	__(cmpri(cr0,nargs,nargregs<<fixnumshift))
+	__(ble cr0,_SPtcallnfnvsp)	/* no args vpushed: simple case */
+        __(b _SPtcallnfnslide)
+
+/* Some args were vpushed.  Slide them down to the base of  */
+/* the current frame, then do funcall.  */
+_spentry(tcallnfnslide)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(imm0,lisp_frame.savevsp(sp)))	/* imm0 = caller's saved vsp */
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+	/* Since we have a known function, can use fname as a temporary.  */
+	__(subi imm1,nargs,nargregs<<fixnumshift)
+	__(add imm1,imm1,vsp)	/* imm1 = source just past the vpushed args */
+1:
+	__(ldru(fname,-node_size(imm1)))
+	__(cmpr(cr0,imm1,vsp))
+	__(push(fname,imm0))
+	__(bne cr0,1b)
+	__(mr vsp,imm0)
+       	__(jump_nfn())
+        
+/* No args were vpushed; recover saved context & jump to nfn. */
+_spentry(tcallnfnvsp)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	__(mtlr loc_pc)
+       	__(jump_nfn())
+	
+/* Reference index arg_z of a misc-tagged object (arg_y).  */
+/* Note that this conses in some cases.  Return a properly-tagged  */
+/* lisp object in arg_z.  Do type and bounds-checking.  */
+	
+_spentry(misc_ref)
+	__(trap_unless_fulltag_equal(arg_y,fulltag_misc,imm0))
+	__(trap_unless_lisptag_equal(arg_z,tag_fixnum,imm0))
+	__(vector_length(imm0,arg_y,imm1))
+	__(trlge(arg_z,imm0))
+	__(extract_lowbyte(imm1,imm1))	/* imm1 = subtag  */
+	
+local_label(misc_ref_common):   
+        __ifdef(`PPC64')
+         __(slwi imm1,imm1,3)
+         __(li imm0,LO(local_label(misc_ref_jmp)))
+         __(addis imm0,imm0,HA(local_label(misc_ref_jmp)))
+         __(ldx imm0,imm0,imm1)
+         __(mtctr imm0)
+         __(bctr)
+
+local_label(misc_ref_jmp):              
+        /* 00-0f  */
+         .quad local_label(misc_ref_invalid) /* 00 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 01 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 02 immheader_0  */
+         .quad local_label(misc_ref_node) /* 03 function  */
+         .quad local_label(misc_ref_invalid) /* 04 cons  */
+         .quad local_label(misc_ref_invalid) /* 05 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 06 immheader_1  */
+         .quad local_label(misc_ref_node) /* 07 catch_frame  */
+         .quad local_label(misc_ref_invalid) /* 08 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 09 imm_2  */
+         .quad local_label(misc_ref_u32) /* 0a code_vector  */
+         .quad local_label(misc_ref_node) /* 0b slot_vector  */
+         .quad local_label(misc_ref_invalid) /* 0c misc  */
+         .quad local_label(misc_ref_invalid) /* 0d imm3  */
+         .quad local_label(misc_ref_invalid) /* 0e immheader_3  */
+         .quad local_label(misc_ref_node) /* 0f ratio  */
+        /* 10-1f  */
+         .quad local_label(misc_ref_invalid) /* 10 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 11 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 12 immheader_0  */
+         .quad local_label(misc_ref_node) /* 13 symbol_0  */
+         .quad local_label(misc_ref_invalid) /* 14 cons  */
+         .quad local_label(misc_ref_invalid) /* 15 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 16 immheader_1  */
+         .quad local_label(misc_ref_node) /* 17 lisp_tread  */
+         .quad local_label(misc_ref_invalid) /* 18 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 19 imm_2  */
+         .quad local_label(misc_ref_u32) /* 1a xcode_vector  */
+         .quad local_label(misc_ref_node) /* 1b instance  */
+         .quad local_label(misc_ref_invalid) /* 1c misc  */
+         .quad local_label(misc_ref_invalid) /* 1d imm3  */
+         .quad local_label(misc_ref_u64) /* 1e macptr  */
+         .quad local_label(misc_ref_node) /* 1f complex  */
+        /* 20-2f  */
+         .quad local_label(misc_ref_invalid) /* 20 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 21 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 22 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 23 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 24 cons  */
+         .quad local_label(misc_ref_invalid) /* 25 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 26 immheader_1  */
+         .quad local_label(misc_ref_node) /* 27 lock  */
+         .quad local_label(misc_ref_invalid) /* 28 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 29 imm_2  */
+         .quad local_label(misc_ref_u32) /* 2a bignum  */
+         .quad local_label(misc_ref_node) /* 2b struct  */
+         .quad local_label(misc_ref_invalid) /* 2c misc  */
+         .quad local_label(misc_ref_invalid) /* 2d imm3  */
+         .quad local_label(misc_ref_u64) /* 2e dead_macptr  */
+         .quad local_label(misc_ref_invalid) /* 2f nodeheader_3  */
+        /* 30-3f  */
+         .quad local_label(misc_ref_invalid) /* 30 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 31 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 32 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 33 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 34 cons  */
+         .quad local_label(misc_ref_invalid) /* 35 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 36 immheader_1  */
+         .quad local_label(misc_ref_node) /* 37 hash_vector  */
+         .quad local_label(misc_ref_invalid) /* 38 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 39 imm_2  */
+         .quad local_label(misc_ref_u32) /* 3a double_float  */
+         .quad local_label(misc_ref_node) /* 3b istruct  */
+         .quad local_label(misc_ref_invalid) /* 3c misc  */
+         .quad local_label(misc_ref_invalid) /* 3d imm3  */
+         .quad local_label(misc_ref_invalid) /* 3e immheader_3  */
+         .quad local_label(misc_ref_invalid) /* 3f nodeheader_3  */
+        /* 40-4f  */
+         .quad local_label(misc_ref_invalid) /* 40 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 41 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 42 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 43 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 44 cons  */
+         .quad local_label(misc_ref_invalid) /* 45 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 46 immheader_1  */
+         .quad local_label(misc_ref_node) /* 47 pool  */
+         .quad local_label(misc_ref_invalid) /* 48 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 49 imm_2  */
+         .quad local_label(misc_ref_invalid) /* 4a immheader_2  */
+         .quad local_label(misc_ref_node) /* 4b value_cell_2  */
+         .quad local_label(misc_ref_invalid) /* 4c misc  */
+         .quad local_label(misc_ref_invalid) /* 4d imm3  */
+         .quad local_label(misc_ref_invalid) /* 4e immheader_3  */
+         .quad local_label(misc_ref_invalid) /* 4f nodeheader_3  */
+        /* 50-5f  */
+         .quad local_label(misc_ref_invalid) /* 50 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 51 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 52 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 53 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 54 cons  */
+         .quad local_label(misc_ref_invalid) /* 55 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 56 immheader_1  */
+         .quad local_label(misc_ref_node) /* 57 weak  */
+         .quad local_label(misc_ref_invalid) /* 58 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 59 imm_2  */
+         .quad local_label(misc_ref_invalid) /* 5a immheader_2  */
+         .quad local_label(misc_ref_node) /* 5b xfunction  */
+         .quad local_label(misc_ref_invalid) /* 5c misc  */
+         .quad local_label(misc_ref_invalid) /* 5d imm3  */
+         .quad local_label(misc_ref_invalid) /* 5e immheader_3  */
+         .quad local_label(misc_ref_invalid) /* 5f nodeheader_3  */
+        /* 60-6f  */
+         .quad local_label(misc_ref_invalid) /* 60 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 61 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 62 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 63 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 64 cons  */
+         .quad local_label(misc_ref_invalid) /* 65 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 66 immheader_1  */
+         .quad local_label(misc_ref_node) /* 67 package  */
+         .quad local_label(misc_ref_invalid) /* 68 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 69 imm_2  */
+         .quad local_label(misc_ref_invalid) /* 6a immheader_2  */
+         .quad local_label(misc_ref_invalid) /* 6b nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* 6c misc  */
+         .quad local_label(misc_ref_invalid) /* 6d imm3  */
+         .quad local_label(misc_ref_invalid) /* 6e immheader_3  */
+         .quad local_label(misc_ref_invalid) /* 6f nodeheader_3  */
+        /* 70-7f  */
+         .quad local_label(misc_ref_invalid) /* 70 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 71 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 72 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 73 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 74 cons  */
+         .quad local_label(misc_ref_invalid) /* 75 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 76 immheader_1  */
+         .quad local_label(misc_ref_invalid) /* 77 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* 78 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 79 imm_2  */
+         .quad local_label(misc_ref_invalid) /* 7a immheader_2  */
+         .quad local_label(misc_ref_invalid) /* 7b nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* 7c misc  */
+         .quad local_label(misc_ref_invalid) /* 7d imm3  */
+         .quad local_label(misc_ref_invalid) /* 7e immheader_3  */
+         .quad local_label(misc_ref_invalid) /* 7f nodeheader_3  */
+        /* 80-8f  */
+         .quad local_label(misc_ref_invalid) /* 80 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 81 imm_0  */
+         .quad local_label(misc_ref_invalid) /* 82 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* 83 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 84 cons  */
+         .quad local_label(misc_ref_invalid) /* 85 imm_1  */
+         .quad local_label(misc_ref_invalid) /* 86 immheader_1  */
+         .quad local_label(misc_ref_node)    /* 87 arrayH  */ 
+         .quad local_label(misc_ref_invalid) /* 88 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 89 imm_2  */
+         .quad local_label(misc_ref_invalid) /* 8a immheader_2  */
+         .quad local_label(misc_ref_node)    /* 8b vectorH  */
+         .quad local_label(misc_ref_invalid) /* 8c misc  */
+         .quad local_label(misc_ref_invalid) /* 8d imm3  */
+         .quad local_label(misc_ref_invalid) /* 8e immheader_3  */
+         .quad local_label(misc_ref_node) /* 8f simple_vector  */
+        /* 90-9f  */
+         .quad local_label(misc_ref_invalid) /* 90 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 91 imm_0  */
+         .quad local_label(misc_ref_s8) /* 92 s8  */
+         .quad local_label(misc_ref_invalid) /* 93 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* 94 cons  */
+         .quad local_label(misc_ref_invalid) /* 95 imm_1  */
+         .quad local_label(misc_ref_s16) /* 96 immheader_1  */
+         .quad local_label(misc_ref_invalid) /* 97 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* 98 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* 99 imm_2  */
+         .quad local_label(misc_ref_s32) /* 9a s32  */
+         .quad local_label(misc_ref_invalid) /* 9b nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* 9c misc  */
+         .quad local_label(misc_ref_invalid) /* 9d imm3  */
+         .quad local_label(misc_ref_s64) /* 9e s64  */
+         .quad local_label(misc_ref_invalid) /* 9f nodeheader_3  */
+        /* a0-af  */
+         .quad local_label(misc_ref_invalid) /* a0 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* a1 imm_0  */
+         .quad local_label(misc_ref_u8) /* a2 u8  */
+         .quad local_label(misc_ref_invalid) /* a3 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* a4 cons  */
+         .quad local_label(misc_ref_invalid) /* a5 imm_1  */
+         .quad local_label(misc_ref_u16) /* a6 u16  */
+         .quad local_label(misc_ref_invalid) /* a7 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* a8 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* a9 imm_2  */
+         .quad local_label(misc_ref_u32) /* aa u32  */
+         .quad local_label(misc_ref_invalid) /* ab nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* ac misc  */
+         .quad local_label(misc_ref_invalid) /* ad imm3  */
+         .quad local_label(misc_ref_u64) /* ae u64  */
+         .quad local_label(misc_ref_invalid) /* af nodeheader_3  */
+        /* b0-bf  */
+         .quad local_label(misc_ref_invalid) /* b0 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* b1 imm_0  */
+         .quad local_label(misc_ref_invalid) /* b2 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* b3 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* b4 cons  */
+         .quad local_label(misc_ref_invalid) /* b5 imm_1  */
+         .quad local_label(misc_ref_invalid) /* b6 immheader_1  */
+         .quad local_label(misc_ref_invalid) /* b7 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* b8 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* b9 imm_2  */
+         .quad local_label(misc_ref_single_float_vector) /* ba sf vector  */
+         .quad local_label(misc_ref_invalid) /* bb nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* bc misc  */
+         .quad local_label(misc_ref_invalid) /* bd imm3  */
+         .quad local_label(misc_ref_fixnum_vector) /* be fixnum_vector  */
+         .quad local_label(misc_ref_invalid) /* bf nodeheader_3  */
+        /* c0-cf  */
+         .quad local_label(misc_ref_invalid) /* c0 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* c1 imm_0  */
+         .quad local_label(misc_ref_invalid) /* c2 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* c3 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* c4 cons  */
+         .quad local_label(misc_ref_invalid) /* c5 imm_1  */
+         .quad local_label(misc_ref_invalid) /* c6 immheader_1  */
+         .quad local_label(misc_ref_invalid) /* c7 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* c8 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* c9 imm_2  */
+         .quad local_label(misc_ref_invalid) /* ca immheader_2  */
+         .quad local_label(misc_ref_invalid) /* cb nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* cc misc  */
+         .quad local_label(misc_ref_invalid) /* cd imm3  */
+         .quad local_label(misc_ref_double_float_vector) /* ce double-float vector  */
+         .quad local_label(misc_ref_invalid) /* cf nodeheader_3  */
+        /* d0-df  */
+         .quad local_label(misc_ref_invalid) /* d0 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* d1 imm_0  */
+         .quad local_label(misc_ref_string) /* d2 string  */
+         .quad local_label(misc_ref_invalid) /* d3 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* d4 cons  */
+         .quad local_label(misc_ref_invalid) /* d5 imm_1  */
+         .quad local_label(misc_ref_invalid) /* d6 immheader_1  */
+         .quad local_label(misc_ref_invalid) /* d7 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* d8 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* d9 imm_2  */
+         .quad local_label(misc_ref_new_string) /* da new_string  */
+         .quad local_label(misc_ref_invalid) /* db nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* dc misc  */
+         .quad local_label(misc_ref_invalid) /* dd imm3  */
+         .quad local_label(misc_ref_invalid) /* de immheader_3  */
+         .quad local_label(misc_ref_invalid) /* df nodeheader_3  */
+        /* e0-ef  */
+         .quad local_label(misc_ref_invalid) /* e0 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* e1 imm_0  */
+         .quad local_label(misc_ref_invalid) /* e2 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* e3 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* e4 cons  */
+         .quad local_label(misc_ref_invalid) /* e5 imm_1  */
+         .quad local_label(misc_ref_invalid) /* e6 immheader_1  */
+         .quad local_label(misc_ref_invalid) /* e7 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* e8 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* e9 imm_2  */
+         .quad local_label(misc_ref_invalid) /* ea immheader_2  */
+         .quad local_label(misc_ref_invalid) /* eb nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* ec misc  */
+         .quad local_label(misc_ref_invalid) /* ed imm3  */
+         .quad local_label(misc_ref_invalid) /* ee immheader_3  */
+         .quad local_label(misc_ref_invalid) /* ef nodeheader_3  */
+        /* f0-ff  */
+         .quad local_label(misc_ref_invalid) /* f0 even_fixnum  */
+         .quad local_label(misc_ref_invalid) /* f1 imm_0  */
+         .quad local_label(misc_ref_invalid) /* f2 immheader_0  */
+         .quad local_label(misc_ref_invalid) /* f3 nodeheader_0  */
+         .quad local_label(misc_ref_invalid) /* f4 cons  */
+         .quad local_label(misc_ref_invalid) /* f5 imm_1  */
+         .quad local_label(misc_ref_bit_vector) /* f6 bit_vector  */
+         .quad local_label(misc_ref_invalid) /* f7 nodeheader_1  */
+         .quad local_label(misc_ref_invalid) /* f8 odd_fixnum  */
+         .quad local_label(misc_ref_invalid) /* f9 imm_2  */
+         .quad local_label(misc_ref_invalid) /* fa immheader_2  */
+         .quad local_label(misc_ref_invalid) /* fb nodeheader_2  */
+         .quad local_label(misc_ref_invalid) /* fc misc  */
+         .quad local_label(misc_ref_invalid) /* fd imm3  */
+         .quad local_label(misc_ref_invalid) /* fe immheader_3  */
+         .quad local_label(misc_ref_invalid) /* ff nodeheader_3  */
+	
+         /* A node vector  */
+local_label(misc_ref_node):        
+        /* arg_y = misc-tagged vector, arg_z = boxed fixnum index. */
+        /* Return the (already boxed) node element in arg_z.  */
+         __(la imm0,misc_data_offset(arg_z))
+         __(ldx arg_z,arg_y,imm0)
+         __(blr)
+local_label(misc_ref_double_float_vector):        
+        /* Load raw 64-bit element, then cons a fresh double-float to hold it.  */
+         __(la imm0,misc_data_offset(arg_z))
+         __(ldx imm0,arg_y,imm0)
+         __(li imm1,double_float_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm1,double_float.size))
+         __(std imm0,double_float.value(arg_z))
+         __(blr)
+local_label(misc_ref_s64):      
+        /* Signed 64-bit element: box via _SPmakes64 (may allocate).  */
+         __(la imm0,misc_data_offset(arg_z))
+         __(ldx imm0,arg_y,imm0)
+         __(b _SPmakes64)
+local_label(misc_ref_fixnum_vector):    
+        /* Raw fixnum-valued element: box it directly, no allocation.  */
+         __(la imm0,misc_data_offset(arg_z))
+         __(ldx imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_u64):      
+        /* Unsigned 64-bit element: box via _SPmakeu64 (may allocate).  */
+         __(la imm0,misc_data_offset(arg_z))
+         __(ldx imm0,arg_y,imm0)
+         __(b _SPmakeu64)
+local_label(misc_ref_new_string):        
+        /* 32-bit character codes: halve the boxed index to get a word */
+        /* offset, then tag the code as an immediate character.  */
+         __(srdi imm0,arg_z,1)
+         __(la imm0,misc_data_offset(imm0))
+         __(lwzx imm0,arg_y,imm0)
+         __(slwi imm0,imm0,charcode_shift)
+         __(ori arg_z,imm0,subtag_character)
+         __(blr)
+local_label(misc_ref_s32):                     
+        /* Signed 32-bit element: lwax sign-extends; always fits a fixnum here.  */
+         __(srdi imm0,arg_z,1)
+         __(la imm0,misc_data_offset(imm0))
+         __(lwax imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_u32):                     
+        /* Unsigned 32-bit element: lwzx zero-extends; always fits a fixnum here.  */
+         __(srdi imm0,arg_z,1)
+         __(la imm0,misc_data_offset(imm0))
+         __(lwzx imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_single_float_vector):             
+        /* Place the 32 float bits in the high half of arg_z and tag as */
+        /* an immediate single-float (no heap allocation on 64-bit).  */
+         __(srdi imm0,arg_z,1)
+         __(la imm0,misc_data_offset(imm0))
+         __(lwzx imm0,arg_y,imm0)
+         __(rldicr arg_z,imm0,32,31)
+         __(ori arg_z,arg_z,subtag_single_float)
+         __(blr)
+local_label(misc_ref_s16):      
+        /* Signed 16-bit element: lhax sign-extends, then box.  */
+         __(srdi imm0,arg_z,2)
+         __(la imm0,misc_data_offset(imm0))
+         __(lhax imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_u16):
+        /* Unsigned 16-bit element: lhzx zero-extends, then box.  */
+         __(srdi imm0,arg_z,2)
+         __(la imm0,misc_data_offset(imm0))
+         __(lhzx imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_s8):       
+        /* Signed 8-bit element: load byte, sign-extend with extsb, box.  */
+         __(srdi imm0,arg_z,3)
+         __(la imm0,misc_data_offset(imm0))
+         __(lbzx imm0,arg_y,imm0)
+         __(extsb imm0,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_u8):       
+        /* Unsigned 8-bit element: load byte, box.  */
+         __(srdi imm0,arg_z,3)
+         __(la imm0,misc_data_offset(imm0))
+         __(lbzx imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_string):              
+        /* 8-bit string: load the byte and tag it as a character.  */
+         __(srdi imm0,arg_z,3)
+         __(la imm0,misc_data_offset(imm0))
+         __(lbzx imm0,arg_y,imm0)
+         __(sldi imm0,imm0,charcode_shift)
+         __(ori arg_z,imm0,subtag_character)
+         __(blr)
+local_label(misc_ref_bit_vector):               
+        /* Isolate the bit number, fetch the containing 32-bit word, and */
+        /* rotate the selected bit into fixnum position (result 0 or 1).  */
+	 __(extrwi imm1,arg_z,5,32-(fixnumshift+5))	/* imm1 = bitnum  */
+         __(la imm1,1+fixnumshift(imm1))
+         __(srdi imm0,arg_z,5+fixnumshift)
+         __(sldi imm0,imm0,2)
+	 __(la imm0,misc_data_offset(imm0))
+	 __(lwzx imm0,arg_y,imm0)
+	 __(rlwnm arg_z,imm0,imm1,31-fixnumshift,31-fixnumshift)
+	 __(blr)
+local_label(misc_ref_invalid):      
+        /* Object has no accessible elements: signal XBADVEC.  */
+         __(li arg_x,XBADVEC)
+         __(set_nargs(3))
+         __(b _SPksignalerr)        
+        __else
+        /* 32-bit dispatch: scale the typecode in imm1 to a word index, */
+        /* load the handler address from misc_ref_jmp, and jump via CTR.  */
+         __(slwi imm1,imm1,2)
+         __(li imm0,LO(local_label(misc_ref_jmp)))
+         __(addis imm0,imm0,HA(local_label(misc_ref_jmp)))
+         __(lwzx imm0,imm0,imm1)
+         __(mtctr imm0)
+         __(bctr)
+
+local_label(misc_ref_jmp):           
+        /* 256-entry handler-address table, indexed by typecode byte. */
+        /* Non-vector typecodes map to misc_ref_invalid; each vector */
+        /* subtag maps to the handler for its element width/kind.  */
+        /* 00-0f  */
+         .long local_label(misc_ref_invalid) /* 00 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 01 cons  */
+         .long local_label(misc_ref_invalid) /* 02 nodeheader  */
+         .long local_label(misc_ref_invalid) /* 03 imm  */
+         .long local_label(misc_ref_invalid) /* 04 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 05 nil  */
+         .long local_label(misc_ref_invalid) /* 06 misc  */
+         .long local_label(misc_ref_u32) /* 07 bignum  */
+         .long local_label(misc_ref_invalid) /* 08 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 09 cons  */
+         .long local_label(misc_ref_node) /* 0a ratio  */
+         .long local_label(misc_ref_invalid) /* 0b imm  */
+         .long local_label(misc_ref_invalid) /* 0c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 0d nil  */
+         .long local_label(misc_ref_invalid) /* 0e misc  */
+         .long local_label(misc_ref_u32) /* 0f single_float  */
+        /* 10-1f  */
+         .long local_label(misc_ref_invalid) /* 10 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 11 cons  */
+         .long local_label(misc_ref_invalid) /* 12 nodeheader  */
+         .long local_label(misc_ref_invalid) /* 13 imm  */
+         .long local_label(misc_ref_invalid) /* 14 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 15 nil  */
+         .long local_label(misc_ref_invalid) /* 16 misc  */
+         .long local_label(misc_ref_u32) /* 17 double_float  */
+         .long local_label(misc_ref_invalid) /* 18 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 19 cons  */
+         .long local_label(misc_ref_node) /* 1a complex  */
+         .long local_label(misc_ref_invalid) /* 1b imm  */
+         .long local_label(misc_ref_invalid) /* 1c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 1d nil  */
+         .long local_label(misc_ref_invalid) /* 1e misc  */
+         .long local_label(misc_ref_u32) /* 1f macptr  */
+        /* 20-2f  */
+         .long local_label(misc_ref_invalid) /* 20 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 21 cons  */
+         .long local_label(misc_ref_node) /* 22 catch_frame  */
+         .long local_label(misc_ref_invalid) /* 23 imm  */
+         .long local_label(misc_ref_invalid) /* 24 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 25 nil  */
+         .long local_label(misc_ref_invalid) /* 26 misc  */
+         .long local_label(misc_ref_u32) /* 27 dead_macptr  */
+         .long local_label(misc_ref_invalid) /* 28 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 29 cons  */
+         .long local_label(misc_ref_node) /* 2a function  */
+         .long local_label(misc_ref_invalid) /* 2b imm  */
+         .long local_label(misc_ref_invalid) /* 2c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 2d nil  */
+         .long local_label(misc_ref_invalid) /* 2e misc  */
+         .long local_label(misc_ref_u32) /* 2f code_vector  */
+        /* 30-3f  */
+         .long local_label(misc_ref_invalid) /* 30 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 31 cons  */
+         .long local_label(misc_ref_node) /* 32 lisp_thread  */
+         .long local_label(misc_ref_invalid) /* 33 imm  */
+         .long local_label(misc_ref_invalid) /* 34 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 35 nil  */
+         .long local_label(misc_ref_invalid) /* 36 misc  */
+         .long local_label(misc_ref_u32) /* 37 creole  */
+         .long local_label(misc_ref_invalid) /* 38 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 39 cons  */
+         .long local_label(misc_ref_node) /* 3a symbol  */
+         .long local_label(misc_ref_invalid) /* 3b imm  */
+         .long local_label(misc_ref_invalid) /* 3c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 3d nil  */
+         .long local_label(misc_ref_invalid) /* 3e misc  */
+         .long local_label(misc_ref_u32) /* 3f xcode_vector  */
+        /* 40-4f  */
+         .long local_label(misc_ref_invalid) /* 40 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 41 cons  */
+         .long local_label(misc_ref_node) /* 42 lock  */
+         .long local_label(misc_ref_invalid) /* 43 imm  */
+         .long local_label(misc_ref_invalid) /* 44 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 45 nil  */
+         .long local_label(misc_ref_invalid) /* 46 misc  */
+         .long local_label(misc_ref_invalid) /* 47 immheader  */
+         .long local_label(misc_ref_invalid) /* 48 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 49 cons  */
+         .long local_label(misc_ref_node) /* 4a hash_vector  */
+         .long local_label(misc_ref_invalid) /* 4b imm  */
+         .long local_label(misc_ref_invalid) /* 4c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 4d nil  */
+         .long local_label(misc_ref_invalid) /* 4e misc  */
+         .long local_label(misc_ref_invalid) /* 4f immheader  */
+        /* 50-5f  */
+         .long local_label(misc_ref_invalid) /* 50 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 51 cons  */
+         .long local_label(misc_ref_node) /* 52 pool  */
+         .long local_label(misc_ref_invalid) /* 53 imm  */
+         .long local_label(misc_ref_invalid) /* 54 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 55 nil  */
+         .long local_label(misc_ref_invalid) /* 56 misc  */
+         .long local_label(misc_ref_invalid) /* 57 immheader  */
+         .long local_label(misc_ref_invalid) /* 58 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 59 cons  */
+         .long local_label(misc_ref_node) /* 5a weak  */
+         .long local_label(misc_ref_invalid) /* 5b imm  */
+         .long local_label(misc_ref_invalid) /* 5c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 5d nil  */
+         .long local_label(misc_ref_invalid) /* 5e misc  */
+         .long local_label(misc_ref_invalid) /* 5f immheader  */
+        /* 60-6f  */
+         .long local_label(misc_ref_invalid) /* 60 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 61 cons  */
+         .long local_label(misc_ref_node) /* 62 package  */
+         .long local_label(misc_ref_invalid) /* 63 imm  */
+         .long local_label(misc_ref_invalid) /* 64 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 65 nil  */
+         .long local_label(misc_ref_invalid) /* 66 misc  */
+         .long local_label(misc_ref_invalid) /* 67 immheader  */
+         .long local_label(misc_ref_invalid) /* 68 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 69 cons  */
+         .long local_label(misc_ref_node) /* 6a slot_vector  */
+         .long local_label(misc_ref_invalid) /* 6b imm  */
+         .long local_label(misc_ref_invalid) /* 6c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 6d nil  */
+         .long local_label(misc_ref_invalid) /* 6e misc  */
+         .long local_label(misc_ref_invalid) /* 6f immheader  */
+        /* 70-7f  */
+         .long local_label(misc_ref_invalid) /* 70 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 71 cons  */
+         .long local_label(misc_ref_node) /* 72 instance  */
+         .long local_label(misc_ref_invalid) /* 73 imm  */
+         .long local_label(misc_ref_invalid) /* 74 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 75 nil  */
+         .long local_label(misc_ref_invalid) /* 76 misc  */
+         .long local_label(misc_ref_invalid) /* 77 immheader  */
+         .long local_label(misc_ref_invalid) /* 78 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 79 cons  */
+         .long local_label(misc_ref_node) /* 7a struct  */
+         .long local_label(misc_ref_invalid) /* 7b imm  */
+         .long local_label(misc_ref_invalid) /* 7c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 7d nil  */
+         .long local_label(misc_ref_invalid) /* 7e misc  */
+         .long local_label(misc_ref_invalid) /* 7f immheader  */
+        /* 80-8f  */
+         .long local_label(misc_ref_invalid) /* 80 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 81 cons  */
+         .long local_label(misc_ref_node) /* 82 istruct  */
+         .long local_label(misc_ref_invalid) /* 83 imm  */
+         .long local_label(misc_ref_invalid) /* 84 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 85 nil  */
+         .long local_label(misc_ref_invalid) /* 86 misc  */
+         .long local_label(misc_ref_invalid) /* 87 immheader  */
+         .long local_label(misc_ref_invalid) /* 88 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 89 cons  */
+         .long local_label(misc_ref_node) /* 8a value_cell  */
+         .long local_label(misc_ref_invalid) /* 8b imm  */
+         .long local_label(misc_ref_invalid) /* 8c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 8d nil  */
+         .long local_label(misc_ref_invalid) /* 8e misc  */
+         .long local_label(misc_ref_invalid) /* 8f immheader  */
+        /* 90-9f  */
+         .long local_label(misc_ref_invalid) /* 90 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 91 cons  */
+         .long local_label(misc_ref_node) /* 92 xfunction  */
+         .long local_label(misc_ref_invalid) /* 93 imm  */
+         .long local_label(misc_ref_invalid) /* 94 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 95 nil  */
+         .long local_label(misc_ref_invalid) /* 96 misc  */
+         .long local_label(misc_ref_invalid) /* 97 immheader  */
+         .long local_label(misc_ref_invalid) /* 98 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* 99 cons  */
+         .long local_label(misc_ref_node) /* 9a arrayN  */
+         .long local_label(misc_ref_invalid) /* 9b imm  */
+         .long local_label(misc_ref_invalid) /* 9c odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* 9d nil  */
+         .long local_label(misc_ref_invalid) /* 9e misc  */
+         .long local_label(misc_ref_invalid) /* 9f immheader  */
+        /* a0-af  */
+         .long local_label(misc_ref_invalid) /* a0 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* a1 cons  */
+         .long local_label(misc_ref_node) /* a2 vectorH  */
+         .long local_label(misc_ref_invalid) /* a3 imm  */
+         .long local_label(misc_ref_invalid) /* a4 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* a5 nil  */
+         .long local_label(misc_ref_invalid) /* a6 misc  */
+         .long local_label(misc_ref_single_float_vector) /* a7 sf_vector  */
+         .long local_label(misc_ref_invalid) /* a8 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* a9 cons  */
+         .long local_label(misc_ref_node) /* aa simple_vector  */
+         .long local_label(misc_ref_invalid) /* ab imm  */
+         .long local_label(misc_ref_invalid) /* ac odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* ad nil  */
+         .long local_label(misc_ref_invalid) /* ae misc  */
+         .long local_label(misc_ref_u32) /* af u32  */
+        /* b0-bf  */
+         .long local_label(misc_ref_invalid) /* b0 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* b1 cons  */
+         .long local_label(misc_ref_invalid) /* b2 nodeheader  */
+         .long local_label(misc_ref_invalid) /* b3 imm  */
+         .long local_label(misc_ref_invalid) /* b4 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* b5 nil  */
+         .long local_label(misc_ref_invalid) /* b6 misc  */
+         .long local_label(misc_ref_s32) /* b7 s32  */
+         .long local_label(misc_ref_invalid) /* b8 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* b9 cons  */
+         .long local_label(misc_ref_invalid) /* ba nodeheader  */
+         .long local_label(misc_ref_invalid) /* bb imm  */
+         .long local_label(misc_ref_invalid) /* bc odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* bd nil  */
+         .long local_label(misc_ref_invalid) /* be misc  */
+         .long local_label(misc_ref_fixnum_vector) /* bf fixnum_vector  */
+        /* c0-cf  */
+         .long local_label(misc_ref_invalid) /* c0 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* c1 cons  */
+         .long local_label(misc_ref_invalid) /* c2 nodeheader  */
+         .long local_label(misc_ref_invalid) /* c3 imm  */
+         .long local_label(misc_ref_invalid) /* c4 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* c5 nil  */
+         .long local_label(misc_ref_invalid) /* c6 misc  */
+         .long local_label(misc_ref_new_string) /* c7 new_string  */
+         .long local_label(misc_ref_invalid) /* c8 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* c9 cons  */
+         .long local_label(misc_ref_invalid) /* ca nodeheader  */
+         .long local_label(misc_ref_invalid) /* cb imm  */
+         .long local_label(misc_ref_invalid) /* cc odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* cd nil  */
+         .long local_label(misc_ref_invalid) /* ce misc  */
+         .long local_label(misc_ref_u8) /* cf u8  */
+        /* d0-df  */
+         .long local_label(misc_ref_invalid) /* d0 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* d1 cons  */
+         .long local_label(misc_ref_invalid) /* d2 nodeheader  */
+         .long local_label(misc_ref_invalid) /* d3 imm  */
+         .long local_label(misc_ref_invalid) /* d4 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* d5 nil  */
+         .long local_label(misc_ref_invalid) /* d6 misc  */
+         .long local_label(misc_ref_s8)      /* d7 s8  */
+         .long local_label(misc_ref_invalid) /* d8 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* d9 cons  */
+         .long local_label(misc_ref_invalid) /* da nodeheader  */
+         .long local_label(misc_ref_invalid) /* db imm  */
+         .long local_label(misc_ref_invalid) /* dc odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* dd nil  */
+         .long local_label(misc_ref_invalid) /* de misc  */
+         .long local_label(misc_ref_old_string) /* df (old)subtag_simple_base_string  */
+        /* e0-ef  */
+         .long local_label(misc_ref_invalid) /* e0 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* e1 cons  */
+         .long local_label(misc_ref_invalid) /* e2 nodeheader  */
+         .long local_label(misc_ref_invalid) /* e3 imm  */
+         .long local_label(misc_ref_invalid) /* e4 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* e5 nil  */
+         .long local_label(misc_ref_invalid) /* e6 misc  */
+         .long local_label(misc_ref_u16) /* e7 u16  */
+         .long local_label(misc_ref_invalid) /* e8 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* e9 cons  */
+         .long local_label(misc_ref_invalid) /* ea nodeheader  */
+         .long local_label(misc_ref_invalid) /* eb imm  */
+         .long local_label(misc_ref_invalid) /* ec odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* ed nil  */
+         .long local_label(misc_ref_invalid) /* ee misc  */
+         .long local_label(misc_ref_s16) /* ef s16  */
+        /* f0-ff  */
+         .long local_label(misc_ref_invalid) /* f0 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* f1 cons  */
+         .long local_label(misc_ref_invalid) /* f2 nodeheader  */
+         .long local_label(misc_ref_invalid) /* f3 imm  */
+         .long local_label(misc_ref_invalid) /* f4 odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* f5 nil  */
+         .long local_label(misc_ref_invalid) /* f6 misc  */
+         .long local_label(misc_ref_double_float_vector) /* f7 df vector  */
+         .long local_label(misc_ref_invalid) /* f8 even_fixnum  */
+         .long local_label(misc_ref_invalid) /* f9 cons  */
+         .long local_label(misc_ref_invalid) /* fa nodeheader  */
+         .long local_label(misc_ref_invalid) /* fb imm  */
+         .long local_label(misc_ref_invalid) /* fc odd_fixnum  */
+         .long local_label(misc_ref_invalid) /* fd nil  */
+         .long local_label(misc_ref_invalid) /* fe misc  */
+         .long local_label(misc_ref_bit_vector) /* ff bit_vector  */
+                
+local_label(misc_ref_node):         
+	 /* A node vector.  */
+	 /* arg_y = misc-tagged vector, arg_z = boxed fixnum index; */
+	 /* return the (already boxed) node element in arg_z.  */
+	 __(addi imm0,arg_z,misc_data_offset)
+	 __(ldrx(arg_z,arg_y,imm0))
+	 __(blr)
+local_label(misc_ref_single_float_vector):        
+	 /* Load raw 32-bit element, cons a fresh single-float to hold it */
+	 /* (single-floats are heap objects on 32-bit).  */
+	 __(addi imm0,arg_z,misc_data_offset)
+	 __(li imm1,single_float_header)
+	 __(ldrx(imm0,arg_y,imm0))
+	 __(Misc_Alloc_Fixed(arg_z,imm1,single_float.size))
+	 __(str(imm0,single_float.value(arg_z)))
+	 __(blr)
+local_label(misc_ref_new_string):        
+	 /* 32-bit character code: tag it as an immediate character.  */
+	 __(addi imm0,arg_z,misc_data_offset)
+	 __(ldrx(imm0,arg_y,imm0))
+         __(slwi arg_z,imm0,charcode_shift)
+         __(ori arg_z,arg_z,subtag_character)
+         __(blr)
+local_label(misc_ref_s32):        
+	 /* Signed 32-bit element: box via _SPmakes32 (may allocate).  */
+	 __(addi imm0,arg_z,misc_data_offset)
+	 __(ldrx(imm0,arg_y,imm0))
+         __(b _SPmakes32)
+local_label(misc_ref_fixnum_vector):    
+	 /* Raw fixnum-valued element: box directly, no allocation.  */
+	 __(addi imm0,arg_z,misc_data_offset)
+	 __(ldrx(imm0,arg_y,imm0))
+         __(box_fixnum(arg_z,imm0))
+         __(blr)        
+local_label(misc_ref_u32):        
+	 /* Unsigned 32-bit element: box via _SPmakeu32 (may allocate).  */
+	 __(addi imm0,arg_z,misc_data_offset)
+	 __(ldrx(imm0,arg_y,imm0))
+         __(b _SPmakeu32)
+local_label(misc_ref_double_float_vector):      
+	 /* Load the 64-bit element into f0, then cons a double-float.  */
+         __(slwi imm0,arg_z,1)
+	 __(la imm0,misc_dfloat_offset(imm0))
+         __(lfdx f0,arg_y,imm0)
+	 __(li imm2,double_float_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,double_float.size))
+	 __(stfd f0,double_float.value(arg_z))
+	 __(blr)
+local_label(misc_ref_bit_vector):       
+	 /* Isolate the bit number, fetch the containing word, and rotate */
+	 /* the selected bit into fixnum position (result 0 or 1).  */
+	 __(extrwi imm1,arg_z,5,32-(fixnumshift+5))	/* imm1 = bitnum  */
+	 __(la imm1,1+fixnumshift(imm1))
+	 __(rlwinm imm0,arg_z,32-5,5,31-fixnumshift)
+	 __(la imm0,misc_data_offset(imm0))
+	 __(ldrx(imm0,arg_y,imm0))
+	 __(rlwnm arg_z,imm0,imm1,31-fixnumshift,31-fixnumshift)
+	 __(blr)
+local_label(misc_ref_s8):       
+	 /* Signed 8-bit element: load byte, sign-extend, box.  */
+         __(srwi imm0,arg_z,2)
+         __(la imm0,misc_data_offset(imm0))
+         __(lbzx imm0,arg_y,imm0)
+         __(extsb imm0,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_u8):       
+	 /* Unsigned 8-bit element: load byte, box.  */
+         __(srwi imm0,arg_z,2)
+         __(la imm0,misc_data_offset(imm0))
+         __(lbzx imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_old_string):           
+	 /* Legacy 8-bit string: load the byte and tag it as a character.  */
+         __(srwi imm0,arg_z,2)
+         __(la imm0,misc_data_offset(imm0))
+         __(lbzx imm0,arg_y,imm0)
+	 __(slwi arg_z,imm0,charcode_shift)
+	 __(ori arg_z,arg_z,subtag_character)
+	 __(blr)
+local_label(misc_ref_u16):              
+	 /* Unsigned 16-bit element: lhzx zero-extends, then box.  */
+         __(srwi imm0,arg_z,1)
+         __(la imm0,misc_data_offset(imm0))
+         __(lhzx imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_s16):              
+	 /* Signed 16-bit element: lhax sign-extends, then box.  */
+         __(srwi imm0,arg_z,1)
+         __(la imm0,misc_data_offset(imm0))
+         __(lhax imm0,arg_y,imm0)
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+local_label(misc_ref_invalid):
+	 /* Object has no accessible elements: signal XBADVEC.  */
+         __(li arg_x,XBADVEC)
+         __(set_nargs(3))
+         __(b _SPksignalerr)        
+
+        __endif
+        
+/* like misc_ref, only the boxed subtag is in arg_x.  */
+
+_spentry(subtag_misc_ref)
+        /* Typed misc_ref: arg_y = misc-tagged object, arg_z = fixnum */
+        /* index, arg_x = boxed subtag.  Traps on bad tags or an index */
+        /* >= the vector length, then dispatches via misc_ref_common */
+        /* with the unboxed subtag in imm1.  */
+	__(trap_unless_fulltag_equal(arg_y,fulltag_misc,imm0))
+        __(trap_unless_lisptag_equal(arg_z,tag_fixnum,imm0))
+	__(vector_length(imm0,arg_y,imm1))
+	__(trlge(arg_z,imm0))
+	__(unbox_fixnum(imm1,arg_x))
+        __(b local_label(misc_ref_common))
+
+_spentry(builtin_aref1)
+        /* One-dimensional aref fast path: if arg_y's typecode exceeds */
+        /* min_vector_subtag, tail-call subtag_misc_ref with the boxed */
+        /* typecode in arg_x; otherwise punt to the lisp builtin.  */
+	__(extract_typecode(imm0,arg_y))
+	__(cmpri(cr0,imm0,min_vector_subtag))
+	__(box_fixnum(arg_x,imm0))
+	__(bgt cr0,_SPsubtag_misc_ref)
+	__(jump_builtin(_builtin_aref1,2))
+        	
+	
+/* Make a cons cell on the vstack.  Always push 3 words, 'cause we're   */
+/* not sure how the vstack will be aligned.  */
+_spentry(stkconsyz) /* stack-cons arg_y . arg_z on the vstack; cons -> arg_z */
+	__(li imm0,nil_value)
+	__(vpush(imm0))
+	__(vpush(imm0))
+	__(vpush(imm0)) /* 3 NILs pushed: a doubleword-aligned pair exists either way */
+	__(andi. imm0,vsp,1<<word_shift) /* (oddp vsp ?)  */
+	__(beq cr0,1f)
+	__(str(arg_y,node_size*2(vsp))) /* car  */
+	__(str(arg_z,node_size(vsp))) /* cdr  */
+	__(la arg_z,fulltag_cons+node_size(vsp)) /* tag the cell as a cons */
+	__(blr)
+1:
+	__(str(arg_y,node_size(vsp))) /* car, again  */
+	__(str(arg_z,0(vsp)))
+	__(la arg_z,fulltag_cons(vsp))
+	__(blr)
+
+/* Make a stack-consed value cell.  Much like the case of */
+/* stack-allocating a cons cell.  Imm0 points to the closed-over value */
+/* (already vpushed).  Replace that locative with the vcell.  */
+_spentry(stkvcell0) /* stack-cons a value cell for the closed-over value imm0 points at */
+	__(sub imm1,imm0,vsp) /* imm1 = delta from vsp to value cell loc  */
+	__(li arg_z,nil_value)
+	__(vpush(arg_z))
+	__(vpush(arg_z))
+	__(vpush(arg_z)) /* 3 NILs: an aligned cell exists whichever way vsp is aligned */
+	__(addi imm1,imm1,node_size*3)
+	__(add imm0,vsp,imm1) /* in case stack overflowed  */
+	__(andi. imm1,vsp,1<<word_shift) /* (oddp vsp) ?  */
+	__(li imm1,value_cell_header)
+	__(ldr(arg_z,0(imm0))) /* fetch the closed-over value via the (recomputed) locative */
+	__(beq cr0,1f)
+	__(str(arg_z,node_size*2(vsp)))
+	__(str(imm1,node_size(vsp)))
+	__(la arg_z,fulltag_misc+node_size(vsp))
+	__(str(arg_z,0(imm0))) /* replace the locative with the tagged vcell */
+	__(blr)
+1:
+	__(str(arg_z,node_size(vsp)))
+	__(str(imm1,0(vsp)))
+	__(la arg_z,fulltag_misc(vsp))
+	__(str(arg_z,0(imm0))) /* replace the locative with the tagged vcell */
+	__(blr)
+
+        
+_spentry(stkvcellvsp)      /* like stkvcell0, but the closed-over value is at the entry vsp */
+	__(li arg_z,nil_value)
+	__(vpush(arg_z))
+	__(vpush(arg_z))
+	__(vpush(arg_z)) /* 3 NILs: an aligned cell exists whichever way vsp is aligned */
+	__(li imm1,node_size*3)
+	__(add imm0,vsp,imm1) /* in case stack overflowed  */
+	__(andi. imm1,vsp,1<<word_shift) /* (oddp vsp) ?  */
+	__(li imm1,value_cell_header)
+	__(ldr(arg_z,0(imm0))) /* fetch the closed-over value */
+	__(beq cr0,1f)
+	__(str(arg_z,node_size*2(vsp)))
+	__(str(imm1,node_size(vsp)))
+	__(la arg_z,fulltag_misc+node_size(vsp))
+	__(str(arg_z,0(imm0))) /* replace the value with the tagged vcell */
+	__(blr)
+1:
+	__(str(arg_z,node_size(vsp)))
+	__(str(imm1,0(vsp)))
+	__(la arg_z,fulltag_misc(vsp))
+	__(str(arg_z,0(imm0))) /* replace the value with the tagged vcell */
+	__(blr)
+
+/* Make a "raw" area on the temp stack, stack-cons a macptr to point to it,  */
+/* and return the macptr.  Size (in bytes, boxed) is in arg_z on entry; macptr */
+/* in arg_z on exit.  */
+_spentry(makestackblock) /* boxed byte count in arg_z; tagged macptr -> arg_z */
+	__(unbox_fixnum(imm0,arg_z))
+        __(dnode_align(imm0,imm0,tsp_frame.fixed_overhead+macptr.size))
+	__(cmplri(cr0,imm0,tstack_alloc_limit))
+	__(bge cr0,1f) /* too large for the temp stack: heap-cons instead */
+	__(TSP_Alloc_Var_Unboxed(imm0))
+	__(li imm0,macptr_header)
+	__(la imm1,tsp_frame.data_offset+macptr.size(tsp)) /* raw data follows the macptr object */
+	__(str(imm0,tsp_frame.data_offset(tsp)))
+	__(la arg_z,tsp_frame.data_offset+fulltag_misc(tsp))
+	__(str(imm1,macptr.address(arg_z)))
+        __ifdef(`PPC64')
+         __(std rzero,macptr.domain(arg_z))
+         __(std rzero,macptr.type(arg_z))
+        __else
+	 __(stfd fp_zero,macptr.domain(arg_z)) /* one 8-byte store zeros domain and type */
+        __endif
+	__(blr)
+
+        /* Too big. Heap cons a gcable macptr  */
+1:
+	__(TSP_Alloc_Fixed_Unboxed(0)) /* empty tsp frame keeps the tstack discipline consistent */
+	__(set_nargs(1))
+	__(li fname,nrs.new_gcable_ptr)
+	__(jump_fname())
+
+/* As above, only set the block's contents to 0.  */
+_spentry(makestackblock0) /* as makestackblock, but zero the block's contents */
+	__(unbox_fixnum(imm0,arg_z))
+        __(dnode_align(imm0,imm0,tsp_frame.fixed_overhead+macptr.size))
+	__(cmplri(cr0,imm0,tstack_alloc_limit))
+	__(bge cr0,3f) /* too large for the temp stack: heap-cons instead */
+	__(TSP_Alloc_Var_Unboxed(imm0))
+	__(Zero_TSP_Frame(imm0,imm1))
+	__(li imm0,macptr_header)
+	__(la imm1,tsp_frame.data_offset+macptr.size(tsp))
+	__(str(imm0,tsp_frame.data_offset(tsp)))
+	__(la arg_z,tsp_frame.data_offset+fulltag_misc(tsp))
+	__(str(imm1,macptr.address(arg_z))) /* NOTE(review): original comment said "makestackblock0 expects the address in imm1" here, inside makestackblock0 itself -- looks stale; the block address is stored and also left in imm1  */
+	__(stfd fp_zero,macptr.domain(arg_z)) /* one 8-byte store zeros domain and type */
+	__(blr)
+
+        /* Too big. Heap cons a gcable macptr  */
+3:
+	__(TSP_Alloc_Fixed_Unboxed(0)) /* "raw" block to make the compiler happy  */
+
+	__(mr arg_y,arg_z) /* save block size  */
+	__(li arg_z,t_value) /* clear-p arg to %new-gcable-ptr  */
+	__(set_nargs(2))
+	__(li fname,nrs.new_gcable_ptr)
+	__(jump_fname())
+
+/* Make a list of length arg_y (boxed), initial-element arg_z (boxed) on  */
+/* the tstack.  Return the list in arg_z.  */
+_spentry(makestacklist) /* boxed length in arg_y, initial element in arg_z; list -> arg_z */
+	__(add imm0,arg_y,arg_y) /* 2 fixnums per cons = byte count, still boxed */
+	__(cmplri(cr1,imm0,((tstack_alloc_limit+1)-cons.size)))
+	__(addi imm0,imm0,tsp_frame.fixed_overhead)
+	__(bge cr1,3f) /* too large for the temp stack: heap-cons the list */
+	__(TSP_Alloc_Var_Boxed(imm0,imm1))
+	__(mr imm1,arg_y) /* imm1 = remaining (boxed) count */
+	__(cmpri(cr1,imm1,0))
+	__(mr arg_y,arg_z) /* arg_y = initial element */
+	__(li arg_z,nil_value) /* arg_z = tail built so far */
+	__(ldr(imm2,tsp_frame.backlink(tsp)))
+	__(la imm2,-tsp_frame.fixed_overhead+fulltag_cons(imm2)) /* last cons cell in the frame */
+	__(b 2f)
+1:
+	__(subi imm1,imm1,fixnum1)
+	__(cmpri(cr1,imm1,0))
+	__(_rplacd(imm2,arg_z))
+	__(_rplaca(imm2,arg_y))
+	__(mr arg_z,imm2)
+	__(subi imm2,imm2,cons.size) /* fill cells from the end toward the frame start */
+2:
+	__(bne cr1,1b)
+	__(blr)
+
+3:
+	__(cmpri(cr1,arg_y,0))
+	__(TSP_Alloc_Fixed_Boxed(0))  /* make the compiler happy  */
+	__(mr imm1,arg_y) /* count  */
+	__(mr arg_y,arg_z) /* initial value  */
+	__(li arg_z,nil_value) /* result  */
+	__(b 5f)
+4:
+	__(subi imm1,imm1,fixnum1)
+	__(cmpri(cr1,imm1,0))
+	__(Cons(arg_z,arg_y,arg_z)) /* heap-cons one cell onto the result */
+5:
+	__(bne cr1,4b)
+	__(blr)
+
+/* subtype (boxed) vpushed before initial values. (Had better be a  */
+/* node header subtag.) Nargs set to count of things vpushed.  */
+
+_spentry(stkgvector) /* subtype + initial values on vstack, nargs = total count; gvector -> arg_z */
+	__(la imm0,-fixnum_one(nargs)) /* element count = nargs - 1 (boxed) */
+	__(cmpri(cr1,imm0,0))
+	__(add imm1,vsp,nargs)
+	__(ldru(temp0,-node_size(imm1))) /* temp0 = boxed subtype (pushed first, deepest) */
+	__(slri(imm2,imm0,num_subtag_bits-fixnumshift))
+        __ifdef(`PPC64')
+         __(unbox_fixnum(imm3,temp0))
+         __(or imm2,imm3,imm2)
+        __else
+	 __(rlwimi imm2,temp0,32-fixnumshift,32-num_subtag_bits,31) /* merge subtag into header */
+        __endif
+        __(dnode_align(imm0,imm0,node_size+tsp_frame.fixed_overhead))
+	__(TSP_Alloc_Var_Boxed_nz(imm0,imm3))
+	__(str(imm2,tsp_frame.data_offset(tsp))) /* install the header */
+	__(la arg_z,tsp_frame.data_offset+fulltag_misc(tsp))
+	__(la imm3,misc_header_offset(arg_z))
+	__(li imm0,fixnum1)
+	__(b 2f)
+1:
+	__(addi imm0,imm0,fixnum1)
+	__(cmpr(cr1,imm0,nargs))
+	__(ldru(temp0,-node_size(imm1))) /* pop next value off the vstack image */
+	__(stru(temp0,node_size(imm3))) /* ... into the next vector slot */
+2:
+	__(bne cr1,1b)
+	__(add vsp,vsp,nargs) /* discard the vpushed arguments */
+	__(blr)
+
+/* Allocate a "fulltag_misc" object.  On entry, arg_y contains the element  */
+/* count (boxed) and  arg_z contains the subtag (boxed).  Both of these   */
+/* parameters must be "reasonable" (the  subtag must be valid, the element  */
+/* count must be of type (unsigned-byte 24)/(unsigned-byte 56).   */
+/* On exit, arg_z contains the (properly tagged) misc object; it'll have a  */
+/* proper header on it and its contents will be 0.   imm0 contains   */
+/* the object's header (fulltag = fulltag_immheader or fulltag_nodeheader.)  */
+/* This is intended for things like "make-array" and "%make-bignum" and the   */
+/* like.  Things that involve creating small objects of known size can usually  */
+/* do so inline with less hair.  */
+
+/* If this has to go out-of-line (to GC or whatever), it should do so via a   */
+/* trap (or should otherwise ensure that both the LR and CTR are preserved   */
+/* where the GC can find them.)  */
+
+
+_spentry(misc_alloc) /* arg_y = boxed element count, arg_z = boxed subtag; zeroed object -> arg_z, header -> imm0 */
+        __ifdef(`PPC64')
+         __(extract_unsigned_byte_bits_(imm2,arg_y,56)) /* count must fit (unsigned-byte 56); sets cr0 */
+         __(unbox_fixnum(imm0,arg_z))
+         __(sldi imm2,arg_y,num_subtag_bits-fixnumshift)
+         __(clrldi imm1,imm0,64-nlowtagbits)
+         __(or imm0,imm2,imm0) /* imm0 = header (count merged above subtag) */
+         __(extract_fulltag(imm2,imm0))
+         __(cmpdi cr1,imm1,lowtag_nodeheader)
+         __(cmpdi cr2,imm2,ivector_class_64_bit)
+         __(bne- cr0,9f)
+         __(cmpdi cr3,imm2,ivector_class_32_bit)
+         __(cmpdi cr4,imm2,ivector_class_8_bit)
+         __(mr imm2,arg_y)
+         __(cmpdi cr5,imm1,subtag_bit_vector)
+         __(beq cr1,1f) /* node header: element count is already the byte count (boxed) */
+         __(beq cr2,1f)
+         __(srdi imm2,imm2,1) /* halve once: 32-bit elements */
+         __(beq cr3,1f)
+         __(beq cr5,2f)
+         __(srdi imm2,imm2,1) /* halve again: 16-bit elements */
+         __(bne cr4,1f)
+         __(srdi imm2,imm2,1) /* halve again: 8-bit elements */
+/* imm2 now = byte count.  Add 8 for header, 15 to align, then clear */
+/* low four bits. */
+1:
+         __(dnode_align(imm2,imm2,node_size))
+
+	 __(Misc_Alloc(arg_z,imm0,imm2))
+	 __(blr)
+2:      /* bit-vector case  */
+         __(addi imm2,arg_y,7<<fixnumshift)
+         __(srdi imm2,imm2,3+fixnumshift) /* ceiling(bits/8) */
+         __(b 1b)
+9:                      
+	 __(uuo_interr(error_object_not_unsigned_byte_56,arg_y))
+        __else
+	 __(extract_unsigned_byte_bits_(imm2,arg_y,24)) /* count must fit (unsigned-byte 24); sets cr0 */
+	 __(unbox_fixnum(imm0,arg_z))
+	 __(extract_fulltag(imm1,imm0))
+	 __(bne- cr0,9f)
+	 __(cmpri(cr0,imm1,fulltag_nodeheader))
+	 __(mr imm3,imm0)
+	 __(cmplri(cr1,imm0,max_32_bit_ivector_subtag))
+	 __(rlwimi imm0,arg_y,num_subtag_bits-fixnum_shift,0,31-num_subtag_bits	)/* imm0 now = header  */
+	 __(mr imm2,arg_y)
+	 __(beq cr0,1f)	/* do probe if node object (fixnum element count = byte count).  */
+	 __(cmplri(cr0,imm3,max_16_bit_ivector_subtag))
+	 __(bng cr1,1f)	/* do probe if 32-bit imm object  */
+	 __(cmplri(cr1,imm3,max_8_bit_ivector_subtag))
+	 __(srwi imm2,imm2,1) /* halve once: 16-bit elements */
+	 __(bgt cr0,2f)
+	 __(bgt cr1,1f)
+	 __(srwi imm2,imm2,1) /* halve again: 8-bit elements */
+        /* imm2 now = byte count.  Add 4 for header, 7 to align, then clear */
+        /* low three bits.  */
+1:
+         __(dnode_align(imm2,imm2,node_size))
+
+	 __(Misc_Alloc(arg_z,imm0,imm2))
+	 __(blr)
+2:
+	 __(cmplri(imm3,subtag_double_float_vector))
+	 __(slwi imm2,arg_y,1) /* 64-bit elements: double the boxed count */
+	 __(beq 1b)
+	 __(addi imm2,arg_y,7<<fixnumshift)
+	 __(srwi imm2,imm2,fixnumshift+3) /* bit vector: ceiling(bits/8) */
+	 __(b 1b)
+9:
+	 __(uuo_interr(error_object_not_unsigned_byte_24,arg_y))
+        __endif
+        
+/* almost exactly as above, but "swap exception handling info" */
+/* on exit and return  */
+/* Deprecated */        
+_spentry(poweropen_ffcallX) /* deprecated: deliberately traps if ever reached */
+        .long 0x7c800008        /* debug trap */
+
+
+/* Destructuring-bind, macro-bind.  */
+   
+/* OK to use arg_x, arg_y for whatever (tagged) purpose;  */
+/* likewise immX regs.  */
+/* arg_z preserved, nothing else in particular defined on exit.  */
+/* nargs contains req count (0-255) in PPC bits mask_req_start/mask_req_width,  */
+/* opt count (0-255) in PPC bits mask_opt_start/mask_opt_width,  */
+/* key count (0-255) in PPC bits mask_key_start/mask_key_width,  */
+/* opt-supplied-p flag in PPC bit mask_initopt,  */
+/* keyp flag in PPC bit mask_keyp,  */
+/* &allow-other-keys flag in PPC bit mask_aok,  */
+/* &rest flag in PPC bit mask_restp.  */
+/* When mask_keyp bit is set, keyvect contains vector of keyword symbols,  */
+/* length key count.  */
+
+_spentry(macro_bind) /* like destructuring_bind, but skip the macro-call's car before binding */
+        __ifdef(`PPC64')
+ 	 __(mr whole_reg,arg_reg)
+	 __(extract_fulltag(imm0,arg_reg))
+         __(cmpri(cr1,arg_reg,nil_value))
+	 __(cmpri(cr0,imm0,fulltag_cons))
+         __(beq cr1,0f) /* NIL is an acceptable (empty) list here */
+	 __(bne- cr0,1f)
+0:             
+	 __(_cdr(arg_reg,arg_reg)) /* drop the macro name; bind against the cdr */
+	 __(b local_label(destbind1))
+        __else
+	 __(mr whole_reg,arg_reg)
+	 __(extract_lisptag(imm0,arg_reg))
+	 __(cmpri(cr0,imm0,tag_list))
+	 __(bne- cr0,1f)
+	 __(_cdr(arg_reg,arg_reg)) /* drop the macro name; bind against the cdr */
+	 __(b (local_label(destbind1)))
+        __endif
+1:
+	__(li arg_y,XCALLNOMATCH)
+	__(mr arg_z,whole_reg)
+	__(set_nargs(2))
+	__(b _SPksignalerr)
+
+
+_spentry(destructuring_bind) /* bind the list in arg_reg per the lambda-list info encoded in nargs */
+	__(mr whole_reg,arg_reg) /* remember the whole list for error reporting */
+        __(b local_label(destbind1))
+	
+_spentry(destructuring_bind_inner) /* as destructuring_bind, but the "whole" list arrives in arg_z */
+	__(mr whole_reg,arg_z)
+local_label(destbind1): 
+	/* Extract required arg count.  */
+	/* Works around a bug in gas: it can't handle a shift count of "32" (which would mean 0 bits).  */
+	ifelse(eval(mask_req_width+mask_req_start),eval(32),`
+	__(clrlwi. imm0,nargs,mask_req_start)
+	',`
+	__(extrwi. imm0,nargs,mask_req_width,mask_req_start)
+	')
+	__(extrwi imm1,nargs,mask_opt_width,mask_opt_start) /* imm1 = &optional count */
+	__(rlwinm imm2,nargs,0,mask_initopt,mask_initopt) /* imm2 = opt-supplied-p flag */
+	__(rlwinm imm4,nargs,0,mask_keyp,mask_keyp)
+	__(cmpri(cr4,imm4,0)) /* cr4: &key present? */
+	__(rlwinm imm4,nargs,0,mask_restp,mask_restp)
+	__(cmpri(cr5,imm4,0)) /* cr5: &rest present? */
+	__(cmpri(cr1,imm1,0)) /* cr1: any &optionals? */
+	__(cmpri(cr2,imm2,0)) /* cr2: supplied-p vars for &optionals? */
+	/* Save entry vsp in case of error.  */
+	__(mr imm4,vsp)
+	__(beq cr0,2f) /* no required args */
+1:
+	__(cmpri(cr7,arg_reg,nil_value))
+        __ifdef(`PPC64')
+         __(extract_fulltag(imm3,arg_reg))
+         __(cmpri(cr3,imm3,fulltag_cons))
+        __else       
+	 __(extract_lisptag(imm3,arg_reg))
+	 __(cmpri(cr3,imm3,tag_list))
+        __endif
+	__(subi imm0,imm0,1)
+	__(cmpri(cr0,imm0,0))
+	__(beq cr7,toofew) /* ran out of list with required args left */
+	__(bne cr3,badlist)
+	__(ldr(arg_x,cons.car(arg_reg)))
+	__(ldr(arg_reg,cons.cdr(arg_reg)))
+	__(vpush(arg_x)) /* bind one required arg */
+	__(bne cr0,1b)
+2:
+	__(beq cr1,rest_keys)
+	__(bne cr2,opt_supp)
+	/* 'simple' &optionals:	 no supplied-p, default to nil.  */
+simple_opt_loop:
+	__(cmpri(cr0,arg_reg,nil_value))
+        __ifdef(`PPC64')
+         __(extract_fulltag(imm3,arg_reg))
+         __(cmpri(cr3,imm3,fulltag_cons))
+        __else
+	 __(extract_lisptag(imm3,arg_reg))
+	 __(cmpri(cr3,imm3,tag_list))
+        __endif
+	__(subi imm1,imm1,1)
+	__(cmpri(cr1,imm1,0))
+	__(li imm5,nil_value)
+	__(beq cr0,default_simple_opt) /* list exhausted: default remaining optionals */
+	__(bne cr3,badlist)
+	__(ldr(arg_x,cons.car(arg_reg)))
+	__(ldr(arg_reg,cons.cdr(arg_reg)))
+	__(vpush(arg_x))
+	__(bne cr1,simple_opt_loop)
+	__(b rest_keys)
+default_simple_opt_loop:
+	__(subi imm1,imm1,1)
+	__(cmpri(cr1,imm1,0))
+default_simple_opt:
+	__(vpush(imm5)) /* push NIL for each defaulted optional */
+	__(bne cr1,default_simple_opt_loop)
+	__(b rest_keys)
+	/* Provide supplied-p vars for the &optionals.  */
+opt_supp:
+	__(li arg_y,t_value)
+opt_supp_loop:
+	__(cmpri(cr0,arg_reg,nil_value))
+        __ifdef(`PPC64')
+         __(extract_fulltag(imm3,arg_reg))
+         __(cmpri(cr3,imm3,fulltag_cons))
+        __else        
+	 __(extract_lisptag(imm3,arg_reg))
+	 __(cmpri(cr3,imm3,tag_list))
+        __endif
+	__(subi imm1,imm1,1)
+	__(cmpri(cr1,imm1,0))
+	__(beq cr0,default_hard_opt)
+	__(bne cr3,badlist)
+	__(ldr(arg_x,cons.car(arg_reg)))
+	__(ldr(arg_reg,cons.cdr(arg_reg)))
+	__(vpush(arg_x)) /* value ... */
+	__(vpush(arg_y)) /* ... and supplied-p = T */
+	__(bne cr1,opt_supp_loop)
+	__(b rest_keys)
+default_hard_opt_loop:
+	__(subi imm1,imm1,1)
+	__(cmpri(cr1,imm1,0))
+default_hard_opt:
+	__(vpush(imm5)) /* NIL value ... */
+	__(vpush(imm5)) /* ... and NIL supplied-p */
+	__(bne cr1,default_hard_opt_loop)
+rest_keys:
+	__(cmpri(cr0,arg_reg,nil_value))
+	__(bne cr5,have_rest)
+	__(bne cr4,have_keys)
+	__(bne cr0,toomany) /* no &rest/&key but list not exhausted */
+	__(blr)
+have_rest:
+	__(vpush(arg_reg)) /* bind &rest to the remaining list */
+	__(beqlr cr4)
+have_keys:
+	/* Ensure that arg_reg contains a proper,even-length list.  */
+	/* Insist that its length is <= 512 (as a cheap circularity check.)  */
+	__(li imm0,256)
+	__(mr arg_x,arg_reg)
+count_keys_loop:
+        __ifdef(`PPC64')
+         __(extract_fulltag(imm3,arg_x))
+         __(cmpri(cr3,imm3,fulltag_cons))
+        __else
+	 __(extract_lisptag(imm3,arg_x))
+	 __(cmpri(cr3,imm3,tag_list))
+        __endif
+	__(cmpri(cr0,arg_x,nil_value))
+	__(subi imm0,imm0,1)
+	__(cmpri(cr4,imm0,0))
+	__(beq cr0,counted_keys)
+	__(bne cr3,badlist)
+	__(ldr(arg_x,cons.cdr(arg_x)))
+        __ifdef(`PPC64')
+         __(extract_fulltag(imm3,arg_x))
+         __(cmpri(cr3,imm3,fulltag_cons))
+        __else
+	 __(extract_lisptag(imm3,arg_x))
+	 __(cmpri(cr3,imm3,tag_list))
+        __endif
+	__(blt cr4,toomany) /* more than 256 pairs: assume circular */
+	__(cmpri(cr0,arg_x,nil_value))
+	__(beq cr0,db_badkeys) /* odd-length key/value list */
+	__(bne cr3,badlist)
+	__(ldr(arg_x,cons.cdr(arg_x)))
+	__(b count_keys_loop)
+counted_keys:
+	/* We've got a proper, even-length list of key/value pairs in */
+	/* arg_reg. For each keyword var in the lambda-list, push a pair */
+	/* of NILs on the vstack.  */
+	__(extrwi. imm0,nargs,mask_key_width,mask_key_start )
+	__(mr imm2,imm0) 	/* save number of keys  */
+	__(li imm5,nil_value)
+	__(b push_pair_test)
+push_pair_loop:
+	__(cmpri(cr0,imm0,1))
+	__(subi imm0,imm0,1)
+	__(vpush(imm5))
+	__(vpush(imm5))
+push_pair_test:
+	__(bne cr0,push_pair_loop)
+	__(slwi imm2,imm2,dnode_shift)  /* pairs -> bytes  */
+	__(add imm2,vsp,imm2)		/* imm2 points below pairs  */
+	__(li imm0,0)			/* count unknown keywords so far  */
+	__(extrwi imm1,nargs,1,mask_aok) /* unknown keywords allowed  */
+	__(extrwi nargs,nargs,mask_key_width,mask_key_start)
+	/* Now, for each keyword/value pair in the list  */
+	/*  a) if the keyword is found in the keyword vector, set the  */
+	/*     corresponding entry on the vstack to the value and the  */
+	/*     associated supplied-p var to T.  */
+	/*  b) Regardless of whether or not the keyword is found,  */
+        /*     if :ALLOW-OTHER-KEYS is provided with a non-nil value, */
+	/*     set the low bit of imm1 to indicate that unknown keywords  */
+	/*     are acceptable. (This bit is pre-set above to the value */
+        /*     the encoded value of &allow_other_keys.) */
+	/*  c) If the keyword is not found (and isn't :ALLOW-OTHER-KEYS), increment  */
+	/*     the count of unknown keywords in the high bits of imm1*/
+	/* At the end of the list, signal an error if any unknown keywords were seen  */
+	/* but not allowed.  Otherwise, return.  */
+
+match_keys_loop:
+	__(cmpri(cr0,arg_reg,nil_value))
+	__(li imm0,0)
+	__(li imm3,misc_data_offset)
+	__(beq cr0,matched_keys)
+	__(ldr(arg_x,cons.car(arg_reg))) /* arg_x = keyword from the call */
+	__(li arg_y,nrs.kallowotherkeys)
+	__(cmpr(cr3,arg_x,arg_y))	/* :ALLOW-OTHER-KEYS ?  */
+	__(ldr(arg_reg,cons.cdr(arg_reg)))
+	__(ldr(arg_y,cons.car(arg_reg))) /* arg_y = associated value */
+	__(cmpr(cr4,imm0,nargs))
+	__(ldr(arg_reg,cons.cdr(arg_reg)))
+	__(b match_test)
+match_loop:
+	__(ldrx(temp0,keyvect_reg,imm3)) /* next keyword symbol from keyvect */
+	__(cmpr(cr0,arg_x,temp0))
+	__(addi imm0,imm0,1)
+	__(cmpr(cr4,imm0,nargs))
+	__(addi imm3,imm3,node_size)
+	__(bne cr0,match_test)
+	/* Got a hit.  Unless this keyword's been seen already, set it.  */
+	__(slwi imm0,imm0,dnode_shift)
+	__(subf imm0,imm0,imm2)
+	__(ldr(temp0,0(imm0)))
+	__(cmpri(cr0,temp0,nil_value)) /* supplied-p already T? */
+	__(li temp0,t_value)
+	__(bne cr0,match_keys_loop)	/* already saw this  */
+	__(str(arg_y,node_size*1(imm0))) /* store the value */
+	__(str(temp0,node_size*0(imm0))) /* set supplied-p to T */
+        __(bne cr3,match_keys_loop)
+	__(b match_keys_check_aok)
+match_test:
+	__(bne cr4,match_loop)
+        __(beq cr3,match_keys_check_aok)
+        __(addi imm1,imm1,node_size) /* unknown keyword: bump the high-bits count */
+        __(b match_keys_loop)
+match_keys_check_aok:
+        __(andi. imm0,imm1,2)  /* check "seen-aok" bit in imm1 */
+        __(cmpri cr1,arg_y,nil_value) /* check value */
+        __(ori imm1,imm1,2) /* remember we've seen :ALLOW-OTHER-KEYS */
+        __(bne cr0,match_keys_loop) /* duplicate aok */
+        __(beq cr1,match_keys_loop)
+        __(ori imm1,imm1,1) /* non-nil value: unknown keywords allowed */
+	__(b match_keys_loop)
+matched_keys:
+        __(clrrwi. imm0,imm1,2) /* any unknown keywords counted? */
+        __(beqlr)
+        __(andi. imm1,imm1,1) /* ... and are they allowed? */
+        __(bnelr)
+	/* Some unrecognized keywords.  Complain generically about  */
+	/* invalid keywords.  */
+db_badkeys:
+	__(li arg_y,XBADKEYS)
+	__(b destructure_error)
+toomany:
+	__(li arg_y,XCALLTOOMANY)
+	__(b destructure_error)
+toofew:
+	__(li arg_y,XCALLTOOFEW)
+	__(b destructure_error)
+badlist:
+	__(li arg_y,XCALLNOMATCH)
+	/* b destructure_error  */
+destructure_error:
+	__(mr vsp,imm4)		/* undo everything done to the stack  */
+	__(mr arg_z,whole_reg)
+	__(set_nargs(2))
+	__(b _SPksignalerr)
+        
+/* vpush the values in the value set atop the vsp, incrementing nargs.  */
+/* Discard the tsp frame; leave values atop the vsp.  */
+
+_spentry(recover_values) /* vpush saved multiple values, bumping nargs; pop the tsp frame */
+
+/* First, walk the segments reversing the pointer to previous segment pointers  */
+/* Can tell the end because that previous segment pointer is the prev tsp pointer  */
+	__(ldr(imm0,tsp_frame.backlink(tsp))) /* previous tsp  */
+	__(mr imm1,tsp) /* current segment  */
+	__(mr imm2,tsp) /* last segment  */
+local_label(walkloop):
+	__(ldr(imm3,tsp_frame.fixed_overhead+node_size(imm1))) /* next segment  */
+	__(cmpr(cr0,imm0,imm3)) /* last segment?  */
+	__(str(imm2,tsp_frame.fixed_overhead+node_size(imm1))) /* reverse pointer  */
+	__(mr imm2,imm1) /* last segment <- current segment  */
+	__(mr imm1,imm3) /* current segment <- next segment  */
+	__(bne cr0,local_label(walkloop))
+
+        /* the final segment ptr is now in imm2  */
+        /* walk backwards, pushing values on VSP and incrementing NARGS  */
+local_label(pushloop):
+	__(ldr(imm0,tsp_frame.data_offset(imm2))) /* nargs in segment  */
+	__(cmpri(cr0,imm0,0))
+	__(cmpr(cr1,imm2,tsp)) /* cr1: is this the first (tsp) segment? */
+	__(la imm3,tsp_frame.data_offset+(2*node_size)(imm2))
+	__(add imm3,imm3,imm0) /* imm3 -> one past this segment's values */
+	__(add nargs,nargs,imm0)
+	__(b 2f)
+1:
+	__(ldru(arg_z,-node_size(imm3)))
+	__(cmpri(cr0,imm0,fixnum_one))
+	__(subi imm0,imm0,fixnum_one)
+	__(vpush(arg_z))
+2:
+	__(bne cr0,1b)
+	__(ldr(imm2,tsp_frame.data_offset+node_size(imm2))) /* previous segment  */
+	__(bne cr1,local_label(pushloop))
+	__(unlink(tsp)) /* discard the (whole) tsp frame */
+	__(blr)
+
+	
+/* Go out of line to do this.  Sheesh.  */
+
+_spentry(vpopargregs) /* pop up to 3 args (per nargs) from the vstack into arg_x/arg_y/arg_z */
+	__(cmpri(cr0,nargs,0))
+	__(cmpri(cr1,nargs,2<<fixnumshift))
+	__(beqlr cr0) /* 0 args: nothing to pop */
+	__(beq cr1,local_label(yz)) /* exactly 2 args */
+	__(blt cr1,local_label(z)) /* exactly 1 arg */
+	__(ldr(arg_z,node_size*0(vsp))) /* 3 (or more) args */
+	__(ldr(arg_y,node_size*1(vsp)))
+	__(ldr(arg_x,node_size*2(vsp)))
+	__(la vsp,node_size*3(vsp))
+	__(blr)
+local_label(yz):
+	__(ldr(arg_z,node_size*0(vsp)))
+	__(ldr(arg_y,node_size*1(vsp)))
+	__(la vsp,node_size*2(vsp))
+	__(blr)
+local_label(z):
+	__(ldr(arg_z,node_size*0(vsp)))
+	__(la vsp,node_size*1(vsp))
+	__(blr)
+
+/* If arg_z is an integer, return in imm0 something whose sign  */
+/* is the same as arg_z's.  If not an integer, error.  */
+_spentry(integer_sign) /* imm0 gets a value with the same sign as the integer in arg_z */
+	__(extract_typecode(imm0,arg_z))
+	__(cmpri(cr1,imm0,tag_fixnum))
+	__(cmpri(cr0,imm0,subtag_bignum))
+	__(mr imm0,arg_z)
+	__(beqlr+ cr1) /* fixnum: the boxed value itself has the right sign */
+	__(bne- cr0,1f) /* neither fixnum nor bignum: error */
+	__(getvheader(imm0,arg_z))
+        __ifdef(`PPC64')
+         __(header_size(imm0,imm0))
+         __(sldi imm0,imm0,2)
+        __else
+         __(header_length(imm0,imm0)) /* boxed length = scaled size  */
+        __endif
+        __(addi imm0,imm0,misc_data_offset-4) /* bias, less 1 element  */
+	__(lwzx imm0,arg_z,imm0) /* most-significant bignum digit */
+	__(cmpwi cr0,imm0,0)
+	__(li imm0,1)
+	__(bgelr cr0)
+	__(li imm0,-1)
+	__(blr)
+1:
+	__(uuo_interr(error_object_not_integer,arg_z))
+
+/* like misc_set, only pass the (boxed) subtag in temp0  */
+_spentry(subtag_misc_set)
+	__(trap_unless_fulltag_equal(arg_x,fulltag_misc,imm0))
+	__(trap_unless_lisptag_equal(arg_y,tag_fixnum,imm0))
+	__(vector_length(imm0,arg_x,imm1))
+	__(trlge(arg_y,imm0))
+	__(unbox_fixnum(imm1,temp0))
+local_label(misc_set_common):
+        __ifdef(`PPC64')
+         __(slwi imm1,imm1,3)
+         __(li imm0,LO(local_label(misc_set_jmp)))
+         __(addis imm0,imm0,HA(local_label(misc_set_jmp)))
+         __(ldx imm0,imm0,imm1)
+         __(mtctr imm0)
+         __(bctr)
+local_label(misc_set_jmp):              
+        /* 00-0f  */
+         .quad local_label(misc_set_invalid) /* 00 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 01 imm_0  */
+         .quad local_label(misc_set_invalid) /* 02 immheader_0  */
+         .quad _SPgvset /* 03 function  */
+         .quad local_label(misc_set_invalid) /* 04 cons  */
+         .quad local_label(misc_set_invalid) /* 05 imm_1  */
+         .quad local_label(misc_set_invalid) /* 06 immheader_1  */
+         .quad _SPgvset /* 07 catch_frame  */
+         .quad local_label(misc_set_invalid) /* 08 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 09 imm_2  */
+         .quad local_label(misc_set_u32) /* 0a code_vector  */
+         .quad _SPgvset /* 0b slot_vector  */
+         .quad local_label(misc_set_invalid) /* 0c misc  */
+         .quad local_label(misc_set_invalid) /* 0d imm3  */
+         .quad local_label(misc_set_invalid) /* 0e immheader_3  */
+         .quad _SPgvset /* 0f ratio  */
+        /* 10-1f  */
+         .quad local_label(misc_set_invalid) /* 10 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 11 imm_0  */
+         .quad local_label(misc_set_invalid) /* 12 immheader_0  */
+         .quad _SPgvset /* 13 symbol_0  */
+         .quad local_label(misc_set_invalid) /* 14 cons  */
+         .quad local_label(misc_set_invalid) /* 15 imm_1  */
+         .quad local_label(misc_set_invalid) /* 16 immheader_1  */
+         .quad _SPgvset /* 17 lisp_thread  */
+         .quad local_label(misc_set_invalid) /* 18 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 19 imm_2  */
+         .quad local_label(misc_set_u32) /* 1a xcode_vector  */
+         .quad _SPgvset /* 1b instance  */
+         .quad local_label(misc_set_invalid) /* 1c misc  */
+         .quad local_label(misc_set_invalid) /* 1d imm3  */
+         .quad local_label(misc_set_u64) /* 1e macptr  */
+         .quad _SPgvset /* 1f complex  */
+        /* 20-2f  */
+         .quad local_label(misc_set_invalid) /* 20 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 21 imm_0  */
+         .quad local_label(misc_set_invalid) /* 22 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 23 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 24 cons  */
+         .quad local_label(misc_set_invalid) /* 25 imm_1  */
+         .quad local_label(misc_set_invalid) /* 26 immheader_1  */
+         .quad _SPgvset /* 27 lock  */
+         .quad local_label(misc_set_invalid) /* 28 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 29 imm_2  */
+         .quad local_label(misc_set_u32) /* 2a bignum  */
+         .quad _SPgvset /* 2b struct  */
+         .quad local_label(misc_set_invalid) /* 2c misc  */
+         .quad local_label(misc_set_invalid) /* 2d imm3  */
+         .quad local_label(misc_set_u64) /* 2e dead_macptr  */
+         .quad local_label(misc_set_invalid) /* 2f nodeheader_3  */
+        /* 30-3f  */
+         .quad local_label(misc_set_invalid) /* 30 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 31 imm_0  */
+         .quad local_label(misc_set_invalid) /* 32 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 33 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 34 cons  */
+         .quad local_label(misc_set_invalid) /* 35 imm_1  */
+         .quad local_label(misc_set_invalid) /* 36 immheader_1  */
+         .quad _SPgvset /* 37 hash_vector  */
+         .quad local_label(misc_set_invalid) /* 38 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 39 imm_2  */
+         .quad local_label(misc_set_u32) /* 3a double_float  */
+         .quad _SPgvset /* 3b istruct  */
+         .quad local_label(misc_set_invalid) /* 3c misc  */
+         .quad local_label(misc_set_invalid) /* 3d imm3  */
+         .quad local_label(misc_set_invalid) /* 3e immheader_3  */
+         .quad local_label(misc_set_invalid) /* 3f nodeheader_3  */
+        /* 40-4f  */
+         .quad local_label(misc_set_invalid) /* 40 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 41 imm_0  */
+         .quad local_label(misc_set_invalid) /* 42 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 43 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 44 cons  */
+         .quad local_label(misc_set_invalid) /* 45 imm_1  */
+         .quad local_label(misc_set_invalid) /* 46 immheader_1  */
+         .quad _SPgvset /* 47 pool  */
+         .quad local_label(misc_set_invalid) /* 48 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 49 imm_2  */
+         .quad local_label(misc_set_invalid) /* 4a immheader_2  */
+         .quad _SPgvset /* 4b value_cell_2  */
+         .quad local_label(misc_set_invalid) /* 4c misc  */
+         .quad local_label(misc_set_invalid) /* 4d imm3  */
+         .quad local_label(misc_set_invalid) /* 4e immheader_3  */
+         .quad local_label(misc_set_invalid) /* 4f nodeheader_3  */
+        /* 50-5f  */
+         .quad local_label(misc_set_invalid) /* 50 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 51 imm_0  */
+         .quad local_label(misc_set_invalid) /* 52 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 53 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 54 cons  */
+         .quad local_label(misc_set_invalid) /* 55 imm_1  */
+         .quad local_label(misc_set_invalid) /* 56 immheader_1  */
+         .quad _SPgvset /* 57 weak  */
+         .quad local_label(misc_set_invalid) /* 58 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 59 imm_2  */
+         .quad local_label(misc_set_invalid) /* 5a immheader_2  */
+         .quad _SPgvset /* 5b xfunction  */
+         .quad local_label(misc_set_invalid) /* 5c misc  */
+         .quad local_label(misc_set_invalid) /* 5d imm3  */
+         .quad local_label(misc_set_invalid) /* 5e immheader_3  */
+         .quad local_label(misc_set_invalid) /* 5f nodeheader_3  */
+        /* 60-6f  */
+         .quad local_label(misc_set_invalid) /* 60 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 61 imm_0  */
+         .quad local_label(misc_set_invalid) /* 62 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 63 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 64 cons  */
+         .quad local_label(misc_set_invalid) /* 65 imm_1  */
+         .quad local_label(misc_set_invalid) /* 66 immheader_1  */
+         .quad _SPgvset /* 67 package  */
+         .quad local_label(misc_set_invalid) /* 68 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 69 imm_2  */
+         .quad local_label(misc_set_invalid) /* 6a immheader_2  */
+         .quad local_label(misc_set_invalid) /* 6b nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* 6c misc  */
+         .quad local_label(misc_set_invalid) /* 6d imm3  */
+         .quad local_label(misc_set_invalid) /* 6e immheader_3  */
+         .quad local_label(misc_set_invalid) /* 6f nodeheader_3  */
+        /* 70-7f  */
+         .quad local_label(misc_set_invalid) /* 70 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 71 imm_0  */
+         .quad local_label(misc_set_invalid) /* 72 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 73 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 74 cons  */
+         .quad local_label(misc_set_invalid) /* 75 imm_1  */
+         .quad local_label(misc_set_invalid) /* 76 immheader_1  */
+         .quad local_label(misc_set_invalid) /* 77 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* 78 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 79 imm_2  */
+         .quad local_label(misc_set_invalid) /* 7a immheader_2  */
+         .quad local_label(misc_set_invalid) /* 7b nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* 7c misc  */
+         .quad local_label(misc_set_invalid) /* 7d imm3  */
+         .quad local_label(misc_set_invalid) /* 7e immheader_3  */
+         .quad local_label(misc_set_invalid) /* 7f nodeheader_3  */
+        /* 80-8f  */
+         .quad local_label(misc_set_invalid) /* 80 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 81 imm_0  */
+         .quad local_label(misc_set_invalid) /* 82 immheader_0  */
+         .quad local_label(misc_set_invalid) /* 83 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 84 cons  */
+         .quad local_label(misc_set_invalid) /* 85 imm_1  */
+         .quad local_label(misc_set_invalid) /* 86 immheader_1  */
+         .quad _SPgvset /* 87 arrayH  */
+         .quad local_label(misc_set_invalid) /* 88 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 89 imm_2  */
+         .quad local_label(misc_set_invalid) /* 8a immheader_2  */
+         .quad _SPgvset /* 8b vectorH  */
+         .quad local_label(misc_set_invalid) /* 8c misc  */
+         .quad local_label(misc_set_invalid) /* 8d imm3  */
+         .quad local_label(misc_set_invalid) /* 8e immheader_3  */
+         .quad _SPgvset /* 8f simple_vector  */
+        /* 90-9f  */
+         .quad local_label(misc_set_invalid) /* 90 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* 91 imm_0  */
+         .quad local_label(misc_set_s8) /* 92 s8  */
+         .quad local_label(misc_set_invalid) /* 93 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* 94 cons  */
+         .quad local_label(misc_set_invalid) /* 95 imm_1  */
+         .quad local_label(misc_set_s16) /* 96 immheader_1  */
+         .quad local_label(misc_set_invalid) /* 97 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* 98 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* 99 imm_2  */
+         .quad local_label(misc_set_s32) /* 9a s32  */
+         .quad local_label(misc_set_invalid) /* 9b nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* 9c misc  */
+         .quad local_label(misc_set_invalid) /* 9d imm3  */
+         .quad local_label(misc_set_s64) /* 9e s64  */
+         .quad local_label(misc_set_invalid) /* 9f nodeheader_3  */
+        /* a0-af  */
+         .quad local_label(misc_set_invalid) /* a0 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* a1 imm_0  */
+         .quad local_label(misc_set_u8) /* a2 u8  */
+         .quad local_label(misc_set_invalid) /* a3 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* a4 cons  */
+         .quad local_label(misc_set_invalid) /* a5 imm_1  */
+         .quad local_label(misc_set_u16) /* a6 u16  */
+         .quad local_label(misc_set_invalid) /* a7 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* a8 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* a9 imm_2  */
+         .quad local_label(misc_set_u32) /* aa u32  */
+         .quad local_label(misc_set_invalid) /* ab nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* ac misc  */
+         .quad local_label(misc_set_invalid) /* ad imm3  */
+         .quad local_label(misc_set_u64) /* ae u64  */
+         .quad local_label(misc_set_invalid) /* af nodeheader_3  */
+        /* b0-bf  */
+         .quad local_label(misc_set_invalid) /* b0 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* b1 imm_0  */
+         .quad local_label(misc_set_invalid) /* b2 immheader_0  */
+         .quad local_label(misc_set_invalid) /* b3 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* b4 cons  */
+         .quad local_label(misc_set_invalid) /* b5 imm_1  */
+         .quad local_label(misc_set_invalid) /* b6 immheader_1  */
+         .quad local_label(misc_set_invalid) /* b7 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* b8 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* b9 imm_2  */
+         .quad local_label(misc_set_single_float_vector) /* ba sf vector  */
+         .quad local_label(misc_set_invalid) /* bb nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* bc misc  */
+         .quad local_label(misc_set_invalid) /* bd imm3  */
+         .quad local_label(misc_set_fixnum_vector) /* be fixnum_vector  */
+         .quad local_label(misc_set_invalid) /* bf nodeheader_3  */
+        /* c0-cf  */
+         .quad local_label(misc_set_invalid) /* c0 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* c1 imm_0  */
+         .quad local_label(misc_set_invalid) /* c2 immheader_0  */
+         .quad local_label(misc_set_invalid) /* c3 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* c4 cons  */
+         .quad local_label(misc_set_invalid) /* c5 imm_1  */
+         .quad local_label(misc_set_invalid) /* c6 immheader_1  */
+         .quad local_label(misc_set_invalid) /* c7 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* c8 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* c9 imm_2  */
+         .quad local_label(misc_set_invalid) /* ca immheader_2  */
+         .quad local_label(misc_set_invalid) /* cb nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* cc misc  */
+         .quad local_label(misc_set_invalid) /* cd imm3  */
+         .quad local_label(misc_set_double_float_vector) /* ce double-float vector  */
+         .quad local_label(misc_set_invalid) /* cf nodeheader_3  */
+        /* d0-df  */
+         .quad local_label(misc_set_invalid) /* d0 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* d1 imm_0  */
+         .quad local_label(misc_set_string) /* d2 string  */
+         .quad local_label(misc_set_invalid) /* d3 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* d4 cons  */
+         .quad local_label(misc_set_invalid) /* d5 imm_1  */
+         .quad local_label(misc_set_invalid) /* d6 immheader_1  */
+         .quad local_label(misc_set_invalid) /* d7 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* d8 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* d9 imm_2  */
+         .quad local_label(misc_set_new_string) /* da new_string  */
+         .quad local_label(misc_set_invalid) /* db nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* dc misc  */
+         .quad local_label(misc_set_invalid) /* dd imm3  */
+         .quad local_label(misc_set_invalid) /* de immheader_3  */
+         .quad local_label(misc_set_invalid) /* df nodeheader_3  */
+        /* e0-ef  */
+         .quad local_label(misc_set_invalid) /* e0 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* e1 imm_0  */
+         .quad local_label(misc_set_invalid) /* e2 immheader_0  */
+         .quad local_label(misc_set_invalid) /* e3 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* e4 cons  */
+         .quad local_label(misc_set_invalid) /* e5 imm_1  */
+         .quad local_label(misc_set_invalid) /* e6 immheader_1  */
+         .quad local_label(misc_set_invalid) /* e7 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* e8 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* e9 imm_2  */
+         .quad local_label(misc_set_invalid) /* ea immheader_2  */
+         .quad local_label(misc_set_invalid) /* eb nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* ec misc  */
+         .quad local_label(misc_set_invalid) /* ed imm3  */
+         .quad local_label(misc_set_invalid) /* ee immheader_3  */
+         .quad local_label(misc_set_invalid) /* ef nodeheader_3  */
+        /* f0-ff  */
+         .quad local_label(misc_set_invalid) /* f0 even_fixnum  */
+         .quad local_label(misc_set_invalid) /* f1 imm_0  */
+         .quad local_label(misc_set_invalid) /* f2 immheader_0  */
+         .quad local_label(misc_set_invalid) /* f3 nodeheader_0  */
+         .quad local_label(misc_set_invalid) /* f4 cons  */
+         .quad local_label(misc_set_invalid) /* f5 imm_1  */
+         .quad local_label(misc_set_bit_vector) /* f6 bit_vector  */
+         .quad local_label(misc_set_invalid) /* f7 nodeheader_1  */
+         .quad local_label(misc_set_invalid) /* f8 odd_fixnum  */
+         .quad local_label(misc_set_invalid) /* f9 imm_2  */
+         .quad local_label(misc_set_invalid) /* fa immheader_2  */
+         .quad local_label(misc_set_invalid) /* fb nodeheader_2  */
+         .quad local_label(misc_set_invalid) /* fc misc  */
+         .quad local_label(misc_set_invalid) /* fd imm3  */
+         .quad local_label(misc_set_invalid) /* fe immheader_3  */
+         .quad local_label(misc_set_invalid) /* ff nodeheader_3  */
+
+local_label(misc_set_bit_vector): /* store fixnum 0/1 arg_z at bit index arg_y of bit-vector arg_x  */
+         __(lis imm3,0x8000)	/* imm3 = 0x80000000: mask for leftmost bit of a 32-bit word  */
+         __(extract_unsigned_byte_bits_(imm0,arg_z,1))	/* cr0.eq iff arg_z is a fixnum that fits in 1 unsigned bit  */
+	 __(extrwi imm1,arg_y,5,32-(fixnumshift+5))	/* imm1 = bitnum within the 32-bit word  */
+         __(srdi imm0,arg_y,5+fixnumshift)	/* imm0 = index of the word containing the bit  */
+	 __(srw imm3,imm3,imm1)	/* imm3 = mask with only the target bit set  */
+         __(bne local_label(misc_set_bad))	/* arg_z was not fixnum 0 or 1  */
+         __(cmpdi cr0,arg_z,0)	/* setting or clearing the bit ?  */
+         __(sldi imm0,imm0,2)	/* word index -> byte offset  */
+	 __(la imm0,misc_data_offset(imm0))	/* skip the vector header  */
+	 __(lwzx imm2,arg_x,imm0)	/* fetch the word containing the bit  */
+         __(beq 1f)	/* arg_z == 0: clear the bit  */
+         __(or imm2,imm3,imm2)	/* set the bit ...  */
+         __(stwx imm2,arg_x,imm0)
+         __(blr)
+1:       __(andc imm2,imm2,imm3)	/* ... or clear it  */
+         __(stwx imm2,arg_x,imm0)
+         __(blr)
+local_label(misc_set_s16): /* store fixnum arg_z as a signed 16-bit element of arg_x at index arg_y  */
+         __(extract_lisptag(imm2,arg_z))	/* imm2 = tag of the new value  */
+         __(sldi imm0,arg_z,64-(16+fixnumshift))	/* shift pair: sign-extend the low 16 value bits ...  */
+         __(srdi imm1,arg_y,2)	/* fixnum index -> halfword byte offset  */
+         __(cmpdi cr7,imm2,tag_fixnum)	/* cr7: is arg_z a fixnum ?  */
+         __(sradi imm0,imm0,64-(16+fixnumshift))	/* ... and back again  */
+         __(cmpd imm0,arg_z)	/* cr0.eq iff arg_z fits in 16 signed bits  */
+         __(la imm1,misc_data_offset(imm1))	/* skip the vector header  */
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(bne local_label(misc_set_bad))	/* out of range  */
+         __(bne cr7,local_label(misc_set_bad))	/* not a fixnum  */
+         __(sthx imm0,arg_x,imm1)
+         __(blr)
+local_label(misc_set_u16): /* store fixnum arg_z as an unsigned 16-bit element of arg_x at index arg_y  */
+         __(extract_unsigned_byte_bits_(imm0,arg_z,16))	/* cr0.eq iff arg_z is a fixnum in [0,#xffff]  */
+         __(srdi imm1,arg_y,2)	/* fixnum index -> halfword byte offset  */
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(la imm1,misc_data_offset(imm1))	/* skip the vector header  */
+         __(bne local_label(misc_set_bad))	/* wrong type or out of range  */
+         __(sthx imm0,arg_x,imm1)
+         __(blr)
+local_label(misc_set_single_float_vector): /* store single-float arg_z in single-float vector arg_x at index arg_y  */
+         __(extract_fulltag(imm3,arg_z))
+         __(srdi imm4,arg_y,1)	/* fixnum index -> word byte offset  */
+         __(cmpdi cr3,imm3,subtag_single_float)	/* single-floats are immediates here: subtag lives in the fulltag bits  */
+         __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(bne cr3,local_label(misc_set_bad))	/* not a single-float  */
+         __(srdi imm0,arg_z,32)	/* the ieee bits are in the high word of the immediate  */
+         __(stwx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_s32): /* store fixnum arg_z as a signed 32-bit element of arg_x at index arg_y  */
+         __(extract_lisptag(imm2,arg_z))
+         __(srdi imm4,arg_y,1)	/* fixnum index -> word byte offset  */
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(cmpdi imm2,tag_fixnum)	/* cr0: is arg_z a fixnum ?  */
+         __(sldi imm1,imm0,32)	/* shift pair: sign-extend the low 32 bits ...  */
+         __(sradi imm1,imm1,32)	/* ... and back again  */
+         __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(bne local_label(misc_set_bad))	/* not a fixnum  */
+         __(cmpd imm1,imm0)	/* eq iff the value fits in 32 signed bits  */
+         __(bne local_label(misc_set_bad))	/* out of range  */
+         __(stwx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_u32): /* store fixnum arg_z as an unsigned 32-bit element of arg_x at index arg_y  */
+         __(extract_unsigned_byte_bits_(imm0,arg_z,32))	/* cr0.eq iff arg_z is a fixnum in [0,#xffffffff]  */
+         __(srdi imm4,arg_y,1)	/* fixnum index -> word byte offset  */
+	 __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(bne local_label(misc_set_bad))	/* wrong type or out of range  */
+         __(stwx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_new_string): /* store character arg_z in string arg_x (32-bit code points) at index arg_y  */
+         __(extract_lowbyte(imm0,arg_z))	/* imm0 = subtag byte of arg_z  */
+         __(srdi imm4,arg_y,1)	/* fixnum index -> word byte offset  */
+         __(cmpdi imm0,subtag_character)	/* cr0: is arg_z a character ?  */
+	 __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(srwi imm0,arg_z,charcode_shift)	/* imm0 = character code  */
+         __(bne local_label(misc_set_bad))	/* not a character  */
+         __(stwx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_string): /* store character arg_z in 8-bit string arg_x at index arg_y  */
+         __(extract_lowbyte(imm0,arg_z))	/* imm0 = subtag byte of arg_z  */
+         __(srdi imm4,arg_y,3)	/* fixnum index -> byte offset  */
+         __(cmpdi imm0,subtag_character)	/* cr0: is arg_z a character ?  */
+         __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(bne cr0,local_label(misc_set_bad))	/* not a character  */
+         __(srwi imm0,arg_z,charcode_shift)	/* imm0 = char code; only low 8 bits stored - presumably caller ensures it fits  */
+         __(stbx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_s8): /* store fixnum arg_z as a signed 8-bit element of arg_x at index arg_y  */
+         __(extract_lisptag(imm2,arg_z))
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(cmpdi cr2,imm2,tag_fixnum)	/* cr2: is arg_z a fixnum ?  */
+         __(srdi imm4,arg_y,3)	/* fixnum index -> byte offset  */
+         __(sldi imm1,imm0,56)	/* shift pair: sign-extend the low 8 bits ...  */
+         __(sradi imm1,imm1,56)	/* ... and back again  */
+         __(cmpd imm1,imm0)	/* cr0.eq iff the value fits in 8 signed bits  */
+         __(bne cr2,local_label(misc_set_bad))	/* not a fixnum  */
+         __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(bne local_label(misc_set_bad))	/* out of range  */
+         __(stbx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_u8): /* store fixnum arg_z as an unsigned 8-bit element of arg_x at index arg_y  */
+         __(extract_unsigned_byte_bits_(imm0,arg_z,8))	/* cr0.eq iff arg_z is a fixnum in [0,#xff]  */
+         __(srdi imm4,arg_y,3)	/* fixnum index -> byte offset  */
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(la imm4,misc_data_offset(imm4))	/* skip the vector header  */
+         __(bne local_label(misc_set_bad))	/* wrong type or out of range  */
+         __(stbx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_u64): /* store non-negative integer arg_z as an unsigned 64-bit element of arg_x at index arg_y  */
+         __(extract_lisptag(imm0,arg_z))
+         __(extract_fulltag(imm2,arg_z))
+         __(cmpdi cr0,arg_z,0)	/* cr0: sign of (boxed) arg_z  */
+         __(cmpdi cr7,imm0,0)	/* cr7: eq iff arg_z is a fixnum (fixnum tag is 0 here)  */
+         __(cmpdi cr6,imm2,fulltag_misc)	/* cr6: could arg_z be a bignum ?  */
+         __(la imm4,misc_data_offset(arg_y))	/* fixnum index is already a scaled doubleword byte offset  */
+         __(bne cr7,local_label(setu64_maybe_bignum))
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(blt cr0,local_label(misc_set_bad))	/* negative fixnum  */
+         __(stdx imm0,arg_x,imm4)
+         __(blr)
+local_label(setu64_maybe_bignum): /* non-fixnum: accept a 2- or 3-digit bignum whose value fits in 64 unsigned bits  */
+         __(bne cr6,local_label(misc_set_bad))	/* not a uvector at all  */
+         __(getvheader(imm1,arg_z))	/* imm1 = header (encodes digit count)  */
+         __(ld imm0,misc_data_offset(arg_z))	/* first two 32-bit digits  */
+         __(rotldi imm0,imm0,32)	/* digits are stored low-word-first: swap into natural order  */
+         __(cmpdi cr2,imm1,two_digit_bignum_header)
+         __(cmpdi cr3,imm1,three_digit_bignum_header)
+         __(cmpdi cr0,imm0,0)	/* cr0: sign of the assembled 64-bit value  */
+         __(beq cr2,1f)	/* 2-digit case below  */
+         __(bne cr3,local_label(misc_set_bad))	/* neither 2 nor 3 digits: too big  */
+         __(lwz imm3,misc_data_offset+8(arg_z))	/* third digit (sign digit)  */
+         __(cmpwi cr0,imm3,0)
+         __(bne cr0,local_label(misc_set_bad))	/* sign digit must be 0 for a u64 value  */
+         __(stdx imm0,arg_x,imm4)
+         __(blr)
+1:       __(blt cr0,local_label(misc_set_bad))	/* 2-digit bignum must be non-negative  */
+         __(stdx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_double_float_vector): /* store double-float arg_z in double-float vector arg_x at index arg_y  */
+         __(extract_typecode(imm0,arg_z))
+         __(la imm4,misc_data_offset(arg_y))	/* fixnum index is already a scaled doubleword byte offset  */
+         __(cmpdi imm0,subtag_double_float)	/* cr0: is arg_z a double-float ?  */
+         __(bne local_label(misc_set_bad))	/* not a double-float  */
+         __(ld imm0,misc_dfloat_offset(arg_z))	/* raw ieee double bits  */
+         __(stdx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_fixnum_vector): /* store fixnum arg_z in fixnum-vector arg_x at index arg_y  */
+         __(extract_lisptag(imm2,arg_z))
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value  */
+         __(cmpdi cr2,imm2,tag_fixnum)	/* cr2: is arg_z a fixnum ?  */
+         __(la imm4,misc_data_offset(arg_y))	/* fixnum index is already a scaled doubleword byte offset  */
+         __(bne cr2,local_label(misc_set_bad))	/* not a fixnum  */
+         __(stdx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_s64): /* store integer arg_z as a signed 64-bit element of arg_x at index arg_y  */
+         __(extract_lisptag(imm2,arg_z))
+         __(extract_fulltag(imm3,arg_z))
+         __(unbox_fixnum(imm0,arg_z))	/* imm0 = raw value (if fixnum)  */
+         __(cmpdi cr2,imm2,tag_fixnum)	/* cr2: is arg_z a fixnum ?  */
+         __(cmpdi cr6,imm3,fulltag_misc)	/* cr6: could arg_z be a bignum ?  */
+         __(la imm4,misc_data_offset(arg_y))	/* fixnum index is already a scaled doubleword byte offset  */
+         __(bne cr2,local_label(sets64_maybe_bignum))
+         __(stdx imm0,arg_x,imm4)	/* any fixnum fits in 64 signed bits  */
+         __(blr)
+local_label(sets64_maybe_bignum): /* non-fixnum: accept exactly a 2-digit bignum  */
+         __(bne cr6,local_label(misc_set_bad))	/* not a uvector at all  */
+         __(getvheader(imm1,arg_z))	/* imm1 = header (encodes digit count)  */
+         __(ld imm0,misc_data_offset(arg_z))	/* both 32-bit digits  */
+         __(cmpdi cr1,imm1,two_digit_bignum_header)
+         __(rotldi imm0,imm0,32)	/* digits are stored low-word-first: swap into natural order  */
+         __(bne cr1,local_label(misc_set_bad))	/* wrong digit count: out of s64 range  */
+         __(stdx imm0,arg_x,imm4)
+         __(blr)
+local_label(misc_set_bad): /* arg_z does not match the array-element-type of arg_x: signal an error  */
+	 __(mr arg_y,arg_z)	/* offending value  */
+	 __(mr arg_z,arg_x)	/* offending vector  */
+	 __(li arg_x,XNOTELT)	/* error code  */
+	 __(set_nargs(3))
+	 __(b _SPksignalerr)
+local_label(misc_set_invalid): /* arg_x's typecode has no settable elements: signal an error  */
+         __(li temp0,XSETBADVEC)	/* error code  */
+         __(set_nargs(4))	/* error code + the three original arguments  */
+         __(vpush(temp0))
+         __(b _SPksignalerr)
+        __else	/* 32-bit: dispatch on the typecode (presumably in imm1 from the common prologue - TODO confirm)  */
+         __(slwi imm1,imm1,2)	/* typecode -> byte offset into the .long jump table  */
+         __(li imm0,LO(local_label(misc_set_jmp)))	/* build the table address in two halves ...  */
+         __(addis imm0,imm0,HA(local_label(misc_set_jmp)))	/* ... low then high-adjusted  */
+         __(lwzx imm0,imm0,imm1)	/* fetch the handler address  */
+         __(mtctr imm0)
+         __(bctr)	/* tail-jump to the handler  */
+local_label(misc_set_jmp):             
+        /* 00-0f  */
+         .long local_label(misc_set_invalid) /* 00 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 01 cons  */
+         .long local_label(misc_set_invalid) /* 02 nodeheader  */
+         .long local_label(misc_set_invalid) /* 03 imm  */
+         .long local_label(misc_set_invalid) /* 04 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 05 nil  */
+         .long local_label(misc_set_invalid) /* 06 misc  */
+         .long local_label(misc_set_u32) /* 07 bignum  */
+         .long local_label(misc_set_invalid) /* 08 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 09 cons  */
+         .long _SPgvset /* 0a ratio  */
+         .long local_label(misc_set_invalid) /* 0b imm  */
+         .long local_label(misc_set_invalid) /* 0c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 0d nil  */
+         .long local_label(misc_set_invalid) /* 0e misc  */
+         .long local_label(misc_set_u32) /* 0f single_float  */
+        /* 10-1f  */
+         .long local_label(misc_set_invalid) /* 10 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 11 cons  */
+         .long local_label(misc_set_invalid) /* 12 nodeheader  */
+         .long local_label(misc_set_invalid) /* 13 imm  */
+         .long local_label(misc_set_invalid) /* 14 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 15 nil  */
+         .long local_label(misc_set_invalid) /* 16 misc  */
+         .long local_label(misc_set_u32) /* 17 double_float  */
+         .long local_label(misc_set_invalid) /* 18 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 19 cons  */
+         .long _SPgvset /* 1a complex  */
+         .long local_label(misc_set_invalid) /* 1b imm  */
+         .long local_label(misc_set_invalid) /* 1c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 1d nil  */
+         .long local_label(misc_set_invalid) /* 1e misc  */
+         .long local_label(misc_set_u32) /* 1f macptr  */
+        /* 20-2f  */
+         .long local_label(misc_set_invalid) /* 20 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 21 cons  */
+         .long _SPgvset /* 22 catch_frame  */
+         .long local_label(misc_set_invalid) /* 23 imm  */
+         .long local_label(misc_set_invalid) /* 24 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 25 nil  */
+         .long local_label(misc_set_invalid) /* 26 misc  */
+         .long local_label(misc_set_u32) /* 27 dead_macptr  */
+         .long local_label(misc_set_invalid) /* 28 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 29 cons  */
+         .long _SPgvset /* 2a function  */
+         .long local_label(misc_set_invalid) /* 2b imm  */
+         .long local_label(misc_set_invalid) /* 2c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 2d nil  */
+         .long local_label(misc_set_invalid) /* 2e misc  */
+         .long local_label(misc_set_u32) /* 2f code_vector  */
+        /* 30-3f  */
+         .long local_label(misc_set_invalid) /* 30 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 31 cons  */
+         .long _SPgvset /* 32 lisp_thread  */
+         .long local_label(misc_set_invalid) /* 33 imm  */
+         .long local_label(misc_set_invalid) /* 34 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 35 nil  */
+         .long local_label(misc_set_invalid) /* 36 misc  */
+         .long local_label(misc_set_u32) /* 37 creole  */
+         .long local_label(misc_set_invalid) /* 38 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 39 cons  */
+         .long _SPgvset /* 3a symbol  */
+         .long local_label(misc_set_invalid) /* 3b imm  */
+         .long local_label(misc_set_invalid) /* 3c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 3d nil  */
+         .long local_label(misc_set_invalid) /* 3e misc  */
+         .long local_label(misc_set_u32) /* 3f xcode_vector  */
+        /* 40-4f  */
+         .long local_label(misc_set_invalid) /* 40 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 41 cons  */
+         .long _SPgvset /* 42 lock  */
+         .long local_label(misc_set_invalid) /* 43 imm  */
+         .long local_label(misc_set_invalid) /* 44 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 45 nil  */
+         .long local_label(misc_set_invalid) /* 46 misc  */
+         .long local_label(misc_set_invalid) /* 47 immheader  */
+         .long local_label(misc_set_invalid) /* 48 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 49 cons  */
+         .long _SPgvset /* 4a hash_vector  */
+         .long local_label(misc_set_invalid) /* 4b imm  */
+         .long local_label(misc_set_invalid) /* 4c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 4d nil  */
+         .long local_label(misc_set_invalid) /* 4e misc  */
+         .long local_label(misc_set_invalid) /* 4f immheader  */
+        /* 50-5f  */
+         .long local_label(misc_set_invalid) /* 50 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 51 cons  */
+         .long _SPgvset /* 52 pool  */
+         .long local_label(misc_set_invalid) /* 53 imm  */
+         .long local_label(misc_set_invalid) /* 54 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 55 nil  */
+         .long local_label(misc_set_invalid) /* 56 misc  */
+         .long local_label(misc_set_invalid) /* 57 immheader  */
+         .long local_label(misc_set_invalid) /* 58 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 59 cons  */
+         .long _SPgvset /* 5a weak  */
+         .long local_label(misc_set_invalid) /* 5b imm  */
+         .long local_label(misc_set_invalid) /* 5c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 5d nil  */
+         .long local_label(misc_set_invalid) /* 5e misc  */
+         .long local_label(misc_set_invalid) /* 5f immheader  */
+        /* 60-6f  */
+         .long local_label(misc_set_invalid) /* 60 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 61 cons  */
+         .long _SPgvset /* 62 package  */
+         .long local_label(misc_set_invalid) /* 63 imm  */
+         .long local_label(misc_set_invalid) /* 64 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 65 nil  */
+         .long local_label(misc_set_invalid) /* 66 misc  */
+         .long local_label(misc_set_invalid) /* 67 immheader  */
+         .long local_label(misc_set_invalid) /* 68 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 69 cons  */
+         .long _SPgvset /* 6a slot_vector  */
+         .long local_label(misc_set_invalid) /* 6b imm  */
+         .long local_label(misc_set_invalid) /* 6c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 6d nil  */
+         .long local_label(misc_set_invalid) /* 6e misc  */
+         .long local_label(misc_set_invalid) /* 6f immheader  */
+        /* 70-7f  */
+         .long local_label(misc_set_invalid) /* 70 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 71 cons  */
+         .long _SPgvset /* 72 instance  */
+         .long local_label(misc_set_invalid) /* 73 imm  */
+         .long local_label(misc_set_invalid) /* 74 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 75 nil  */
+         .long local_label(misc_set_invalid) /* 76 misc  */
+         .long local_label(misc_set_invalid) /* 77 immheader  */
+         .long local_label(misc_set_invalid) /* 78 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 79 cons  */
+         .long _SPgvset /* 7a struct  */
+         .long local_label(misc_set_invalid) /* 7b imm  */
+         .long local_label(misc_set_invalid) /* 7c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 7d nil  */
+         .long local_label(misc_set_invalid) /* 7e misc  */
+         .long local_label(misc_set_invalid) /* 7f immheader  */
+        /* 80-8f  */
+         .long local_label(misc_set_invalid) /* 80 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 81 cons  */
+         .long _SPgvset /* 82 istruct  */
+         .long local_label(misc_set_invalid) /* 83 imm  */
+         .long local_label(misc_set_invalid) /* 84 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 85 nil  */
+         .long local_label(misc_set_invalid) /* 86 misc  */
+         .long local_label(misc_set_invalid) /* 87 immheader  */
+         .long local_label(misc_set_invalid) /* 88 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 89 cons  */
+         .long _SPgvset /* 8a value_cell  */
+         .long local_label(misc_set_invalid) /* 8b imm  */
+         .long local_label(misc_set_invalid) /* 8c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 8d nil  */
+         .long local_label(misc_set_invalid) /* 8e misc  */
+         .long local_label(misc_set_invalid) /* 8f immheader  */
+        /* 90-9f  */
+         .long local_label(misc_set_invalid) /* 90 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 91 cons  */
+         .long _SPgvset /* 92 xfunction  */
+         .long local_label(misc_set_invalid) /* 93 imm  */
+         .long local_label(misc_set_invalid) /* 94 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 95 nil  */
+         .long local_label(misc_set_invalid) /* 96 misc  */
+         .long local_label(misc_set_invalid) /* 97 immheader  */
+         .long local_label(misc_set_invalid) /* 98 even_fixnum  */
+         .long local_label(misc_set_invalid) /* 99 cons  */
+         .long _SPgvset /* 9a arrayH  */
+         .long local_label(misc_set_invalid) /* 9b imm  */
+         .long local_label(misc_set_invalid) /* 9c odd_fixnum  */
+         .long local_label(misc_set_invalid) /* 9d nil  */
+         .long local_label(misc_set_invalid) /* 9e misc  */
+         .long local_label(misc_set_invalid) /* 9f immheader  */
+        /* a0-af  */
+         .long local_label(misc_set_invalid) /* a0 even_fixnum  */
+         .long local_label(misc_set_invalid) /* a1 cons  */
+         .long _SPgvset /* a2 vectorH  */
+         .long local_label(misc_set_invalid) /* a3 imm  */
+         .long local_label(misc_set_invalid) /* a4 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* a5 nil  */
+         .long local_label(misc_set_invalid) /* a6 misc  */
+         .long local_label(misc_set_single_float_vector) /* a7 sf vector  */
+         .long local_label(misc_set_invalid) /* a8 even_fixnum  */
+         .long local_label(misc_set_invalid) /* a9 cons  */
+         .long _SPgvset /* aa vectorH  */
+         .long local_label(misc_set_invalid) /* ab imm  */
+         .long local_label(misc_set_invalid) /* ac odd_fixnum  */
+         .long local_label(misc_set_invalid) /* ad nil  */
+         .long local_label(misc_set_invalid) /* ae misc  */
+         .long local_label(misc_set_u32) /* af u32  */
+        /* b0-bf  */
+         .long local_label(misc_set_invalid) /* b0 even_fixnum  */
+         .long local_label(misc_set_invalid) /* b1 cons  */
+         .long local_label(misc_set_invalid) /* b2 node  */
+         .long local_label(misc_set_invalid) /* b3 imm  */
+         .long local_label(misc_set_invalid) /* b4 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* b5 nil  */
+         .long local_label(misc_set_invalid) /* b6 misc  */
+         .long local_label(misc_set_s32) /* b7 s32  */
+         .long local_label(misc_set_invalid) /* b8 even_fixnum  */
+         .long local_label(misc_set_invalid) /* b9 cons  */
+         .long local_label(misc_set_invalid) /* ba nodeheader  */
+         .long local_label(misc_set_invalid) /* bb imm  */
+         .long local_label(misc_set_invalid) /* bc odd_fixnum  */
+         .long local_label(misc_set_invalid) /* bd nil  */
+         .long local_label(misc_set_invalid) /* be misc  */
+         .long local_label(misc_set_fixnum_vector) /* bf fixnum_vector  */
+        /* c0-cf  */
+         .long local_label(misc_set_invalid) /* c0 even_fixnum  */
+         .long local_label(misc_set_invalid) /* c1 cons  */
+         .long local_label(misc_set_invalid) /* c2 nodeheader  */
+         .long local_label(misc_set_invalid) /* c3 imm  */
+         .long local_label(misc_set_invalid) /* c4 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* c5 nil  */
+         .long local_label(misc_set_invalid) /* c6 misc  */
+         .long local_label(misc_set_new_string) /* c7 new_string  */
+         .long local_label(misc_set_invalid) /* c8 even_fixnum  */
+         .long local_label(misc_set_invalid) /* c9 cons  */
+         .long local_label(misc_set_invalid) /* ca nodeheader  */
+         .long local_label(misc_set_invalid) /* cb imm  */
+         .long local_label(misc_set_invalid) /* cc odd_fixnum  */
+         .long local_label(misc_set_invalid) /* cd nil  */
+         .long local_label(misc_set_invalid) /* ce misc  */
+         .long local_label(misc_set_u8) /* cf u8  */
+        /* d0-df  */
+         .long local_label(misc_set_invalid) /* d0 even_fixnum  */
+         .long local_label(misc_set_invalid) /* d1 cons  */
+         .long local_label(misc_set_invalid) /* d2 nodeheader  */
+         .long local_label(misc_set_invalid) /* d3 imm  */
+         .long local_label(misc_set_invalid) /* d4 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* d5 nil  */
+         .long local_label(misc_set_invalid) /* d6 misc  */
+         .long local_label(misc_set_s8) /* d7 s8  */
+         .long local_label(misc_set_invalid) /* d8 even_fixnum  */
+         .long local_label(misc_set_invalid) /* d9 cons  */
+         .long local_label(misc_set_invalid) /* da nodeheader  */
+         .long local_label(misc_set_invalid) /* db imm  */
+         .long local_label(misc_set_invalid) /* dc odd_fixnum  */
+         .long local_label(misc_set_invalid) /* dd nil  */
+         .long local_label(misc_set_invalid) /* de misc  */
+         .long local_label(misc_set_old_string) /* df (old) simple_base_string  */
+        /* e0-ef  */
+         .long local_label(misc_set_invalid) /* e0 even_fixnum  */
+         .long local_label(misc_set_invalid) /* e1 cons  */
+         .long local_label(misc_set_invalid) /* e2 nodeheader  */
+         .long local_label(misc_set_invalid) /* e3 imm  */
+         .long local_label(misc_set_invalid) /* e4 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* e5 nil  */
+         .long local_label(misc_set_invalid) /* e6 misc  */
+         .long local_label(misc_set_u16) /* e7 u16  */
+         .long local_label(misc_set_invalid) /* e8 even_fixnum  */
+         .long local_label(misc_set_invalid) /* e9 cons  */
+         .long local_label(misc_set_invalid) /* ea nodeheader  */
+         .long local_label(misc_set_invalid) /* eb imm  */
+         .long local_label(misc_set_invalid) /* ec odd_fixnum  */
+         .long local_label(misc_set_invalid) /* ed nil  */
+         .long local_label(misc_set_invalid) /* ee misc  */
+         .long local_label(misc_set_s16) /* ef s16  */
+        /* f0-ff  */
+         .long local_label(misc_set_invalid) /* f0 even_fixnum  */
+         .long local_label(misc_set_invalid) /* f1 cons  */
+         .long local_label(misc_set_invalid) /* f2 nodeheader  */
+         .long local_label(misc_set_invalid) /* f3 imm  */
+         .long local_label(misc_set_invalid) /* f4 odd_fixnum  */
+         .long local_label(misc_set_invalid) /* f5 nil  */
+         .long local_label(misc_set_invalid) /* f6 misc  */
+         .long local_label(misc_set_double_float_vector) /* f7 df vector  */
+         .long local_label(misc_set_invalid) /* f8 even_fixnum  */
+         .long local_label(misc_set_invalid) /* f9 cons  */
+         .long local_label(misc_set_invalid) /* fa nodeheader  */
+         .long local_label(misc_set_invalid) /* fb imm  */
+         .long local_label(misc_set_invalid) /* fc odd_fixnum  */
+         .long local_label(misc_set_invalid) /* fd nil  */
+         .long local_label(misc_set_invalid) /* fe misc  */
+         .long local_label(misc_set_bit_vector) /* ff bit_vector  */
+
+local_label(misc_set_u32): /* store 32-bit unsigned integer arg_z in arg_x at index arg_y  */
+	/* Either a non-negative fixnum, a positive one-digit bignum, */
+	/* or a two-digit bignum whose sign-digit is 0 is ok.  */
+	 __(extract_lisptag(imm2,arg_z))
+	 __(srawi. imm1,arg_z,fixnum_shift)	/* imm1 = unboxed value; cr0 records its sign  */
+         __(cmpwi cr5,imm2,tag_fixnum)	/* cr5: is arg_z a fixnum ?  */
+         __(la imm0,misc_data_offset(arg_y))	/* fixnum index is already a scaled word byte offset  */
+         __(cmpwi cr7,imm2,tag_misc)	/* cr7: could arg_z be a bignum ?  */
+	 __(bne cr5,local_label(set_not_fixnum_u32))
+	 __(blt- cr0,local_label(set_bad))	/* negative fixnum  */
+local_label(set_set32): /* store the unboxed 32-bit value in imm1  */
+	 __(stwx imm1,arg_x,imm0)
+	 __(blr)
+local_label(set_not_fixnum_u32):
+	 __(bne cr7,local_label(set_bad))	/* not a uvector at all  */
+	 __(extract_header(imm2,arg_z))	/* imm2 = header (encodes digit count)  */
+	 __(cmpri(cr0,imm2,one_digit_bignum_header))
+	 __(cmpri(cr1,imm2,two_digit_bignum_header))
+	 __(vrefr(imm1,arg_z,0))	/* imm1 = low digit  */
+	 __(cmpri(cr2,imm1,0))	/* cr2: sign of that digit  */
+	 __(bne cr0,local_label(set_not_1_digit_u32))
+	 __(bge cr2,local_label(set_set32))	/* non-negative 1-digit bignum: ok  */
+	 __(b local_label(set_bad))
+local_label(set_not_1_digit_u32):
+	 __(bne- cr1,local_label(set_bad))	/* more than two digits: too big  */
+	 __(vrefr(imm2,arg_z,1))	/* imm2 = sign digit  */
+	 __(cmpri(cr0,imm2,0))
+	 __(bne- cr1,local_label(set_bad))	/* NOTE(review): redundant - cr1 was already tested above  */
+	 __(beq cr0,local_label(set_set32))	/* sign digit 0: value fits in 32 unsigned bits  */
+local_label(set_bad):	/* falls through from above when the sign digit is nonzero  */
+	/* arg_z does not match the array-element-type of arg_x.  */
+	 __(mr arg_y,arg_z)	/* offending value  */
+	 __(mr arg_z,arg_x)	/* offending vector  */
+	 __(li arg_x,XNOTELT)	/* error code  */
+	 __(set_nargs(3))
+	 __(b _SPksignalerr)
+local_label(misc_set_fixnum_vector):   
+         __(extract_lisptag(imm2,arg_z))
+         __(la imm0,misc_data_offset(arg_y))
+         __(cmpwi cr5,imm2,tag_fixnum)
+         __(unbox_fixnum(imm1,arg_z))
+         __(bne cr5,local_label(set_bad))
+         __(stwx imm1,arg_x,imm0)
+         __(blr)
+local_label(misc_set_new_string):   
+         __(clrlwi imm2,arg_z,ncharcodebits)
+         __(la imm0,misc_data_offset(arg_y))
+         __(cmpwi cr5,imm2,subtag_character)
+         __(srwi imm1,arg_z,charcode_shift)
+         __(bne cr5,local_label(set_bad))
+         __(stwx imm1,arg_x,imm0)
+         __(blr)
+local_label(misc_set_s32):
+         __(extract_lisptag(imm2,arg_z))
+         __(cmpwi cr5,imm2,tag_fixnum)
+         __(cmpwi cr7,imm2,tag_misc)
+         __(la imm0,misc_data_offset(arg_y))
+	 __(unbox_fixnum(imm1,arg_z))
+	 __(beq cr5,local_label(set_set32))
+	 __(bne cr7,local_label(set_bad))
+	 __(extract_header(imm2,arg_z))
+	 __(cmpri(cr0,imm2,one_digit_bignum_header))
+	 __(vrefr(imm1,arg_z,0))
+	 __(bne- cr0,local_label(set_bad))
+	 __(strx(imm1,arg_x,imm0))
+	 __(blr)
+local_label(misc_set_single_float_vector):
+         __(extract_lisptag(imm2,arg_z))
+         __(cmpwi cr7,imm2,tag_misc)
+         __(la imm0,misc_data_offset(arg_y))
+	 __(bne- cr7,local_label(set_bad))
+	 __(extract_header(imm2,arg_z))
+	 __(cmpri(cr0,imm2,single_float_header))
+	 __(bne- cr0,local_label(set_bad))
+	 __(ldr(imm1,single_float.value(arg_z)))
+	 __(strx(imm1,arg_x,imm0))
+	 __(blr)
+local_label(misc_set_u8):               
+	 __(extract_lisptag(imm2,arg_z))
+	 __(srwi imm0,arg_y,2)
+	 __(la imm0,misc_data_offset(imm0))
+	 __(extract_unsigned_byte_bits_(imm1,arg_z,8))
+	 __(unbox_fixnum(imm1,arg_z))
+	 __(bne- cr0,local_label(set_bad))
+	 __(stbx imm1,arg_x,imm0)
+	 __(blr)
+local_label(misc_set_old_string):
+	 __(srwi imm0,arg_y,2)
+	 __(extract_lowbyte(imm2,arg_z))
+	 __(cmpri(cr2,imm2,subtag_character))
+	 __(la imm0,misc_data_offset(imm0))
+	 __(srwi imm1,arg_z,charcode_shift)
+	 __(bne- cr2,local_label(set_bad))
+	 __(stbx imm1,arg_x,imm0)
+	 __(blr)
+local_label(misc_set_s8):
+	 __(extract_lisptag(imm2,arg_z))
+         __(srwi imm0,arg_y,2)
+	 __(unbox_fixnum(imm1,arg_z))
+         __(la imm0,misc_data_offset(imm0))
+         __(cmpwi cr5,imm2,tag_fixnum)
+	 __(extsb imm2,imm1)
+	 __(cmpw cr0,imm2,imm1)
+	 __(bne- cr5,local_label(set_bad))
+	 __(bne- cr0,local_label(set_bad))
+	 __(stbx imm1,arg_x,imm0)
+	 __(blr)
+local_label(misc_set_u16):         
+	 __(srwi imm0,arg_y,1)
+	 __(extract_unsigned_byte_bits_(imm1,arg_z,16))
+	 __(unbox_fixnum(imm1,arg_z))
+	 __(la imm0,misc_data_offset(imm0))
+	 __(bne- cr0,local_label(set_bad))
+	 __(sthx imm1,arg_x,imm0)
+	 __(blr)
+local_label(misc_set_s16):
+         __(extract_lisptag(imm2,arg_z))
+         __(srwi imm0,arg_y,1)
+	 __(unbox_fixnum(imm1,arg_z))
+         __(cmpwi cr5,imm2,tag_fixnum)
+         __(la imm0,misc_data_offset(imm0))
+	 __(extsh imm2,imm1)
+	 __(cmpw cr0,imm2,imm1)
+	 __(bne- cr5,local_label(set_bad))
+	 __(bne- cr0,local_label(set_bad))
+	 __(sthx imm1,arg_x,imm0)
+	 __(blr)
+local_label(misc_set_bit_vector):	
+	 __(cmplwi cr2,arg_z,fixnumone)   /* reject anything that is not a (boxed) bit   */
+	 __(extrwi imm1,arg_y,5,32-(fixnumshift+5))	/* imm1 = bitnum  */
+	 __(extlwi imm2,arg_z,1,31-fixnumshift)
+	 __(srw imm2,imm2,imm1)
+	 __(lis imm3,0x8000)
+	 __(rlwinm imm0,arg_y,32-5,5,31-fixnumshift)
+	 __(la imm0,misc_data_offset(imm0))
+	 __(srw imm3,imm3,imm1)
+	 __(bgt- cr2,local_label(set_bad))
+	 __(lwzx imm1,arg_x,imm0)
+	 __(andc imm1,imm1,imm3)
+	 __(or imm1,imm1,imm2)
+	 __(stwx imm1,arg_x,imm0)
+	 __(blr)
+
+local_label(misc_set_double_float_vector):
+         __(extract_lisptag(imm2,arg_z))
+	 __(slwi imm0,arg_y,1)
+         __(cmpwi cr7,imm2,tag_misc)
+	 __(la imm0,misc_dfloat_offset(imm0))
+         __(bne- cr7,local_label(set_bad))
+	 __(extract_header(imm2,arg_z))
+	 __(cmpri(cr0,imm2,double_float_header))
+	 __(bne- cr0,local_label(set_bad))
+	 __(lwz imm1,double_float.value(arg_z))
+	 __(lwz imm2,double_float.value+4(arg_z))
+	 __(stwx imm1,arg_x,imm0)
+	 __(la imm0,4(imm0))
+	 __(stwx imm2,arg_x,imm0)
+	 __(blr)
+local_label(misc_set_invalid):  
+         __(li temp0,XSETBADVEC)        
+         __(set_nargs(4))
+         __(vpush(temp0))
+         __(b _SPksignalerr)                
+        __endif
+
+/* misc_set (vector index newval).  Pretty damned similar to  */
+/* misc_ref, as one might imagine.  */
+
+_spentry(misc_set)
+	/* misc_set (vector index newval): trap unless arg_x is a uvector  */
+	/* and arg_y is a fixnum, trap if the index is out of range, then  */
+	/* extract the vector's subtag and dispatch via misc_set_common.  */
+	__(trap_unless_fulltag_equal(arg_x,fulltag_misc,imm0))
+	__(trap_unless_lisptag_equal(arg_y,tag_fixnum,imm0))
+	__(vector_length(imm0,arg_x,imm1))
+	__(trlge(arg_y,imm0))	/* trap if index >= length  */
+	__(extract_lowbyte(imm1,imm1))	/* imm1 = subtag byte of the header  */
+        __(b local_label(misc_set_common))
+        
+/* "spread" the lexpr in arg_z.  */
+/* ppc2-invoke-fn assumes that temp1 is preserved here.  */
+_spentry(spread_lexprz)
+	/* "Spread" the lexpr in arg_z: distribute its values into the  */
+	/* arg regs (arg_x/arg_y/arg_z) and, if more than 3, vpush the  */
+	/* excess.  imm0 = boxed count of values in the lexpr.  */
+	/* NOTE(review): ppc2-invoke-fn assumes temp1 is preserved here;  */
+	/* temp1 is indeed never written below.  */
+	__(ldr(imm0,0(arg_z)))
+	__(cmpri(cr3,imm0,3<<fixnumshift))
+	__(cmpri(cr4,imm0,2<<fixnumshift))
+	__(add imm1,arg_z,imm0)	/* imm1 -> past last lexpr value  */
+	__(cmpri(cr0,imm0,0))
+	__(add nargs,nargs,imm0)
+	__(cmpri(cr1,nargs,0))
+	__(cmpri(cr2,nargs,2<<fixnumshift))
+	__(la imm1,node_size(imm1))
+	__(bge cr3,9f)	/* 3 or more values in lexpr  */
+	__(beq cr4,2f)	/* exactly 2 values  */
+	__(bne cr0,1f)	/* exactly 1 value  */
+	/* lexpr count was 0; vpop the arg regs that  */
+	/* were vpushed by the caller  */
+	__(beqlr cr1)	/* return if (new) nargs = 0  */
+	__(vpop(arg_z))
+	__(bltlr cr2)	/* return if (new) nargs < 2  */
+	__(vpop(arg_y))
+	__(beqlr cr2)	/* return if (new) nargs = 2  */
+	__(vpop(arg_x))
+	__(blr)
+
+	/* vpush args from the lexpr until we have only  */
+	/* three left, then assign them to arg_x, arg_y,  */
+	/* and arg_z.  */
+8:
+	__(cmpri(cr3,imm0,4<<fixnumshift))	/* compare pre-decrement count  */
+	__(subi imm0,imm0,fixnumone)
+	__(ldru(arg_z,-node_size(imm1)))	/* walk downward with update  */
+	__(vpush(arg_z))
+9:
+	__(bne cr3,8b)	/* loop until exactly 3 values remain  */
+	__(ldr(arg_x,-node_size*1(imm1)))
+	__(ldr(arg_y,-node_size*2(imm1)))
+	__(ldr(arg_z,-node_size*3(imm1)))
+	__(blr)
+
+	/* lexpr count is two: set arg_y, arg_z from the  */
+	/* lexpr, maybe vpop arg_x  */
+2:	
+	__(ldr(arg_y,-node_size*1(imm1)))
+	__(ldr(arg_z,-node_size*2(imm1)))
+	__(beqlr cr2)		/* return if (new) nargs = 2  */
+	__(vpop(arg_x))
+	__(blr)
+
+	/* lexpr count is one: set arg_z from the lexpr,  */
+	/* maybe vpop arg_y, arg_x  */
+1:	
+	__(ldr(arg_z,-node_size(imm1)))
+	__(bltlr cr2)		/* return if (new) nargs < 2  */
+	__(vpop(arg_y))
+	__(beqlr cr2)		/* return if (new) nargs = 2  */
+	__(vpop(arg_x))
+	__(blr)
+        
+		
+_spentry(reset)
+	.globl _SPthrow
+	/* Stack overflow recovery: throw one XSTKOVER condition to the  */
+	/* toplevel catch tag.  */
+	__(nop)
+	__(ref_nrs_value(temp0,toplcatch))	/* catch tag  */
+	__(li temp1,XSTKOVER)	/* error code thrown as the single value  */
+	__(vpush(temp0))
+	__(vpush(temp1))
+	__(set_nargs(1))
+	__(b _SPthrow)
+
+	
+/* "slide" nargs worth of values up the vstack.  IMM0 contains  */
+/* the difference between the current VSP and the target.  */
+_spentry(mvslide)
+	/* "Slide" nargs worth of values up the vstack; imm0 is the  */
+	/* (byte) difference between the current VSP and the target.  */
+	/* On exit, vsp points at the moved values.  */
+	__(cmpri(cr0,nargs,0))
+	__(mr imm3,nargs)	/* imm3 = values left to move  */
+	__(add imm2,vsp,nargs)
+	__(add imm2,imm2,imm0)	/* imm2 -> above destination  */
+	__(add imm0,vsp,nargs)	/* imm0 -> above source  */
+	__(beq 2f)	/* nothing to move  */
+1:
+	__(cmpri(cr0,imm3,1<<fixnumshift))
+	__(subi imm3,imm3,1<<fixnumshift)
+	__(ldru(temp0,-node_size(imm0)))	/* copy top-down, with update  */
+	__(stru(temp0,-node_size(imm2)))
+	__(bne cr0,1b)
+2:
+	__(mr vsp,imm2)
+	__(blr)
+
+/* Build a new TSP area to hold nargs worth of multiple-values.  */
+/* Pop the multiple values off of the vstack.  */
+/* The new TSP frame will look like this:  */
+/*  */
+/*+--------+-------+-------+---------+--------+--------+--------+======+----------+ */
+/*| ptr to | zero  | nargs | ptr to  | valn-1 | valn-2 | val-0  | ???? | prev TSP |  */
+/*|  prev  |       |       |  prev   |        |        |        | fill |          |  */
+/*| TSP    |       |       | segment |        |        |        |      |          | */
+/*+--------+-------+-------+---------+--------+--------+--------+------+----------+  */
+/*  */
+/* e.g., the first multiple value goes in the last cell in the frame, the  */
+/* count of values goes in the first word, and the word after the value count  */
+/* is 0 if the number of values is even (for alignment).  */
+/* Subsequent calls to .SPadd_values preserve this alignment.  */
+/* .SPrecover_values is therefore pretty simple.  */
+
+_spentry(save_values)
+	/* Save nargs multiple values from the vstack into a new TSP  */
+	/* frame (layout described above).  imm1 = tsp before the call,  */
+	/* used as the frame's backlink.  */
+	__(mr imm1,tsp)
+
+        /* common exit: nargs = values in this set, imm1 = ptr to tsp before  */
+        /* call to save_values.  Also branched to from add_values below,  */
+        /* where imm1 is the existing saved-values link.  */
+local_label(save_values_to_tsp):
+	__(mr imm2,tsp)	/* previous tsp, stored after the count  */
+	__(dnode_align(imm0,nargs,tsp_frame.fixed_overhead+(2*node_size))) /* count, link  */
+	__(TSP_Alloc_Var_Boxed_nz(imm0,imm3))
+	__(str(imm1,tsp_frame.backlink(tsp))) /* keep one tsp "frame" as far as rest of lisp is concerned  */
+	__(str(nargs,tsp_frame.data_offset(tsp)))
+	__(str(imm2,tsp_frame.data_offset+node_size(tsp))) /* previous tsp  */
+	__(la imm3,tsp_frame.data_offset+node_size*2(tsp))
+	__(add imm3,imm3,nargs)	/* imm3 -> past last value cell in frame  */
+	__(add imm0,vsp,nargs)	/* imm0 -> above the values on the vstack  */
+	__(cmpr(cr0,imm0,vsp))
+	__(b 2f)
+1:
+	__(ldru(arg_z,-node_size(imm0)))	/* copy values top-down  */
+	__(cmpr(cr0,imm0,vsp))
+	__(stru(arg_z,-node_size(imm3)))
+2:
+	__(bne cr0,1b)
+	__(add vsp,vsp,nargs) /*  discard values  */
+	__(blr)
+	
+
+/* Add the multiple values that are on top of the vstack to the set  */
+/* saved in the top tsp frame, popping them off of the vstack in the  */
+/* process.  It is an error (a bad one) if the TSP contains something  */
+/* other than a previously saved set of multiple-values.  */
+/* Since adding to the TSP may cause a new TSP segment to be allocated,  */
+/* each add_values call adds another linked element to the list of  */
+/* values. This makes recover_values harder.  */
+
+_spentry(add_values)
+	/* Add nargs values from the vstack to the set already saved in  */
+	/* the top tsp frame (see comment above); no-op when nargs = 0.  */
+	__(cmpri(cr0,nargs,0))
+	__(ldr(imm1,0(tsp)))	/* existing link, passed to save_values_to_tsp  */
+	__(bne cr0,local_label(save_values_to_tsp))
+	__(blr)
+        
+/* On entry, R11->callback-index  */
+/* Restore lisp context, then funcall #'%pascal-functions% with  */
+/* two args: callback-index, args-ptr (a macptr pointing to the args on the stack)  */
+_spentry(poweropen_callback)
+	/* Foreign code is calling back into lisp.  On entry, R11 holds  */
+	/* the callback index.  Save the C register state, re-enter the  */
+	/* lisp context, funcall #'%pascal-functions% with the boxed  */
+	/* callback-index and a pointer to the saved args, then restore  */
+	/* the C state and return to the foreign caller.  */
+        __ifdef(`rTOC')
+         __(mr r11,rTOC)
+        __endif
+	/* Save C argument registers  */
+	__(str(r3,c_frame.param0(sp)))
+	__(str(r4,c_frame.param1(sp)))
+	__(str(r5,c_frame.param2(sp)))
+	__(str(r6,c_frame.param3(sp)))
+	__(str(r7,c_frame.param4(sp)))
+	__(str(r8,c_frame.param5(sp)))
+	__(str(r9,c_frame.param6(sp)))
+	__(str(r10,c_frame.param7(sp)))
+	__(mflr imm3)
+	__(str(imm3,c_frame.savelr(sp)))
+	__(mfcr imm0)
+	__(str(imm0,c_frame.crsave(sp)))
+
+	/* Save the non-volatile registers on the sp stack  */
+	/* This is a non-standard stack frame, but no one will ever see it,  */
+        /* so it doesn't matter. It will look like more of the stack frame pushed below.  */
+	__(stru(sp,-(stack_align(c_reg_save.size))(sp)))
+        __(str(r13,c_reg_save.save_gprs+(0*node_size)(sp)))
+        __(str(r14,c_reg_save.save_gprs+(1*node_size)(sp)))
+        __(str(r15,c_reg_save.save_gprs+(2*node_size)(sp)))
+        __(str(r16,c_reg_save.save_gprs+(3*node_size)(sp)))
+        __(str(r17,c_reg_save.save_gprs+(4*node_size)(sp)))
+        __(str(r18,c_reg_save.save_gprs+(5*node_size)(sp)))
+        __(str(r19,c_reg_save.save_gprs+(6*node_size)(sp)))
+        __(str(r20,c_reg_save.save_gprs+(7*node_size)(sp)))
+        __(str(r21,c_reg_save.save_gprs+(8*node_size)(sp)))
+        __(str(r22,c_reg_save.save_gprs+(9*node_size)(sp)))
+        __(str(r23,c_reg_save.save_gprs+(10*node_size)(sp)))
+        __(str(r24,c_reg_save.save_gprs+(11*node_size)(sp)))
+        __(str(r25,c_reg_save.save_gprs+(12*node_size)(sp)))
+        __(str(r26,c_reg_save.save_gprs+(13*node_size)(sp)))
+        __(str(r27,c_reg_save.save_gprs+(14*node_size)(sp)))
+        __(str(r28,c_reg_save.save_gprs+(15*node_size)(sp)))
+        __(str(r29,c_reg_save.save_gprs+(16*node_size)(sp)))
+        __(str(r30,c_reg_save.save_gprs+(17*node_size)(sp)))
+        __(str(r31,c_reg_save.save_gprs+(18*node_size)(sp)))
+        __(stfd f1,c_reg_save.save_fprs+(0*8)(sp))
+        __(stfd f2,c_reg_save.save_fprs+(1*8)(sp))
+        __(stfd f3,c_reg_save.save_fprs+(2*8)(sp))
+        __(stfd f4,c_reg_save.save_fprs+(3*8)(sp))
+        __(stfd f5,c_reg_save.save_fprs+(4*8)(sp))
+        __(stfd f6,c_reg_save.save_fprs+(5*8)(sp))
+        __(stfd f7,c_reg_save.save_fprs+(6*8)(sp))
+        __(stfd f8,c_reg_save.save_fprs+(7*8)(sp))
+        __(stfd f9,c_reg_save.save_fprs+(8*8)(sp))
+        __(stfd f10,c_reg_save.save_fprs+(9*8)(sp))
+        __(stfd f11,c_reg_save.save_fprs+(10*8)(sp))
+        __(stfd f12,c_reg_save.save_fprs+(11*8)(sp))
+        __(stfd f13,c_reg_save.save_fprs+(12*8)(sp))
+	__(check_stack_alignment(r0))
+	/* Save the FPSCR, then establish the lisp FP constants:  */
+	/* fp_s32conv (0x4330000080000000, used for int<->float  */
+	/* conversion) and fp_zero.  */
+	__(mffs f0)
+	__(stfd f0,c_reg_save.save_fp_zero(sp))
+	__(lwz r31,c_reg_save.save_fp_zero+4(sp))	/* recover FPSCR image  */
+	__(stw r31,c_reg_save.save_fpscr(sp))
+	__(lwi(r30,0x43300000))
+	__(lwi(r31,0x80000000))
+	__(stw r30,c_reg_save.save_fp_zero(sp))
+	__(stw r31,c_reg_save.save_fp_zero+4(sp))
+	__(stfd fp_s32conv,c_reg_save.save_fps32conv(sp))
+	__(lfd fp_s32conv,c_reg_save.save_fp_zero(sp))
+	__(stfd fp_zero,c_reg_save.save_fp_zero(sp))
+	__(lfs fp_zero,lisp_globals.short_float_zero(0))	/* ensure that fp_zero contains 0.0  */
+
+/* Restore rest of Lisp context.  */
+/* Could spread out the memory references here to gain a little speed  */
+
+	__(li loc_pc,0)
+	__(li fn,0)                     /* subprim, not a lisp function  */
+	__(li temp3,0)
+	__(li temp2,0)
+	__(li temp1,0)
+	__(li temp0,0)
+	__(li arg_x,0)
+	__(box_fixnum(arg_y,r11))	/* callback-index  */
+        __(la arg_z,c_reg_save.save_fprs(sp))
+        __(str(arg_z,stack_align(c_reg_save.size)+c_frame.unused(sp)))
+	__(la arg_z,stack_align(c_reg_save.size)+c_frame.param0(sp))	/* parameters (tagged as a fixnum)  */
+
+	/* Recover lisp thread context. Have to call C code to do so.  */
+	__(ref_global(r12,get_tcr))
+        __ifdef(`rTOC')
+         __(ld rTOC,8(r12))
+         __(ld r12,0(r12))
+        __endif
+	__(mtctr r12)
+        __(li r3,1)
+	__(stru(sp,-(stack_align(c_frame.minsiz))(sp)))
+	__(bctrl)
+	__(la rcontext,TCR_BIAS(r3))
+	__(la sp,(stack_align(c_frame.minsiz))(sp))
+
+	/* Enter the lisp register regime: zero the boxed registers,  */
+	/* restore vsp/tsp/allocptr/allocbase and the lisp FPSCR, and  */
+	/* mark the thread as being in lisp.  */
+	__(ldr(vsp,tcr.save_vsp(rcontext)))
+	__(ldr(tsp,tcr.save_tsp(rcontext)))		
+	__(li rzero,0)
+	__(li imm0,TCR_STATE_LISP)
+	__(mtxer rzero) /* lisp wants the overflow bit being clear  */
+        __(mtctr rzero)
+	__(li save0,0)
+	__(li save1,0)
+	__(li save2,0)
+	__(li save3,0)
+	__(li save4,0)
+	__(li save5,0)
+	__(li save6,0)
+	__(li save7,0)
+	__(lfd f0,tcr.lisp_fpscr(rcontext))
+	__(mtfsf 0xff,f0)
+	__(li allocbase,0)
+	__(li allocptr,0)	
+	__(str(imm0,tcr.valence(rcontext)))
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	
+        __(restore_saveregs(vsp))
+
+	/* load nargs and callback to the lisp  */
+	__(set_nargs(2))
+	__(ldr(imm2,tcr.cs_area(rcontext)))
+	__(ldr(imm4,area.active(imm2)))
+	__(stru(imm4,-lisp_frame.size(sp)))
+	__(str(imm3,lisp_frame.savelr(sp)))
+	__(li fname,nrs.callbacks)	/* %pascal-functions%  */
+	__(call_fname)
+	__(ldr(imm2,lisp_frame.backlink(sp)))
+	__(ldr(imm3,tcr.cs_area(rcontext)))
+	__(str(imm2,area.active(imm3)))
+	__(discard_lisp_frame())
+	/* save_vsp will be restored from ff_call's stack frame, but  */
+	/* I included it here for consistency.  */
+	/* save_tsp is set below after we exit Lisp context.  */
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	/* Exit lisp context  */
+	__(li imm1,TCR_STATE_FOREIGN)
+	__(str(imm1,tcr.valence(rcontext)))
+	/* Restore the non-volatile registers & fpscr  */
+	__(lfd fp_zero,c_reg_save.save_fp_zero(sp))
+	__(lwz r31,c_reg_save.save_fpscr(sp))
+	__(stw r31,c_reg_save.save_fp_zero+4(sp))
+	__(lfd f0,c_reg_save.save_fp_zero(sp))
+	__(mtfsf 0xff,f0)
+	__(ldr(r13,c_reg_save.save_gprs+(0*node_size)(sp)))
+	__(ldr(r14,c_reg_save.save_gprs+(1*node_size)(sp)))
+	__(ldr(r15,c_reg_save.save_gprs+(2*node_size)(sp)))
+	__(ldr(r16,c_reg_save.save_gprs+(3*node_size)(sp)))
+	__(ldr(r17,c_reg_save.save_gprs+(4*node_size)(sp)))
+	__(ldr(r18,c_reg_save.save_gprs+(5*node_size)(sp)))
+	__(ldr(r19,c_reg_save.save_gprs+(6*node_size)(sp)))
+	__(ldr(r20,c_reg_save.save_gprs+(7*node_size)(sp)))
+	__(ldr(r21,c_reg_save.save_gprs+(8*node_size)(sp)))
+	__(ldr(r22,c_reg_save.save_gprs+(9*node_size)(sp)))
+	__(ldr(r23,c_reg_save.save_gprs+(10*node_size)(sp)))
+	__(ldr(r24,c_reg_save.save_gprs+(11*node_size)(sp)))
+	__(ldr(r25,c_reg_save.save_gprs+(12*node_size)(sp)))
+	__(ldr(r26,c_reg_save.save_gprs+(13*node_size)(sp)))
+	__(ldr(r27,c_reg_save.save_gprs+(14*node_size)(sp)))
+	__(ldr(r28,c_reg_save.save_gprs+(15*node_size)(sp)))
+	__(ldr(r29,c_reg_save.save_gprs+(16*node_size)(sp)))
+	__(ldr(r30,c_reg_save.save_gprs+(17*node_size)(sp)))
+	__(ldr(r31,c_reg_save.save_gprs+(18*node_size)(sp)))
+        __(lfd f1,c_reg_save.save_fprs+(0*8)(sp))
+        __(lfd f2,c_reg_save.save_fprs+(1*8)(sp))
+        __(lfd f3,c_reg_save.save_fprs+(2*8)(sp))
+        __(lfd f4,c_reg_save.save_fprs+(3*8)(sp))
+        __(lfd f5,c_reg_save.save_fprs+(4*8)(sp))
+        __(lfd f6,c_reg_save.save_fprs+(5*8)(sp))
+        __(lfd f7,c_reg_save.save_fprs+(6*8)(sp))
+        __(lfd f8,c_reg_save.save_fprs+(7*8)(sp))
+        __(lfd f9,c_reg_save.save_fprs+(8*8)(sp))
+        __(lfd f10,c_reg_save.save_fprs+(9*8)(sp))
+        __(lfd f11,c_reg_save.save_fprs+(10*8)(sp))
+        __(lfd f12,c_reg_save.save_fprs+(11*8)(sp))
+        __(lfd f13,c_reg_save.save_fprs+(12*8)(sp))
+	__(lfd fp_s32conv,c_reg_save.save_fps32conv(sp))
+	__(ldr(sp,0(sp)))	/* pop the register-save frame  */
+	__(ldr(r3,c_frame.param0(sp)))
+	__(ldr(r4,c_frame.param1(sp)))
+	__(ldr(r5,c_frame.param2(sp)))
+	__(ldr(r6,c_frame.param3(sp)))
+	__(ldr(r7,c_frame.param4(sp)))
+	__(ldr(r8,c_frame.param5(sp)))
+	__(ldr(r9,c_frame.param6(sp)))
+	__(ldr(r10,c_frame.param7(sp)))
+	__(ldr(r11,c_frame.savelr(sp)))
+	__(mtlr r11)
+	__(ldr(r11,c_frame.crsave(sp)))
+	__(mtcr r11)
+	__(blr)
+        
+/* Like misc_alloc (a LOT like it, since it does most of the work), but takes  */
+/* an initial-value arg in arg_z, element_count in arg_x, subtag in arg_y.  */
+/* Calls out to %init-misc, which does the rest of the work.  */
+
+_spentry(misc_alloc_init)
+	/* Entry: arg_z = initial value, arg_x = element count,  */
+	/* arg_y = subtag.  Allocate via _SPmisc_alloc, then tail-call  */
+	/* %init-misc with (initval vector) to fill in the elements.  */
+	__(mflr loc_pc)
+	__(build_lisp_frame(fn,loc_pc,vsp))
+	__(li fn,0)
+	__(mr temp0,arg_z)		/* initval; assumed preserved across _SPmisc_alloc  */
+	__(mr arg_z,arg_y)		/* subtag  */
+	__(mr arg_y,arg_x)		/* element-count  */
+	__(bl _SPmisc_alloc)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp))) 
+	__(discard_lisp_frame())
+	__(li fname,nrs.init_misc)
+	__(set_nargs(2))
+	__(mr arg_y,temp0)	/* initval; new vector is in arg_z  */
+	__(jump_fname())
+
+/* As in stack_misc_alloc above, only with a non-default initial-value.  */
+
+_spentry(stack_misc_alloc_init)
+	/* Same protocol as misc_alloc_init above, but allocates the  */
+	/* vector on the stack via _SPstack_misc_alloc.  */
+	__(mflr loc_pc)
+	__(build_lisp_frame(fn,loc_pc,vsp))
+	__(li fn,0)
+	__(mr temp0,arg_z) /* initval  */
+	__(mr arg_z,arg_y) /* subtag  */
+	__(mr arg_y,arg_x) /* element-count  */
+	__(bl _SPstack_misc_alloc)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(discard_lisp_frame())
+	__(li fname,nrs.init_misc)
+	__(set_nargs(2))
+	__(mr arg_y,temp0)	/* initval; new vector is in arg_z  */
+	__(jump_fname())
+
+	
+_spentry(callbuiltin)
+	/* Jump to the symbol at boxed index imm0 in the vector that is  */
+	/* the value of %builtin-functions%.  nargs is already valid.  */
+	__(ref_nrs_value(fname,builtin_functions))
+	__(la imm0,misc_data_offset(imm0))
+	__(ldrx(fname,fname,imm0))
+	__(jump_fname())
+
+/* the value of the nilreg-relative symbol %builtin-functions% should be  */
+/* a vector of symbols.  Call the symbol indexed by imm0 (boxed) and  */
+/* return a single value.  */
+
+_spentry(callbuiltin0)
+	/* Call builtin at boxed index imm0 with 0 arguments.  */
+	__(set_nargs(0))
+	__(ref_nrs_value(fname,builtin_functions))
+	__(la imm0,misc_data_offset(imm0))
+	__(ldrx(fname,fname,imm0))
+	__(jump_fname())
+
+_spentry(callbuiltin1)
+	/* Call builtin at boxed index imm0 with 1 argument (arg_z).  */
+	__(ref_nrs_value(fname,builtin_functions))
+	__(set_nargs(1))
+	__(la imm0,misc_data_offset(imm0))
+	__(ldrx(fname,fname,imm0))
+	__(jump_fname())
+
+_spentry(callbuiltin2)
+	/* Call builtin at boxed index imm0 with 2 arguments (arg_y, arg_z).  */
+	__(set_nargs(2))
+	__(ref_nrs_value(fname,builtin_functions))
+	__(la imm0,misc_data_offset(imm0))
+	__(ldrx(fname,fname,imm0))
+	__(jump_fname())
+
+
+_spentry(callbuiltin3)
+	/* Call builtin at boxed index imm0 with 3 arguments (arg_x..arg_z).  */
+	__(set_nargs(3))
+	__(ref_nrs_value(fname,builtin_functions))
+	__(la imm0,misc_data_offset(imm0))
+	__(ldrx(fname,fname,imm0))
+	__(jump_fname())
+	
+
+_spentry(popj)
+	.globl C(popj)
+C(popj):
+	/* Return through the current lisp frame: restore saved LR, vsp  */
+	/* and fn, discard the frame, and return.  */
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame())
+	__(blr)
+
+_spentry(restorefullcontext)
+	/* Pop the current lisp frame (LR, vsp, fn) and continue at the  */
+	/* caller's return address (held in LR on entry, moved to CTR).  */
+	__(mflr loc_pc)
+	__(mtctr loc_pc)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame())
+	__(bctr)
+
+_spentry(savecontextvsp)
+	/* Build a lisp frame saving fn/loc_pc/vsp, set fn from nfn,  */
+	/* and trap on control-stack overflow.  */
+	__(ldr(imm0,tcr.cs_limit(rcontext)))
+	__(build_lisp_frame(fn,loc_pc,vsp))
+	__(mr fn,nfn)
+	__(trllt(sp,imm0))	/* trap if sp below cs_limit  */
+	__(blr)
+
+_spentry(savecontext0)
+	/* Like savecontextvsp, but the saved vsp is vsp+imm0  */
+	/* (imm0 = byte offset supplied by the caller).  */
+	__(add imm0,vsp,imm0)
+	__(build_lisp_frame(fn,loc_pc,imm0))
+	__(ldr(imm0,tcr.cs_limit(rcontext)))
+	__(mr fn,nfn)
+	__(trllt(sp,imm0))	/* trap if sp below cs_limit  */
+	__(blr)
+
+
+/* Like .SPrestorefullcontext, only the saved return address  */
+/* winds up in loc-pc instead of getting thrashed around ...  */
+_spentry(restorecontext)
+	/* Like .SPrestorefullcontext, only the saved return address  */
+	/* winds up in loc_pc instead of getting thrashed around.  */
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame())
+	__(blr)
+
+        
+/* Nargs is valid; all arg regs, lexpr-count pushed by caller.  */
+/* imm0 = vsp to restore.  */
+/* Return all values returned by caller to its caller, hiding  */
+/* the variable-length arglist.  */
+/* If we can detect that the caller's caller didn't expect  */
+/* multiple values, then things are even simpler.  */
+_spentry(lexpr_entry)
+	/* Entry: nargs valid; arg regs and lexpr-count already pushed  */
+	/* by the caller; imm0 = vsp to restore.  Arrange for the  */
+	/* variable-length frame to be popped on return: if the caller's  */
+	/* caller expects multiple values (return address = ret1val_addr)  */
+	/* push an extra frame returning through lexpr_return, otherwise  */
+	/* return through lexpr_return1v.  */
+	__(ref_global(imm1,ret1val_addr))
+	__(cmpr(cr0,imm1,loc_pc))
+	__(build_lisp_frame(fn,loc_pc,imm0))
+	__(bne cr0,1f)
+	/* Multiple-value case.  */
+	__(ref_global(imm0,lexpr_return))
+	__(build_lisp_frame(rzero,imm0,vsp))
+	__(mr loc_pc,imm1)
+	__(ldr(imm0,tcr.cs_limit(rcontext)))
+	__(trllt(sp,imm0))	/* control-stack overflow check  */
+	__(li fn,0)
+	__(blr)
+
+        /* The single-value case just needs to return to something that'll pop  */
+        /* the variable-length frame off of the vstack.  */
+1:
+	__(ref_global(loc_pc,lexpr_return1v))
+	__(ldr(imm0,tcr.cs_limit(rcontext)))
+	__(trllt(sp,imm0))	/* control-stack overflow check  */
+	__(li fn,0)
+	__(blr)
+
+/* */
+/* Do a system call in Darwin.  The stack is set up much as it would be */
+/* for a PowerOpen ABI ff-call:	register parameters are in the stack */
+/* frame, and there are 4 extra words at the bottom of the frame that */
+/* we can carve a lisp frame out of. */
+/*  */
+/* System call return conventions are a little funky in Darwin: if "@sc" */
+/* is the address of the "sc" instruction, errors return to @sc+4 and */
+/* non-error cases return to @sc+8.  Error values are returned as */
+/* positive values in r3; this is true even if the system call returns */
+/* a doubleword (64-bit) result.  Since r3 would ordinarily contain */
+/* the high half of a doubleword result, this has to be special-cased. */
+/*  */
+/* The caller should set the c_frame.crsave field of the stack frame */
+/* to 0 if the result is to be interpreted as anything but a doubleword */
+/* and to non-zero otherwise.  (This only matters on an error return.) */
+
+        
+_spentry(poweropen_syscall)
+	/* Perform a system call (syscall number = unboxed arg_z, C args  */
+	/* already in the c_frame param slots).  See the block comment  */
+	/* above for the Darwin return convention handled at labels 1/2.  */
+	__(mflr loc_pc)
+	__(vpush_saveregs())
+	/* Carve a lisp frame out of the extra words below the C frame  */
+	/* and save the lisp state in it.  */
+	__(ldr(imm1,0(sp)))
+	__(la imm2,-lisp_frame.size(imm1))
+        __(zero_doublewords imm2,0,lisp_frame.size)
+	__(str(imm1,lisp_frame.backlink(imm2)))
+	__(str(imm2,c_frame.backlink(sp)))
+	__(str(fn,lisp_frame.savefn(imm2)))
+	__(str(loc_pc,lisp_frame.savelr(imm2)))
+	__(str(vsp,lisp_frame.savevsp(imm2)))
+	__(ldr(imm3,tcr.cs_area(rcontext)))
+	__(str(imm2,area.active(imm3)))
+	/* Leave lisp context before the syscall.  */
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(str(rzero,tcr.ffi_exception(rcontext)))
+	__(mr save0,rcontext)	/* keep the tcr across the syscall  */
+	__(li r3,TCR_STATE_FOREIGN)
+	__(str(r3,tcr.valence(rcontext)))
+	__(li rcontext,0)
+	__(ldr(r3,c_frame.param0(sp)))
+	__(ldr(r4,c_frame.param1(sp)))
+	__(ldr(r5,c_frame.param2(sp)))
+	__(ldr(r6,c_frame.param3(sp)))
+	__(ldr(r7,c_frame.param4(sp)))
+	__(ldr(r8,c_frame.param5(sp)))
+	__(ldr(r9,c_frame.param6(sp)))
+	__(ldr(r10,c_frame.param7(sp)))
+	__(unbox_fixnum(r0,arg_z))	/* r0 = syscall number  */
+	__(sc)
+        __ifdef(`LINUX')
+         __(bns+ 9f)	/* SO clear => success  */
+        __else
+	 /* Darwin: errors return to sc+4, success to sc+8.  */
+	 __(b 1f)
+	 __(b 9f)
+        __endif
+1:
+        /* Error return: negate the (positive) error code, special-  */
+        /* casing doubleword results as flagged in c_frame.crsave.  */
+        __ifdef(`PPC64')
+         __(neg r3,r3)
+        __else
+	 __(ldr(imm2,c_frame.crsave(sp)))
+	 __(cmpri(cr0,imm2,0))
+	 __(bne cr0,2f)
+	 /* 32-bit result  */
+	 __(neg r3,r3)
+	 __(b 9f)
+2:
+	 /* 64-bit result  */
+	 __(neg r4,r3)
+	 __(li r3,-1)
+        __endif
+9:
+	/* Re-enter lisp context: restore rcontext, zero/NIL the lisp  */
+	/* registers, reload allocator and stack state.  */
+	__(mr imm2,save0)	/* recover context  */
+	__(ldr(sp,c_frame.backlink(sp)))
+	__(li imm4,TCR_STATE_LISP)
+	__(li rzero,0)
+	__(li loc_pc,0)
+	__(li arg_x,nil_value)
+	__(li arg_y,nil_value)
+	__(li arg_z,nil_value)
+	__(li temp0,nil_value)
+	__(li temp1,nil_value)
+	__(li temp2,nil_value)
+	__(li temp3,nil_value)
+	__(li fn,nil_value)
+	__(mr rcontext,imm2)
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	__(ldr(tsp,tcr.save_tsp(rcontext)))
+        __(li save0,0)
+        __(li save1,0)
+        __(li save2,0)
+        __(li save3,0)
+        __(li save4,0)
+        __(li save5,0)
+        __(li save6,0)
+        __(li save7,0)        
+	__(str(imm4,tcr.valence(rcontext)))
+	__(vpop_saveregs)
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame)
+        __(mtxer rzero)	/* lisp wants XER clear  */
+	__(check_pending_interrupt(`cr1'))
+	__(blr)
+        
+        
+_spentry(builtin_plus)
+	/* Fixnum-fast +: if both args are fixnums add with overflow  */
+	/* detection, boxing an overflowed sum as a bignum; otherwise  */
+	/* punt to the generic +-2 builtin.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(addo. arg_z,arg_y,arg_z)
+	__(bnslr+)	/* no overflow: fixnum result, return  */
+	__(mtxer rzero)
+	__(unbox_fixnum(imm1,arg_z))
+        __ifdef(`PPC64')
+	 __(li imm0,two_digit_bignum_header)
+         __(rotldi imm1,imm1,32)
+	 __(xoris imm1,imm1,0xe000)	/* undo the overflowed boxing  */
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __else
+	 __(li imm0,one_digit_bignum_header)
+	 __(xoris imm1,imm1,0xc000)	/* undo the overflowed boxing  */
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(1)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __endif
+	__(blr)
+1:
+	__(jump_builtin(_builtin_plus,2))
+_spentry(builtin_minus)
+	/* Fixnum-fast -: same scheme as builtin_plus above, using a  */
+	/* subtract with overflow detection.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(subo. arg_z,arg_y,arg_z)
+	__(bnslr+)	/* no overflow: fixnum result, return  */
+	__(mtxer rzero)
+	__(unbox_fixnum(imm1,arg_z))
+        __ifdef(`PPC64')
+	 __(li imm0,two_digit_bignum_header)
+         __(rotldi imm1,imm1,32)
+	 __(xoris imm1,imm1,0xe000)	/* undo the overflowed boxing  */
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __else
+	 __(li imm0,one_digit_bignum_header)
+	 __(xoris imm1,imm1,0xc000)	/* undo the overflowed boxing  */
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(1)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __endif
+	__(blr)
+1:
+	__(jump_builtin(_builtin_minus,2))
+_spentry(builtin_times)
+	/* Fixnum-fast *: multiply with overflow detection; on overflow  */
+	/* compute the full double-width product of the unboxed args and  */
+	/* box it via _SPmakes128 (PPC64) / _SPmakes64 (PPC32).  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(unbox_fixnum(imm2,arg_y))
+	__(bne cr0,1f)
+        __(bne cr1,1f)
+        __ifdef(`PPC64')
+         __(mulldo. imm3,arg_z,imm2)
+         __(bso 2f)
+         __(mr arg_z,imm3)
+         __(blr)
+	 /* Args are fixnums; result can't be  */
+2:	 __(mtxer rzero)
+	 __(unbox_fixnum(imm3,arg_z))
+	 __(mulld imm1,imm3,imm2) /* imm1 = low  64 bits  */
+	 __(mulhd imm0,imm3,imm2) /* imm0 = high 64 bits  */
+	 __(b _SPmakes128)
+        __else
+	 __(mullwo. imm3,arg_z,imm2)
+	 __(bso 2f)		/*  SO set if result would overflow a fixnum  */
+	 __(mr arg_z,imm3)
+	 __(blr)
+	 /* Args are fixnums; result can't be  */
+2:	 __(mtxer rzero)
+	 __(unbox_fixnum(imm3,arg_z))
+	 __(mullw imm1,imm3,imm2) /* imm1 = low  32 bits  */
+	 __(mulhw imm0,imm3,imm2) /* imm0 = high 32 bits  */
+	 __(b _SPmakes64)
+        __endif
+
+1:	__(jump_builtin(_builtin_times,2))
+
+_spentry(builtin_div)
+	/* No fast path: always use the generic /-2 builtin.  */
+	__(jump_builtin(_builtin_div,2))
+
+_spentry(builtin_eq)
+	/* Fixnum-fast =: compare directly when both args are fixnums,  */
+	/* returning T or NIL; otherwise punt to the generic builtin.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(cmpr(cr2,arg_y,arg_z))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(li arg_z,nil_value)
+	__(bnelr cr2)	/* not equal: return NIL  */
+	__(li arg_z,t_value)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_eq,2))
+
+_spentry(builtin_ne)
+	/* Fixnum-fast /=: T when two fixnums differ, else NIL;  */
+	/* non-fixnum args punt to the generic builtin.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(cmpr(cr2,arg_y,arg_z))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(li arg_z,nil_value)
+	__(beqlr cr2)	/* equal: return NIL  */
+	__(li arg_z,t_value)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_ne,2))
+
+_spentry(builtin_gt)
+	/* Fixnum-fast >: signed compare of two fixnums (tagged values  */
+	/* compare in the same order as their fixnum values).  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(cmpr(cr2,arg_y,arg_z))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(li arg_z,nil_value)
+	__(bnglr cr2)	/* not greater: return NIL  */
+	__(li arg_z,t_value)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_gt,2))
+
+_spentry(builtin_ge)
+	/* Fixnum-fast >=: signed compare of two fixnums.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(cmpr(cr2,arg_y,arg_z))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(li arg_z,nil_value)
+	__(bltlr cr2)	/* less: return NIL  */
+	__(li arg_z,t_value)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_ge,2))
+
+_spentry(builtin_lt)
+	/* Fixnum-fast <: signed compare of two fixnums.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(cmpr(cr2,arg_y,arg_z))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(li arg_z,nil_value)
+	__(bnllr cr2)	/* not less: return NIL  */
+	__(li arg_z,t_value)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_lt,2))
+
+_spentry(builtin_le)
+	/* Fixnum-fast <=: signed compare of two fixnums.  */
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(cmpr(cr2,arg_y,arg_z))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(li arg_z,nil_value)
+	__(bgtlr cr2)	/* greater: return NIL  */
+	__(li arg_z,t_value)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_le,2))
+
+
+/* EQL: return T if arg_y and arg_z are EQ (label 1); if either isn't a */
+/* fulltag_misc object, or their subtags differ, they can't be EQL and we */
+/* return NIL (label 2).  Same-subtag misc objects (e.g. two bignums) need */
+/* a contents comparison, so call out to the lisp builtin. */
+_spentry(builtin_eql)
+        __(cmpr(cr1,arg_y,arg_z))
+        __(extract_fulltag(imm2,arg_y))
+        __(extract_fulltag(imm3,arg_z))
+        __(beq cr1,1f)
+        __(cmpri(cr1,imm2,fulltag_misc))
+        __(cmpri(cr0,imm3,fulltag_misc))
+        __(bne cr1,2f)
+        __(extract_subtag(imm0,arg_y))
+        __(bne cr0,2f)
+        __(extract_subtag(imm1,arg_z))
+        __(cmpr(cr0,imm0,imm1))
+        __(bne cr0,2f)
+	/* Both misc objects with the same subtag: compare contents in lisp  */
+	__(jump_builtin(_builtin_eql,2))
+1:	__(li arg_z,t_value)
+	__(blr)
+2:	__(li arg_z,nil_value)
+	__(blr)
+        
+/* LENGTH: NIL => 0 (label 1); a vector header => its stored logical size */
+/* (label 2); a simple 1-d array => vector_length; a cons => count cdrs */
+/* with a fast/slow two-pointer walk that detects circular lists (label 4 */
+/* loop: slow pointer advances every other iteration; if fast catches slow, */
+/* or anything non-list shows up, fall out to the lisp builtin at label 8). */
+_spentry(builtin_length)
+        __(cmpri(cr1,arg_z,nil_value))
+	__(extract_typecode(imm0,arg_z))
+	__(cmpri(cr0,imm0,min_vector_subtag))
+        __(beq cr1,1f)
+        __ifdef(`PPC64')
+         __(cmpdi cr2,imm0,fulltag_cons)
+        __else
+	 __(cmpwi cr2,imm0,tag_list)
+        __endif
+	__(beq- cr0,2f)
+	__(blt- cr0,3f)
+	/* (simple-array * (*))  */
+	__(vector_length(arg_z,arg_z,imm0))
+	__(blr)
+1:      __(li arg_z,0)
+        __(blr)
+2:
+	/* Vector header: logical size is stored in the header  */
+	__(ldr(arg_z,vectorH.logsize(arg_z)))
+	__(blr)        
+3:	__(bne cr2,8f)
+	__(li temp2,-1<<fixnum_shift)
+	__(mr temp0,arg_z)	/* fast pointer  */
+	__(mr temp1,arg_z)	/* slow pointer  */
+        __ifdef(`PPC64')
+4:       __(extract_fulltag(imm0,temp0))
+         __(cmpdi cr7,temp0,nil_value)
+         __(cmpdi cr1,imm0,fulltag_cons)
+         __(addi temp2,temp2,fixnum_one)
+         __(beq cr7,9f)
+         __(andi. imm0,temp2,1<<fixnum_shift)
+         __(bne cr1,8f)
+         __(extract_fulltag(imm1,temp1))
+         __(_cdr(temp0,temp0))
+         __(cmpdi cr1,imm1,fulltag_cons)
+	 __(beq cr0,4b)
+	 __(bne cr1,8f)
+	 __(_cdr(temp1,temp1))
+	 __(cmpd cr0,temp0,temp1)
+	 __(bne cr0,4b)
+        __else
+4:	 __(extract_lisptag(imm0,temp0))
+	 __(cmpri(cr7,temp0,nil_value))
+	 __(cmpri(cr1,imm0,tag_list))
+	 __(addi temp2,temp2,fixnum_one)
+	 __(beq cr7,9f)
+	 __(andi. imm0,temp2,1<<fixnum_shift)
+	 __(bne cr1,8f)
+	 __(extract_lisptag(imm1,temp1))	
+	 __(_cdr(temp0,temp0))
+	 __(cmpri(cr1,imm1,tag_list))
+	 __(beq cr0,4b)
+	 __(bne cr1,8f)
+	 __(_cdr(temp1,temp1))
+	 __(cmpr(cr0,temp0,temp1))
+	 __(bne cr0,4b)
+        __endif
+8:	
+	/* Not a proper/acyclic list (or not a sequence): let lisp decide  */
+	__(jump_builtin(_builtin_length,1))
+9:	
+	/* temp2 holds the boxed count  */
+	__(mr arg_z,temp2)
+	__(blr)
+        
+/* SEQTYPE: return T for lists (NIL or conses), NIL for vectors */
+/* (typecode >= min_vector_subtag); anything else isn't a sequence, */
+/* so call out to the lisp builtin (label 2). */
+_spentry(builtin_seqtype)
+        __ifdef(`PPC64')
+         __(cmpdi cr2,arg_z,nil_value)
+         __(extract_typecode(imm0,arg_z))
+         __(beq cr2,1f)
+	 __(cmpri(cr0,imm0,fulltag_cons))
+        __else
+	 __(extract_typecode(imm0,arg_z))
+ 	 __(cmpri(cr0,imm0,tag_list))
+        __endif
+	__(cmpri(cr1,imm0,min_vector_subtag))
+	__(beq cr0,1f)
+	__(blt- cr1,2f)
+	__(li arg_z,nil_value)
+	__(blr)
+1:	__(li arg_z,t_value)
+	__(blr)
+2:
+	__(jump_builtin(_builtin_seqtype,1))
+        
+/* ASSQ: arg_y = key, arg_z = alist.  Walk the alist; for each non-NIL */
+/* pair, compare its car to the key with EQ and return the pair on a */
+/* match.  Return NIL (already in arg_z) when the list is exhausted. */
+/* trap_unless_list raises a type error on improper list structure. */
+_spentry(builtin_assq)
+	__(cmpri(arg_z,nil_value))	/* two-arg cmpri: compares in cr0  */
+	__(beqlr)
+1:	__(trap_unless_list(arg_z,imm0))
+	__(_car(arg_x,arg_z))	/* arg_x = current pair (or NIL)  */
+	__(_cdr(arg_z,arg_z))
+	__(cmpri(cr2,arg_x,nil_value))
+	__(cmpri(cr1,arg_z,nil_value))
+	__(beq cr2,2f)	/* skip NIL elements  */
+	__(trap_unless_list(arg_x,imm0))
+	__(_car(temp0,arg_x))
+	__(cmpr(temp0,arg_y))
+	__(bne cr0,2f)
+	__(mr arg_z,arg_x)	/* found: return the pair  */
+	__(blr)
+2:	__(bne cr1,1b)
+	__(blr)
+
+/* MEMQ: arg_y = item, arg_z = list.  Walk the list; when the car is EQ */
+/* to the item, return the current tail (arg_z).  Return NIL when the */
+/* list is exhausted.  trap_unless_list traps on improper structure. */
+_spentry(builtin_memq)
+	__(cmpri(cr1,arg_z,nil_value))
+	__(b 2f)
+1:	__(trap_unless_list(arg_z,imm0))
+	__(_car(arg_x,arg_z))
+	__(_cdr(temp0,arg_z))
+	__(cmpr(arg_x,arg_y))	/* EQ test against item (cr0)  */
+	__(cmpri(cr1,temp0,nil_value))
+	__(beqlr)	/* match: return current tail in arg_z  */
+	__(mr arg_z,temp0)
+2:	__(bne cr1,1b)
+	__(blr)
+
+	/* Largest bit index testable directly in a fixnum word  */
+        __ifdef(`PPC64')
+logbitp_max_bit = 61
+        __else
+logbitp_max_bit = 30
+        __endif
+        
+/* LOGBITP: extract bit arg_y of fixnum arg_z.  The rotate isolates the */
+/* requested bit, which is then scaled into a NIL/T offset and added to */
+/* nil_value.  Out-of-range index or non-fixnum args call out (label 1). */
+_spentry(builtin_logbitp)
+	/* Call out unless both fixnums,0 <=  arg_y < logbitp_max_bit  */
+        __(cmplri(cr2,arg_y,logbitp_max_bit<<fixnum_shift))
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(unbox_fixnum(imm0,arg_y))
+	__(subfic imm0,imm0,logbitp_max_bit)
+        __ifdef(`PPC64')
+         __(rldcl imm0,arg_z,imm0,63)
+         __(mulli imm0,imm0,t_offset)
+        __else
+  	 __(rlwnm imm0,arg_z,imm0,31,31)
+	 __(rlwimi imm0,imm0,4,27,27)
+        __endif
+	__(bnl cr2,1f)	/* index out of fast range  */
+	__(bne cr0,1f)
+        __(bne cr1,1f)
+	__(addi arg_z,imm0,nil_value)	/* nil_value or nil_value+t_offset  */
+	__(blr)
+1:
+	__(jump_builtin(_builtin_logbitp,2))
+
+/* LOGIOR: two fixnums OR directly (tag bits of two fixnums OR to a */
+/* fixnum tag, so no unboxing is needed); otherwise call out. */
+_spentry(builtin_logior)
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(or arg_z,arg_y,arg_z)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_logior,2))
+
+/* LOGAND: two fixnums AND directly (result keeps the fixnum tag); */
+/* otherwise call out to the lisp builtin. */
_spentry(builtin_logand)
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(and arg_z,arg_y,arg_z)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_logand,2))
+	
+/* ASH: arg_y = integer, arg_z = boxed shift count.  Fast path requires */
+/* both args to be fixnums (otherwise label 9 calls out).  A zero count */
+/* returns arg_y unchanged.  Negative counts are arithmetic right shifts, */
+/* clamped to the word size.  Positive counts compute the result's */
+/* integer-length first: if it still fits a fixnum, box and return; */
+/* otherwise build a one- or two-word unboxed result and tail-call the */
+/* makes64/makeu64 (32-bit) or makes128/makeu128 (64-bit) subprims. */
+_spentry(builtin_ash)
+        __ifdef(`PPC64')
+	 __(cmpdi cr1,arg_z,0)
+         __(extract_lisptag(imm0,arg_y))
+         __(extract_lisptag(imm1,arg_z))
+         __(cmpdi cr0,imm0,tag_fixnum)
+         __(cmpdi cr3,imm1,tag_fixnum)
+	 __(cmpdi cr2,arg_z,-(63<<3))	/* !! 3 =  fixnumshift  */
+	 __(bne- cr0,9f)
+         __(bne- cr3,9f)
+	 __(bne cr1,0f)
+	 __(mr arg_z,arg_y)	/* (ash n 0) => n  */
+	 __(blr)
+0:		
+	 __(unbox_fixnum(imm1,arg_y))
+	 __(unbox_fixnum(imm0,arg_z))
+	 __(bgt cr1,2f)
+	 /* (ash n -count) => fixnum  */
+	 __(neg imm2,imm0)
+	 __(bgt cr2,1f)
+	 __(li imm2,63)	/* clamp shift count to word size  */
+1:	
+	 __(srad imm0,imm1,imm2)
+	 __(box_fixnum(arg_z,imm0))
+	 __(blr)
+	 /* Integer-length of arg_y/imm1 to imm2  */
+2:		
+	 __(cntlzd. imm2,imm1)
+	 __(bne 3f)		/* cr0`eq' set if negative  */
+	 __(not imm2,imm1)
+	 __(cntlzd imm2,imm2)
+3:
+	 __(subfic imm2,imm2,64)
+	 __(add imm2,imm2,imm0)	 /* imm2 <- integer-length(imm1) + count  */
+	 __(cmpdi cr1,imm2,63-fixnumshift)
+	 __(cmpdi cr2,imm0,64)
+	 __(sld imm2,imm1,imm0)
+	 __(bgt cr1,6f)
+	 __(box_fixnum(arg_z,imm2))	/* result fits in a fixnum  */
+	 __(blr)	
+6:
+	 __(bgt cr2,9f)
+	 __(bne cr2,7f)
+	 /* Shift left by 64 bits exactly  */
+	 __(mr imm0,imm1)
+	 __(li imm1,0)
+	 __(beq _SPmakes128)	/* cr0`eq' still marks negative input  */
+	 __(b _SPmakeu128)
+7:
+	 /* Shift left by fewer than 64 bits, result not a fixnum  */
+	 __(subfic imm0,imm0,64)
+	 __(beq 8f)
+	 __(srd imm0,imm1,imm0)
+	 __(mr imm1,imm2)
+	 __(b _SPmakeu128)
+8:	
+	 __(srad imm0,imm1,imm0)
+	 __(mr imm1,imm2)
+	 __(b _SPmakes128)
+        __else
+	 __(cmpri(cr1,arg_z,0))
+         __(extract_lisptag(imm0,arg_y))
+         __(extract_lisptag(imm1,arg_z))
+         __(cmpri(cr0,imm0,tag_fixnum))
+         __(cmpri(cr3,imm1,tag_fixnum))
+	 __(cmpri(cr2,arg_z,-(29<<2)))	/* !! 2 =  fixnumshift  */
+	 __(bne- cr0,9f)
+         __(bne- cr3,9f)
+	 __(bne cr1,0f)
+	 __(mr arg_z,arg_y)	/* (ash n 0) => n  */
+	 __(blr)
+0:		
+	 __(unbox_fixnum(imm1,arg_y))
+	 __(unbox_fixnum(imm0,arg_z))
+	 __(bgt cr1,2f)
+	 /* (ash n -count) => fixnum  */
+	 __(neg imm2,imm0)
+	 __(bgt cr2,1f)
+	 __(li imm2,31)	/* clamp shift count to word size  */
+1:	
+	 __(sraw imm0,imm1,imm2)
+	 __(box_fixnum(arg_z,imm0))
+	 __(blr)
+	 /* Integer-length of arg_y/imm1 to imm2  */
+2:		
+	 __(cntlzw. imm2,imm1)
+	 __(bne 3f)		/* cr0`eq' set if negative  */
+	 __(not imm2,imm1)
+	 __(cntlzw imm2,imm2)
+3:
+	 __(subfic imm2,imm2,32)
+	 __(add imm2,imm2,imm0)	 /* imm2 <- integer-length(imm1) + count  */
+	 __(cmpri(cr1,imm2,31-fixnumshift))
+	 __(cmpri(cr2,imm0,32))
+	 __(slw imm2,imm1,imm0)
+	 __(bgt cr1,6f)
+	 __(box_fixnum(arg_z,imm2))	/* result fits in a fixnum  */
+	 __(blr)	
+6:
+	 __(bgt cr2,9f)
+	 __(bne cr2,7f)
+	 /* Shift left by 32 bits exactly  */
+	 __(mr imm0,imm1)
+	 __(li imm1,0)
+	 __(beq _SPmakes64)	/* cr0`eq' still marks negative input  */
+	 __(b _SPmakeu64)
+7:
+	 /* Shift left by fewer than 32 bits, result not a fixnum  */
+	 __(subfic imm0,imm0,32)
+	 __(beq 8f)
+	 __(srw imm0,imm1,imm0)
+	 __(mr imm1,imm2)
+	 __(b _SPmakeu64)
+8:	
+	 __(sraw imm0,imm1,imm0)
+	 __(mr imm1,imm2)
+	 __(b _SPmakes64)
+        __endif
+9:		
+	__(jump_builtin(_builtin_ash,2))
+
+/* Negate arg_z.  Fixnum fast path uses nego. and returns unless the */
+/* negation overflowed (only most-negative-fixnum does); on overflow, */
+/* clear XER and cons a one-digit (32-bit) / two-digit (64-bit) bignum */
+/* whose digit is derived from the unboxed value via the xoris sign-bit */
+/* flip.  Non-fixnums call out to the lisp builtin. */
+_spentry(builtin_negate)
+	__(extract_lisptag_(imm0,arg_z))
+	__(bne- cr0,1f)
+	__(nego. arg_z,arg_z)
+	__(bnslr+)	/* no overflow: boxed result already in arg_z  */
+	__(mtxer rzero)	/* clear SO/OV before allocating  */
+	__(unbox_fixnum(imm1,arg_z))
+        __ifdef(`PPC64')
+	 __(li imm0,two_digit_bignum_header)
+         __(rotldi imm1,imm1,32)
+	 __(xoris imm1,imm1,0xe000)
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __else
+	 __(li imm0,one_digit_bignum_header)
+	 __(xoris imm1,imm1,0xc000)
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(1)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __endif
+	__(blr)
+1:
+	__(jump_builtin(_builtin_negate,1))
+
+/* LOGXOR: two fixnums XOR directly (equal tag bits cancel, leaving a */
+/* correctly-tagged fixnum); otherwise call out. */
+_spentry(builtin_logxor)
+        __(extract_lisptag(imm0,arg_y))
+        __(extract_lisptag(imm1,arg_z))
+        __(cmpri(cr0,imm0,tag_fixnum))
+        __(cmpri(cr1,imm1,tag_fixnum))
+	__(bne- cr0,1f)
+        __(bne- cr1,1f)
+	__(xor arg_z,arg_y,arg_z)
+	__(blr)
+1:
+	__(jump_builtin(_builtin_logxor,2))
+
+
+
+        
+/* (setf (aref v i) val) for 1-d arrays: if arg_x's typecode is above */
+/* min_vector_subtag, dispatch to the typed misc-set subprim (which gets */
+/* the boxed typecode in temp0); otherwise call out to the lisp builtin. */
+_spentry(builtin_aset1)
+	__(extract_typecode(imm0,arg_x))
+	__(cmpri(cr0,imm0,min_vector_subtag))
+	__(box_fixnum(temp0,imm0))
+	__(bgt cr0,1f)
+	__(jump_builtin(_builtin_aset1,3))
+1:
+	__(b _SPsubtag_misc_set)
+
+/* Enter the debugger  */
+/* Enter the debugger  */
+/* The unconditional trap (lt|gt|eq on sp vs sp) raises a trap exception */
+/* for the handler; if the handler returns, fall through to blr. */
+_spentry(breakpoint)
+	__(li r3,0)
+	__(tw 28,sp,sp)	/* 28 = lt|gt|eq (assembler bug for the latter)  */
+	__(blr)		/* if handler didn't  */
+
+/* */
+/* We're entered with an eabi_c_frame on the C stack.  There's a */
+/* lisp_frame reserved underneath it; we'll link it in in a minute. */
+/* Load the outgoing GPR arguments from eabi_c_frame.param`0-7', */
+/* then shrink the eabi_c_frame. */
+/*  */
+	
+/* Foreign-function call (EABI).  Saves lisp state (vregs, lisp frame, */
+/* allocptr/allocbase/tsp/vsp, fpscr) into the TCR, marks the thread */
+/* TCR_STATE_FOREIGN, loads the eight GPR args from the eabi_c_frame, */
+/* calls the C function (address in arg_z, possibly a macptr), then */
+/* restores the full lisp register context and returns. */
+_spentry(eabi_ff_call)
+	__(mflr loc_pc)
+	__(str(sp,eabi_c_frame.savelr(sp)))
+	__(vpush_saveregs())		/* Now we can use save0-save7 to point to stacks  */
+	__(mr save0,rcontext)	/* or address globals.  */
+	__(extract_typecode(imm0,arg_z))
+	__(cmpri(imm0,subtag_macptr))	/* two-arg cmpri: result in cr0  */
+	__(ldr(save1,0(sp)))	/* bottom of reserved lisp frame  */
+	__(la save2,-lisp_frame.size(save1))	/* top of lisp frame */
+        __(zero_doublewords save2,0,lisp_frame.size)
+	__(str(save1,lisp_frame.backlink(save2)))
+	__(str(save2,c_frame.backlink(sp)))
+	__(str(fn,lisp_frame.savefn(save2)))
+	__(str(loc_pc,lisp_frame.savelr(save2)))
+	__(str(vsp,lisp_frame.savevsp(save2)))
+	__(bne 1f)
+	__(ldr(arg_z,macptr.address(arg_z)))	/* deref macptr to raw address  */
+1:
+	__(ldr(save3,tcr.cs_area(rcontext)))
+	__(str(save2,area.active(save3)))
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(mtctr arg_z)
+	__(str(rzero,tcr.ffi_exception(rcontext)))
+	__(mffs f0)
+	__(stfd f0,tcr.lisp_fpscr(rcontext))	/* remember lisp's fpscr  */
+	__(mtfsf 0xff,fp_zero)	/* zero foreign fpscr  */
+	__(li imm1,TCR_STATE_FOREIGN)
+	__(str(imm1,tcr.valence(rcontext)))
+	__(ldr(r2,tcr.native_thread_info(rcontext)))
+	__(ldr(r13,lisp_globals.saveR13(0)))
+	__(ldr(r3,eabi_c_frame.param0(sp)))
+	__(ldr(r4,eabi_c_frame.param1(sp)))
+	__(ldr(r5,eabi_c_frame.param2(sp)))
+	__(ldr(r6,eabi_c_frame.param3(sp)))
+	__(ldr(r7,eabi_c_frame.param4(sp)))
+	__(ldr(r8,eabi_c_frame.param5(sp)))
+	__(ldr(r9,eabi_c_frame.param6(sp)))
+	__(ldr(r10,eabi_c_frame.param7(sp)))
+	__(la save1,eabi_c_frame.minsiz-eabi_c_frame.param0(sp))
+	__(str(rzero,eabi_c_frame.savelr(save1)))
+	__(str(save2,eabi_c_frame.backlink(save1)))
+	__(mr sp,save1)
+	/* If we're calling a varargs C function, it'll want to */
+	/* know whether or not we've passed any args in FP regs. */
+	/* Better to say that we did (and force callee to save FP */
+	/* arg regs on entry) than to say that we didn't and get */
+	/* garbage results  */
+	__(crset 6)
+	__(bctrl)
+	/* C should have preserved save0 (= rcontext) for us.  */
+	__(ldr(sp,0(sp)))
+	__(mr imm2,save0)
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(li rzero,0)
+	__(mr loc_pc,rzero)
+	/* Reset all boxed registers to NIL so the GC sees no stale values  */
+	__(li arg_x,nil_value)
+	__(li arg_y,nil_value)
+	__(li arg_z,nil_value)
+	__(li temp0,nil_value)
+	__(li temp1,nil_value)
+	__(li temp2,nil_value)
+	__(li temp3,nil_value)
+	__(li fn,nil_value)
+	__(mr rcontext,imm2)
+	__(li imm2,TCR_STATE_LISP)
+	__(ldr(tsp,tcr.save_tsp(rcontext)))
+        __(li save0,0)
+        __(li save1,0)
+        __(li save2,0)
+        __(li save3,0)
+        __(li save4,0)
+        __(li save5,0)
+        __(li save6,0)
+        __(li save7,0)
+        __(li allocptr,-dnode_size)
+        __(li allocbase,-dnode_size)
+	__(str(imm2,tcr.valence(rcontext)))	
+	__(vpop_saveregs())
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(mffs f0)
+	__(stfd f0,8(sp))
+	__(lwz imm3,12(sp))	/* imm3 = FPSCR after call  */
+        __(clrrwi imm2,imm3,8)	/* keep only exception/status bits  */
+	__(discard_lisp_frame())
+	__(str(imm2,tcr.ffi_exception(rcontext)))
+	__(lfd f0,tcr.lisp_fpscr(rcontext))
+	__(mtfsf 0xff,f0)	/* restore lisp's fpscr  */
+	__(check_pending_interrupt(`cr1'))
+        __(mtxer rzero)
+        __(mtctr rzero)
+	__(blr)
+        
+/*  */
+/* This gets called with R11 holding the unboxed callback index. */
+/* */
+        
+/* C-to-lisp callback (EABI); entered with the unboxed callback index in */
+/* R11.  Extends the caller's C frame into a varargs frame holding the */
+/* incoming GPR/FP args, saves the C non-volatile registers and fpscr in */
+/* a private c_reg_save frame, rebuilds the lisp register context (tcr */
+/* via get_tcr, fp_zero/fp_s32conv constants), then funcalls the lisp */
+/* callback dispatcher (nrs.callbacks) with the callback index and a */
+/* pointer to the saved parameter area.  On return, re-enters foreign */
+/* state, restores the C registers, loads the GPR/FP return values and */
+/* returns to the C caller. */
+_spentry(eabi_callback)
+	/* First, we extend the C frame so that it has room for */
+        /* incoming arg regs.  */
+	__(ldr(r0,eabi_c_frame.backlink(sp)))
+	__(stru(r0,eabi_c_frame.param0-varargs_eabi_c_frame.incoming_stack_args(sp)))
+	__(mflr r0)
+	__(str(r0,varargs_eabi_c_frame.savelr(sp)))
+	__(str(r3,varargs_eabi_c_frame.gp_save+(0*4)(sp)))
+	__(str(r4,varargs_eabi_c_frame.gp_save+(1*4)(sp)))
+	__(str(r5,varargs_eabi_c_frame.gp_save+(2*4)(sp)))
+	__(str(r6,varargs_eabi_c_frame.gp_save+(3*4)(sp)))
+	__(str(r7,varargs_eabi_c_frame.gp_save+(4*4)(sp)))
+	__(str(r8,varargs_eabi_c_frame.gp_save+(5*4)(sp)))
+	__(str(r9,varargs_eabi_c_frame.gp_save+(6*4)(sp)))
+	__(str(r10,varargs_eabi_c_frame.gp_save+(7*4)(sp)))
+	/* Could check the appropriate CR bit and skip saving FP regs here  */
+	__(stfd f1,varargs_eabi_c_frame.fp_save+(0*8)(sp))
+	__(stfd f2,varargs_eabi_c_frame.fp_save+(1*8)(sp))
+	__(stfd f3,varargs_eabi_c_frame.fp_save+(2*8)(sp))
+	__(stfd f4,varargs_eabi_c_frame.fp_save+(3*8)(sp))
+	__(stfd f5,varargs_eabi_c_frame.fp_save+(4*8)(sp))
+	__(stfd f6,varargs_eabi_c_frame.fp_save+(5*8)(sp))
+	__(stfd f7,varargs_eabi_c_frame.fp_save+(6*8)(sp))
+	__(stfd f8,varargs_eabi_c_frame.fp_save+(7*8)(sp))
+	__(la r0,varargs_eabi_c_frame.incoming_stack_args(sp))
+	__(str(r0,varargs_eabi_c_frame.overflow_arg_area(sp)))
+	__(la r0,varargs_eabi_c_frame.regsave(sp))
+	__(str(r0,varargs_eabi_c_frame.reg_save_area(sp)))
+	__(li r0,0)
+	__(str(r0,varargs_eabi_c_frame.flags(sp)))
+
+	/* Save the non-volatile registers on the sp stack  */
+	/* This is a non-standard stack frame, but noone will ever see it,  */
+        /* so it doesn't matter. It will look like more of the stack frame pushed below.  */
+	__(stru(sp,-(c_reg_save.size)(sp)))
+        __(str(r13,c_reg_save.save_gprs+(0*node_size)(sp)))
+        __(str(r14,c_reg_save.save_gprs+(1*node_size)(sp)))
+        __(str(r15,c_reg_save.save_gprs+(2*node_size)(sp)))
+        __(str(r16,c_reg_save.save_gprs+(3*node_size)(sp)))
+        __(str(r17,c_reg_save.save_gprs+(4*node_size)(sp)))
+        __(str(r18,c_reg_save.save_gprs+(5*node_size)(sp)))
+        __(str(r19,c_reg_save.save_gprs+(6*node_size)(sp)))
+        __(str(r20,c_reg_save.save_gprs+(7*node_size)(sp)))
+        __(str(r21,c_reg_save.save_gprs+(8*node_size)(sp)))
+        __(str(r22,c_reg_save.save_gprs+(9*node_size)(sp)))
+        __(str(r23,c_reg_save.save_gprs+(10*node_size)(sp)))
+        __(str(r24,c_reg_save.save_gprs+(11*node_size)(sp)))
+        __(str(r25,c_reg_save.save_gprs+(12*node_size)(sp)))
+        __(str(r26,c_reg_save.save_gprs+(13*node_size)(sp)))
+        __(str(r27,c_reg_save.save_gprs+(14*node_size)(sp)))
+        __(str(r28,c_reg_save.save_gprs+(15*node_size)(sp)))
+        __(str(r29,c_reg_save.save_gprs+(16*node_size)(sp)))
+        __(str(r30,c_reg_save.save_gprs+(17*node_size)(sp)))
+        __(str(r31,c_reg_save.save_gprs+(18*node_size)(sp)))
+	__(mffs f0)
+	__(stfd f0,c_reg_save.save_fp_zero(sp))
+	__(ldr(r31,c_reg_save.save_fp_zero+4(sp)))	/* recover FPSCR image  */
+	__(str(r31,c_reg_save.save_fpscr(sp)))
+	/* Build the 2^52 + 2^31 double used for signed 32-bit conversion  */
+	__(lwi(r30,0x43300000))
+	__(lwi(r31,0x80000000))
+	__(str(r30,c_reg_save.save_fp_zero(sp)))
+	__(str(r31,c_reg_save.save_fp_zero+4(sp)))
+	__(stfd fp_s32conv,c_reg_save.save_fps32conv(sp))
+	__(lfd fp_s32conv,c_reg_save.save_fp_zero(sp))
+	__(stfd fp_zero,c_reg_save.save_fp_zero(sp))
+	__(lfs fp_zero,lisp_globals.short_float_zero(0))	/* ensure that fp_zero contains 0.0  */
+
+	
+/* Restore rest of Lisp context.  */
+/* Could spread out the memory references here to gain a little speed  */
+	__(li loc_pc,0)
+	__(li fn,0)                     /* subprim, not a lisp function  */
+	__(li temp3,0)
+	__(li temp2,0)
+	__(li temp1,0)
+	__(li temp0,0)
+	__(li arg_x,0)
+	__(box_fixnum(arg_y,r11))	/* callback-index  */
+	__(la arg_z,c_reg_save.size+varargs_eabi_c_frame.gp_save(sp))	/* parameters (tagged as a fixnum)  */
+
+	/* Recover lisp thread context. Have to call C code to do so.  */
+	__(ref_global(r12,get_tcr))
+	__(mtctr r12)
+        __(li r3,1)
+	__(stru(sp,-(stack_align(eabi_c_frame.minsiz))(sp)))
+	__(bctrl)
+	__(la sp,(stack_align(eabi_c_frame.minsiz))(sp))
+	__(la rcontext,TCR_BIAS(r3))
+	__(li allocptr,0)
+	__(li allocbase,0)
+	__(ldr(vsp,tcr.save_vsp(rcontext)))
+	__(ldr(tsp,tcr.save_tsp(rcontext)))		
+	__(li rzero,0)
+	__(mtxer rzero) /* lisp wants the overflow bit clear  */
+	__(li imm0,TCR_STATE_LISP)
+	__(li save0,0)
+	__(li save1,0)
+	__(li save2,0)
+	__(li save3,0)
+	__(li save4,0)
+	__(li save5,0)
+	__(li save6,0)
+	__(li save7,0)
+        __(mtctr rzero)
+	__(str(imm0,tcr.valence(rcontext)))
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	__(lfd f0,tcr.lisp_fpscr(rcontext))
+	__(mtfsf 0xff,f0)
+
+        __(restore_saveregs(vsp))        
+	/* load nargs and callback to the lisp  */
+	__(set_nargs(2))
+	__(ldr(imm2,tcr.cs_area(rcontext)))
+	__(ldr(imm4,area.active(imm2)))
+	__(stru(imm4,-lisp_frame.size(sp)))
+	__(str(imm3,lisp_frame.savelr(sp)))
+	__(str(vsp,lisp_frame.savevsp(sp)))	/* for stack overflow code  */
+	__(li fname,nrs.callbacks)	/* %pascal-functions%  */
+	__(call_fname)
+	__(ldr(imm2,lisp_frame.backlink(sp)))
+	__(ldr(imm3,tcr.cs_area(rcontext)))
+	__(str(imm2,area.active(imm3)))
+	__(discard_lisp_frame())
+	/* save_vsp will be restored from ff_call's stack frame, but  */
+	/* I included it here for consistency.  */
+	/* save_tsp is set below after we exit Lisp context.  */
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	/* Exit lisp context  */
+	/* This is not necessary yet, but will be once we can be interrupted  */
+	__(li imm1,TCR_STATE_FOREIGN)
+	__(str(imm1,tcr.valence(rcontext)))
+	/* Restore the non-volatile registers & fpscr  */
+	__(lfd fp_zero,c_reg_save.save_fp_zero(sp))
+	__(ldr(r31,c_reg_save.save_fpscr(sp)))
+	__(str(r31,c_reg_save.save_fp_zero+4(sp)))
+	__(lfd f0,c_reg_save.save_fp_zero(sp))
+	__(mtfsf 0xff,f0)
+	__(ldr(r13,c_reg_save.save_gprs+(0*node_size)(sp)))
+	__(ldr(r14,c_reg_save.save_gprs+(1*node_size)(sp)))
+	__(ldr(r15,c_reg_save.save_gprs+(2*node_size)(sp)))
+	__(ldr(r16,c_reg_save.save_gprs+(3*node_size)(sp)))
+	__(ldr(r17,c_reg_save.save_gprs+(4*node_size)(sp)))
+	__(ldr(r18,c_reg_save.save_gprs+(5*node_size)(sp)))
+	__(ldr(r19,c_reg_save.save_gprs+(6*node_size)(sp)))
+	__(ldr(r20,c_reg_save.save_gprs+(7*node_size)(sp)))
+	__(ldr(r21,c_reg_save.save_gprs+(8*node_size)(sp)))
+	__(ldr(r22,c_reg_save.save_gprs+(9*node_size)(sp)))
+	__(ldr(r23,c_reg_save.save_gprs+(10*node_size)(sp)))
+	__(ldr(r24,c_reg_save.save_gprs+(11*node_size)(sp)))
+	__(ldr(r25,c_reg_save.save_gprs+(12*node_size)(sp)))
+	__(ldr(r26,c_reg_save.save_gprs+(13*node_size)(sp)))
+	__(ldr(r27,c_reg_save.save_gprs+(14*node_size)(sp)))
+	__(ldr(r28,c_reg_save.save_gprs+(15*node_size)(sp)))
+	__(ldr(r29,c_reg_save.save_gprs+(16*node_size)(sp)))
+	__(ldr(r30,c_reg_save.save_gprs+(17*node_size)(sp)))
+	__(ldr(r31,c_reg_save.save_gprs+(18*node_size)(sp)))
+	__(lfd fp_s32conv,c_reg_save.save_fps32conv(sp))
+	__(ldr(sp,0(sp)))
+
+	/* Return values: GPR pair in r3/r4, FP result in f1 (from the */
+	/* same saved parameter words the lisp side wrote into)  */
+	__(ldr(r3,varargs_eabi_c_frame.gp_save+(0*4)(sp)))
+	__(ldr(r4,varargs_eabi_c_frame.gp_save+(1*4)(sp)))
+	__(lfd f1,varargs_eabi_c_frame.gp_save+(2*4)(sp))
+	__(ldr(r5,varargs_eabi_c_frame.savelr(sp)))
+	__(str(r5,varargs_eabi_c_frame.old_savelr(sp)))
+	__(mtlr r5)
+	__(ldr(r5,varargs_eabi_c_frame.backlink(sp)))
+	__(str(r5,varargs_eabi_c_frame.old_backlink(sp)))
+	__(la sp,varargs_eabi_c_frame.old_backlink(sp))
+	__(blr)
+	
+
+/*	Do a linux system call:	 the system call index is (boxed) */
+/*	in arg_z, and other arguments are in an eabi_c_frame on */
+/*	the C stack.  As is the case with an eabi_ff_call, there's */
+/*	a lisp frame reserved underneath the eabi_c_frame. */
+
+/*	This is a little simpler than eabi_ff_call, because we */
+/*	can assume that there are no synchronous callbacks to */
+/*	lisp (that might cause a GC.)  It's also simpler for the */
+/*	caller, since we return error status atomically. */
+
+/*	A system call can clobber any or all of r9-r12, so we need */
+/*	to save and restore allocptr, allocbase, and tsp. */
+	
+/* Linux system call: boxed syscall index in arg_z, arguments in an */
+/* eabi_c_frame (see block comment above).  Saves lisp context into the */
+/* TCR exactly as eabi_ff_call does, loads r3-r10 from the frame, issues */
+/* `sc', then restores the lisp register context.  On error (SO bit set */
+/* after sc) the result in r3 is negated so callers get -errno atomically. */
+_spentry(eabi_syscall)
+/*	We're entered with an eabi_c_frame on the C stack.  There's a */
+/*	lisp_frame reserved underneath it; we'll link it in in a minute. */
+/*	Load the outgoing GPR arguments from eabi_c_frame.param`0-7', */
+/*	then shrink the eabi_c_frame. */
+
+	__(mflr loc_pc)
+        __(vpush_saveregs())
+	__(str(sp,eabi_c_frame.savelr(sp)))
+	__(li arg_x,nil_value)
+	__(mr temp0,rcontext)
+	__(ldr(temp1,c_frame.backlink(sp)))	/* bottom of reserved lisp frame  */
+	__(la temp2,-lisp_frame.size(temp1))	/* top of lisp frame  */
+        __(zero_doublewords temp2,0,lisp_frame.size)
+	__(str(temp1,lisp_frame.backlink(temp2)))
+	__(str(temp2,c_frame.backlink(sp)))
+	__(str(fn,lisp_frame.savefn(temp2)))
+	__(str(loc_pc,lisp_frame.savelr(temp2)))
+	__(str(vsp,lisp_frame.savevsp(temp2)))
+	__(ldr(temp3,tcr.cs_area(rcontext)))
+	__(str(temp2,area.active(temp3)))
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+	__(str(rzero,tcr.ffi_exception(rcontext)))
+	__(li imm1,TCR_STATE_FOREIGN)
+	__(str(imm1,tcr.valence(rcontext)))
+	__(ldr(r13,lisp_globals.saveR13(0)))
+	__(ldr(r3,eabi_c_frame.param0(sp)))
+	__(ldr(r4,eabi_c_frame.param1(sp)))
+	__(ldr(r5,eabi_c_frame.param2(sp)))
+	__(ldr(r6,eabi_c_frame.param3(sp)))
+	__(ldr(r7,eabi_c_frame.param4(sp)))
+	__(ldr(r8,eabi_c_frame.param5(sp)))
+	__(ldr(r9,eabi_c_frame.param6(sp)))
+	__(ldr(r10,eabi_c_frame.param7(sp)))
+	__(la temp1,eabi_c_frame.minsiz-eabi_c_frame.param0(sp))
+	__(str(rzero,eabi_c_frame.savelr(temp1)))
+	__(str(temp2,eabi_c_frame.backlink(temp1)))
+	__(mr sp,temp1)
+	__(unbox_fixnum(r0,arg_z))	/* syscall number in r0  */
+	__(sc)
+	__(nop)
+	/* C should have preserved temp0 (= rcontext) for us.  */
+	__(ldr(sp,0(sp)))
+	__(mr imm2,temp0)
+	__(ldr(vsp,lisp_frame.savevsp(sp)))
+	__(li rzero,0)
+	__(mr loc_pc,rzero)
+	__(mr fn,rzero)
+	/* Reset all boxed registers to NIL so the GC sees no stale values  */
+	__(li arg_x,nil_value)
+	__(li arg_y,nil_value)
+	__(li arg_z,nil_value)
+	__(li temp0,nil_value)
+	__(li temp1,nil_value)
+	__(li temp2,nil_value)
+	__(li temp3,nil_value)
+	__(li fn,nil_value)
+        
+	__(li imm3,TCR_STATE_LISP)
+	__(mr rcontext,imm2)
+        __(li save0,0)
+        __(li save1,0)
+        __(li save2,0)
+        __(li save3,0)
+        __(li save4,0)
+        __(li save5,0)
+        __(li save6,0)
+        __(li save7,0)        
+	__(str(imm3,tcr.valence(rcontext)))
+	__(vpop_saveregs)	/* NOTE(review): no parens, unlike ff_call; m4 expands either way — confirm  */
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+	__(ldr(tsp,tcr.save_tsp(rcontext)))
+	__(ldr(loc_pc,lisp_frame.savelr(sp)))
+	__(mtlr loc_pc)
+	__(ldr(fn,lisp_frame.savefn(sp)))
+	__(discard_lisp_frame())
+	__(bns 1f)	/* SO clear: syscall succeeded  */
+	__(neg r3,r3)	/* error: return -errno  */
+1:      
+	__(check_pending_interrupt(`cr1'))                
+	__(mtxer rzero)
+	__(blr)
+        
+/* arg_z should be of type (UNSIGNED-BYTE 64);  */
+/* On PPC32, return high 32 bits in imm0, low 32 bits in imm1 */
+/* On PPC64, return unboxed value in imm0  */
+
+/* Unbox an (UNSIGNED-BYTE 64).  Non-negative fixnums unbox directly; */
+/* two-digit bignums must be non-negative, three-digit bignums must have */
+/* a zero top digit.  Anything else raises error_object_not_u64. */
+_spentry(getu64)
+        __ifdef(`PPC64')
+        __(extract_typecode(imm0,arg_z))
+        __(cmpdi cr0,imm0,tag_fixnum)
+        __(cmpdi cr2,arg_z,0)
+        __(cmpdi cr1,imm0,subtag_bignum)
+        __(bne cr0,1f)
+        __(unbox_fixnum(imm0,arg_z))
+        __(bgelr cr2)	/* non-negative fixnum: done  */
+0:             
+	__(uuo_interr(error_object_not_u64,arg_z))
+        
+1:      __(bne cr1,0b)
+        __(getvheader(imm1,arg_z))
+        __(ld imm0,misc_data_offset(arg_z))
+        __(cmpdi cr2,imm1,two_digit_bignum_header)
+        __(rotldi imm0,imm0,32)	/* digits are stored low-word-first  */
+        __(cmpdi cr1,imm1,three_digit_bignum_header)
+        __(cmpdi cr0,imm0,0)
+        __(beq cr2,2f)
+        __(lwz imm1,misc_data_offset+8(arg_z))
+        __(bne cr1,0b)
+        __(cmpwi imm1,0)	/* third digit must be zero  */
+        __(bne 0b)
+        __(blr)
+2:      __(blt 0b)	/* two-digit value must be non-negative  */
+        __(blr)        
+        __else
+	__(extract_typecode(imm0,arg_z))
+	__(cmpri(cr0,imm0,tag_fixnum))
+	__(cmpri(cr1,arg_z,0))
+	__(cmpri(cr2,imm0,subtag_bignum))
+	__(unbox_fixnum(imm1,arg_z))
+	__(bne cr0,8f)
+	__(bgelr cr1)	/* non-negative fixnum: high word already 0  */
+9:
+	__(uuo_interr(error_object_not_u64,arg_z))
+8:
+	__(bne- cr2,9b)
+	__(getvheader(imm2,arg_z))
+	__(cmpri(cr2,imm2,two_digit_bignum_header))
+	__(vrefr(imm1,arg_z,0))
+	__(cmpri(cr1,imm1,0))
+	__(li imm0,0)
+	__(bge cr2,2f)
+	__(blt- cr1,9b)	/* one-digit bignum must be non-negative  */
+	__(blr)
+2:
+	__(cmpri(cr0,imm2,three_digit_bignum_header))
+	__(vrefr(imm0,arg_z,1))
+	__(cmpri(cr1,imm0,0))
+	__(bne cr2,3f)
+	__(blt- cr1,9b)	/* two-digit value must be non-negative  */
+	__(blr)
+3:
+	__(vrefr(imm2,arg_z,2))
+	__(cmpri(cr1,imm2,0))
+	__(bne- cr0,9b)	/* more than three digits: out of range  */
+	__(bne- cr1,9b)	/* third digit must be zero  */
+	__(blr)
+        __endif
+        
+/* arg_z should be of type (SIGNED-BYTE 64);  */
+/* PPC32:   return high 32 bits  in imm0, low 32 bits in imm1  */
+/* PPC64:   return unboxed value in imm0  */
+
+/* Unbox a (SIGNED-BYTE 64).  Fixnums unbox directly (sign-extending on */
+/* PPC32 via srawi); one- or two-digit bignums supply the low/high words. */
+/* Anything else raises error_object_not_s64 (label 9). */
+_spentry(gets64)
+        __ifdef(`PPC64')
+	 __(extract_typecode(imm1,arg_z))
+         __(unbox_fixnum(imm0,arg_z))
+	 __(cmpri(cr0,imm1,tag_fixnum))
+	 __(cmpri(cr2,imm1,subtag_bignum))
+         __(beqlr cr0)
+         __(bne cr2,9f)
+         __(ld imm1,misc_header_offset(arg_z))
+         __(ld imm0,misc_data_offset(arg_z))
+         __(cmpdi imm1,two_digit_bignum_header)
+         __(rotldi imm0,imm0,32)	/* digits are stored low-word-first  */
+         __(beqlr)
+        __else
+	 __(extract_typecode(imm0,arg_z))
+	 __(cmpri(cr0,imm0,tag_fixnum))
+	 __(cmpri(cr2,imm0,subtag_bignum))
+	 __(unbox_fixnum(imm1,arg_z))
+	 __(srawi imm0,imm1,31)	/* sign-extend into the high word  */
+	 __(beqlr cr0)
+	 __(bne cr2,9f)
+	 __(getvheader(imm2,arg_z))
+	 __(cmpri(cr2,imm2,two_digit_bignum_header))
+	 __(vrefr(imm1,arg_z,0))
+	 __(srawi imm0,imm1,31)
+	 __(bltlr cr2)	/* one-digit bignum: sign-extended  */
+	 __(vrefr(imm0,arg_z,1))
+	 __(beqlr cr2)	/* two-digit bignum: explicit high word  */
+        __endif
+9:
+	__(uuo_interr(error_object_not_s64,arg_z))
+
+
+/*  Construct a lisp integer out of the 64-bit unsigned value in */
+/*        ppc32:    imm0 (high 32 bits) and imm1 (low 32 bits) */
+/*        ppc64:    imm0 (64 bits) .  */
+/* Box the unsigned 64-bit value as a fixnum when it fits, otherwise as */
+/* the smallest bignum that holds it: on PPC64, 2 digits for values with */
+/* the sign bit clear, 3 digits (extra zero sign digit) otherwise; on */
+/* PPC32, 1/2/3 digits depending on which words are significant. */
+_spentry(makeu64)
+        __ifdef(`PPC64')
+	 __(clrrdi. imm1,imm0,63-nfixnumtagbits)
+	 __(cmpri(cr1,imm0,0))
+	 __(box_fixnum(arg_z,imm0))
+	 __(beqlr cr0) /* A fixnum  */
+         __(rotldi imm1,imm0,32)	/* digit order: low word first  */
+	 __(li imm2,two_digit_bignum_header)
+	 __(blt cr1,2f)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(blr)
+2:
+	 /* Sign bit set: need a zero sign digit to stay unsigned  */
+	 __(li imm2,three_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(3)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(blr)
+        __else        
+ 	 __(cmpri(cr1,imm0,0))
+	 __(rlwinm. imm2,imm1,0,0,fixnum_shift)
+	 __(li imm2,three_digit_bignum_header)
+	 __(box_fixnum(arg_z,imm1))
+	 __(blt cr1,3f)	/* high word has its sign bit set: 3 digits  */
+	 __(bne cr1,2f)	/* high word non-zero: 2 digits  */
+	 __(beqlr cr0) /* A fixnum  */
+	 __(blt cr0,2f)
+	 __(li imm2,one_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(1)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(blr)
+2:
+	 __(li imm2,two_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(str(imm0,misc_data_offset+4(arg_z)))
+	 __(blr)
+3:
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(3)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(str(imm0,misc_data_offset+4(arg_z)))
+	 __(blr)
+        __endif
+
+
+
+/*  Construct a lisp integer out of the 64-bit signed value in */
+/*        ppc32:    imm0 (high 32 bits) and imm1 (low 32 bits). */
+/*        ppc64:    imm0  */
+/* Box the signed 64-bit value: try fixnum first by shifting left */
+/* fixnumshift times with overflow detection (addo chain); if any add */
+/* overflowed, clear XER and cons a one- or two-digit bignum instead. */
+_spentry(makes64)
+        __ifdef(`PPC64')
+	 __(addo imm1,imm0,imm0)	/* box by 3 overflow-checked doublings  */
+         __(addo imm1,imm1,imm1)
+	 __(addo. arg_z,imm1,imm1)
+	 __(bnslr+)	/* no overflow: fixnum result in arg_z  */
+	 __(mtxer rzero)
+	 __(li imm1,two_digit_bignum_header)
+         __(rotldi imm0,imm0,32)	/* digit order: low word first  */
+	 __(Misc_Alloc_Fixed(arg_z,imm1,aligned_bignum_size(2)))
+	 __(str(imm0,misc_data_offset(arg_z)))
+         __(blr)
+        __else
+	 __(srawi imm2,imm1,31)
+	 __(cmpr(cr1,imm2,imm0))	/* is imm0 just low word's sign-extension?  */
+	 __(addo imm2,imm1,imm1)
+	 __(addo. arg_z,imm2,imm2)
+	 __(bne cr1,2f) /* High word is significant  */
+	 __(li imm2,one_digit_bignum_header)
+	 __(bnslr cr0) /* No overflow:	 fixnum  */
+	 __(mtxer rzero)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(1)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(blr)
+2:
+	 __(mtxer rzero)
+	 __(li imm2,two_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+	 __(str(imm0,misc_data_offset+4(arg_z)))
+	 __(blr)
+        __endif
+
+/* imm0:imm1 constitute an unsigned integer, almost certainly a bignum. */
+/* Make a lisp integer out of those 128 bits ..  */
+/* imm0:imm1 constitute an unsigned integer, almost certainly a bignum. */
+/* Make a lisp integer out of those 128 bits ..  */
+/* PPC64 only: pick the smallest bignum (3, 4, or 5 digits) that holds */
+/* the value unsigned; a zero high half delegates to _SPmakeu64.  The */
+/* PPC32 branch is an unconditional trap: unreachable/unimplemented. */
+_spentry(makeu128)
+        __ifdef(`PPC64')
+         __(cmpdi imm0,0)
+         __(cmpdi cr1,imm1,0)
+         __(srdi imm3,imm0,32)
+         __(srawi imm4,imm0,31)
+         __(cmpdi cr3,imm3,0)
+         __(cmpdi cr4,imm4,0)
+         __(li imm2,five_digit_bignum_header)
+         __(blt cr1,0f)
+         __(beq 3f)	/* high half zero: only 64 bits significant  */
+0:              
+         __(bge 1f)
+         /* All 128 bits are significant, and the most significant */
+         /* bit is set.  Allocate a 5-digit bignum (with a zero */
+         /* sign digit  */
+         __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(5)))
+         __(rotldi imm0,imm0,32)
+         __(rotldi imm1,imm1,32)
+         __(std imm1,misc_data_offset(arg_z))
+         __(std imm0,misc_data_offset+8(arg_z))
+         __(blr)
+1:       /* If the high word of imm0 is a zero-extension of the low */
+         /* word, we only need 3 digits ; otherwise, we need 4.  */
+         __(li imm2,three_digit_bignum_header)
+         __(rotldi imm1,imm1,32)
+         __(bne cr3,2f) /* high word of imm0 is non-zero  */
+         __(bne cr4,2f) /* sign bit is on in low word of imm0  */
+         __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(3)))
+         __(std imm1,misc_data_offset(arg_z))
+         __(stw imm0,misc_data_offset+8(arg_z))
+         __(blr)
+2:       __(li imm2,four_digit_bignum_header)
+         __(rotldi imm0,imm0,32)
+         __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(4)))
+         __(std imm1,misc_data_offset(arg_z))
+         __(std imm0,misc_data_offset+8(arg_z))
+         __(blr)
+3:       __(mr imm0,imm1)
+         __(b _SPmakeu64)              
+        __else
+         __(twgei r0,r0)	/* unconditional trap: not supported on PPC32  */
+        __endif
+
+/* imm0:imm1 constitute a signed integer, almost certainly a bignum. */
+/* Make a lisp integer out of those 128 bits ..  */
+_spentry(makes128)
+        __ifdef(`PPC64')
+         /* Is imm0 just a sign-extension of imm1 ?  */
+         __(sradi imm2,imm1,63)
+         /* Is the high word of imm0 just a sign-extension of the low word ?  */
+         __(extsw imm3,imm0)
+         __(cmpd imm2,imm0)
+         __(cmpd cr1,imm3,imm0)
+         /* Only 64 significant bits : defer to makes64. */
+         __(beq 2f)
+         /* Swap 32-bit halves so digits are stored in the right order. */
+         __(rotldi imm0,imm0,32)
+         __(rotldi imm1,imm1,32)
+         __(beq cr1,1f)
+         /* Full 128 bits significant : 4-digit bignum. */
+         __(li imm2,four_digit_bignum_header)
+         __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(4)))
+         __(std imm1,misc_data_offset(arg_z))
+         __(std imm0,misc_data_offset+8(arg_z))
+         __(blr)
+1:       /* 96 significant bits : 3 digits, top digit sign-extended. */
+	 __(li imm2,three_digit_bignum_header)
+         __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(3)))
+         __(std imm1,misc_data_offset(arg_z))
+         __(stw imm3,misc_data_offset+8(arg_z))
+         __(blr)
+2:       __(mr imm0,imm1)
+         __(b _SPmakes64)        
+        __else
+         /* Not implemented for 32-bit PPC: trap unconditionally. */
+         __(twgei r0,r0)
+        __endif        
+                        
+/* on entry: arg_z = symbol.  On exit, arg_z = value (possibly */
+/* unbound_marker), arg_y = symbol, imm3 = symbol.binding-index  */
+_spentry(specref)
+        __(ldr(imm3,symbol.binding_index(arg_z)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(cmpr(imm3,imm0))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(mr arg_y,arg_z)
+        /* Binding index at/beyond the TLB limit : no per-thread slot. */
+        __(bge 1f)
+        __(ldrx(arg_z,imm2,imm3))
+        __(cmpri(arg_z,no_thread_local_binding_marker))
+        /* Return the thread-local value unless the slot holds the marker. */
+        __(bnelr)
+1:     	__(ldr(arg_z,symbol.vcell(arg_y)))
+        __(blr)
+
+
+/* As for specref above, but trap if the resulting value is the */
+/* unbound marker. */
+_spentry(specrefcheck)
+        __(ldr(imm3,symbol.binding_index(arg_z)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(cmpr(imm3,imm0))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(mr arg_y,arg_z)
+        __(bge 1f)
+        __(ldrx(arg_z,imm2,imm3))
+        __(cmpri(arg_z,no_thread_local_binding_marker))
+        __(bne 2f)
+1:     	__(ldr(arg_z,symbol.vcell(arg_y)))
+2:      __(treqi(arg_z,unbound_marker))
+        __(blr)
+	
+/* arg_y = special symbol, arg_z = new value.          */
+_spentry(specset)
+        __(ldr(imm3,symbol.binding_index(arg_y)))
+        __(ldr(imm0,tcr.tlb_limit(rcontext)))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(cmpr(imm3,imm0))
+        __(bge 1f)
+        __(ldrx(temp1,imm2,imm3))
+        __(cmpri(temp1,no_thread_local_binding_marker))
+        __(beq 1f)
+        /* Thread-local binding exists : store into the TLB slot. */
+        __(strx(arg_z,imm2,imm3))
+        __(blr)
+1:     	/* No thread-local binding : set the global value cell via */
+        /* gvset (arg_x = symbol, arg_y = vcell offset, arg_z = value). */
+        __(mr arg_x,arg_y)
+        __(li arg_y,symbol.vcell-misc_data_offset)
+        __(b _SPgvset)
+
+/* Restore current thread's interrupt level to arg_z, */
+/* noting whether the tcr's interrupt_pending flag was set.  */
+_spentry(restoreintlevel)
+	/* If restoring to level 0 and an interrupt is pending, clear */
+	/* the pending flag and trap so the interrupt gets serviced. */
+	__(cmpri(cr1,arg_z,0))
+	__(ldr(imm0,tcr.interrupt_pending(rcontext)))
+	__(cmpri(cr0,imm0,0))
+	__(bne cr1,1f)
+	__(beq cr0,1f)
+	__(str(rzero,tcr.interrupt_pending(rcontext)))
+	__(li nargs,fixnum_one)
+	__(trgti(nargs,0))
+	__(blr)
+1:
+	/* Otherwise just store the new level into the TLB slot for */
+	/* CCL::*INTERRUPT-LEVEL*. */
+        __(ldr(nargs,tcr.tlb_pointer(rcontext)))
+	__(str(arg_z,INTERRUPT_LEVEL_BINDING_INDEX(nargs)))
+	__(blr)
+
+
+/* Construct a lisp integer out of the 32-bit signed value in imm0 */
+
+        
+_spentry(makes32)
+        __ifdef(`PPC64')
+         /* Every 32-bit value fits in a 64-bit fixnum. */
+         __(box_fixnum(arg_z,imm0))
+        __else
+	 /* Box by doubling twice with overflow detection; if either */
+	 /* add sets XER[SO], the value does not fit in a fixnum and */
+	 /* we cons a one-digit bignum instead. */
+	 __(addo imm1,imm0,imm0)
+	 __(addo. arg_z,imm1,imm1)
+	 __(bnslr+)
+	 __(mtxer rzero)
+	 __(li imm1,one_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm1,aligned_bignum_size(1)))
+	 __(str(imm0,misc_data_offset(arg_z)))
+        __endif
+	 __(blr)
+
+
+/* Construct a lisp integer out of the 32-bit unsigned value in imm0 */
+
+        
+_spentry(makeu32)
+        __ifdef(`PPC64')
+         __(box_fixnum(arg_z,imm0))
+         __(blr)
+        __else
+	 /* cr0.eq iff everything above the usable fixnum bits is clear. */
+	 __(clrrwi. imm1,imm0,31-nfixnumtagbits)
+	 __(cmpri(cr1,imm0,0))
+	 __(box_fixnum(arg_z,imm0))
+	 __(beqlr cr0) /* A fixnum  */
+	 /* Sign bit set (negative as a signed word) : need a second */
+	 /* (zero) sign digit, hence a two-digit bignum. */
+	 __(blt cr1,2f)
+	 __(li imm2,one_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(1)))
+	 __(str(imm0,misc_data_offset(arg_z)))
+	 __(blr)
+2:
+	 /* The high (sign) digit is left as allocated - presumably */
+	 /* zero-filled; confirm against Misc_Alloc_Fixed. */
+	 __(li imm2,two_digit_bignum_header)
+	 __(Misc_Alloc_Fixed(arg_z,imm2,aligned_bignum_size(2)))
+	 __(str(imm0,misc_data_offset(arg_z)))
+	 __(blr)
+        __endif
+
+/*  */
+/* arg_z should be of type (SIGNED-BYTE 32); return unboxed result in imm0 */
+/*  */
+_spentry(gets32)
+        __ifdef(`PPC64')
+         /* Shift the boxed fixnum left then arithmetic-shift it back; */
+         /* if the round trip preserves it (cr1.eq), the value fits in */
+         /* 32 signed bits. cr0 holds the fixnum-tag check. */
+         __(sldi imm1,arg_z,32-fixnumshift)
+         __(extract_lisptag_(imm0,arg_z))
+         __(sradi imm1,imm1,32-fixnumshift)
+         /* Return the UNBOXED value in imm0 (was box_fixnum, which */
+         /* shifts the wrong way and would return a re-boxed value). */
+         __(unbox_fixnum(imm0,arg_z))
+         __(cmpd cr1,imm1,arg_z)
+         __(bne cr0,9f)
+         __(beqlr cr1)
+         __(b 9f)
+        __else
+	 /* Accept a fixnum, or a one-digit bignum. */
+	 __(extract_typecode(imm1,arg_z))
+	 __(cmpri(cr0,imm1,tag_fixnum))
+	 __(cmpri(cr2,imm1,subtag_bignum))
+	 __(unbox_fixnum(imm0,arg_z))
+	 __(beqlr+ cr0)
+	 __(bne cr2,9f)
+	 __(getvheader(imm1,arg_z))
+	 __(cmpri(cr1,imm1,one_digit_bignum_header))
+	 __(vrefr(imm0,arg_z,0))
+	 __(beqlr+ cr1)
+        __endif
+9:
+	__(uuo_interr(error_object_not_signed_byte_32,arg_z))
+
+/*  */
+/* arg_z should be of type (UNSIGNED-BYTE 32); return unboxed result in imm0 */
+/*  */
+
+_spentry(getu32)
+	/* Accept a non-negative fixnum, a positive one-digit bignum, or */
+	/* a two-digit bignum whose high (sign) digit is zero. */
+	__(extract_typecode(imm1,arg_z))
+	__(cmpri(cr0,imm1,tag_fixnum))
+	__(cmpri(cr1,arg_z,0))
+	__(cmpri(cr2,imm1,subtag_bignum))
+	__(unbox_fixnum(imm0,arg_z))
+	__(bne cr0,8f)
+	__(bgelr cr1)
+8:
+	__(bne- cr2,9f)
+	__(getvheader(imm2,arg_z))
+	__(cmpri(cr2,imm2,two_digit_bignum_header))
+	__(vrefr(imm0,arg_z,0))
+	__(cmpri(cr0,imm0,0))
+	/* More than two digits : out of range. */
+	__(bgt cr2,9f)
+	__(beq cr2,2f)
+	/* One digit : must be non-negative. */
+	__(blt cr0,9f)
+	__(blr)
+2:
+	/* Two digits : the sign digit must be zero. */
+	__(vrefr(imm1,arg_z,1))
+	__(cmpri(cr0,imm1,0))
+	__(beqlr+ cr0)
+
+9:
+	__(uuo_interr(error_object_not_unsigned_byte_32,arg_z))
+
+/* */
+/* arg_z has overflowed (by one bit) as the result of an addition or subtraction. */
+/* Make a bignum out of it. */
+
+_spentry(fix_overflow)
+	__(mtxer rzero)
+	__(unbox_fixnum(imm1,arg_z))
+        __ifdef(`PPC64')
+	 __(li imm0,two_digit_bignum_header)
+         __(rotldi imm1,imm1,32)
+	 /* Flip the bits lost to the one-bit overflow to recover the */
+	 /* true sign/magnitude of the result. */
+	 __(xoris imm1,imm1,0xe000)
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(2)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __else
+	 __(li imm0,one_digit_bignum_header)
+	 __(xoris imm1,imm1,0xc000)
+	 __(Misc_Alloc_Fixed(arg_z,imm0,aligned_bignum_size(1)))
+	 __(str(imm1,misc_data_offset(arg_z)))
+        __endif
+	__(blr)
+		
+
+
+/* */
+/* As per mvpass above, but in this case fname is known to be a */
+/* symbol. */
+
+_spentry(mvpasssym)
+	__(cmpri(cr0,nargs,node_size*nargregs))
+	__(mflr loc_pc)
+	__(mr imm0,vsp)
+	/* If more than nargregs args, account for the ones pushed on */
+	/* the value stack when computing the saved vsp. */
+	__(ble+ cr0,1f)
+	 __(subi imm0,imm0,node_size*nargregs)
+	 __(add imm0,imm0,nargs)
+1:            
+	__(build_lisp_frame(fn,loc_pc,imm0))
+	/* Return through the multiple-value receiver. */
+	__(ref_global(loc_pc,ret1val_addr))
+	__(li fn,0)
+	__(mtlr loc_pc)
+	__(jump_fname())
+
+
+
+/* Pop one special binding : restore the saved value into the TLB */
+/* slot and unlink the binding record from tcr.db_link. */
+_spentry(unbind)
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))   
+        __(ldr(imm3,binding.sym(imm1)))
+        __(ldr(temp1,binding.val(imm1)))
+        __(ldr(imm1,binding.link(imm1)))
+        __(strx(temp1,imm2,imm3))
+        __(str(imm1,tcr.db_link(rcontext)))
+        __(blr)
+
+/* Pop imm0 special bindings (imm0 > 0 assumed). */
+_spentry(unbind_n)
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))   
+1:      __(subi imm0,imm0,1)
+        __(ldr(imm3,binding.sym(imm1)))
+        __(ldr(temp1,binding.val(imm1)))
+        __(cmpri(imm0,0))
+        __(ldr(imm1,binding.link(imm1)))
+        __(strx(temp1,imm2,imm3))
+        __(bne 1b)
+        __(str(imm1,tcr.db_link(rcontext)))
+        __(blr)
+
+/* */
+/* Clobbers imm1,imm2,imm5,arg_x, arg_y */
+
+/* Pop special bindings until tcr.db_link reaches the frame in imm0. */
+_spentry(unbind_to)
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+1:      __(ldr(imm5,binding.sym(imm1)))
+        __(ldr(arg_y,binding.val(imm1)))
+        __(ldr(imm1,binding.link(imm1)))
+        __(cmpr(imm0,imm1))
+        __(strx(arg_y,imm2,imm5))
+        __(bne 1b)
+        __(str(imm1,tcr.db_link(rcontext)))
+        __(blr)
+	
+
+
+/* */
+/* Restore the special bindings from the top of the tstack,  */
+/* leaving the tstack frame allocated.  */
+/* Note that there might be 0 saved bindings, in which case  */
+/* do nothing.  */
+/* Note also that this is -only- called from an unwind-protect  */
+/* cleanup form, and that .SPnthrowXXX is keeping one or more  */
+/* values in a frame on top of the tstack.  */
+/*  */
+                        
+_spentry(progvrestore)
+	__(ldr(imm0,tsp_frame.backlink(tsp)))	/* ignore .SPnthrowXXX values frame  */
+	__(ldr(imm0,tsp_frame.data_offset(imm0)))
+	__(cmpri(cr0,imm0,0))
+	__(unbox_fixnum(imm0,imm0))
+	/* Unbind that many specials; do nothing if the count is 0. */
+	__(bne+ cr0,_SPunbind_n)
+	__(blr)
+
+/* Bind CCL::*INTERRUPT-LEVEL* to 0.  If its value had been negative, check  */
+/* for pending interrupts after doing so.  "nargs" can be freely used for an */
+/* interrupt trap in this context.  */
+_spentry(bind_interrupt_level_0)
+        __(ldr(imm4,tcr.tlb_pointer(rcontext)))
+        __(ldr(temp0,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(cmpri(temp0,0))
+        __(li imm3,INTERRUPT_LEVEL_BINDING_INDEX)
+        /* Push a binding record (old value, index, old db_link) on the */
+        /* value stack and make it the new head of tcr.db_link. */
+        __(vpush(temp0))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(str(rzero,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(str(vsp,tcr.db_link(rcontext)))
+        /* Old level was already 0 : done. */
+        __(beqlr)
+        __(mr nargs,temp0)
+        __(bgt 1f)
+        /* Old level was negative : trap only if an interrupt is pending. */
+        __(ldr(nargs,tcr.interrupt_pending(rcontext)))
+1:      __(trgti(nargs,0))        
+        __(blr)
+
+/* Bind CCL::*INTERRUPT-LEVEL* to the fixnum -1.  (This has the effect */
+/* of disabling interrupts.)  */
+_spentry(bind_interrupt_level_m1)
+        __(li imm2,-fixnumone)
+        __(li imm3,INTERRUPT_LEVEL_BINDING_INDEX)
+        __(ldr(imm4,tcr.tlb_pointer(rcontext)))
+        __(ldr(temp0,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        /* Push a binding record (old value, index, old db_link). */
+        __(vpush(temp0))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(str(imm2,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(str(vsp,tcr.db_link(rcontext)))
+        __(blr)
+
+        
+/* Bind CCL::*INTERRUPT-LEVEL* to the value in arg_z.  If that value's 0, */
+/* do what _SPbind_interrupt_level_0 does  */
+_spentry(bind_interrupt_level)
+        __(cmpri(arg_z,0))
+        __(li imm3,INTERRUPT_LEVEL_BINDING_INDEX)
+        __(ldr(imm4,tcr.tlb_pointer(rcontext)))
+        __(ldr(temp0,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        /* Level 0 needs the pending-interrupt check : take that path. */
+        __(beq _SPbind_interrupt_level_0)
+        __(vpush(temp0))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(str(arg_z,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(str(vsp,tcr.db_link(rcontext)))
+        __(blr)
+
+/* Unbind CCL::*INTERRUPT-LEVEL*.  If the value changes from negative to */
+/* non-negative, check for pending interrupts.  This is often called in */
+/* a context where nargs is significant, so save and restore nargs around */
+/* any interrupt polling  */
+        
+_spentry(unbind_interrupt_level)
+        __(ldr(imm0,tcr.flags(rcontext)))
+        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
+        __(andi. imm0,imm0,1<<TCR_FLAG_BIT_PENDING_SUSPEND)
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        __(ldr(temp1,INTERRUPT_LEVEL_BINDING_INDEX(imm2)))
+        __(bne 5f)
+0:      /* Pop the binding : restore the saved level and db_link. */
+        __(cmpri(cr1,temp1,0))
+        __(ldr(temp1,binding.val(imm1)))
+        __(ldr(imm1,binding.link(imm1)))
+        __(cmpri(cr0,temp1,0))
+        __(str(temp1,INTERRUPT_LEVEL_BINDING_INDEX(imm2)))
+        __(str(imm1,tcr.db_link(rcontext)))
+        /* Only poll when the level went from negative to non-negative. */
+        __(bgelr cr1)
+        __(bltlr cr0)
+        /* nargs may be live here : preserve it around the poll. */
+        __(mr imm2,nargs)
+        __(check_pending_interrupt(`cr1'))
+        __(mr nargs,imm2)
+        __(blr)
+5:       /* Missed a suspend request; force suspend now if we're restoring
+          interrupt level to -1 or greater */
+        __(cmpri(temp1,-2<<fixnumshift))
+        __(bne 0b)
+        __(ldr(imm0,binding.val(imm1)))
+        __(cmpr(imm0,temp1))
+        __(beq 0b)
+        __(li imm0,1<<fixnumshift)
+        __(str(imm0,INTERRUPT_LEVEL_BINDING_INDEX(imm2)))
+        __(suspend_now())
+        __(b 0b)
+
+
+/* arg_x = array, arg_y = i, arg_z = j. Typecheck everything.
+   We don't know whether the array is alleged to be simple or
+   not, and don't know anything about the element type.  */
+_spentry(aref2)
+        __(extract_typecode(imm2,arg_x))
+        __(trap_unless_lisptag_equal(arg_y,tag_fixnum,imm0))
+        __(cmpri(cr2,imm2,subtag_arrayH))
+        __(trap_unless_lisptag_equal(arg_z,tag_fixnum,imm0))
+        __(bne cr2,1f)
+        __(ldr(imm1,arrayH.rank(arg_x)))
+        __(cmpri(imm1,2<<fixnumshift))
+        __(bne 1f)
+        /* It's a 2-dimensional array.  Check bounds */
+        __(ldr(imm0,arrayH.dim0(arg_x)))
+        __(trlge(arg_y,imm0))
+        __(ldr(imm0,arrayH.dim0+node_size(arg_x)))
+        __(trlge(arg_z,imm0))
+        __(unbox_fixnum(imm0,imm0))
+        __(mullr(arg_y,arg_y,imm0))
+        __(add arg_z,arg_z,arg_y)
+        /* arg_z is now row-major-index; get data vector and
+           add in possible offset */
+        __(mr arg_y,arg_x)
+        /* Follow the displacement chain until we reach an underlying */
+        /* vector (subtag > vectorH), then do the generic misc_ref. */
+0:      __(ldr(imm0,arrayH.displacement(arg_y)))
+        __(ldr(arg_y,arrayH.data_vector(arg_y)))
+        __(extract_subtag(imm1,arg_y))
+        __(cmpri(imm1,subtag_vectorH))
+        __(add arg_z,arg_z,imm0)
+        __(bgt local_label(misc_ref_common))
+        __(b 0b)
+1:              
+        __(uuo_interr(error_object_not_array_2d,arg_x))
+
+/* temp0 = array, arg_x = i, arg_y = j, arg_z = k */
+_spentry(aref3)
+        __(extract_typecode(imm2,temp0))
+        __(trap_unless_lisptag_equal(arg_x,tag_fixnum,imm0))
+        __(cmpri(cr2,imm2,subtag_arrayH))
+        __(trap_unless_lisptag_equal(arg_y,tag_fixnum,imm0))
+        __(bne cr2,1f)
+        __(ldr(imm1,arrayH.rank(temp0)))
+        __(trap_unless_lisptag_equal(arg_z,tag_fixnum,imm0))
+        __(cmpri(imm1,3<<fixnumshift))
+        __(bne 1f)
+        /* It's a 3-dimensional array.  Check bounds */
+        __(ldr(imm2,arrayH.dim0+(node_size*2)(temp0)))
+        __(ldr(imm1,arrayH.dim0+node_size(temp0)))
+        __(ldr(imm0,arrayH.dim0(temp0)))
+        __(trlge(arg_z,imm2))
+        __(unbox_fixnum(imm2,imm2))
+        __(trlge(arg_y,imm1))
+        __(unbox_fixnum(imm1,imm1))
+        __(trlge(arg_x,imm0))
+        /* row-major index = ((i * dim1) + j) * dim2 + k, computed as */
+        /* i*dim1*dim2 + j*dim2 + k. */
+        __(mullr(arg_y,arg_y,imm2))
+        __(mullr(imm1,imm2,imm1))
+        __(mullr(arg_x,imm1,arg_x))
+        __(add arg_z,arg_z,arg_y)
+        __(add arg_z,arg_z,arg_x)
+        __(mr arg_y,temp0)
+        /* Chase displaced-array links to the underlying data vector. */
+0:      __(ldr(arg_x,arrayH.displacement(arg_y)))
+        __(ldr(arg_y,arrayH.data_vector(arg_y)))
+        __(extract_subtag(imm1,arg_y))
+        __(cmpri(imm1,subtag_vectorH))
+        __(add arg_z,arg_x,arg_z)
+        __(bgt local_label(misc_ref_common))
+        __(b 0b)
+1:              
+        __(uuo_interr(error_object_not_array_3d,temp0))
+
+        
+        
+
+/* As for aref2 above, but temp = array, arg_x = i, arg_y = j, arg_z = newval */
+_spentry(aset2)
+        __(extract_typecode(imm2,temp0))
+        __(trap_unless_lisptag_equal(arg_x,tag_fixnum,imm0))
+        __(cmpri(cr2,imm2,subtag_arrayH))
+        __(trap_unless_lisptag_equal(arg_y,tag_fixnum,imm0))
+        __(bne cr2,1f)
+        __(ldr(imm1,arrayH.rank(temp0)))
+        __(cmpri(imm1,2<<fixnumshift))
+        __(bne 1f)
+        /* It's a 2-dimensional array.  Check bounds */
+        __(ldr(imm0,arrayH.dim0(temp0)))
+        __(trlge(arg_x,imm0))
+        __(ldr(imm0,arrayH.dim0+node_size(temp0)))
+        __(trlge(arg_y,imm0))
+        __(unbox_fixnum(imm0,imm0))
+        __(mullr(arg_x,arg_x,imm0))
+        __(add arg_y,arg_y,arg_x)
+        /* arg_y is now row-major-index; get data vector and
+           add in possible offset */
+        __(mr arg_x,temp0)
+        /* Chase displaced-array links, then store via misc_set. */
+0:      __(ldr(imm0,arrayH.displacement(arg_x)))
+        __(ldr(arg_x,arrayH.data_vector(arg_x)))
+        __(extract_subtag(imm1,arg_x))
+        __(cmpri(imm1,subtag_vectorH))
+        __(add arg_y,arg_y,imm0)
+        __(bgt local_label(misc_set_common))
+        __(b 0b)
+1:              
+        __(uuo_interr(error_object_not_array_2d,temp0))        
+                
+/* temp1 = array, temp0 = i, arg_x = j, arg_y = k, arg_z = new */        
+_spentry(aset3)
+        __(extract_typecode(imm2,temp1))
+        __(trap_unless_lisptag_equal(temp0,tag_fixnum,imm0))
+        __(cmpri(cr2,imm2,subtag_arrayH))
+        __(trap_unless_lisptag_equal(arg_x,tag_fixnum,imm0))
+        __(bne cr2,1f)
+        __(ldr(imm1,arrayH.rank(temp1)))
+        __(trap_unless_lisptag_equal(arg_y,tag_fixnum,imm0))
+        __(cmpri(imm1,3<<fixnumshift))
+        __(bne 1f)
+        /* It's a 3-dimensional array.  Check bounds */
+        __(ldr(imm2,arrayH.dim0+(node_size*2)(temp1)))
+        __(ldr(imm1,arrayH.dim0+node_size(temp1)))
+        __(ldr(imm0,arrayH.dim0(temp1)))
+        __(trlge(arg_y,imm2))
+        __(unbox_fixnum(imm2,imm2))
+        __(trlge(arg_x,imm1))
+        __(unbox_fixnum(imm1,imm1))
+        __(trlge(temp0,imm0))
+        /* row-major index = i*dim1*dim2 + j*dim2 + k. */
+        __(mullr(arg_x,arg_x,imm2))
+        __(mullr(imm1,imm2,imm1))
+        __(mullr(temp0,imm1,temp0))
+        __(add arg_y,arg_y,arg_x)
+        __(add arg_y,arg_y,temp0)
+        __(mr arg_x,temp1)
+        /* Chase displaced-array links, then store via misc_set. */
+0:      __(ldr(temp0,arrayH.displacement(arg_x)))
+        __(ldr(arg_x,arrayH.data_vector(arg_x)))
+        __(extract_subtag(imm1,arg_x))
+        __(cmpri(imm1,subtag_vectorH))
+        __(add arg_y,arg_y,temp0)
+        __(bgt local_label(misc_set_common))
+        __(b 0b)
+1:              
+        __(uuo_interr(error_object_not_array_3d,temp1))
+
+
+        
+
+/* Like mkunwind, but disable interrupts (bind the level to -1) for */
+/* the duration of the cleanup, rebinding the saved level on entry */
+/* to the protected form. */
+_spentry(nmkunwind)
+        __(li imm2,-fixnumone)
+        __(li imm3,INTERRUPT_LEVEL_BINDING_INDEX)
+        __(ldr(imm4,tcr.tlb_pointer(rcontext)))
+        __(ldr(arg_y,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(ldr(imm1,tcr.db_link(rcontext)))
+        /* Push a binding record for CCL::*INTERRUPT-LEVEL*. */
+        __(vpush(arg_y))
+        __(vpush(imm3))
+        __(vpush(imm1))
+        __(str(imm2,INTERRUPT_LEVEL_BINDING_INDEX(imm4)))
+        __(str(vsp,tcr.db_link(rcontext)))
+	__(lwi(arg_z,unbound_marker))
+	__(li imm2,fixnum_one)
+	__(mkcatch())
+        __(mr arg_z,arg_y)
+        __(b _SPbind_interrupt_level)
+
+        /* DWARF CFI / LSDA glue for Darwin ppc64 : registers the ObjC-2 */
+        /* exception personality so foreign exceptions can unwind through */
+        /* _SPffcall and _SPffcall_return_registers frames. */
+        .if 1
+        __ifdef(`DARWIN')
+         __ifdef(`PPC64')
+L_lisp_objc2_personality:       
+        __(ref_global(r12,objc_2_personality))
+        __(mtctr r12)
+        __(bctr)
+        .data
+        .globl _lisp_objc2_personality
+_lisp_objc2_personality: 
+        .quad L_lisp_objc2_personality
+	
+	.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+EH_frame1:
+	.set L$set$12,LECIE1-LSCIE1
+	.long L$set$12	/* Length of Common Information Entry */
+LSCIE1:
+	.long	0x0	/* CIE Identifier Tag */
+	.byte	0x1	/* CIE Version */
+	.ascii "zPLR\0"	/* CIE Augmentation */
+	.byte	0x1	/* uleb128 0x1; CIE Code Alignment Factor */
+	.byte	0x78	/* sleb128 -8; CIE Data Alignment Factor */
+	.byte	0x41	/* CIE RA Column */
+	.byte	0x7
+	.byte	0x9b
+	.long   _lisp_objc2_personality-.
+	.byte	0x10	/* LSDA Encoding (pcrel) */
+	.byte	0x10	/* FDE Encoding (pcrel) */
+	.byte	0xc
+	.byte	0x1
+	.byte	0x0
+	.align 3
+LECIE1:
+        /* FDE covering _SPffcall (Lffcall .. Lffcall_end). */
+        .globl _SPffcall.eh
+_SPffcall.eh:
+        .set assembler_nonsense,LEFDEffcall-LSFDEffcall
+        .long assembler_nonsense
+LSFDEffcall:      
+        .long LSFDEffcall-EH_frame1 /* FDE CIE offset */
+        .quad Lffcall-. /* FDE Initial Location */
+        .quad Lffcall_end-Lffcall /* FDE address range */
+        .byte 8 /* uleb128 0x8; Augmentation size */
+        .quad LLSDA1-.           /* Language Specific Data Area */
+	.byte DW_CFA_def_cfa_offset 
+	.byte 0xc0,0x1 /* uleb128 0xc0.  A lie:  the frame is variable-length */
+	.byte DW_CFA_offset_extended_sf
+	.byte	0x41	
+	.byte	0x7e	/* sleb128 -2 */
+	.byte DW_CFA_advance_loc4
+	.long Lffcall_setup-Lffcall
+	.byte DW_CFA_advance_loc4
+	.long Lffcall_setup_end-Lffcall_setup
+	.byte DW_CFA_advance_loc4
+	.long Lffcall_call_end-Lffcall_call
+	.align 3
+LEFDEffcall:
+	
+        /* FDE covering _SPffcall_return_registers. */
+        .globl _SPffcall_return_registers.eh
+_SPffcall_return_registers.eh:
+        .set Lfmh,LEFDEffcall_return_registers-LSFDEffcall_return_registers
+        .long Lfmh
+LSFDEffcall_return_registers:      
+        .long LSFDEffcall_return_registers-EH_frame1 /* FDE CIE offset */
+        .quad Lffcall_return_registers-. /* FDE Initial Location */
+        .quad Lffcall_return_registers_end-Lffcall_return_registers /* FDE address range */
+        .byte 8 /* uleb128 0x8; Augmentation size */
+        .quad LLSDA2-.           /* Language Specific Data Area */
+	.byte DW_CFA_def_cfa_offset 
+	.byte 0xc0,0x1 /* uleb128 0xc0.  A lie:  the frame is variable-length */
+	.byte DW_CFA_offset_extended_sf
+	.byte 0x41	
+	.byte 0x7e	/* sleb128 -2 */
+	.byte DW_CFA_advance_loc4
+	.long Lffcall_return_registers_setup-Lffcall_return_registers
+	.byte DW_CFA_advance_loc4
+	.long Lffcall_return_registers_setup_end-Lffcall_return_registers_setup
+	.byte DW_CFA_advance_loc4
+	.long Lffcall_return_registers_call_end-Lffcall_return_registers_call
+	.align 3
+LEFDEffcall_return_registers:
+        .text
+         __endif
+        __endif
+        .endif
+
+                                
+/*  EOF, basically  */
+        .globl _SPsp_end
+        /* Branch to the end-of-subprims marker. */
+        b _SPsp_end
+	_endfile
Index: /branches/arm/lisp-kernel/ppc-spjump.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-spjump.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-spjump.s	(revision 13357)
@@ -0,0 +1,191 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL.   */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence.   */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+        include(lisp.s)
+	_beginfile
+	
+/* Emit an exported jump island for subprim $1 : a single aligned */
+/* branch (j_SP$1) to the real _SP$1 entry point. */
+define(`_spjump',`
+        .align 2
+        .globl _SP$1
+_exportfn(j_SP$1)
+          __(b _SP$1)
+_endfn
+')
+         .org 0x5000-0x2000
+        /*	.align 12 */
+        /* Subprim jump table.  Each _spjump expands to one aligned */
+        /* branch, so the .org above appears to fix every entry at a */
+        /* known address; do not reorder or insert entries without */
+        /* confirming against the corresponding spentry definitions. */
+         .globl C(spjump_start)
+C(spjump_start):
+        _spjump(jmpsym)
+        _spjump(jmpnfn)
+        _spjump(funcall)
+        _spjump(mkcatch1v)
+        _spjump(mkunwind)
+        _spjump(mkcatchmv)
+        _spjump(throw)
+        _spjump(nthrowvalues)
+        _spjump(nthrow1value)
+        _spjump(bind)
+        _spjump(bind_self)
+        _spjump(bind_nil)
+        _spjump(bind_self_boundp_check)
+        _spjump(rplaca)
+        _spjump(rplacd)
+        _spjump(conslist)
+        _spjump(conslist_star)
+        _spjump(stkconslist)
+        _spjump(stkconslist_star)
+        _spjump(mkstackv)
+        _spjump(subtag_misc_ref)
+        _spjump(setqsym)
+        _spjump(progvsave)
+        _spjump(stack_misc_alloc)
+        _spjump(gvector)
+        _spjump(nvalret)
+        _spjump(mvpass)
+        _spjump(fitvals)
+        _spjump(nthvalue)
+        _spjump(values)
+        _spjump(default_optional_args)
+        _spjump(opt_supplied_p)
+        _spjump(heap_rest_arg)
+        _spjump(req_heap_rest_arg)
+        _spjump(heap_cons_rest_arg)
+        _spjump(simple_keywords)
+        _spjump(keyword_args)
+        _spjump(keyword_bind)
+        _spjump(poweropen_ffcall)
+        _spjump(aref2)
+        _spjump(ksignalerr)
+        _spjump(stack_rest_arg)
+        _spjump(req_stack_rest_arg)
+        _spjump(stack_cons_rest_arg)
+        _spjump(poweropen_callbackX)        
+        _spjump(call_closure)        
+        _spjump(getxlong)
+        _spjump(spreadargz)
+        _spjump(tfuncallgen)
+        _spjump(tfuncallslide)
+        _spjump(tfuncallvsp)
+        _spjump(tcallsymgen)
+        _spjump(tcallsymslide)
+        _spjump(tcallsymvsp)
+        _spjump(tcallnfngen)
+        _spjump(tcallnfnslide)
+        _spjump(tcallnfnvsp)
+        _spjump(misc_ref)
+        _spjump(misc_set)
+        _spjump(stkconsyz)
+        _spjump(stkvcell0)
+        _spjump(stkvcellvsp)      
+        _spjump(makestackblock)
+        _spjump(makestackblock0)
+        _spjump(makestacklist)
+        _spjump(stkgvector)
+        _spjump(misc_alloc)
+        _spjump(poweropen_ffcallX)
+        _spjump(gvset)
+        _spjump(macro_bind)
+        _spjump(destructuring_bind)
+        _spjump(destructuring_bind_inner)
+        _spjump(recover_values)
+        _spjump(vpopargregs)
+        _spjump(integer_sign)
+        _spjump(subtag_misc_set)
+        _spjump(spread_lexprz)
+        _spjump(store_node_conditional)
+        _spjump(reset)
+        _spjump(mvslide)
+        _spjump(save_values)
+        _spjump(add_values)
+        _spjump(poweropen_callback)
+        _spjump(misc_alloc_init)
+        _spjump(stack_misc_alloc_init)
+        _spjump(set_hash_key)
+        _spjump(aset2)
+        _spjump(callbuiltin)
+        _spjump(callbuiltin0)
+        _spjump(callbuiltin1)
+        _spjump(callbuiltin2)
+        _spjump(callbuiltin3)
+        _spjump(popj)
+        _spjump(restorefullcontext)
+        _spjump(savecontextvsp)
+        _spjump(savecontext0)
+        _spjump(restorecontext)
+        _spjump(lexpr_entry)
+        _spjump(poweropen_syscall)
+        _spjump(builtin_plus)
+        _spjump(builtin_minus)
+        _spjump(builtin_times)
+        _spjump(builtin_div)
+        _spjump(builtin_eq)
+        _spjump(builtin_ne)
+        _spjump(builtin_gt)
+        _spjump(builtin_ge)
+        _spjump(builtin_lt)
+        _spjump(builtin_le)
+        _spjump(builtin_eql)
+        _spjump(builtin_length)
+        _spjump(builtin_seqtype)
+        _spjump(builtin_assq)
+        _spjump(builtin_memq)
+        _spjump(builtin_logbitp)
+        _spjump(builtin_logior)
+        _spjump(builtin_logand)
+        _spjump(builtin_ash)
+        _spjump(builtin_negate)
+        _spjump(builtin_logxor)
+        _spjump(builtin_aref1)
+        _spjump(builtin_aset1)
+        _spjump(breakpoint)
+        _spjump(eabi_ff_call)
+        _spjump(eabi_callback)
+        _spjump(eabi_syscall)
+        _spjump(getu64)
+        _spjump(gets64)
+        _spjump(makeu64)
+        _spjump(makes64)
+        _spjump(specref)
+        _spjump(specset)
+        _spjump(specrefcheck)
+        _spjump(restoreintlevel)
+        _spjump(makes32)
+        _spjump(makeu32)
+        _spjump(gets32)
+        _spjump(getu32)
+        _spjump(fix_overflow)
+        _spjump(mvpasssym)
+        _spjump(aref3)
+        _spjump(aset3)
+        _spjump(poweropen_ffcall_return_registers)
+        _spjump(nmkunwind)
+        _spjump(set_hash_key_conditional)
+        _spjump(unbind_interrupt_level)
+        _spjump(unbind)
+        _spjump(unbind_n)
+        _spjump(unbind_to)
+        _spjump(bind_interrupt_level_m1)
+        _spjump(bind_interrupt_level)
+        _spjump(bind_interrupt_level_0)
+        _spjump(progvrestore)
+          .globl C(spjump_end)
+C(spjump_end):
+	__ifdef(`DARWIN')
+	 __ifdef(`PPC64')
+           .org 0x5000-0x1000
+	 __endif
+	__endif
+        _endfile
+        
Index: /branches/arm/lisp-kernel/ppc-subprims.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-subprims.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-subprims.s	(revision 13357)
@@ -0,0 +1,241 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL.  */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence.   */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+
+	include(lisp.s)
+	_beginfile
+
+	.globl _SPmkcatch1v
+	.globl _SPnthrow1value
+
+
+/* This is called from a c-style context and calls a lisp function. */
+/* This does the moral equivalent of */
+/*   (loop  */
+/*	(let* ((fn (%function_on_top_of_lisp_stack))) */
+/*	  (if fn */
+/*           (catch %toplevel-catch% */
+/*	       (funcall fn)) */
+/*            (return nil)))) */
+
+_exportfn(toplevel_loop)
+	__(mflr imm0)
+        __ifdef(`POWEROPENABI')
+	 __(str(imm0,c_frame.savelr(sp)))
+        __else
+	 __(str(imm0,eabi_c_frame.savelr(sp)))
+        __endif
+	__(b local_label(test))
+local_label(loop):
+	__(ref_nrs_value(arg_z,toplcatch))
+	__(bl _SPmkcatch1v)
+	__(b local_label(test))			/* cleanup address, not really a branch */
+
+	__(set_nargs(0))
+	__(bl _SPfuncall)
+	__(li arg_z,nil_value)
+	__(li imm0,fixnum_one)
+	__(bl _SPnthrow1value)
+local_label(test):
+	__(ldr(temp0,0(vsp)))
+	__(cmpri(cr0,temp0,nil_value))
+	__(bne cr0,local_label(loop))
+local_label(back_to_c):
+        __ifdef(`POWEROPENABI')
+	 __(ldr(imm0,c_frame.savelr(sp)))
+        __else
+	 __(ldr(imm0,eabi_c_frame.savelr(sp)))
+        __endif
+	__(mtlr imm0)
+	__(blr)
+	_endfn
+
+
+/* This sucker gets called with R3 pointing to the current TCR. */
+/* r4 is 0 if we want to start the whole thing rolling, */
+/* non-zero if we want to reset the current process */
+/* by throwing to toplevel */
+
+	.globl _SPreset
+_exportfn(C(start_lisp))
+	__(mflr r0)
+        __ifdef(`POWEROPENABI')
+	 __(str(r0,c_frame.savelr(sp)))
+         __ifdef(`rTOC')
+          __(str(rTOC,c_frame.savetoc(sp)))
+         __endif
+	 __(stru(sp,-(stack_align(c_frame.minsiz+(32*node_size)))(sp)))
+         __(str(r13,c_frame.minsiz+(0*node_size)(sp)))
+         __(str(r14,c_frame.minsiz+(1*node_size)(sp)))
+         __(str(r15,c_frame.minsiz+(2*node_size)(sp)))
+         __(str(r16,c_frame.minsiz+(3*node_size)(sp)))
+         __(str(r17,c_frame.minsiz+(4*node_size)(sp)))
+         __(str(r18,c_frame.minsiz+(5*node_size)(sp)))
+         __(str(r19,c_frame.minsiz+(6*node_size)(sp)))
+         __(str(r20,c_frame.minsiz+(7*node_size)(sp)))
+         __(str(r21,c_frame.minsiz+(8*node_size)(sp)))
+         __(str(r22,c_frame.minsiz+(9*node_size)(sp)))
+         __(str(r23,c_frame.minsiz+(10*node_size)(sp)))
+         __(str(r24,c_frame.minsiz+(11*node_size)(sp)))
+         __(str(r25,c_frame.minsiz+(12*node_size)(sp)))
+         __(str(r26,c_frame.minsiz+(13*node_size)(sp)))
+         __(str(r27,c_frame.minsiz+(14*node_size)(sp)))
+         __(str(r28,c_frame.minsiz+(15*node_size)(sp)))
+         __(str(r29,c_frame.minsiz+(16*node_size)(sp)))
+         __(str(r30,c_frame.minsiz+(17*node_size)(sp)))
+         __(str(r31,c_frame.minsiz+(18*node_size)(sp)))
+	 __(stfd fp_s32conv,c_frame.minsiz+(22*node_size)(sp))
+        __else
+	 __(str(r0,eabi_c_frame.savelr(sp)))
+	 __(stru(sp,-(eabi_c_frame.minsiz+(32*node_size))(sp)))
+         __(str(r13,eabi_c_frame.minsiz+(0*node_size)(sp)))
+         __(str(r14,eabi_c_frame.minsiz+(1*node_size)(sp)))
+         __(str(r15,eabi_c_frame.minsiz+(2*node_size)(sp)))
+         __(str(r16,eabi_c_frame.minsiz+(3*node_size)(sp)))
+         __(str(r17,eabi_c_frame.minsiz+(4*node_size)(sp)))
+         __(str(r18,eabi_c_frame.minsiz+(5*node_size)(sp)))
+         __(str(r19,eabi_c_frame.minsiz+(6*node_size)(sp)))
+         __(str(r20,eabi_c_frame.minsiz+(7*node_size)(sp)))
+         __(str(r21,eabi_c_frame.minsiz+(8*node_size)(sp)))
+         __(str(r22,eabi_c_frame.minsiz+(9*node_size)(sp)))
+         __(str(r23,eabi_c_frame.minsiz+(10*node_size)(sp)))
+         __(str(r24,eabi_c_frame.minsiz+(11*node_size)(sp)))
+         __(str(r25,eabi_c_frame.minsiz+(12*node_size)(sp)))
+         __(str(r26,eabi_c_frame.minsiz+(13*node_size)(sp)))
+         __(str(r27,eabi_c_frame.minsiz+(14*node_size)(sp)))
+         __(str(r28,eabi_c_frame.minsiz+(15*node_size)(sp)))
+         __(str(r29,eabi_c_frame.minsiz+(16*node_size)(sp)))
+         __(str(r30,eabi_c_frame.minsiz+(17*node_size)(sp)))
+         __(str(r31,eabi_c_frame.minsiz+(18*node_size)(sp)))
+	 __(stfd fp_s32conv,eabi_c_frame.minsiz+(22*node_size)(sp))
+        __endif
+	__(mr rcontext,r3)
+	__(lwi(r30,0x43300000))
+	__(lwi(r31,0x80000000))
+        __ifdef(`POWEROPENABI')
+	 __(stw r30,c_frame.minsiz+(20*node_size)(sp))
+	 __(stw r31,c_frame.minsiz+(20*node_size)+4(sp))
+	 __(lfd fp_s32conv,c_frame.minsiz+(20*node_size)(sp))
+	 __(stfd fp_zero,c_frame.minsiz+(20*node_size)(sp))
+        __else                
+ 	 __(stw r30,eabi_c_frame.minsiz+(20*node_size)(sp))
+	 __(stw r31,eabi_c_frame.minsiz+(20*node_size)+4(sp))
+	 __(lfd fp_s32conv,eabi_c_frame.minsiz+(20*node_size)(sp))
+	 __(stfd fp_zero,eabi_c_frame.minsiz+(20*node_size)(sp))
+        __endif
+	__(lfs fp_zero,lisp_globals.short_float_zero(0))
+	__(lfd f0,tcr.lisp_fpscr(rcontext))
+        __(mtfsf 0xff,f0)
+	__(li rzero,0)
+	__(mr save0,rzero)
+	__(mr save1,rzero)
+	__(mr save2,rzero)
+	__(mr save3,rzero)
+	__(mr save4,rzero)
+	__(mr save5,rzero)
+	__(mr save6,rzero)
+	__(mr save7,rzero)
+	__(mr arg_z,rzero)
+	__(mr arg_y,rzero)
+	__(mr arg_x,rzero)
+	__(mr temp0,rzero)
+	__(mr temp1,rzero)
+	__(mr temp2,rzero)
+	__(mr temp3,rzero)
+	__(li loc_pc,0)
+	__(li fn,0)
+	__(cmpri(cr0,r4,0))
+	__(mtxer rzero)  /* start lisp with the overflow bit clear */
+	__(ldr(vsp,tcr.save_vsp(rcontext)))
+	__(ldr(tsp,tcr.save_tsp(rcontext)))
+	__(ldr(allocptr,tcr.save_allocptr(rcontext)))
+	__(ldr(allocbase,tcr.save_allocbase(rcontext)))
+        __(li imm0,TCR_STATE_LISP)
+        __(str(imm0,tcr.valence(rcontext)))
+	__(bne cr0,1f)
+	__(bl toplevel_loop)
+	__(b 2f)
+1:
+	__(bl _SPreset)
+2:
+	__(str(allocptr,tcr.save_allocptr(rcontext)))
+	__(str(allocbase,tcr.save_allocbase(rcontext)))
+	__(str(tsp,tcr.save_tsp(rcontext)))
+	__(str(vsp,tcr.save_vsp(rcontext)))
+        __(li imm0,TCR_STATE_FOREIGN)
+        __(str(imm0,tcr.valence(rcontext)))
+        __ifdef(`POWEROPENABI')
+         __(ldr(r13,c_frame.minsiz+(0*node_size)(sp)))
+         __(ldr(r14,c_frame.minsiz+(1*node_size)(sp)))
+         __(ldr(r15,c_frame.minsiz+(2*node_size)(sp)))
+         __(ldr(r16,c_frame.minsiz+(3*node_size)(sp)))
+         __(ldr(r17,c_frame.minsiz+(4*node_size)(sp)))
+         __(ldr(r18,c_frame.minsiz+(5*node_size)(sp)))
+         __(ldr(r19,c_frame.minsiz+(6*node_size)(sp)))
+         __(ldr(r20,c_frame.minsiz+(7*node_size)(sp)))
+         __(ldr(r21,c_frame.minsiz+(8*node_size)(sp)))
+         __(ldr(r22,c_frame.minsiz+(9*node_size)(sp)))
+         __(ldr(r23,c_frame.minsiz+(10*node_size)(sp)))
+         __(ldr(r24,c_frame.minsiz+(11*node_size)(sp)))
+         __(ldr(r25,c_frame.minsiz+(12*node_size)(sp)))
+         __(ldr(r26,c_frame.minsiz+(13*node_size)(sp)))
+         __(ldr(r27,c_frame.minsiz+(14*node_size)(sp)))
+         __(ldr(r28,c_frame.minsiz+(15*node_size)(sp)))
+         __(ldr(r29,c_frame.minsiz+(16*node_size)(sp)))
+         __(ldr(r30,c_frame.minsiz+(17*node_size)(sp)))
+         __(ldr(r31,c_frame.minsiz+(18*node_size)(sp)))
+        __else
+         __(ldr(r13,eabi_c_frame.minsiz+(0*node_size)(sp)))
+         __(ldr(r14,eabi_c_frame.minsiz+(1*node_size)(sp)))
+         __(ldr(r15,eabi_c_frame.minsiz+(2*node_size)(sp)))
+         __(ldr(r16,eabi_c_frame.minsiz+(3*node_size)(sp)))
+         __(ldr(r17,eabi_c_frame.minsiz+(4*node_size)(sp)))
+         __(ldr(r18,eabi_c_frame.minsiz+(5*node_size)(sp)))
+         __(ldr(r19,eabi_c_frame.minsiz+(6*node_size)(sp)))
+         __(ldr(r20,eabi_c_frame.minsiz+(7*node_size)(sp)))
+         __(ldr(r21,eabi_c_frame.minsiz+(8*node_size)(sp)))
+         __(ldr(r22,eabi_c_frame.minsiz+(9*node_size)(sp)))
+         __(ldr(r23,eabi_c_frame.minsiz+(10*node_size)(sp)))
+         __(ldr(r24,eabi_c_frame.minsiz+(11*node_size)(sp)))
+         __(ldr(r25,eabi_c_frame.minsiz+(12*node_size)(sp)))
+         __(ldr(r26,eabi_c_frame.minsiz+(13*node_size)(sp)))
+         __(ldr(r27,eabi_c_frame.minsiz+(14*node_size)(sp)))
+         __(ldr(r28,eabi_c_frame.minsiz+(15*node_size)(sp)))
+         __(ldr(r29,eabi_c_frame.minsiz+(16*node_size)(sp)))
+         __(ldr(r30,eabi_c_frame.minsiz+(17*node_size)(sp)))
+         __(ldr(r31,eabi_c_frame.minsiz+(18*node_size)(sp)))
+        __endif
+	__(li r3,nil_value)
+        __ifdef(`POWEROPENABI')
+	 __(lfd fp_zero,c_frame.minsiz+(20*node_size)(sp))
+	 __(lfd fp_s32conv,c_frame.minsiz+(22*node_size)(sp))
+	 __(ldr(r0,((stack_align(c_frame.minsiz+(32*node_size)))+c_frame.savelr)(sp)))
+        __else
+	 __(lfd fp_zero,eabi_c_frame.minsiz+(20*4)(sp))
+	 __(lfd fp_s32conv,eabi_c_frame.minsiz+(22*4)(sp))
+	 __(ldr(r0,(eabi_c_frame.minsiz+(32*node_size)+eabi_c_frame.savelr)(sp)))
+        __endif
+	__(mtlr r0)
+	__(ldr(sp,0(sp)))
+         __ifdef(`rTOC')
+          __(ld rTOC,c_frame.savetoc(sp))
+         __endif
+	__(blr)
+
+_exportfn(_SPsp_end)
+	nop
+	_endfile
+
Index: /branches/arm/lisp-kernel/ppc-uuo.s
===================================================================
--- /branches/arm/lisp-kernel/ppc-uuo.s	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc-uuo.s	(revision 13357)
@@ -0,0 +1,91 @@
+/* Copyright (C) 2009 Clozure Associates */
+/* Copyright (C) 1994-2001 Digitool, Inc */
+/* This file is part of Clozure CL. */
+
+/* Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/* License , known as the LLGPL and distributed with Clozure CL as the */
+/* file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/* which is distributed with Clozure CL as the file "LGPL".  Where these */
+/* conflict, the preamble takes precedence. */
+ 
+/* Clozure CL is referenced in the preamble as the "LIBRARY." */
+ 
+/* The LLGPL is also available online at */
+/* http://opensource.franz.com/preamble.html */
+
+
+
+
+/* A uuo looks like:  */
+/*  0      5 6                  15 16   20 21          27 28  31  */
+/* +--------+-----------------------------+--------------+------+  */
+/* |   0    |XXXXXXXXXXXXXXXXXXXX |  RB   |  <minor op>  |  11  |  */
+/* +--------+-----------------------------+--------------+------+  */
+/*  */
+/* e.g., the major opcode (bits 0-5) is 0, the low 4 bits (bits 28-31)  */
+/* have the value "11" decimal (that's tagged as an immediate as far  */
+/* as lisp is concerned, a 7-bit opcode in bits 21-27, and the format  */
+/* of bits 6-20 depend on the value of the minor opcode, though typically  */
+/* bits 16-20 are used to specify a register value between 0 and 31.  */
+/*  */
+/* There are a few cases where bits 6-15 are also used to denote registers  */
+/* (RT and RA, as in an X-form PPC instruction), some where bits 6-10 are  */
+/* to be interpreted as a constant (error number or type code), and some  */
+/* where bits 6-15 do so.  */
+/*  */
+/* Since C code is typically more interested in disassembling UUOs, the  */
+/* full list of UUOs is in "uuo.h".  This file contains macros for creating  */
+/* them.  */
+/*  */
+/* Of course, there -is- no such file as "uuo.h".  That's a stale comment.  */
+/* For all anyone knows, so is this one.  */
+
+UUO_TAG = 11
+UUU_MINOR_SHIFT = 4
+UUO_RB_SHIFT = 11
+UUO_RA_SHIFT = 16
+UUO_RT_SHIFT = 21
+
+define(`rt_ra_uuo',`
+	.long (UUO_TAG|(($1)<<UUU_MINOR_SHIFT)|(($3)<<UUO_RA_SHIFT)|(($2)<<UUO_RT_SHIFT))')
+
+define(`rt_ra_rb_uuo',`
+	.long (UUO_TAG|(($1)<<UUU_MINOR_SHIFT)|(($3)<<UUO_RA_SHIFT)|(($4)<<UUO_RB_SHIFT)|(($2)<<UUO_RT_SHIFT))')
+	
+define(`errnum_rb_uuo',`
+	.long (UUO_TAG|(($1)<<UUU_MINOR_SHIFT)|(($2)<<UUO_RA_SHIFT)|(($3)<<UUO_RB_SHIFT))')
+	
+define(`errnum_ra_rb_uuo',` /* minorop,errnum,ra,rb */
+	.long (UUO_TAG|(($1)<<UUU_MINOR_SHIFT)|(($3)<<UUO_RA_SHIFT)|(($4)<<UUO_RB_SHIFT)|(($2)<<UUO_RT_SHIFT))')
+	
+	
+	
+/* Signal an internal error - type error or whatever - with error   */
+/* number (0-1023) and "register" argument.  */
+
+define(`uuo_interr',`
+	errnum_rb_uuo(11,$1,$2)')
+	
+/* As above, but make the error continuable.  (A branch presumably  */
+/* follows the UUO opcode.)  */
+
+define(`uuo_intcerr',`
+	errnum_rb_uuo(12,$1,$2)')
+
+
+/* Signal an error with a much smaller error number (0-31) and  */
+/* two "register" fields.  */
+
+define(`uuo_interr2',`
+	errnum_ra_rb_uuo(13,$1,$2,$3)')
+	
+/* Continuably ....  */
+
+define(`uuo_intcerr2',`
+	errnum_ra_rb_uuo(14,$1,$2,$3)')
+
+	
+
+/* A distinguished UUO: the handler should zero the FPSCR  */
+define(`uuo_zero_fpscr',`
+	rt_ra_rb_uuo(25,0,0,0)')
Index: /branches/arm/lisp-kernel/ppc_print.c
===================================================================
--- /branches/arm/lisp-kernel/ppc_print.c	(revision 13357)
+++ /branches/arm/lisp-kernel/ppc_print.c	(revision 13357)
@@ -0,0 +1,490 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <setjmp.h>
+
+#include "lisp.h"
+#include "area.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+
+void
+sprint_lisp_object(LispObj, int);
+
+#define PBUFLEN 252
+
+char printbuf[PBUFLEN + 4];
+int bufpos = 0;
+
+jmp_buf escape;
+
+void
+add_char(char c)
+{
+  if (bufpos >= PBUFLEN) {
+    longjmp(escape, 1);
+  } else {
+    printbuf[bufpos++] = c;
+  }
+}
+
+void
+add_string(char *s, int len) 
+{
+  while(len--) {
+    add_char(*s++);
+  }
+}
+
+void
+add_lisp_base_string(LispObj str)
+{
+  lisp_char_code *src = (lisp_char_code *)  (ptr_from_lispobj(str + misc_data_offset));
+  natural i, n = header_element_count(header_of(str));
+
+  for (i=0; i < n; i++) {
+    add_char((char)(*src++));
+  }
+}
+
+void
+add_c_string(char *s)
+{
+  add_string(s, strlen(s));
+}
+
+char numbuf[64];
+
+void
+sprint_signed_decimal(signed_natural n)
+{
+  sprintf(numbuf, "%ld", n);
+  add_c_string(numbuf);
+}
+
+void
+sprint_unsigned_decimal(natural n)
+{
+  sprintf(numbuf, "%lu", n);
+  add_c_string(numbuf);
+}
+
+void
+sprint_unsigned_hex(natural n)
+{
+#ifdef PPC64
+  sprintf(numbuf, "#x%016lx", n);
+#else
+  sprintf(numbuf, "#x%08lx", n);
+#endif
+  add_c_string(numbuf);
+}
+
+void
+sprint_list(LispObj o, int depth)
+{
+  LispObj the_cdr;
+  
+  add_char('(');
+  while(1) {
+    if (o != lisp_nil) {
+      sprint_lisp_object(ptr_to_lispobj(car(o)), depth);
+      the_cdr = ptr_to_lispobj(cdr(o));
+      if (the_cdr != lisp_nil) {
+        add_char(' ');
+        if (fulltag_of(the_cdr) == fulltag_cons) {
+          o = the_cdr;
+          continue;
+        }
+        add_c_string(". ");
+        sprint_lisp_object(the_cdr, depth);
+        break;
+      }
+    }
+    break;
+  }
+  add_char(')');
+}
+
+/* 
+  Print a list of method specializers, using the class name instead of the class object.
+*/
+
+void
+sprint_specializers_list(LispObj o, int depth)
+{
+  LispObj the_cdr, the_car;
+  
+  add_char('(');
+  while(1) {
+    if (o != lisp_nil) {
+      the_car = car(o);
+      if (fulltag_of(the_car) == fulltag_misc) {
+        sprint_lisp_object(deref(deref(the_car,3), 4), depth);
+      } else {
+        sprint_lisp_object(the_car, depth);
+      }
+      the_cdr = cdr(o);
+      if (the_cdr != lisp_nil) {
+        add_char(' ');
+        if (fulltag_of(the_cdr) == fulltag_cons) {
+          o = the_cdr;
+          continue;
+        }
+        add_c_string(". ");
+        sprint_lisp_object(the_cdr, depth);
+        break;
+      }
+    }
+    break;
+  }
+  add_char(')');
+}
+
+char *
+vector_subtag_name(unsigned subtag)
+{
+  switch (subtag) {
+  case subtag_bit_vector:
+    return "BIT-VECTOR";
+    break;
+  case subtag_instance:
+    return "INSTANCE";
+    break;
+  case subtag_bignum:
+    return "BIGNUM";
+    break;
+  case subtag_u8_vector:
+    return "(UNSIGNED-BYTE 8)";
+    break;
+  case subtag_s8_vector:
+    return "(SIGNED-BYTE 8)";
+    break;
+  case subtag_u16_vector:
+    return "(UNSIGNED-BYTE 16)";
+    break;
+  case subtag_s16_vector:
+    return "(SIGNED-BYTE 16)";
+    break;
+  case subtag_u32_vector:
+    return "(UNSIGNED-BYTE 32)";
+    break;
+  case subtag_s32_vector:
+    return "(SIGNED-BYTE 32)";
+    break;
+#ifdef PPC64
+  case subtag_u64_vector:
+    return "(UNSIGNED-BYTE 64)";
+    break;
+  case subtag_s64_vector:
+    return "(SIGNED-BYTE 64)";
+    break;
+#endif
+  case subtag_package:
+    return "PACKAGE";
+    break;
+  case subtag_code_vector:
+    return "CODE-VECTOR";
+    break;
+  case subtag_slot_vector:
+    return "SLOT-VECTOR";
+    break;
+  default:
+    return "";
+    break;
+  }
+}
+
+
+void
+sprint_random_vector(LispObj o, unsigned subtag, natural elements)
+{
+  add_c_string("#<");
+  sprint_unsigned_decimal(elements);
+  add_c_string("-element vector subtag = ");
+  sprintf(numbuf, "%02X @", subtag);
+  add_c_string(numbuf);
+  sprint_unsigned_hex(o);
+  add_c_string(" (");
+  add_c_string(vector_subtag_name(subtag));
+  add_c_string(")>");
+}
+
+void
+sprint_symbol(LispObj o)
+{
+  lispsymbol *rawsym = (lispsymbol *) ptr_from_lispobj(untag(o));
+  LispObj 
+    pname = rawsym->pname,
+    package = rawsym->package_predicate;
+
+#ifdef PPC64
+  if (o == lisp_nil) {
+    add_c_string("()");
+    return;
+  }
+#endif
+  if (fulltag_of(package) == fulltag_cons) {
+    package = car(package);
+  }
+
+  if (package == nrs_KEYWORD_PACKAGE.vcell) {
+    add_char(':');
+  }
+  add_lisp_base_string(pname);
+}
+
+void
+sprint_function(LispObj o, int depth)
+{
+  LispObj lfbits, header, name = lisp_nil;
+  natural elements;
+
+  header = header_of(o);
+  elements = header_element_count(header);
+  lfbits = deref(o, elements);
+
+  if ((lfbits & lfbits_noname_mask) == 0) {
+    name = deref(o, elements-1);
+  }
+  
+  add_c_string("#<");
+  if (name == lisp_nil) {
+    add_c_string("Anonymous Function ");
+  } else {
+    if (lfbits & lfbits_method_mask) {
+      LispObj 
+	slot_vector = deref(name,3),
+        method_name = deref(slot_vector, 6),
+        method_qualifiers = deref(slot_vector, 2),
+        method_specializers = deref(slot_vector, 3);
+      add_c_string("Method-Function ");
+      sprint_lisp_object(method_name, depth);
+      add_char(' ');
+      if (method_qualifiers != lisp_nil) {
+        if (cdr(method_qualifiers) == lisp_nil) {
+          sprint_lisp_object(car(method_qualifiers), depth);
+        } else {
+          sprint_lisp_object(method_qualifiers, depth);
+        }
+        add_char(' ');
+      }
+      sprint_specializers_list(method_specializers, depth);
+      add_char(' ');
+    } else {
+      add_c_string("Function ");
+      sprint_lisp_object(name, depth);
+      add_char(' ');
+    }
+  }
+  sprint_unsigned_hex(o);
+  add_char('>');
+}
+
+void
+sprint_gvector(LispObj o, int depth)
+{
+  LispObj header = header_of(o);
+  unsigned 
+    elements = header_element_count(header),
+    subtag = header_subtag(header);
+    
+  switch(subtag) {
+  case subtag_function:
+    sprint_function(o, depth);
+    break;
+    
+  case subtag_symbol:
+    sprint_symbol(o);
+    break;
+    
+  case subtag_struct:
+  case subtag_istruct:
+    add_c_string("#<");
+    sprint_lisp_object(deref(o,1), depth);
+    add_c_string(" @");
+    sprint_unsigned_hex(o);
+    add_c_string(">");
+    break;
+   
+  case subtag_simple_vector:
+    {
+      int i;
+      add_c_string("#(");
+      for(i = 1; i <= elements; i++) {
+        if (i > 1) {
+          add_char(' ');
+        }
+        sprint_lisp_object(deref(o, i), depth);
+      }
+      add_char(')');
+      break;
+    }
+      
+  default:
+    sprint_random_vector(o, subtag, elements);
+    break;
+  }
+}
+
+void
+sprint_ivector(LispObj o)
+{
+  LispObj header = header_of(o);
+  unsigned 
+    elements = header_element_count(header),
+    subtag = header_subtag(header);
+    
+  switch(subtag) {
+  case subtag_simple_base_string:
+    add_char('"');
+    add_lisp_base_string(o);
+    add_char('"');
+    return;
+    
+  case subtag_bignum:
+    if (elements == 1) {
+      sprint_signed_decimal((signed_natural)(deref(o, 1)));
+      return;
+    }
+    if ((elements == 2) && (deref(o, 2) == 0)) {
+      sprint_unsigned_decimal(deref(o, 1));
+      return;
+    }
+    break;
+    
+  case subtag_double_float:
+    break;
+
+  case subtag_macptr:
+    add_c_string("#<MACPTR ");
+    sprint_unsigned_hex(deref(o,1));
+    add_c_string(">");
+    break;
+
+  default:
+    sprint_random_vector(o, subtag, elements);
+  }
+}
+
+void
+sprint_vector(LispObj o, int depth)
+{
+  LispObj header = header_of(o);
+  
+  if (immheader_tag_p(fulltag_of(header))) {
+    sprint_ivector(o);
+  } else {
+    sprint_gvector(o, depth);
+  }
+}
+
+void
+sprint_lisp_object(LispObj o, int depth) 
+{
+  if (--depth < 0) {
+    add_char('#');
+  } else {
+    switch (fulltag_of(o)) {
+    case fulltag_even_fixnum:
+    case fulltag_odd_fixnum:
+      sprint_signed_decimal(unbox_fixnum(o));
+      break;
+    
+#ifdef PPC64
+    case fulltag_immheader_0:
+    case fulltag_immheader_1:
+    case fulltag_immheader_2:
+    case fulltag_immheader_3:
+    case fulltag_nodeheader_0:
+    case fulltag_nodeheader_1:
+    case fulltag_nodeheader_2:
+    case fulltag_nodeheader_3:
+#else
+    case fulltag_immheader:
+    case fulltag_nodeheader:
+#endif      
+      add_c_string("#<header ? ");
+      sprint_unsigned_hex(o);
+      add_c_string(">");
+      break;
+
+#ifdef PPC64
+    case fulltag_imm_0:
+    case fulltag_imm_1:
+    case fulltag_imm_2:
+    case fulltag_imm_3:
+#else
+    case fulltag_imm:
+#endif
+      if (o == unbound) {
+        add_c_string("#<Unbound>");
+      } else {
+        if (header_subtag(o) == subtag_character) {
+          unsigned c = (o >> charcode_shift);
+          add_c_string("#\\");
+          if ((c >= ' ') && (c < 0x7f)) {
+            add_char(c);
+          } else {
+            sprintf(numbuf, "%o", c);
+            add_c_string(numbuf);
+          }
+#ifdef PPC64
+        } else if (header_subtag(o) == subtag_single_float) {
+          sprintf(numbuf, "%f", o>>32);
+          add_c_string(numbuf);
+#endif
+        } else {
+
+          add_c_string("#<imm ");
+          sprint_unsigned_hex(o);
+          add_c_string(">");
+        }
+      }
+      break;
+   
+#ifndef PPC64
+    case fulltag_nil:
+#endif
+    case fulltag_cons:
+      sprint_list(o, depth);
+      break;
+     
+    case fulltag_misc:
+      sprint_vector(o, depth);
+      break;
+    }
+  }
+}
+
+char *
+print_lisp_object(LispObj o)
+{
+  bufpos = 0;
+  if (setjmp(escape) == 0) {
+    sprint_lisp_object(o, 5);
+    printbuf[bufpos] = 0;
+  } else {
+    printbuf[PBUFLEN+0] = '.';
+    printbuf[PBUFLEN+1] = '.';
+    printbuf[PBUFLEN+2] = '.';
+    printbuf[PBUFLEN+3] = 0;
+  }
+  return printbuf;
+}
Index: /branches/arm/lisp-kernel/solarisx64/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/solarisx64/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/solarisx64/.gdbinit	(revision 13357)
@@ -0,0 +1,82 @@
+define x86_lisp_string
+x/s $arg0-5
+end
+
+define gtra
+br *$r10
+cont
+end
+
+define x86pname
+set $temp=*((long *)((long)($arg0-6)))
+x86_lisp_string $temp
+end
+
+
+define pname
+ x86pname $arg0
+end
+
+define l
+ call print_lisp_object($arg0)
+end
+
+define lw
+ l $r13
+end
+
+define clobber_breakpoint
+  set *(short *)($pc-2)=0x9090
+end
+
+define arg_z
+ l $rsi
+end
+
+define arg_y
+ l $rdi
+end
+
+define arg_x
+ l $r8
+end
+
+define bx
+ l $rbx
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x200b
+   set $car = *((LispObj *)($l+5))
+   set $l =  *((LispObj *)($l-3))
+   l $car
+  end
+end
+
+define lbt
+ call plbt_sp($rbp)
+end
+
+define ada
+ p/x *(all_areas->succ)
+end
+
+define lregs
+ call debug_lisp_registers($arg0,0,0)
+end
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
+handle SIGPWR pass nostop noprint
+handle SIGQUIT pass nostop noprint
+
Index: /branches/arm/lisp-kernel/solarisx64/Makefile
===================================================================
--- /branches/arm/lisp-kernel/solarisx64/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/solarisx64/Makefile	(revision 13357)
@@ -0,0 +1,88 @@
+#
+#   Copyright (C) 2006 Clozure Associates and contributors
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ..
+RM = /bin/rm
+AS = /usr/sfw/bin/gas
+# As of this writing, /usr/sfw/bin/gm4 is both more recent (1.4.2 vs 1.4)
+# and more buggy than /opt/sfw/bin/gm4, which is available on the 
+# "Solaris companion" disk.  Do you get the impression that the people
+# who put this stuff together aren't paying much attention ?
+# Marching forward: as of the OpenSolaris 0805 (snv_86) release, there
+# doesn't seem to be any way of obtaining a non-broken GNU m4 from Sun.
+# I just downloaded the source to 1.4.11 and installed it in /usr/local/bin;
+# I didn't try blastwave.org or sunfreeware.com; there might be working
+# packages there
+M4 = /usr/local/bin/m4
+CC = /usr/sfw/bin/gcc
+ASFLAGS = --64 --divide
+M4FLAGS = -DSOLARIS -DX86 -DX8664
+CDEFINES = -DSOLARIS -D_REENTRANT -DX86 -DX8664 -D__EXTENSIONS__ -DHAVE_TLS #-DDISABLE_EGC
+CDEBUG = -g
+COPT = #-O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m64 -o $@
+
+SPOBJ = pad.o x86-spjump64.o x86-spentry64.o x86-subprims64.o
+ASMOBJ = x86-asmutils64.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils64.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants64.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants64.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../sx86cl64
+
+
+OSLIBS = -ldl -lm -lpthread -lsocket -lnsl -lrt
+
+
+../../sx86cl64:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile
+	$(CC)  -m64 $(CDEBUG) -o $@  $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../sx86cl64
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../sx86cl64
+	strip -g ../../sx86cl64
Index: /branches/arm/lisp-kernel/solarisx86/Makefile
===================================================================
--- /branches/arm/lisp-kernel/solarisx86/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/solarisx86/Makefile	(revision 13357)
@@ -0,0 +1,88 @@
+#
+#   Copyright (C) 2006 Clozure Associates and contributors
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ..
+RM = /bin/rm
+AS = /usr/sfw/bin/gas
+# As of this writing, /usr/sfw/bin/gm4 is both more recent (1.4.2 vs 1.4)
+# and more buggy than /opt/sfw/bin/gm4, which is available on the 
+# "Solaris companion" disk.  Do you get the impression that the people
+# who put this stuff together aren't paying much attention ?
+# Marching forward: as of the OpenSolaris 0805 (snv_86) release, there
+# doesn't seem to be any way of obtaining a non-broken GNU m4 from Sun.
+# I just downloaded the source to 1.4.11 and installed it in /usr/local/bin;
+# I didn't try blastwave.org or sunfreeware.com; there might be working
+# packages there
+M4 = /usr/local/bin/m4
+CC = /usr/sfw/bin/gcc
+ASFLAGS = --32 --divide
+M4FLAGS = -DSOLARIS -DX86 -DX8632
+CDEFINES = -DSOLARIS -D_REENTRANT -DX86 -DX8632 -D__EXTENSIONS__ -DHAVE_TLS #-DDISABLE_EGC
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) -m32 -o $@
+
+SPOBJ = pad.o x86-spjump32.o x86-spentry32.o x86-subprims32.o
+ASMOBJ = x86-asmutils32.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o unix-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils32.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants32.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants32.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../sx86cl
+
+
+OSLIBS = -ldl -lm -lpthread -lsocket -lnsl -lrt
+
+
+../../sx86cl:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile
+	$(CC)  -m32 $(CDEBUG) -o $@  $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../sx86cl
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../sx86cl
+	strip -g ../../sx86cl
Index: /branches/arm/lisp-kernel/static-linuxppc/.cvsignore
===================================================================
--- /branches/arm/lisp-kernel/static-linuxppc/.cvsignore	(revision 13357)
+++ /branches/arm/lisp-kernel/static-linuxppc/.cvsignore	(revision 13357)
@@ -0,0 +1,2 @@
+external-functions.h
+*~.*
Index: /branches/arm/lisp-kernel/static-linuxppc/Makefile
===================================================================
--- /branches/arm/lisp-kernel/static-linuxppc/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/static-linuxppc/Makefile	(revision 13357)
@@ -0,0 +1,103 @@
+#
+#   Copyright (C) 1994-2001 Digitool, Inc
+#   This file is part of OpenMCL.  
+#
+#   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with OpenMCL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with OpenMCL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   OpenMCL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+OPENMCL_MAJOR_VERSION=0
+OPENMCL_MINOR_VERSION=14
+
+VPATH = ../
+RM = /bin/rm
+# Versions of GNU as >= 2.9.1 all seem to work
+# AS = gas-2.9.1
+AS = as
+M4 = m4
+ASFLAGS = -mregnames -mppc32
+M4FLAGS = -DLINUX -DPPC
+CDEFINES = -DLINUX -DPPC -D_REENTRANT -DSTATIC -D_GNU_SOURCE
+CDEBUG = -g
+COPT = -O2
+
+# The only version of GCC I have that supports both ppc32 and ppc64
+# compilation uses the -m32 option to target ppc32.  This may not be
+# definitive; there seem to be a bewildering array of similar options
+# in other GCC versions.  It's assumed here that if "-m32" is recognized,
+# it's required as well.
+
+PPC32 = $(shell ($(CC) --help -v 2>&1 | grep -q -e "-m32 ") && /bin/echo "-m32")
+
+# Likewise, some versions of GAS may need a "-a32" flag, to force the
+# output file to be 32-bit compatible.
+
+A32 = $(shell ($(AS) --help -v 2>&1 | grep -q -e "-a32") && /bin/echo "-a32")
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS) $(A32) $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(PPC32) -o $@
+
+SPOBJ = pad.o ppc-spjump.o ppc-spentry.o ppc-subprims.o
+ASMOBJ = ppc-asmutils.o imports.o
+
+COBJ  = pmcl-kernel.o ppc-gc.o bits.o  ppc-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o plbt.o ppc_print.o
+KERNELOBJ= $(COBJ) ppc-asmutils.o  imports.o
+STATICOBJ= staticlib.o
+
+SPINC =	lisp.s m4macros.m4 ppc-constants.s ppc-macros.s errors.s ppc-uuo.s ppc-constants32.s
+
+CHEADERS = area.h bits.h ppc-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h ppc-constants32.h ppc-exceptions.h
+
+# Subprims linked into the kernel ?
+# Yes:
+
+KSPOBJ = $(SPOBJ)
+all:	../../static-ppccl
+
+
+# No:
+
+# KSPOBJ=
+# all:	../../static-ppccl ../../subprims.so
+
+OSLIBS =  -lm -lpthread
+
+
+../../static-ppccl:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(STATICOBJ)
+	$(CC) $(PPC32) $(CDEBUG) -static  -o $@ -T ../linux//elf32ppclinux.x $(KSPOBJ) $(KERNELOBJ) $(STATICOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+staticlib.o: external-functions.h staticlib.c
+	$(CC) -c staticlib.c -fno-builtin $(CDEFINES) $(CDEBUG) $(COPT) $(PPC32) -o $@
+
+
+external-functions.h:
+	echo "Must generate external-functions.h from running lisp"
+
+
+# Fixed: cclean and strip referred to ../../ppccl, but this Makefile
+# builds ../../static-ppccl (the names were copied from the dynamic
+# linuxppc Makefile), so neither target ever touched the real binary.
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../static-ppccl
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../static-ppccl
+	strip -g ../../static-ppccl
Index: /branches/arm/lisp-kernel/static-linuxppc/staticlib.c
===================================================================
--- /branches/arm/lisp-kernel/static-linuxppc/staticlib.c	(revision 13357)
+++ /branches/arm/lisp-kernel/static-linuxppc/staticlib.c	(revision 13357)
@@ -0,0 +1,53 @@
+/* Maps a foreign-function name to its address.  The table itself
+   (external_functions) comes from external-functions.h, which is
+   generated by a running lisp (see the Makefile). */
+typedef struct  {
+  char *name;
+  void *(*func)();
+} external_function;
+
+#define NULL ((void *)0)
+#include "external-functions.h"
+
+/* Return 0 if NUL-terminated strings A and B are equal, 1 otherwise
+   (strcmp-style zero-means-equal; no ordering information). */
+int
+string_compare(char *a, char *b)
+{
+  char ch;
+
+  while (ch = *a++) {
+    if (*b++ != ch) {
+      return 1;
+    }
+  }
+  return !!*b;   /* equal prefixes: unequal iff b has more characters */
+}
+
+      
+/* Minimal dlsym() replacement for a statically linked kernel: look
+   NAME up in the generated external_functions table.  HANDLE is
+   ignored.  Returns the function address or NULL if not found. */
+void *
+dlsym(void *handle, char *name)
+{
+  external_function *p;
+  char *fname;
+
+  for (p = external_functions; fname = p->name; p++) {
+    if (!string_compare(name, fname)) {
+      return (void *)(p->func);
+    }
+  }
+  return NULL;
+}
+
+/* Stubs: a static executable has no shared-library support, so
+   dlopen always fails and dlclose is a no-op. */
+void *
+dlopen(char *path, int mode)
+{
+  return NULL;
+}
+
+void *
+dlerror()
+{
+  return (void *)"No shared library support\n";
+}
+
+void *
+dlclose()
+{
+  return NULL;
+}
Index: /branches/arm/lisp-kernel/thread_manager.c
===================================================================
--- /branches/arm/lisp-kernel/thread_manager.c	(revision 13357)
+++ /branches/arm/lisp-kernel/thread_manager.c	(revision 13357)
@@ -0,0 +1,2697 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+
+#include "Threads.h"
+
+
+/* Arguments handed to a newly created thread's startup routine.
+   NOTE(review): "created" appears to be a synchronization object the
+   creator waits on -- confirm against the (not visible here) thread
+   creation code. */
+typedef struct {
+  TCR *tcr;
+  natural vsize, tsize;
+  void *created;
+} thread_activation;
+
+#ifdef HAVE_TLS
+/* With compiler-supported TLS, each thread's TCR lives in this
+   over-aligned per-thread buffer. */
+__thread char tcrbuf[sizeof(TCR)+16];
+__thread TCR *current_tcr;
+#endif
+
+/* This is set to true when running a 32-bit Lisp on 64-bit FreeBSD */
+Boolean rcontext_readonly = false;
+
+/* Architecture-specific primitives (assembly): compare-and-swap and
+   atomic exchange. */
+extern natural
+store_conditional(natural*, natural, natural);
+
+extern signed_natural
+atomic_swap(signed_natural*, signed_natural);
+
+#ifdef USE_FUTEX
+/* Futex-based lock states: 0 = free, 1 = locked uncontended,
+   2 = locked with waiters (wake needed on unlock). */
+#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
+#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
+#define FUTEX_AVAIL (0)
+#define FUTEX_LOCKED (1)
+#define FUTEX_CONTENDED (2)
+#endif
+
+#ifdef WINDOWS
+/* Boundaries of lisp subprimitive code; used below to decide whether
+   a suspended thread stopped at an interrupt-safe pc. */
+extern pc spentry_start, spentry_end,subprims_start,subprims_end;
+extern pc restore_windows_context_start, restore_windows_context_end,
+  restore_windows_context_iret;
+
+
+extern void interrupt_handler(int, siginfo_t *, ExceptionInformation *);
+
+/* Do-nothing APC, queued only to knock a thread out of an alertable
+   wait. */
+void CALLBACK 
+nullAPC(ULONG_PTR arg) 
+{
+}
+  
+/* Cancellation entry points resolved at runtime; NULL when the
+   running Windows version doesn't provide them (callers null-check). */
+BOOL (*pCancelIoEx)(HANDLE, OVERLAPPED*) = NULL;
+BOOL (*pCancelSynchronousIo)(HANDLE) = NULL;
+
+
+
+extern void *windows_find_symbol(void*, char*);
+
+/* Force lisp thread TARGET to take a process-interrupt (Windows).
+   Suspends the target; if it's somewhere that can't be interrupted
+   directly (foreign code, interrupts disabled, mid-unwind, or an
+   unsafe pc), just sets its interrupt-pending bit and pokes it out of
+   any alertable wait.  Otherwise pushes the captured context and
+   redirects the thread into interrupt_handler.
+   Returns 0 on success, -1 if the thread can't be suspended, ESRCH if
+   its context can't be read. */
+int
+raise_thread_interrupt(TCR *target)
+{
+  /* GCC doesn't align CONTEXT correctly */
+  char _contextbuf[sizeof(CONTEXT)+__alignof(CONTEXT)];
+  CONTEXT  *pcontext;
+  HANDLE hthread = (HANDLE)(target->osid);
+  pc where;
+  /* Fixed: "ts" was initialized from cs_area; the pc range check
+     below tests ts->low/ts->high as the temp stack, so it needs
+     ts_area. */
+  area *cs = target->cs_area, *ts = target->ts_area;
+  DWORD rc;
+  BOOL io_pending;
+
+  pcontext = (CONTEXT *)((((natural)&_contextbuf)+15)&~15);
+  rc = SuspendThread(hthread);
+  if (rc == -1) {
+    return -1;
+  }
+  /* What if the suspend count is > 1 at this point ?  I don't think
+     that that matters, but I'm not sure */
+  pcontext->ContextFlags = CONTEXT_ALL;
+  rc = GetThreadContext(hthread, pcontext);
+  if (rc == 0) {
+    /* Fixed: don't leave the target suspended if we can't read its
+       context. */
+    ResumeThread(hthread);
+    return ESRCH;
+  }
+
+  where = (pc)(xpPC(pcontext));
+  
+  if ((target->valence != TCR_STATE_LISP) ||
+      (TCR_INTERRUPT_LEVEL(target) < 0) ||
+      (target->unwinding != 0) ||
+      (!((where < (pc)lisp_global(HEAP_END)) &&
+         (where >= (pc)lisp_global(HEAP_START))) &&
+       !((where < spentry_end) && (where >= spentry_start)) &&
+       !((where < subprims_end) && (where >= subprims_start)) &&
+       !((where < (pc) 0x16000) &&
+         (where >= (pc) 0x15000)) &&
+       !((where < (pc) (ts->high)) &&
+         (where >= (pc) (ts->low))))) {
+
+    /* Can't interrupt here: record a pending interrupt and try to
+       break the thread out of any blocking wait. */
+    target->interrupt_pending = (1LL << (nbits_in_word - 1LL));
+
+#if 0
+    /* If the thread's in a blocking syscall, it'd be nice to
+       get it out of that state here. */
+    GetThreadIOPendingFlag(hthread,&io_pending);
+    if (io_pending) {
+      pending_io * pending = (pending_io *) (target->pending_io_info);
+      if (pending) {
+        if (pCancelIoEx) {
+          pCancelIoEx(pending->h, pending->o);
+        } else {
+          CancelIo(pending->h);
+        }
+      }
+    }
+#endif
+    if (pCancelSynchronousIo) {
+      pCancelSynchronousIo(hthread);
+    }
+    QueueUserAPC(nullAPC, hthread, 0);
+    ResumeThread(hthread);
+    return 0;
+  } else {
+    /* Thread is running lisp code with interrupts enabled.  Set it
+       so that it calls out and then returns to the context,
+       handling any necessary pc-lusering. */
+    LispObj foreign_rsp = (((LispObj)(target->foreign_sp))-0x200)&~15;
+    CONTEXT *icontext = ((CONTEXT *) foreign_rsp) -1;
+    icontext = (CONTEXT *)(((LispObj)icontext)&~15);
+    
+    *icontext = *pcontext;
+
+#ifdef WIN_64    
+    xpGPR(pcontext,REG_RCX) = SIGNAL_FOR_PROCESS_INTERRUPT;
+    xpGPR(pcontext,REG_RDX) = 0;
+    xpGPR(pcontext,REG_R8) = (LispObj) icontext;
+    xpGPR(pcontext,REG_RSP) = (LispObj)(((LispObj *)icontext)-1);
+    *(((LispObj *)icontext)-1) = (LispObj)raise_thread_interrupt;
+#else
+    {
+      /* Build a cdecl frame: args (signal, info, context) plus a fake
+         return address. */
+      LispObj *p = (LispObj *)icontext;
+      p -= 4;
+      p[0] = SIGNAL_FOR_PROCESS_INTERRUPT;
+      p[1] = 0;
+      p[2] = (DWORD)icontext;
+      *(--p) = (LispObj)raise_thread_interrupt;
+      xpGPR(pcontext,Isp) = (DWORD)p;
+#ifdef WIN32_ES_HACK
+      pcontext->SegEs = pcontext->SegDs;
+#endif
+    }
+#endif
+    pcontext->EFlags &= ~0x400;  /* clear direction flag */
+    xpPC(pcontext) = (LispObj)interrupt_handler;
+    SetThreadContext(hthread,pcontext);
+    ResumeThread(hthread);
+    return 0;
+  }
+}
+#else
+/* Interrupt a lisp thread by delivering SIGNAL_FOR_PROCESS_INTERRUPT
+   to its underlying pthread.  Returns 0 on success, an errno value
+   otherwise (ESRCH when the thread has no OS identity). */
+int
+raise_thread_interrupt(TCR *target)
+{
+  pthread_t os_thread = (pthread_t)target->osid;
+#ifdef DARWIN_not_yet
+  if (use_mach_exception_handling) {
+    return mach_raise_thread_interrupt(target);
+  }
+#endif
+  if (os_thread == (pthread_t) 0) {
+    return ESRCH;
+  }
+  return pthread_kill(os_thread, SIGNAL_FOR_PROCESS_INTERRUPT);
+}
+#endif
+
+/* Atomically add BY to *PTR via a compare-and-swap loop; returns the
+   new value. */
+signed_natural
+atomic_incf_by(signed_natural *ptr, signed_natural by)
+{
+  signed_natural old, new;
+  do {
+    old = *ptr;
+    new = old+by;
+  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
+           (natural) old);
+  return new;
+}
+
+/* Atomically increment *PTR; returns the new value. */
+signed_natural
+atomic_incf(signed_natural *ptr)
+{
+  return atomic_incf_by(ptr, 1);
+}
+
+/* Atomically decrement *PTR, but never below zero.  Returns old-1,
+   i.e. negative exactly when *PTR was already zero (callers use that
+   to detect the "was already free" case). */
+signed_natural
+atomic_decf(signed_natural *ptr)
+{
+  signed_natural old, new;
+  do {
+    old = *ptr;
+    new = old == 0 ? old : old-1;
+  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
+           (natural) old);
+  return old-1;
+}
+
+
+#ifndef USE_FUTEX
+int spin_lock_tries = 1;
+
+/* Acquire the spinlock at P (0 = free) by storing our TCR into it.
+   Spins spin_lock_tries times between sched_yield() calls; never
+   gives up. */
+void
+get_spin_lock(signed_natural *p, TCR *tcr)
+{
+  int i, n = spin_lock_tries;
+  
+  while (1) {
+    for (i = 0; i < n; i++) {
+      if (atomic_swap(p,(signed_natural)tcr) == 0) {
+        return;
+      }
+    }
+#ifndef WINDOWS
+    sched_yield();
+#endif
+  }
+}
+#endif
+
+#ifndef USE_FUTEX
+/* Acquire recursive lock M for TCR (looked up if NULL), blocking on
+   M->signal until available.  m->avail counts acquirers: the first
+   incrementer (avail == 1) owns the lock; others wait.  Returns 0. */
+int
+lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
+{
+
+  if (tcr == NULL) {
+    tcr = get_tcr(true);
+  }
+  if (m->owner == tcr) {
+    m->count++;                 /* recursive re-entry */
+    return 0;
+  }
+  while (1) {
+    LOCK_SPINLOCK(m->spinlock,tcr);
+    ++m->avail;
+    if (m->avail == 1) {
+      m->owner = tcr;
+      m->count = 1;
+      RELEASE_SPINLOCK(m->spinlock);
+      break;
+    }
+    RELEASE_SPINLOCK(m->spinlock);
+    SEM_WAIT_FOREVER(m->signal);
+  }
+  return 0;
+}
+
+#else /* USE_FUTEX */
+
+/* Classic 3-state futex lock (see Drepper, "Futexes Are Tricky"):
+   0 = free, 1 = locked, 2 = locked with waiters. */
+static void inline
+lock_futex(signed_natural *p)
+{
+  
+  while (1) {
+    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
+      return;
+    }
+    while (1) {
+      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
+        return;
+      }
+      futex_wait(p,FUTEX_CONTENDED);
+    }
+  }
+}
+
+/* Release a futex lock; wake waiters only if it was contended. */
+static void inline
+unlock_futex(signed_natural *p)
+{
+  if (atomic_decf(p) != FUTEX_AVAIL) {
+    *p = FUTEX_AVAIL;
+    futex_wake(p,INT_MAX);
+  }
+}
+    
+/* Futex flavor of the recursive lock acquire. */
+int
+lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
+{
+  if (tcr == NULL) {
+    tcr = get_tcr(true);
+  }
+  if (m->owner == tcr) {
+    m->count++;
+    return 0;
+  }
+  lock_futex(&m->avail);
+  m->owner = tcr;
+  m->count = 1;
+  return 0;
+}
+#endif /* USE_FUTEX */
+
+
+#ifndef USE_FUTEX  
+/* Release one recursion level of M; when the count reaches zero,
+   hand the lock off by raising M->signal if anyone is waiting.
+   Returns 0 on success, EPERM if the caller doesn't own M.
+   NOTE(review): "pending" is reduced once via the -1 and again via
+   --pending; this mirrors lock_recursive_lock's ++m->avail protocol
+   but the double adjustment deserves confirmation against it. */
+int
+unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
+{
+  int ret = EPERM, pending;
+
+  if (tcr == NULL) {
+    tcr = get_tcr(true);
+  }
+
+  if (m->owner == tcr) {
+    --m->count;
+    if (m->count == 0) {
+      LOCK_SPINLOCK(m->spinlock,tcr);
+      m->owner = NULL;
+      pending = m->avail-1 + m->waiting;     /* Don't count us */
+      m->avail = 0;
+      --pending;
+      if (pending > 0) {
+        m->waiting = pending;
+      } else {
+        m->waiting = 0;
+      }
+      RELEASE_SPINLOCK(m->spinlock);
+      if (pending >= 0) {
+	SEM_RAISE(m->signal);
+      }
+    }
+    ret = 0;
+  }
+  return ret;
+}
+#else /* USE_FUTEX */
+/* Futex flavor: drop ownership and unlock the futex when the
+   recursion count reaches zero.  Returns 0 or EPERM. */
+int
+unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
+{
+  int ret = EPERM;
+
+   if (tcr == NULL) {
+    tcr = get_tcr(true);
+  }
+
+  if (m->owner == tcr) {
+    --m->count;
+    if (m->count == 0) {
+      m->owner = NULL;
+      unlock_futex(&m->avail);
+    }
+    ret = 0;
+  }
+  return ret;
+}
+#endif /* USE_FUTEX */
+
+/* Free a lock created by new_recursive_lock (semaphore, if any, plus
+   the original unaligned allocation).  Uses postGCfree because this
+   can run during GC. */
+void
+destroy_recursive_lock(RECURSIVE_LOCK m)
+{
+#ifndef USE_FUTEX
+  destroy_semaphore((void **)&m->signal);
+#endif
+  postGCfree((void *)(m->malloced_ptr));
+}
+
+/*
+  If we're already the owner (or if the lock is free), lock it
+  and increment the lock count; otherwise, return EBUSY without
+  waiting.
+*/
+
+#ifndef USE_FUTEX
+/* Try to lock M without blocking.  Returns 0 when the lock was free
+   or already ours (setting *WAS_FREE accordingly when non-NULL),
+   EBUSY otherwise.
+   Fixed: the recursive (owner == tcr) case previously released the
+   spinlock and returned only when WAS_FREE was non-NULL; with a NULL
+   WAS_FREE it fell through to the store_conditional, which fails on a
+   held lock, so the count was bumped but EBUSY was returned. */
+int
+recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
+{
+  TCR *owner = m->owner;
+
+  LOCK_SPINLOCK(m->spinlock,tcr);
+  if (owner == tcr) {
+    m->count++;
+    if (was_free) {
+      *was_free = 0;
+    }
+    RELEASE_SPINLOCK(m->spinlock);
+    return 0;
+  }
+  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
+    m->owner = tcr;
+    m->count = 1;
+    if (was_free) {
+      *was_free = 1;
+    }
+    RELEASE_SPINLOCK(m->spinlock);
+    return 0;
+  }
+
+  RELEASE_SPINLOCK(m->spinlock);
+  return EBUSY;
+}
+#else
+/* Futex flavor; same contract and same fix as above. */
+int
+recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
+{
+  TCR *owner = m->owner;
+
+  if (owner == tcr) {
+    m->count++;
+    if (was_free) {
+      *was_free = 0;
+    }
+    return 0;
+  }
+  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
+    m->owner = tcr;
+    m->count = 1;
+    if (was_free) {
+      *was_free = 1;
+    }
+    return 0;
+  }
+
+  return EBUSY;
+}
+#endif
+
+/* Wait on S until it's signaled, polling with a ~1-second timeout so
+   the wait can't get wedged permanently in a kernel call.
+   NOTE(review): the POSIX branch passes a struct timespec to
+   gettimeofday (so tv_nsec actually holds microseconds, unlike
+   wait_on_semaphore below which scales by 1000); that only makes the
+   absolute deadline slightly early, and the loop retries -- but
+   confirm this asymmetry is intentional. */
+void
+sem_wait_forever(SEMAPHORE s)
+{
+  int status;
+
+  do {
+#ifdef USE_MACH_SEMAPHORES
+    mach_timespec_t q = {1,0};
+    status = SEM_TIMEDWAIT(s,q);
+#endif
+#ifdef USE_POSIX_SEMAPHORES
+    struct timespec q;
+    gettimeofday((struct timeval *)&q, NULL);
+    q.tv_sec += 1;
+    status = SEM_TIMEDWAIT(s,&q);
+#endif
+#ifdef USE_WINDOWS_SEMAPHORES
+    status = (WaitForSingleObject(s,1000L) == WAIT_TIMEOUT) ? 1 : 0;
+#endif
+  } while (status != 0);
+}
+
+/* Wait on semaphore S for at most SECONDS + MILLIS.  Returns 0 on
+   success, an errno-style code (ETIMEDOUT/EINTR/...) otherwise.
+   The Windows branch returns the raw WAIT_TIMEOUT value on timeout,
+   as the inline comment notes. */
+int
+wait_on_semaphore(void *s, int seconds, int millis)
+{
+#ifdef USE_POSIX_SEMAPHORES
+  int nanos = (millis % 1000) * 1000000;
+  int status;
+
+  /* gettimeofday fills tv_sec/tv_usec; the *1000 below turns the
+     microseconds sitting in the tv_nsec slot into nanoseconds,
+     yielding the absolute deadline sem_timedwait expects. */
+  struct timespec q;
+  gettimeofday((struct timeval *)&q, NULL);
+  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
+    
+  q.tv_nsec += nanos;
+  if (q.tv_nsec >= 1000000000L) {
+    q.tv_nsec -= 1000000000L;
+    seconds += 1;
+  }
+  q.tv_sec += seconds;
+  status = SEM_TIMEDWAIT(s, &q);
+  if (status < 0) {
+    return errno;
+  }
+  return status;
+#endif
+#ifdef USE_MACH_SEMAPHORES
+  /* Mach takes a relative timeout and returns kern_return_t codes;
+     translate them to errno values. */
+  int nanos = (millis % 1000) * 1000000;
+  mach_timespec_t q = {seconds, nanos};
+  int status = SEM_TIMEDWAIT(s, q);
+
+  
+  switch (status) {
+  case 0: return 0;
+  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
+  case KERN_ABORTED: return EINTR;
+  default: return EINVAL;
+  }
+#endif
+#ifdef USE_WINDOWS_SEMAPHORES
+  switch (WaitForSingleObjectEx(s, seconds*1000L+(DWORD)millis,true)) {
+  case WAIT_OBJECT_0:
+    return 0;
+  case WAIT_TIMEOUT:
+    return /* ETIMEDOUT */ WAIT_TIMEOUT;
+  case WAIT_IO_COMPLETION:
+    return EINTR;
+  default:
+    break;
+  }
+  return EINVAL;
+
+#endif
+}
+
+
+/* Wait on S with an optional timeout T (NULL means wait forever).
+   Returns 0 or an error code from wait_on_semaphore. */
+int
+semaphore_maybe_timedwait(void *s, struct timespec *t)
+{
+  if (t) {
+    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
+  }
+  SEM_WAIT_FOREVER(s);
+  return 0;
+}
+
+/* Post (raise) semaphore S. */
+void
+signal_semaphore(SEMAPHORE s)
+{
+  SEM_RAISE(s);
+}
+
+  
+#ifdef WINDOWS
+/* Return the current thread's OS identity.  On Windows this is a real
+   (duplicated) thread HANDLE, cached in the TCR when one exists,
+   because GetCurrentThread() only returns a pseudo-handle. */
+LispObj
+current_thread_osid()
+{
+  TCR *tcr = get_tcr(false);
+  LispObj current = 0;
+
+  if (tcr) {
+    current = tcr->osid;
+  }
+  if (current == 0) {
+    DuplicateHandle(GetCurrentProcess(),
+                    GetCurrentThread(),
+                    GetCurrentProcess(),
+                    (LPHANDLE)(&current),
+                    0,
+                    FALSE,
+                    DUPLICATE_SAME_ACCESS);
+    if (tcr) {
+      tcr->osid = current;
+    }
+  }
+  return current;
+}
+#else
+/* POSIX: the pthread identity itself. */
+LispObj
+current_thread_osid()
+{
+  return (LispObj)ptr_to_lispobj(pthread_self());
+}
+#endif
+
+
+/* Signal numbers used for suspend/kill; assigned elsewhere at startup. */
+int thread_suspend_signal = 0, thread_kill_signal = 0;
+
+
+
+/* Per-thread exception setup hook; a no-op on this port. */
+void
+linux_exception_init(TCR *tcr)
+{
+}
+
+
+/* Return the TCR for the current thread (creating one if CREATE);
+   safe to call from signal handlers on this port. */
+TCR *
+get_interrupt_tcr(Boolean create)
+{
+  return get_tcr(create);
+}
+  
+/* Signal handler for thread_suspend_signal.  Parks the thread: it
+   publishes its context, raises the "suspend" semaphore so the
+   suspender knows we're stopped, and blocks on "resume".  If the
+   thread has no TCR yet (a foreign thread), one is created first.
+   If interrupts are deeply disabled, just records a pending suspend. */
+void
+suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
+{
+#ifdef DARWIN_GS_HACK
+  Boolean gs_was_tcr = ensure_gs_pthread();
+#endif
+  TCR *tcr = get_interrupt_tcr(false);
+  
+  if (tcr == NULL) {
+    /* Got a suspend signal sent to the pthread. */
+    extern natural initial_stack_size;
+    void register_thread_tcr(TCR *);
+    
+    tcr = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
+    tcr->suspend_count = 1;
+    tcr->vs_area->active -= node_size;
+    *(--tcr->save_vsp) = lisp_nil;
+    register_thread_tcr(tcr);
+  }
+  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
+    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
+  } else {
+    tcr->suspend_context = context;
+    SEM_RAISE(tcr->suspend);
+    SEM_WAIT_FOREVER(tcr->resume);    /* blocked here until resumed */
+    tcr->suspend_context = NULL;
+  }
+#ifdef DARWIN_GS_HACK
+  if (gs_was_tcr) {
+    set_gs_address(tcr);
+  }
+#endif
+  SIGRETURN(context);
+}
+
+  
+
+/*
+  'base' should be set to the bottom (origin) of the stack, e.g., the
+  end from which it grows.
+*/
+  
+#ifdef WINDOWS
+/* Report the current thread's stack as (origin, size), where origin
+   is the high end the stack grows down from. */
+void
+os_get_current_thread_stack_bounds(void **base, natural *size)
+{
+  natural natbase;
+  MEMORY_BASIC_INFORMATION info;
+  void *addr = (void *)current_stack_pointer();
+  
+  VirtualQuery(addr, &info, sizeof(info));
+  natbase = (natural)info.BaseAddress+info.RegionSize;
+  *size = natbase - (natural)(info.AllocationBase);
+  *base = (void *)natbase;
+}
+#else
+void
+os_get_current_thread_stack_bounds(void **base, natural *size)
+{
+  pthread_t p = pthread_self();
+#ifdef DARWIN
+  /* Darwin reports the stack origin (high end) directly. */
+  *base = pthread_get_stackaddr_np(p);
+  *size = pthread_get_stacksize_np(p);
+#endif
+#ifdef LINUX
+  pthread_attr_t attr;
+
+  pthread_getattr_np(p,&attr);
+  pthread_attr_getstack(&attr, base, size);
+  pthread_attr_destroy(&attr);
+  /* pthread_attr_getstack returns the low address; convert to origin. */
+  *(natural *)base += *size;
+#endif
+#ifdef FREEBSD
+  pthread_attr_t attr;
+  void * temp_base;
+  size_t temp_size;
+  
+
+  pthread_attr_init(&attr);  
+  pthread_attr_get_np(p, &attr);
+  pthread_attr_getstackaddr(&attr,&temp_base);
+  pthread_attr_getstacksize(&attr,&temp_size);
+  *base = (void *)((natural)temp_base + temp_size);
+  *size = temp_size;
+  pthread_attr_destroy(&attr);
+#endif
+#ifdef SOLARIS
+  stack_t st;
+  
+  thr_stksegment(&st);
+  *size = st.ss_size;
+  *base = st.ss_sp;
+  
+#endif
+}
+#endif
+
+/* Create a counting semaphore with initial COUNT, using whichever
+   semaphore flavor this build selected in Threads.h.
+   NOTE(review): the POSIX branch doesn't check malloc/sem_init
+   results -- presumably acceptable under the kernel's abort-on-OOM
+   policy, but worth confirming. */
+void *
+new_semaphore(int count)
+{
+#ifdef USE_POSIX_SEMAPHORES
+  sem_t *s = malloc(sizeof(sem_t));
+  sem_init(s, 0, count);
+  return s;
+#endif
+#ifdef USE_MACH_SEMAPHORES
+  semaphore_t s = (semaphore_t)0;
+  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
+  return (void *)(natural)s;
+#endif
+#ifdef USE_WINDOWS_SEMAPHORES
+  return CreateSemaphore(NULL, count, 0x7fffL, NULL);
+#endif
+}
+
+/* Allocate a recursive lock, aligned to a cache line to avoid false
+   sharing; the raw allocation is remembered in malloced_ptr for
+   destroy_recursive_lock.  Non-futex builds also need the wakeup
+   semaphore; on partial failure both pieces are released.  Returns
+   NULL on allocation failure. */
+RECURSIVE_LOCK
+new_recursive_lock()
+{
+  extern int cache_block_size;
+  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
+  RECURSIVE_LOCK m = NULL;
+#ifndef USE_FUTEX
+  void *signal = new_semaphore(0);
+#endif
+  if (p) {
+    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
+    m->malloced_ptr = p;
+  }
+
+#ifdef USE_FUTEX
+  if (m) {
+    return m;
+  }
+#else
+  if (m && signal) {
+    m->signal = signal;
+    return m;
+  }
+  if (m) {
+    free(p);
+  }
+  if (signal) {
+    destroy_semaphore(&signal);
+  }
+#endif
+  return NULL;
+}
+
+/* Destroy the semaphore *S (if any) and clear the pointer.  During GC
+   the backing storage is queued via postGCfree instead of freed
+   immediately. */
+void
+destroy_semaphore(void **s)
+{
+  if (*s) {
+#ifdef USE_POSIX_SEMAPHORES
+    sem_destroy((sem_t *)*s);
+    if (lisp_global(IN_GC)) {
+      postGCfree(*s);
+    } else {
+      free(*s);
+    }
+#endif
+#ifdef USE_MACH_SEMAPHORES
+    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
+#endif
+#ifdef USE_WINDOWS_SEMAPHORES
+    CloseHandle(*s);
+#endif
+    *s=NULL;
+  }
+}
+
+#ifdef WINDOWS
+/* Thread-specific-data accessors: thin wrappers over TLS slots
+   (TlsSetValue/TlsGetValue on Windows, pthread keys elsewhere). */
+void
+tsd_set(LispObj key, void *datum)
+{
+  TlsSetValue((DWORD)key, datum);
+}
+
+void *
+tsd_get(LispObj key)
+{
+  return TlsGetValue((DWORD)key);
+}
+#else
+void
+tsd_set(LispObj key, void *datum)
+{
+  pthread_setspecific((pthread_key_t)key, datum);
+}
+
+void *
+tsd_get(LispObj key)
+{
+  return pthread_getspecific((pthread_key_t)key);
+}
+#endif
+
+/* Unlink TCR from the doubly-linked ring of all threads.
+   NOTE(review): unlike enqueue_tcr, no lock is taken here --
+   presumably the caller already holds TCR_AREA_LOCK; confirm. */
+void
+dequeue_tcr(TCR *tcr)
+{
+  TCR *next, *prev;
+
+  next = tcr->next;
+  prev = tcr->prev;
+
+  prev->next = next;
+  next->prev = prev;
+  tcr->prev = tcr->next = NULL;
+#ifdef X8664
+  tcr->linear = NULL;
+#endif
+}
+  
+/* Append NEW to the all-threads ring (just before the initial TCR),
+   under TCR_AREA_LOCK. */
+void
+enqueue_tcr(TCR *new)
+{
+  TCR *head, *tail;
+  
+  LOCK(lisp_global(TCR_AREA_LOCK),new);
+  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
+  tail = head->prev;
+  tail->next = new;
+  head->prev = new;
+  new->prev = tail;
+  new->next = head;
+  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
+}
+
+#ifdef WIN_32
+/* Allocate a zeroed, 16-byte-aligned TCR; the raw pointer is kept in
+   tcr->allocated so it can be freed later.
+   NOTE(review): calloc's result isn't checked before the store --
+   confirm this relies on the kernel's abort-on-OOM policy. */
+TCR *
+allocate_tcr()
+{
+  void *p = calloc(1,sizeof(TCR)+15);
+  TCR *tcr = (TCR *)((((natural)p)+15)&~15);
+
+  tcr->allocated = p;
+  return tcr;
+}
+#else
+/* Allocate a zeroed TCR.  On Darwin the TCR's address doubles as a
+   Mach exception-port name, so candidates are rejected (and kept on a
+   temporary chain so they aren't immediately re-allocated) when the
+   address doesn't fit in 32 bits or the port name can't be allocated;
+   the rejects are freed once a usable TCR is found. */
+TCR *
+allocate_tcr()
+{
+  TCR *tcr, *chain = NULL, *next;
+#ifdef DARWIN
+  extern Boolean use_mach_exception_handling;
+  kern_return_t kret;
+  mach_port_t 
+    thread_exception_port,
+    task_self = mach_task_self();
+#endif
+  for (;;) {
+    tcr = calloc(1, sizeof(TCR));
+#ifdef DARWIN
+#if WORD_SIZE == 64
+    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
+      tcr->next = chain;
+      chain = tcr;
+      continue;
+    }
+#endif
+    if (use_mach_exception_handling) {
+      thread_exception_port = (mach_port_t)((natural)tcr);
+      kret = mach_port_allocate_name(task_self,
+                                     MACH_PORT_RIGHT_RECEIVE,
+                                     thread_exception_port);
+    } else {
+      kret = KERN_SUCCESS;
+    }
+
+    if (kret != KERN_SUCCESS) {
+      tcr->next = chain;
+      chain = tcr;
+      continue;
+    }
+#endif
+    for (;chain;chain = next) {
+      next = chain->next;
+      free(chain);
+    }
+    return tcr;
+  }
+}
+#endif
+
+#ifdef X8664
+#ifdef LINUX
+#include <asm/prctl.h>
+#include <sys/prctl.h>
+#endif
+#ifdef FREEBSD
+#include <machine/sysarch.h>
+#endif
+
+/* Point the %gs segment base at TCR so lisp code can address
+   thread-local state via a segment override. */
+void
+setup_tcr_extra_segment(TCR *tcr)
+{
+#ifdef FREEBSD
+  amd64_set_gsbase(tcr);
+#endif
+#ifdef LINUX
+  arch_prctl(ARCH_SET_GS, (natural)tcr);
+#endif
+#ifdef DARWIN
+  /* There's no way to do this yet.  See DARWIN_GS_HACK */
+  /* darwin_set_x8664_fs_reg(tcr); */
+#endif
+#ifdef SOLARIS
+  /* Chris Curtis found this and suggested the use of syscall here */
+  syscall(SYS_lwp_private,_LWP_SETPRIVATE, _LWP_GSBASE, tcr);
+#endif
+}
+
+#endif
+
+#ifdef X8632
+
+#ifdef DARWIN
+#include <architecture/i386/table.h>
+#include <architecture/i386/sel.h>
+#include <i386/user_ldt.h>
+
+/* Build an LDT data-segment descriptor covering the TCR and install
+   it via i386_set_ldt; the resulting selector is saved in
+   tcr->ldt_selector for later loading into %fs. */
+void setup_tcr_extra_segment(TCR *tcr)
+{
+    uintptr_t addr = (uintptr_t)tcr;
+    unsigned int size = sizeof(*tcr);
+    ldt_entry_t desc;
+    sel_t sel;
+    int i;
+
+    /* Descriptor limits are inclusive, hence size - 1. */
+    desc.data.limit00 = (size - 1) & 0xffff;
+    desc.data.limit16 = ((size - 1) >> 16) & 0xf;
+    desc.data.base00 = addr & 0xffff;
+    desc.data.base16 = (addr >> 16) & 0xff;
+    desc.data.base24 = (addr >> 24) & 0xff;
+    desc.data.type = DESC_DATA_WRITE;
+    desc.data.dpl = USER_PRIV;
+    desc.data.present = 1;
+    desc.data.stksz = DESC_CODE_32B;
+    desc.data.granular = DESC_GRAN_BYTE;
+    
+    i = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
+
+    if (i < 0) {
+	perror("i386_set_ldt");
+    } else {
+	sel.index = i;
+	sel.rpl = USER_PRIV;
+	sel.ti = SEL_LDT;
+	tcr->ldt_selector = sel;
+    }
+}
+
+/* Release the TCR's LDT entry and clear %fs so nothing still
+   references it. */
+void free_tcr_extra_segment(TCR *tcr)
+{
+  /* load %fs with null segement selector */
+  __asm__ volatile ("mov %0,%%fs" : : "r"(0));
+  if (i386_set_ldt(tcr->ldt_selector.index, NULL, 1) < 0)
+    perror("i386_set_ldt");
+  tcr->ldt_selector = NULL_SEL;
+}
+#endif
+
+#ifdef LINUX
+
+#include <asm/ldt.h>
+#include <sys/syscall.h>
+
+/* see desc_struct in kernel/include/asm-i386/processor.h */
+typedef struct {
+  uint32_t a;
+  uint32_t b;
+} linux_desc_struct;
+
+
+/* An all-zero descriptor is an unused slot. */
+#define desc_avail(d) (((d)->a) == 0)
+
+linux_desc_struct linux_ldt_entries[LDT_ENTRIES];
+
+/* We have to ask the Linux kernel for a copy of the ldt table
+   and manage it ourselves.  It's not clear that this is 
+   thread-safe in general, but we can at least ensure that
+   it's thread-safe wrt lisp threads. */
+
+pthread_mutex_t ldt_lock = PTHREAD_MUTEX_INITIALIZER;  /* simple, non-recursive mutex */
+
+/* Thin wrapper for the (not glibc-exported) modify_ldt system call.
+   func 0 reads the LDT into ptr; func 1 writes a user_desc entry. */
+int
+modify_ldt(int func, void *ptr, unsigned long bytecount)
+{
+  return syscall(__NR_modify_ldt, func, ptr, bytecount);
+}
+
+
+/* Find a free LDT slot, point it at the TCR as a 32-bit data segment,
+   and save the resulting selector (RPL 3, table bit = LDT) in
+   tcr->ldt_selector.  Exits the process if no slot can be assigned. */
+void
+setup_tcr_extra_segment(TCR *tcr)
+{
+  int i, n;
+  short sel;
+  /* {entry_number, base, limit, seg_32bit, contents, read_exec_only,
+     limit_in_pages, seg_not_present, useable} */
+  struct user_desc u = {1, 0, 0, 1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1};
+  linux_desc_struct *d = linux_ldt_entries;
+
+  pthread_mutex_lock(&ldt_lock);
+  n = modify_ldt(0,d,LDT_ENTRIES*LDT_ENTRY_SIZE)/LDT_ENTRY_SIZE;
+  for (i = 0; i < n; i++,d++) {
+    if (desc_avail(d)) {
+      break;
+    }
+  }
+  if (i == LDT_ENTRIES) {
+    pthread_mutex_unlock(&ldt_lock);
+    fprintf(dbgout, "All 8192 ldt entries in use ?\n");
+    _exit(1);
+  }
+  u.entry_number = i;
+  u.base_addr = (uint32_t)tcr;
+  u.limit = sizeof(TCR);
+  u.limit_in_pages = 0;
+  if (modify_ldt(1,&u,sizeof(struct user_desc)) != 0) {
+    pthread_mutex_unlock(&ldt_lock);
+    fprintf(dbgout,"Can't assign LDT entry\n");
+    _exit(1);
+  }
+  sel = (i << 3) | 7;           /* index, TI=LDT, RPL=3 */
+  tcr->ldt_selector = sel;
+  pthread_mutex_unlock(&ldt_lock);
+}
+
+/* Clear %fs and write an empty descriptor over the TCR's LDT slot. */
+void
+free_tcr_extra_segment(TCR *tcr)
+{
+  struct user_desc u = {0, 0, 0, 0, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0};
+  short sel = tcr->ldt_selector;
+
+  pthread_mutex_lock(&ldt_lock);
+  /* load %fs with null segment selector */
+  __asm__ volatile ("mov %0,%%fs" : : "r"(0));
+  tcr->ldt_selector = 0;
+  u.entry_number = (sel>>3);
+  modify_ldt(1,&u,sizeof(struct user_desc));
+  pthread_mutex_unlock(&ldt_lock);
+  
+}
+
+#endif
+
+#ifdef WINDOWS
+/* Bitmap of LDT slots this process has handed out, guarded by
+   ldt_lock. */
+bitvector ldt_entries_in_use = NULL;
+HANDLE ldt_lock;
+
+/* Argument block for the ProcessLdtInformation (class 10) calls:
+   byte offset of the entry within the LDT, size, and the entry. */
+typedef struct {
+  DWORD offset;
+  DWORD size;
+  LDT_ENTRY entry;
+} win32_ldt_info;
+
+
+int WINAPI (*NtQueryInformationProcess)(HANDLE,DWORD,VOID*,DWORD,DWORD*);
+int WINAPI (*NtSetInformationProcess)(HANDLE,DWORD,VOID*,DWORD);
+
+/* Resolve the undocumented ntdll LDT interface and verify it works
+   on this OS by reading entry 0; exits if the interface is missing
+   or fails. */
+void
+init_win32_ldt()
+{
+  HANDLE hNtdll;
+  int status = 0xc0000002;      /* STATUS_NOT_IMPLEMENTED */
+  win32_ldt_info info;
+  DWORD nret;
+  
+
+  ldt_entries_in_use=malloc(8192/8);
+  zero_bits(ldt_entries_in_use,8192);
+  ldt_lock = CreateMutex(NULL,0,NULL);
+
+  hNtdll = LoadLibrary("ntdll.dll");
+  NtQueryInformationProcess = (void*)GetProcAddress(hNtdll, "NtQueryInformationProcess");
+  NtSetInformationProcess = (void*)GetProcAddress(hNtdll, "NtSetInformationProcess");
+  if (NtQueryInformationProcess != NULL) {
+    info.offset = 0;
+    info.size = sizeof(LDT_ENTRY);
+    status = NtQueryInformationProcess(GetCurrentProcess(),10,&info,sizeof(info),&nret);
+  }
+
+  if (status) {
+    fprintf(dbgout, "This application can't run under this OS version\n");
+    _exit(1);
+  }
+}
+
+/* Allocate a free LDT slot describing TCR as a ring-3 read/write
+   data segment (via the undocumented ProcessLdtInformation class 10
+   of NtQuery/SetInformationProcess) and store the selector in
+   tcr->ldt_selector.  Exits if all 8192 slots are in use. */
+void
+setup_tcr_extra_segment(TCR *tcr)
+{
+  int i, status;
+  DWORD nret;
+  win32_ldt_info info;
+  LDT_ENTRY *entry = &(info.entry);
+  DWORD *words = (DWORD *)entry, tcraddr = (DWORD)tcr;
+
+
+  WaitForSingleObject(ldt_lock,INFINITE);
+
+  /* Find a slot that's neither marked in our bitmap nor populated in
+     the process's actual LDT. */
+  for (i = 0; i < 8192; i++) {
+    if (!ref_bit(ldt_entries_in_use,i)) {
+      info.offset = i << 3;
+      info.size = sizeof(LDT_ENTRY);
+      words[0] = 0;
+      words[1] = 0;
+      status = NtQueryInformationProcess(GetCurrentProcess(),10,&info,sizeof(info),&nret);
+      if (status == 0) {
+        if ((info.size == 0) ||
+            ((words[0] == 0) && (words[1] == 0))) {
+          break;
+        }
+      }
+    }
+  }
+  if (i == 8192) {
+    ReleaseMutex(ldt_lock);
+    fprintf(dbgout, "All 8192 ldt entries in use ?\n");
+    _exit(1);
+  }
+  set_bit(ldt_entries_in_use,i);
+  words[0] = 0;
+  words[1] = 0;
+  entry->LimitLow = sizeof(TCR);
+  entry->BaseLow = tcraddr & 0xffff;
+  entry->HighWord.Bits.BaseMid = (tcraddr >> 16) & 0xff;
+  entry->HighWord.Bits.BaseHi = (tcraddr >> 24);
+  entry->HighWord.Bits.Pres = 1;
+  entry->HighWord.Bits.Default_Big = 1;
+  entry->HighWord.Bits.Type = 16 | 2; /* read-write data */
+  entry->HighWord.Bits.Dpl = 3; /* for use by the great unwashed */
+  info.size = sizeof(LDT_ENTRY);
+  status = NtSetInformationProcess(GetCurrentProcess(),10,&info,sizeof(info));
+  if (status != 0) {
+    ReleaseMutex(ldt_lock);
+    FBug(NULL, "can't set LDT entry %d, status = 0x%x", i, status);
+  }
+#if 1
+  /* Sanity check */
+  info.offset = i << 3;
+  info.size = sizeof(LDT_ENTRY);
+  words[0] = 0;
+  words[1] = 0; /* Fixed: was "words[0] = 0;" twice, leaving the high
+                   descriptor word stale before the re-read below. */
+  NtQueryInformationProcess(GetCurrentProcess(),10,&info,sizeof(info),&nret);
+  if (((entry->BaseLow)|((entry->HighWord.Bits.BaseMid)<<16)|((entry->HighWord.Bits.BaseHi)<<24)) != tcraddr) {
+    Bug(NULL, "you blew it: bad address in ldt entry\n");
+  }
+#endif
+  tcr->ldt_selector = (i << 3) | 7;
+  ReleaseMutex(ldt_lock);
+}
+
+/* Zero out the TCR's LDT entry and return its slot to the bitmap. */
+void 
+free_tcr_extra_segment(TCR *tcr)
+{
+  win32_ldt_info info;
+  LDT_ENTRY *entry = &(info.entry);
+  DWORD *words = (DWORD *)entry;
+  int idx = tcr->ldt_selector >> 3;
+
+
+  info.offset = idx << 3;
+  info.size = sizeof(LDT_ENTRY);
+
+  /* An all-zero entry marks the slot unused. */
+  words[0] = 0;
+  words[1] = 0;
+
+  WaitForSingleObject(ldt_lock,INFINITE);
+  NtSetInformationProcess(GetCurrentProcess(),10,&info,sizeof(info));
+  clr_bit(ldt_entries_in_use,idx);
+  ReleaseMutex(ldt_lock);
+
+  tcr->ldt_selector = 0;
+}
+
+#endif
+#ifdef FREEBSD
+#include <machine/segments.h>
+#include <machine/sysarch.h>
+
+/* It'd be tempting to use i386_set_fsbase() here, but there doesn't
+   seem to be any way to free the GDT entry it creates.  Actually,
+   it's not clear that that really sets a GDT entry; let's see */
+
+#define FREEBSD_USE_SET_FSBASE 1
+/* Make this TCR addressable through a segment register on FreeBSD:
+   either install an LDT descriptor covering the TCR, or (the default,
+   per FREEBSD_USE_SET_FSBASE) point %fs's base at the TCR via
+   i386_set_fsbase(). */
+void
+setup_tcr_extra_segment(TCR *tcr)
+{
+#if !FREEBSD_USE_SET_FSBASE
+  struct segment_descriptor sd;
+  uintptr_t addr = (uintptr_t)tcr;
+  unsigned int size = sizeof(*tcr);
+  int i;
+
+  /* Limit and base are split across the descriptor's fields. */
+  sd.sd_lolimit = (size - 1) & 0xffff;
+  sd.sd_hilimit = ((size - 1) >> 16) & 0xf;
+  sd.sd_lobase = addr & ((1<<24)-1);
+  sd.sd_hibase = (addr>>24)&0xff;
+
+
+
+  sd.sd_type = 18;              /* 16 | 2: read-write data, as in the
+                                   Windows version above */
+  sd.sd_dpl = SEL_UPL;          /* user privilege level */
+  sd.sd_p = 1;                  /* present */
+  sd.sd_def32 = 1;              /* 32-bit segment */
+  sd.sd_gran = 0;               /* byte granularity */
+
+  i = i386_set_ldt(LDT_AUTO_ALLOC, (union descriptor *)&sd, 1);
+
+  if (i < 0) {
+    perror("i386_set_ldt");
+    exit(1);
+  } else {
+    tcr->ldt_selector = LSEL(i,SEL_UPL);
+  }
+#else
+  extern unsigned short get_fs_register(void);
+
+  if (i386_set_fsbase((void*)tcr)) {
+    perror("i386_set_fsbase");
+    exit(1);
+  }
+
+
+  /* Once we've called i386_set_fsbase, we can't write to %fs. */
+  tcr->ldt_selector = GSEL(GUFS_SEL, SEL_UPL);
+#endif
+}
+
+/* Undo setup_tcr_extra_segment() on FreeBSD.  In the
+   FREEBSD_USE_SET_FSBASE configuration there is nothing we know how
+   to release (see comments below); otherwise, null out %fs and free
+   the LDT entry. */
+void 
+free_tcr_extra_segment(TCR *tcr)
+{
+#if FREEBSD_USE_SET_FSBASE
+  /* On a 32-bit kernel, this allocates a GDT entry.  It's not clear
+     what it would mean to deallocate that entry. */
+  /* If we're running on a 64-bit kernel, we can't write to %fs */
+#else
+  int idx = tcr->ldt_selector >> 3;
+  /* load %fs with null segment selector */
+  __asm__ volatile ("mov %0,%%fs" : : "r"(0));
+  if (i386_set_ldt(idx, NULL, 1) < 0)
+    perror("i386_set_ldt");
+#endif
+  tcr->ldt_selector = 0;
+}
+#endif
+
+#ifdef SOLARIS
+#include <sys/sysi86.h>
+
+bitvector ldt_entries_in_use = NULL;
+pthread_mutex_t ldt_lock = PTHREAD_MUTEX_INITIALIZER;  /* simple, non-recursive mutex */
+
+/* One-time setup for LDT allocation on Solaris: allocate the in-use
+   bitmap and pre-mark any LDT selectors the process is already
+   using, as reported by /proc/self/ldt (each record read is one
+   active segment descriptor). */
+void
+solaris_ldt_init()
+{
+  int fd;
+  struct ssd s;
+
+  ldt_entries_in_use=malloc(8192/8);
+  zero_bits(ldt_entries_in_use,8192);
+  
+  fd = open("/proc/self/ldt", O_RDONLY);
+
+  /* Guard against open() failing (e.g. /proc unavailable): the old
+     code passed -1 to read() and close(). */
+  if (fd >= 0) {
+    while(read(fd,&s,sizeof(s)) == sizeof(s)) {
+      set_bit(ldt_entries_in_use,s.sel>>3);
+    }
+    close(fd);
+  }
+}
+    
+
+/* Allocate a free LDT entry describing this TCR and remember its
+   selector in tcr->ldt_selector.  Exits the process if no entry can
+   be installed. */
+void
+setup_tcr_extra_segment(TCR *tcr)
+{
+  struct ssd s;
+  int i;
+
+  pthread_mutex_lock(&ldt_lock);
+
+  for (i = 0; i < 8192; i++) {
+    if (!ref_bit(ldt_entries_in_use,i)) {
+      s.sel = (i<<3)|7;         /* selector: index i, TI=LDT, RPL=3 */
+      s.bo = (unsigned int)tcr; /* base = TCR address */
+      s.ls = sizeof(TCR);       /* limit = TCR size */
+      s.acc1 = 0xf2;            /* present, DPL 3, writable data --
+                                   cf. the Windows descriptor above */
+      s.acc2 = 4;
+
+      if (sysi86(SI86DSCR, &s) >= 0) {
+        set_bit(ldt_entries_in_use,i);
+        tcr->ldt_selector = (i<<3)|7;
+        pthread_mutex_unlock(&ldt_lock);
+        return;
+      }
+      /* sysi86 refused this slot: mark it in use anyway so it is
+         never retried, then try the next one. */
+      set_bit(ldt_entries_in_use,i);
+    }
+  }
+  pthread_mutex_unlock(&ldt_lock);
+  fprintf(dbgout, "All 8192 LDT descriptors in use\n");
+  _exit(1);
+
+
+  
+}
+
+/* Release this TCR's LDT entry on Solaris: null out %fs before the
+   descriptor disappears, clear the descriptor via sysi86(), and
+   return the slot to the in-use bitmap. */
+void 
+free_tcr_extra_segment(TCR *tcr)
+{
+  struct ssd s;
+  int i;
+
+  pthread_mutex_lock(&ldt_lock);
+  /* Load %fs with the null selector first. */
+  __asm__ volatile ("mov %0,%%fs" : : "r"(0));
+  s.sel = tcr->ldt_selector;
+  i = s.sel>>3;
+  tcr->ldt_selector = 0;
+  /* An all-zero descriptor frees the entry. */
+  s.bo = 0;
+  s.ls = 0;
+  s.acc1 = 0;
+  s.acc2 = 0;
+  sysi86(SI86DSCR, &s);
+  clr_bit(ldt_entries_in_use,i);
+  pthread_mutex_unlock(&ldt_lock);
+}
+
+#endif
+#endif
+
+/*
+  Caller must hold the area_lock.
+*/
+/* Allocate and initialize a thread context record (TCR): per-thread
+   semaphores, value- and temp-stack areas, FP control defaults, and
+   the thread-local-binding table.  The new TCR starts in foreign
+   valence; lisp_thread_entry()/register_thread_tcr() finish making
+   it a lisp thread. */
+TCR *
+new_tcr(natural vstack_size, natural tstack_size)
+{
+  extern area
+    *allocate_vstack_holding_area_lock(natural),
+    *allocate_tstack_holding_area_lock(natural);
+  area *a;
+  int i;
+#ifndef WINDOWS
+  sigset_t sigmask;
+
+  /* Run with no signals blocked. */
+  sigemptyset(&sigmask);
+  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
+#endif
+
+#ifdef HAVE_TLS
+  /* Carve a 16-byte-aligned TCR out of the thread-local buffer. */
+  TCR *tcr = (TCR *) ((((natural)&tcrbuf)+((natural)15)) & ~((natural)15));
+  current_tcr = tcr;
+#else /* no TLS */
+  TCR *tcr = allocate_tcr();
+#endif
+
+#ifdef X86
+  /* Make the TCR addressable via a segment register. */
+  setup_tcr_extra_segment(tcr);
+  tcr->linear = tcr;
+#ifdef X8632
+  tcr->node_regs_mask = X8632_DEFAULT_NODE_REGS_MASK;
+#endif
+#endif
+
+#if (WORD_SIZE == 64)
+  tcr->single_float_convert.tag = subtag_single_float;
+#endif
+  tcr->suspend = new_semaphore(0);
+  tcr->resume = new_semaphore(0);
+  tcr->reset_completion = new_semaphore(0);
+  tcr->activate = new_semaphore(0);
+  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
+  a = allocate_vstack_holding_area_lock(vstack_size);
+  tcr->vs_area = a;
+  a->owner = tcr;
+  tcr->save_vsp = (LispObj *) a->active;  
+  a = allocate_tstack_holding_area_lock(tstack_size);
+  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
+  tcr->ts_area = a;
+  a->owner = tcr;
+  tcr->save_tsp = (LispObj *) a->active;
+#ifdef X86
+  tcr->next_tsp = tcr->save_tsp;
+#endif
+
+  tcr->valence = TCR_STATE_FOREIGN;
+#ifdef PPC
+  tcr->lisp_fpscr.words.l = 0xd0;
+#endif
+#ifdef X86
+  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
+#if 1                           /* Mask underflow; too hard to 
+                                   deal with denorms if underflow is 
+                                   enabled */
+    (1 << MXCSR_UM_BIT) | 
+#endif
+    (1 << MXCSR_PM_BIT);
+#endif
+  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
+  /* 2048-entry thread-local-binding table, all entries unbound. */
+  tcr->tlb_limit = 2048<<fixnumshift;
+  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
+  for (i = 0; i < 2048; i++) {
+    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
+  }
+  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
+#ifndef WINDOWS
+  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
+#else
+  tcr->shutdown_count = 1;
+#endif
+  return tcr;
+}
+
+/* Tear down a thread's TCR.  Runs as the TCR_KEY thread-specific
+   destructor (and is also called directly); the shutdown_count
+   mechanism re-arms the destructor until the count reaches zero.
+   Condemns the thread's stack areas, destroys its semaphores and
+   TLB, and releases platform resources. */
+void
+shutdown_thread_tcr(void *arg)
+{
+  TCR *tcr = TCR_FROM_TSD(arg),*current=get_tcr(0);
+
+  area *vs, *ts, *cs;
+  
+  if (current == NULL) {
+    current = tcr;
+  }
+
+  if (--(tcr->shutdown_count) == 0) {
+    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
+      /* Give lisp a chance to run foreign-thread shutdown code:
+         call the FOREIGN-THREAD-CONTROL callback with arg 1. */
+      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
+	callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
+    
+      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
+      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
+      tsd_set(lisp_global(TCR_KEY), NULL);
+    }
+#ifdef DARWIN
+    darwin_exception_cleanup(tcr);
+#endif
+    /* Detach the stack areas from the TCR before condemning them,
+       all under the area lock. */
+    LOCK(lisp_global(TCR_AREA_LOCK),current);
+    vs = tcr->vs_area;
+    tcr->vs_area = NULL;
+    ts = tcr->ts_area;
+    tcr->ts_area = NULL;
+    cs = tcr->cs_area;
+    tcr->cs_area = NULL;
+    if (vs) {
+      condemn_area_holding_area_lock(vs);
+    }
+    if (ts) {
+      condemn_area_holding_area_lock(ts);
+    }
+    if (cs) {
+      condemn_area_holding_area_lock(cs);
+    }
+    destroy_semaphore(&tcr->suspend);
+    destroy_semaphore(&tcr->resume);
+    destroy_semaphore(&tcr->reset_completion);
+    destroy_semaphore(&tcr->activate);
+    tcr->tlb_limit = 0;
+    free(tcr->tlb_pointer);
+    tcr->tlb_pointer = NULL;
+    /* osid == 0 marks the TCR as belonging to a dead thread. */
+    tcr->osid = 0;
+    tcr->interrupt_pending = 0;
+    tcr->termination_semaphore = NULL;
+#ifdef HAVE_TLS
+    dequeue_tcr(tcr);
+#endif
+#ifdef X8632
+    free_tcr_extra_segment(tcr);
+#endif
+#ifdef WIN32
+    CloseHandle((HANDLE)tcr->io_datum);
+    tcr->io_datum = NULL;
+    free(tcr->native_thread_info);
+    tcr->native_thread_info = NULL;
+#endif
+    UNLOCK(lisp_global(TCR_AREA_LOCK),current);
+  } else {
+    /* Not done yet: re-install the TSD value so the pthread key
+       destructor fires again on the next iteration. */
+    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
+  }
+}
+
+/* pthread cleanup handler for a lisp thread: empty the thread's
+   stack areas (active = high), mark it foreign, and force a full
+   shutdown_thread_tcr() by setting shutdown_count to 1. */
+void
+tcr_cleanup(void *arg)
+{
+  TCR *tcr = (TCR *)arg;
+  area *a;
+
+  a = tcr->vs_area;
+  if (a) {
+    a->active = a->high;
+  }
+  a = tcr->ts_area;
+  if (a) {
+    a->active = a->high;
+  }
+  a = tcr->cs_area;
+  if (a) {
+    a->active = a->high;
+  }
+  tcr->valence = TCR_STATE_FOREIGN;
+  tcr->shutdown_count = 1;
+  shutdown_thread_tcr(tcr);
+  tsd_set(lisp_global(TCR_KEY), NULL);
+}
+
+/* Return the calling thread's native (OS-level) thread id, using
+   whichever mechanism the platform provides. */
+void *
+current_native_thread_id()
+{
+  return ((void *) (natural)
+#ifdef LINUX
+#ifdef __NR_gettid
+          syscall(__NR_gettid)
+#else
+          getpid()              /* no gettid syscall: fall back */
+#endif
+#endif
+#ifdef DARWIN
+	  mach_thread_self()
+#endif
+#ifdef FREEBSD
+	  pthread_self()
+#endif
+#ifdef SOLARIS
+	  pthread_self()
+#endif
+#ifdef WINDOWS
+	  GetCurrentThreadId()
+#endif
+	  );
+}
+
+
+/* Bind an allocated TCR to the calling thread: record its OS ids,
+   register its C stack as an area, publish the TCR through the
+   TCR_KEY thread-specific slot, and do per-platform exception/IO
+   setup. */
+void
+thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
+{
+  area *a, *register_cstack_holding_area_lock(BytePtr, natural);
+
+  tcr->osid = current_thread_osid();
+  tcr->native_thread_id = current_native_thread_id();
+  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
+  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
+  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
+  tcr->cs_area = a;
+  a->owner = tcr;
+  /* Foreign threads don't get a lisp control-stack limit. */
+  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
+    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
+  }
+#ifdef LINUX
+#ifdef PPC
+#ifndef PPC64
+  tcr->native_thread_info = current_r2;
+#endif
+#endif
+#endif
+  tcr->errno_loc = &errno;
+  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
+#ifdef DARWIN
+  extern Boolean use_mach_exception_handling;
+  if (use_mach_exception_handling) {
+    darwin_exception_init(tcr);
+  }
+#endif
+#ifdef LINUX
+  linux_exception_init(tcr);
+#endif
+#ifdef WINDOWS
+  /* Manual-reset event used for interruptible I/O, plus a CONTEXT
+     buffer used when suspending this thread. */
+  tcr->io_datum = (VOID *)CreateEvent(NULL, true, false, NULL);
+  tcr->native_thread_info = malloc(sizeof(CONTEXT));
+#endif
+  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
+}
+
+/*
+  Register the specified tcr as "belonging to" the current thread.
+  Under Darwin, setup Mach exception handling for the thread.
+  Install cleanup handlers for thread termination.
+*/
+void
+register_thread_tcr(TCR *tcr)
+{
+  void *stack_base = NULL;
+  natural stack_size = 0;
+
+  /* Bind the TCR to this thread's C stack, then link it onto the
+     global TCR list. */
+  os_get_current_thread_stack_bounds(&stack_base, &stack_size);
+  thread_init_tcr(tcr, stack_base, stack_size);
+  enqueue_tcr(tcr);
+}
+
+
+  
+  
+#ifndef MAP_GROWSDOWN
+#define MAP_GROWSDOWN 0
+#endif
+
+/* Allocate a stack of (at least) the requested size.  The size is
+   rounded up to a page boundary and recorded in the first word of
+   the mapping so free_stack() knows how much to unmap.  On failure,
+   allocation_failure() is reported; if that returns, so do we. */
+Ptr
+create_stack(natural size)
+{
+  Ptr p;
+  size=align_to_power_of_2(size, log2_page_size);
+  p = (Ptr) MapMemoryForStack((size_t)size);
+  if (p != (Ptr)(-1)) {
+    *((size_t *)p) = size;
+    return p;
+  }
+  allocation_failure(true, size);
+  /* Previously control fell off the end of this non-void function
+     (undefined behavior if the value was used). */
+  return NULL;
+}
+
+/* Thin alias for create_stack(), returned as a generic pointer. */
+void *
+allocate_stack(natural size)
+{
+  return create_stack(size);
+}
+
+/* Unmap a stack created by create_stack(); its size is stored in
+   the first word of the mapping. */
+void
+free_stack(void *s)
+{
+  size_t size = *((size_t *)s);
+  UnMapMemory(s, size);
+}
+
+Boolean threads_initialized = false;
+
+#ifndef USE_FUTEX
+#ifdef WINDOWS
+/* On multiprocessor machines, spin a while before blocking on a
+   lock (spin_lock_tries = 1024); uniprocessors keep the default. */
+void
+count_cpus()
+{
+  SYSTEM_INFO si;
+
+  GetSystemInfo(&si);
+  if (si.dwNumberOfProcessors > 1) {
+    spin_lock_tries = 1024;
+  }
+}
+#else
+/* On multiprocessor machines, spin a while before blocking on a
+   lock (spin_lock_tries = 1024); uniprocessors keep the default. */
+void
+count_cpus()
+{
+#ifdef DARWIN
+  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
+#include <mach/host_info.h>
+
+  struct host_basic_info info;
+  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+  
+  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
+    if (info.max_cpus > 1) {
+      spin_lock_tries = 1024;
+    }
+  }
+#else
+  int n = sysconf(_SC_NPROCESSORS_ONLN);
+  
+  if (n > 1) {
+    spin_lock_tries = 1024;
+  }
+#endif
+}
+#endif
+#endif
+
+/* One-time initialization of the thread subsystem for the initial
+   thread: record the initial TCR, create the TCR_KEY thread-specific
+   slot (whose destructor is shutdown_thread_tcr on POSIX), set up
+   signals, and calibrate spin locking. */
+void
+init_threads(void * stack_base, TCR *tcr)
+{
+  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
+#ifdef WINDOWS
+  lisp_global(TCR_KEY) = TlsAlloc();
+  /* These two are Vista+ APIs; resolved dynamically so the kernel
+     still loads on older Windows. */
+  pCancelIoEx = windows_find_symbol(NULL, "CancelIoEx");
+  pCancelSynchronousIo = windows_find_symbol(NULL, "CancelSynchronousIo");
+#else
+  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
+  thread_signal_setup();
+#endif
+
+#ifndef USE_FUTEX
+  count_cpus();
+#endif
+  threads_initialized = true;
+}
+
+
+#ifdef WINDOWS
+unsigned CALLBACK
+#else
+void *
+#endif
+/* Native entry point for a new lisp thread.  Builds and registers a
+   TCR, signals the creator via activation->created, then loops:
+   announce readiness (reset_completion), wait to be activated, run
+   lisp, and repeat while the thread is awaiting a new preset. */
+lisp_thread_entry(void *param)
+{
+  thread_activation *activation = (thread_activation *)param;
+  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
+  LispObj *start_vsp;
+#ifndef WINDOWS
+  sigset_t mask, old_mask;
+
+  /* Run with no signals blocked. */
+  sigemptyset(&mask);
+  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
+#endif
+
+  register_thread_tcr(tcr);
+
+#ifndef WINDOWS
+  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
+#endif
+  /* Push NIL on the value stack and remember the resulting vsp so
+     it can be restored before each re-activation. */
+  tcr->vs_area->active -= node_size;
+  *(--tcr->save_vsp) = lisp_nil;
+  start_vsp = tcr->save_vsp;
+  enable_fp_exceptions();
+  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
+  activation->tcr = tcr;
+  SEM_RAISE(activation->created);
+  do {
+    SEM_RAISE(tcr->reset_completion);
+    SEM_WAIT_FOREVER(tcr->activate);
+    /* Now go run some lisp code */
+    start_lisp(TCR_TO_TSD(tcr),0);
+    tcr->save_vsp = start_vsp;
+  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
+#ifndef WINDOWS
+  pthread_cleanup_pop(true);
+#else
+  tcr_cleanup(tcr);
+#endif
+#ifdef WINDOWS
+  return 0;
+#else
+  return NULL;
+#endif
+}
+
+typedef 
+short (*suspendf)();
+
+
+/* Suspend the calling thread via the (dynamically looked-up)
+   SetThreadState entry point, if one exists.  The lookup result is
+   cached in a static on first use; if the symbol is absent this is
+   a no-op. */
+void
+suspend_current_cooperative_thread()
+{
+  static suspendf cooperative_suspend = NULL;
+  void *xFindSymbol(void*,char*);
+
+  if (cooperative_suspend == NULL) {
+    cooperative_suspend = (suspendf)xFindSymbol(NULL, "SetThreadState");
+  }
+  if (cooperative_suspend) {
+    cooperative_suspend(1 /* kCurrentThreadID */,
+                        1 /* kStoppedThreadState */,
+                        0 /* kAnyThreadID */);
+  }
+}
+
+/* Like lisp_thread_entry(), but for "cooperative" threads that
+   already have a TCR: announce readiness, suspend cooperatively,
+   run lisp, and repeat while awaiting a new preset. */
+void *
+cooperative_thread_startup(void *arg)
+{
+
+  TCR *tcr = get_tcr(0);
+  LispObj *start_vsp;
+
+  if (!tcr) {
+    return NULL;
+  }
+#ifndef WINDOWS
+  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
+#endif
+  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
+  start_vsp = tcr->save_vsp;
+  do {
+    SEM_RAISE(tcr->reset_completion);
+    suspend_current_cooperative_thread();
+
+    start_lisp(tcr, 0);
+    tcr->save_vsp = start_vsp;
+  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
+#ifndef WINDOWS
+  pthread_cleanup_pop(true);
+#else
+  tcr_cleanup(tcr);
+#endif
+  /* Previously this non-void function had no return on the main
+     path (undefined behavior if the value was used). */
+  return NULL;
+}
+
+/* Create a new lisp thread with the given stack sizes and wait for
+   it to reach its initial entry point.  Returns the new thread's
+   TCR as a TSD value (0 if creation failed). */
+void *
+xNewThread(natural control_stack_size,
+	   natural value_stack_size,
+	   natural temp_stack_size)
+
+{
+  thread_activation activation;
+
+
+  activation.tsize = temp_stack_size;
+  activation.vsize = value_stack_size;
+  activation.tcr = 0;
+  activation.created = new_semaphore(0);
+  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
+                           NULL, 
+                           lisp_thread_entry,
+                           (void *) &activation)) {
+    
+    SEM_WAIT_FOREVER(activation.created);	/* Wait until thread's entered its initial function */
+  }
+  destroy_semaphore(&activation.created);  
+  return TCR_TO_TSD(activation.tcr);
+}
+
+/* Return true iff q is on the circular list of live TCRs headed by
+   the initial thread's TCR. */
+Boolean
+active_tcr_p(TCR *q)
+{
+  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
+  TCR *scan = head;
+
+  for (;;) {
+    if (scan == q) {
+      return true;
+    }
+    scan = scan->next;
+    if (scan == head) {
+      return false;
+    }
+  }
+}
+
+#ifdef WINDOWS
+/* Windows stub for thread disposal. */
+OSErr
+xDisposeThread(TCR *tcr)
+{
+  return 0;                     /* I don't think that this is ever called. */
+}
+#else
+/* Cancel the pthread behind tcr, provided it is a live TCR and is
+   neither the initial thread nor the calling thread.  Returns 0 on
+   success, -50 (paramErr-style code) otherwise. */
+OSErr
+xDisposeThread(TCR *tcr)
+{
+  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
+    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
+      pthread_cancel((pthread_t)(tcr->osid));
+      return 0;
+    }
+  }
+  return -50;
+}
+#endif
+
+/* Unsupported legacy entry point; deliberately traps via Bug(). */
+OSErr
+xYieldToThread(TCR *target)
+{
+  Bug(NULL, "xYieldToThread ?");
+  return 0;
+}
+  
+/* Unsupported legacy entry point; deliberately traps via Bug(). */
+OSErr
+xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
+{
+  Bug(NULL, "xThreadCurrentStackSpace ?");
+  return 0;
+}
+
+
+#ifdef WINDOWS
+/* Create a native Windows thread running start_routine(param);
+   returns its HANDLE as a LispObj (NULL handle on failure).  The
+   requested stack size is rounded up but then deliberately not
+   passed to _beginthreadex (0 selects the executable's default),
+   per the inline comment below. */
+LispObj
+create_system_thread(size_t stack_size,
+		     void* stackaddr,
+		     unsigned CALLBACK (*start_routine)(void *),
+		     void* param)
+{
+  HANDLE thread_handle;
+
+  /* Round up to a 64K granule boundary. */
+  stack_size = ((stack_size+(((1<<16)-1)))&~((1<<16)-1));
+
+  thread_handle = (HANDLE)_beginthreadex(NULL, 
+                                         0/*stack_size*/,
+                                         start_routine,
+                                         param,
+                                         0, 
+                                         NULL);
+
+  if (thread_handle == NULL) {
+    /* was wperror("CreateThread"): the failing API is _beginthreadex */
+    wperror("_beginthreadex");
+  }
+  return (LispObj) ptr_to_lispobj(thread_handle);
+}
+#else
+/* Create a detached pthread running start_routine(param); returns
+   the pthread_t as a LispObj (0 if pthread_create failed to fill it
+   in).  stack_size is normalized first; an explicit stackaddr is
+   only honored where pthread_attr_setstack exists. */
+LispObj
+create_system_thread(size_t stack_size,
+		     void* stackaddr,
+		     void* (*start_routine)(void *),
+		     void* param)
+{
+  pthread_attr_t attr;
+  pthread_t returned_thread = (pthread_t) 0;
+  TCR *current = get_tcr(true);
+
+  pthread_attr_init(&attr);
+  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);  
+
+  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
+    stack_size = PTHREAD_STACK_MIN;
+  }
+
+  stack_size = ensure_stack_limit(stack_size);
+  if (stackaddr != NULL) {
+    /* Size must have been specified.  Sort of makes sense ... */
+#ifdef DARWIN
+    Fatal("no pthread_attr_setsetstack. "," Which end of stack does address refer to?");
+#else
+    pthread_attr_setstack(&attr, stackaddr, stack_size);
+#endif
+  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
+    pthread_attr_setstacksize(&attr,stack_size);
+  }
+
+  /* 
+     I think that's just about enough ... create the thread.
+     Well ... not quite enough.  In Leopard (at least), many
+     pthread routines grab an internal spinlock when validating
+     their arguments.  If we suspend a thread that owns this
+     spinlock, we deadlock.  We can't in general keep that
+     from happening: if arbitrary C code is suspended while
+     it owns the spinlock, we still deadlock.  It seems that
+     the best that we can do is to keep -this- code from
+     getting suspended (by grabbing TCR_AREA_LOCK)
+  */
+  LOCK(lisp_global(TCR_AREA_LOCK),current);
+  pthread_create(&returned_thread, &attr, start_routine, param);
+  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
+  pthread_attr_destroy(&attr);
+  return (LispObj) ptr_to_lispobj(returned_thread);
+}
+#endif
+
+/* Return the calling thread's TCR.  If the thread has none and
+   create is true, build one for this foreign thread: allocate and
+   register a TCR, reserve save-register and thread-binding slots on
+   the value stack (sized by the FOREIGN-THREAD-CONTROL callback
+   called with -1), then let the callback (arg 0) initialize the
+   bindings.  Returns NULL for an unknown thread when create is
+   false. */
+TCR *
+get_tcr(Boolean create)
+{
+#ifdef HAVE_TLS
+  TCR *current = current_tcr;
+#else
+  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
+  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
+#endif
+
+  if ((current == NULL) && create) {
+    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
+      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
+    int i, nbindwords = 0;
+    extern natural initial_stack_size;
+    
+    /* Make one. */
+    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
+    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
+    register_thread_tcr(current);
+#ifdef DEBUG_TCR_CREATION
+#ifndef WINDOWS
+    fprintf(dbgout, "\ncreating TCR for pthread 0x%x", pthread_self());
+#endif
+#endif
+    /* Push NIL, then architecture-dependent save-register slots. */
+    current->vs_area->active -= node_size;
+    *(--current->save_vsp) = lisp_nil;
+#ifdef PPC
+#define NSAVEREGS 8
+#endif
+#ifdef X8664
+#define NSAVEREGS 4
+#endif
+#ifdef X8632
+#define NSAVEREGS 0
+#endif
+    for (i = 0; i < NSAVEREGS; i++) {
+      *(--current->save_vsp) = 0;
+      current->vs_area->active -= node_size;
+    }
+    /* Callback with -1 reports how many binding words to reserve. */
+    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
+    for (i = 0; i < nbindwords; i++) {
+      *(--current->save_vsp) = 0;
+      current->vs_area->active -= node_size;
+    }
+    current->shutdown_count = 1;
+    /* Callback with 0 performs the actual lisp-side setup. */
+    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
+
+  }
+  
+  return current;
+}
+
+#ifdef WINDOWS
+/* Called (from suspend_tcr) when a thread was suspended while it was
+   part-way through restore_windows_context: rewrite *pcontext so
+   that resuming from the suspend completes the interrupted context
+   restore.  `where` is the suspended PC within the restore code.
+   Clears tcr->pending_exception_context.  The return value is not
+   meaningful; callers ignore it. */
+void *
+pc_luser_restore_windows_context(CONTEXT *pcontext, TCR *tcr, pc where)
+{
+  /* Thread has started to return from an exception. */
+  if (where < restore_windows_context_iret) {
+    /* In the process of restoring registers; context still in
+       %rcx.  Just make our suspend_context be the context
+       we're trying to restore, so that we'll resume from
+       the suspend in the same context that we're trying to
+       restore */
+#ifdef WIN_64
+    *pcontext = * (CONTEXT *)(pcontext->Rcx);
+#else
+    *pcontext = * (CONTEXT *)(pcontext->Ecx);
+#endif
+  } else {
+    /* Most of the context has already been restored; fix %rcx
+       if need be, then restore ss:rsp, cs:rip, and flags. */
+#ifdef WIN_64
+    x64_iret_frame *iret_frame = (x64_iret_frame *) (pcontext->Rsp);
+
+    pcontext->Rip = iret_frame->Rip;
+    pcontext->SegCs = (WORD) iret_frame->Cs;
+    pcontext->EFlags = (DWORD) iret_frame->Rflags;
+    pcontext->Rsp = iret_frame->Rsp;
+    pcontext->SegSs = (WORD) iret_frame->Ss;
+#else
+    ia32_iret_frame *iret_frame = (ia32_iret_frame *) (pcontext->Esp);
+
+    pcontext->Eip = iret_frame->Eip;
+    pcontext->SegCs = (WORD) iret_frame->Cs;
+    pcontext->EFlags = (DWORD) iret_frame->EFlags;
+    pcontext->Esp += sizeof(ia32_iret_frame);
+#endif
+  }
+  tcr->pending_exception_context = NULL;
+  /* Previously this non-void function never returned a value
+     (undefined behavior if the value were used). */
+  return NULL;
+}
+
+/* Suspend the Windows thread behind tcr.  Only the 0 -> 1 suspend
+   count transition does real work: SuspendThread, capture the
+   thread's CONTEXT, and repair the state if the thread was caught
+   in an awkward spot (mid context-restore, or between taking an
+   exception and reaching its handler).  Returns true if the thread
+   is now suspended with a valid suspend_context. */
+Boolean
+suspend_tcr(TCR *tcr)
+{
+  int suspend_count = atomic_incf(&(tcr->suspend_count));
+  DWORD rc;
+  if (suspend_count == 1) {
+    /* The per-thread CONTEXT buffer allocated in thread_init_tcr. */
+    CONTEXT  *pcontext = (CONTEXT *)tcr->native_thread_info;
+    HANDLE hthread = (HANDLE)(tcr->osid);
+    pc where;
+    area *cs = tcr->cs_area;
+    LispObj foreign_rsp;
+
+    if (hthread == NULL) {
+      /* osid == 0: the thread is already dead. */
+      return false;
+    }
+    rc = SuspendThread(hthread);
+    if (rc == -1) {
+      /* If the thread's simply dead, we should handle that here */
+      return false;
+    }
+    pcontext->ContextFlags = CONTEXT_ALL;
+    rc = GetThreadContext(hthread, pcontext);
+    if (rc == 0) {
+      return false;
+    }
+    where = (pc)(xpPC(pcontext));
+
+    if (tcr->valence == TCR_STATE_LISP) {
+      if ((where >= restore_windows_context_start) &&
+          (where < restore_windows_context_end)) {
+        /* Caught mid context-restore: finish the restore into
+           pcontext so resumption is coherent. */
+        pc_luser_restore_windows_context(pcontext, tcr, where);
+      } else {
+        area *ts = tcr->ts_area;
+        /* If we're in the lisp heap, or in x86-spentry??.o, or in
+           x86-subprims??.o, or in the subprims jump table at #x15000,
+           or on the tstack ... we're just executing lisp code.  Otherwise,
+           we got an exception while executing lisp code, but haven't
+           entered the handler yet (still in Windows exception glue
+           or switching stacks or something.)  In the latter case, we
+           basically want to get to he handler and have it notice
+           the pending exception request, and suspend the thread at that
+           point. */
+        if (!((where < (pc)lisp_global(HEAP_END)) &&
+              (where >= (pc)lisp_global(HEAP_START))) &&
+            !((where < spentry_end) && (where >= spentry_start)) &&
+            !((where < subprims_end) && (where >= subprims_start)) &&
+            !((where < (pc) 0x16000) &&
+              (where >= (pc) 0x15000)) &&
+            !((where < (pc) (ts->high)) &&
+              (where >= (pc) (ts->low)))) {
+          /* The thread has lisp valence, but is not executing code
+             where we expect lisp code to be and is not exiting from
+             an exception handler.  That pretty much means that it's
+             on its way into an exception handler; we have to handshake
+             until it enters an exception-wait state. */
+          /* There are likely race conditions here */
+          SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
+          ResumeThread(hthread);
+          SEM_WAIT_FOREVER(tcr->suspend);
+          SuspendThread(hthread);
+          /* The thread is either waiting for its resume semaphore to
+             be signaled or is about to wait.  Signal it now, while
+             the thread's suspended. */
+          SEM_RAISE(tcr->resume);
+          pcontext->ContextFlags = CONTEXT_ALL;
+          GetThreadContext(hthread, pcontext);
+        }
+      }
+#if 0
+    } else {
+      if (tcr->valence == TCR_STATE_EXCEPTION_RETURN) {
+        if (!tcr->pending_exception_context) {
+          FBug(pcontext, "we're confused here.");
+        }
+        *pcontext = *tcr->pending_exception_context;
+        tcr->pending_exception_context = NULL;
+        tcr->valence = TCR_STATE_LISP;
+      }
+#endif
+    }
+    tcr->suspend_context = pcontext;
+    return true;
+  }
+  return false;
+}
+#else
+/* Suspend the pthread behind tcr by sending thread_suspend_signal.
+   Only the 0 -> 1 suspend count transition does real work; the
+   SUSPEND_ACK_PENDING flag tells tcr_suspend_ack() to wait for the
+   signal handler's acknowledgement.  Returns true if the signal was
+   delivered. */
+Boolean
+suspend_tcr(TCR *tcr)
+{
+  int suspend_count = atomic_incf(&(tcr->suspend_count));
+  pthread_t thread;
+  if (suspend_count == 1) {
+    thread = (pthread_t)(tcr->osid);
+    if ((thread != (pthread_t) 0) &&
+        (pthread_kill(thread, thread_suspend_signal) == 0)) {
+      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
+    } else {
+      /* A problem using pthread_kill.  On Darwin, this can happen
+	 if the thread has had its signal mask surgically removed
+	 by pthread_exit.  If the native (Mach) thread can be suspended,
+	 do that and return true; otherwise, flag the tcr as belonging
+	 to a dead thread by setting tcr->osid to 0.
+      */
+      tcr->osid = 0;
+      return false;
+    }
+    return true;
+  }
+  return false;
+}
+#endif
+
+#ifdef WINDOWS
+/* On Windows, SuspendThread is synchronous: nothing to wait for. */
+Boolean
+tcr_suspend_ack(TCR *tcr)
+{
+  return true;
+}
+#else
+/* Wait (if necessary) for the suspended thread's signal handler to
+   raise tcr->suspend, acknowledging that it has stopped; then clear
+   the pending-ack flag. */
+Boolean
+tcr_suspend_ack(TCR *tcr)
+{
+  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
+    SEM_WAIT_FOREVER(tcr->suspend);
+    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
+  }
+  return true;
+}
+#endif
+      
+
+/* Forcibly terminate the thread behind tcr (no-op for an already
+   dead thread, i.e. osid == 0).  Returns true if a kill was
+   attempted and succeeded.  Holds TCR_AREA_LOCK throughout. */
+Boolean
+kill_tcr(TCR *tcr)
+{
+  TCR *current = get_tcr(true);
+  Boolean result = false;
+
+  LOCK(lisp_global(TCR_AREA_LOCK),current);
+  {
+    LispObj osid = tcr->osid;
+    
+    if (osid) {
+      result = true;
+#ifdef WINDOWS
+      /* What we really want to do here is (something like)
+         forcing the thread to run quit_handler().  For now,
+         mark the TCR as dead and kill the Windows thread. */
+      tcr->osid = 0;
+      if (!TerminateThread((HANDLE)osid, 0)) {
+        result = false;
+      } else {
+        shutdown_thread_tcr(tcr);
+      }
+#else
+      if (pthread_kill((pthread_t)osid,thread_kill_signal)) {
+        result = false;
+      }
+#endif
+    }
+  }
+  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
+  return result;
+}
+
+/* Public suspend entry point: suspend tcr's thread and wait until
+   the suspension is acknowledged, all under TCR_AREA_LOCK. */
+Boolean
+lisp_suspend_tcr(TCR *tcr)
+{
+  Boolean suspended;
+  TCR *current = get_tcr(true);
+  
+  LOCK(lisp_global(TCR_AREA_LOCK),current);
+  suspended = suspend_tcr(tcr);
+  if (suspended) {
+    while (!tcr_suspend_ack(tcr));
+  }
+  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
+  return suspended;
+}
+	 
+#ifdef WINDOWS
+/* Decrement tcr's suspend count; when it reaches zero, reinstall
+   the context captured at suspend time and let the Windows thread
+   run.  Returns true iff the thread was actually resumed. */
+Boolean
+resume_tcr(TCR *tcr)
+{
+  /* note: removed an unused local `err` from the declaration below */
+  int suspend_count = atomic_decf(&(tcr->suspend_count));
+  DWORD rc;
+  if (suspend_count == 0) {
+    CONTEXT *context = tcr->suspend_context;
+    HANDLE hthread = (HANDLE)(tcr->osid);
+
+    if (context) {
+      context->ContextFlags = CONTEXT_ALL;
+      tcr->suspend_context = NULL;
+      SetThreadContext(hthread,context);
+      rc = ResumeThread(hthread);
+      if (rc == -1) {
+        wperror("ResumeThread");
+        return false;
+      }
+      return true;
+    }
+  }
+  return false;
+}   
+#else
+/* Decrement tcr's suspend count; when it reaches zero, signal the
+   thread's resume semaphore.  Returns true iff the semaphore was
+   raised. */
+Boolean
+resume_tcr(TCR *tcr)
+{
+  if (atomic_decf(&(tcr->suspend_count)) == 0) {
+    void *sem = tcr->resume;
+
+    if (sem != NULL) {
+      SEM_RAISE(sem);
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+    
+
+
+/* Public resume entry point: resume tcr's thread under
+   TCR_AREA_LOCK. */
+Boolean
+lisp_resume_tcr(TCR *tcr)
+{
+  Boolean resumed;
+  TCR *current = get_tcr(true);
+  
+  LOCK(lisp_global(TCR_AREA_LOCK),current);
+  resumed = resume_tcr(tcr);
+  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
+  return resumed;
+}
+
+
+TCR *freed_tcrs = NULL;
+
+/* Push a dead thread's TCR onto the freed_tcrs list for later
+   disposal by free_freed_tcrs().  With TLS, TCRs live in
+   thread-local storage and are not queued. */
+void
+enqueue_freed_tcr (TCR *tcr)
+{
+#ifndef HAVE_TLS
+  tcr->next = freed_tcrs;
+  freed_tcrs = tcr;
+#endif
+}
+
+/* It's not clear that we can safely condemn a dead tcr's areas, since
+   we may not be able to call free() if a suspended thread owns a 
+   malloc lock. At least make the areas appear to be empty. 
+*/
+   
+
+/* Make a dead thread's stack areas appear empty by resetting each
+   area's active pointer to its high end. */
+void
+normalize_dead_tcr_areas(TCR *tcr)
+{
+  area *areas[3];
+  int i;
+
+  areas[0] = tcr->vs_area;
+  areas[1] = tcr->ts_area;
+  areas[2] = tcr->cs_area;
+
+  for (i = 0; i < 3; i++) {
+    area *a = areas[i];
+
+    if (a != NULL) {
+      a->active = a->high;
+    }
+  }
+}
+    
+/* Release every TCR queued by enqueue_freed_tcr() and empty the
+   list.  On WIN32 the original (unaligned) allocation pointer is
+   what must be freed. */
+void
+free_freed_tcrs ()
+{
+  TCR *current, *next;
+
+  for (current = freed_tcrs; current; current = next) {
+    next = current->next;
+#ifndef HAVE_TLS
+#ifdef WIN32
+    free(current->allocated);
+#else
+    free(current);
+#endif
+#endif
+  }
+  freed_tcrs = NULL;
+}
+
+/* Suspend every thread except the caller.  NOTE: acquires
+   TCR_AREA_LOCK and deliberately returns with it still held; the
+   matching resume_other_threads() releases it.  After all live
+   threads have acknowledged suspension, dead TCRs (osid == 0) are
+   unlinked and queued for disposal. */
+void
+suspend_other_threads(Boolean for_gc)
+{
+  TCR *current = get_tcr(true), *other, *next;
+  int dead_tcr_count = 0;
+  Boolean all_acked;
+
+  LOCK(lisp_global(TCR_AREA_LOCK), current);
+  /* First pass: request suspension of every other live thread. */
+  for (other = current->next; other != current; other = other->next) {
+    if ((other->osid != 0)) {
+      suspend_tcr(other);
+      if (other->osid == 0) {
+	dead_tcr_count++;
+      }
+    } else {
+      dead_tcr_count++;
+    }
+  }
+
+  /* Second pass: loop until every live thread has acknowledged. */
+  do {
+    all_acked = true;
+    for (other = current->next; other != current; other = other->next) {
+      if ((other->osid != 0)) {
+        if (!tcr_suspend_ack(other)) {
+          all_acked = false;
+        }
+      }
+    }
+  } while(! all_acked);
+
+      
+
+  /* All other threads are suspended; can safely delete dead tcrs now */
+  if (dead_tcr_count) {
+    for (other = current->next; other != current; other = next) {
+      next = other->next;
+      if ((other->osid == 0))  {
+        normalize_dead_tcr_areas(other);
+	dequeue_tcr(other);
+	enqueue_freed_tcr(other);
+      }
+    }
+  }
+}
+
+/* Lisp-visible wrapper: suspend all other threads (not for GC). */
+void
+lisp_suspend_other_threads()
+{
+  suspend_other_threads(false);
+}
+
+/* Resume every thread suspended by suspend_other_threads(), free
+   queued dead TCRs, and release the TCR_AREA_LOCK that
+   suspend_other_threads() left held. */
+void
+resume_other_threads(Boolean for_gc)
+{
+  TCR *current = get_tcr(true), *other;
+  for (other = current->next; other != current; other = other->next) {
+    if ((other->osid != 0)) {
+      resume_tcr(other);
+    }
+  }
+  free_freed_tcrs();
+  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
+}
+
+/* Lisp-visible wrapper: resume all other threads (not for GC). */
+void
+lisp_resume_other_threads()
+{
+  resume_other_threads(false);
+}
+
+
+
+/* Allocate a cache-line-aligned rwlock.  The raw calloc pointer is
+   remembered in rw->malloced_ptr so the lock can eventually be
+   freed.  Returns NULL on allocation (or, in the semaphore-based
+   configuration, semaphore-creation) failure. */
+rwlock *
+rwlock_new()
+{
+  extern int cache_block_size;
+
+  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
+  rwlock *rw = NULL;
+  
+  if (p) {
+    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
+    rw->malloced_ptr = p;
+#ifndef USE_FUTEX
+    rw->reader_signal = new_semaphore(0);
+    rw->writer_signal = new_semaphore(0);
+    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
+      if (rw->reader_signal) {
+        destroy_semaphore(&(rw->reader_signal));
+      } else {
+        destroy_semaphore(&(rw->writer_signal));
+      }
+      /* was free(rw): rw is the aligned pointer, not what calloc
+         returned; freeing it is undefined behavior.  Free p. */
+      free(p);
+      rw = NULL;
+    }
+#endif
+  }
+  return rw;
+}
+
+     
+/*
+  Try to get read access to a multiple-readers/single-writer lock.  If
+  we already have read access, return success (indicating that the
+  lock is held another time.  If we already have write access to the
+  lock ... that won't work; return EDEADLK.  Wait until no other
+  thread has or is waiting for write access, then indicate that we
+  hold read access once.
+*/
+#ifndef USE_FUTEX
+/* Semaphore-based reader lock.  Negative rw->state counts readers,
+   positive counts a writer's recursive holds.  Blocks (optionally
+   with a timeout) while a writer holds or is waiting for the lock.
+   Returns 0 on success, EDEADLK if the caller already holds write
+   access, or a semaphore-wait error (e.g. ETIMEDOUT). */
+int
+rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
+{
+  int err = 0;
+  
+  LOCK_SPINLOCK(rw->spin, tcr);
+
+  if (rw->writer == tcr) {
+    RELEASE_SPINLOCK(rw->spin);
+    return EDEADLK;
+  }
+
+  /* Writers (holding or blocked) take precedence over new readers. */
+  while (rw->blocked_writers || (rw->state > 0)) {
+    rw->blocked_readers++;
+    RELEASE_SPINLOCK(rw->spin);
+    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
+    LOCK_SPINLOCK(rw->spin,tcr);
+    rw->blocked_readers--;
+    if (err == EINTR) {
+      err = 0;                  /* interrupted waits are retried */
+    }
+    if (err) {
+      RELEASE_SPINLOCK(rw->spin);
+      return err;
+    }
+  }
+  rw->state--;                  /* one more reader */
+  RELEASE_SPINLOCK(rw->spin);
+  return err;
+}
+#else
+int
+rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
+{
+  natural waitval;
+
+  lock_futex(&rw->spin);
+
+  if (rw->writer == tcr) {
+    unlock_futex(&rw->spin);
+    return EDEADLOCK;
+  }
+  while (1) {
+    if (rw->writer == NULL) {
+      --rw->state;
+      unlock_futex(&rw->spin);
+      return 0;
+    }
+    rw->blocked_readers++;
+    waitval = rw->reader_signal;
+    unlock_futex(&rw->spin);
+    futex_wait(&rw->reader_signal,waitval);
+    lock_futex(&rw->spin);
+    rw->blocked_readers--;
+  }
+  return 0;
+}
+#endif   
+
+
+/*
+  Try to obtain write access to the lock.
+  It is an error if we already have read access, but it's hard to
+  detect that.
+  If we already have write access, increment the count that indicates
+  that.
+  Otherwise, wait until the lock is not held for reading or writing,
+  then assert write access.
+*/
+
+#ifndef USE_FUTEX
+int
+rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
+{
+  int err = 0;
+
+  LOCK_SPINLOCK(rw->spin,tcr);
+  if (rw->writer == tcr) {
+    rw->state++;
+    RELEASE_SPINLOCK(rw->spin);
+    return 0;
+  }
+
+  while (rw->state != 0) {
+    rw->blocked_writers++;
+    RELEASE_SPINLOCK(rw->spin);
+    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
+    LOCK_SPINLOCK(rw->spin,tcr);
+    rw->blocked_writers--;
+    if (err == EINTR) {
+      err = 0;
+    }
+    if (err) {
+      RELEASE_SPINLOCK(rw->spin);
+      return err;
+    }
+  }
+  rw->state = 1;
+  rw->writer = tcr;
+  RELEASE_SPINLOCK(rw->spin);
+  return err;
+}
+
+#else
+int
+rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
+{
+  int err = 0;
+  natural waitval;
+
+  lock_futex(&rw->spin);
+  if (rw->writer == tcr) {
+    rw->state++;
+    unlock_futex(&rw->spin);
+    return 0;
+  }
+
+  while (rw->state != 0) {
+    rw->blocked_writers++;
+    waitval = rw->writer_signal;
+    unlock_futex(&rw->spin);
+    futex_wait(&rw->writer_signal,waitval);
+    lock_futex(&rw->spin);
+    rw->blocked_writers--;
+  }
+  rw->state = 1;
+  rw->writer = tcr;
+  unlock_futex(&rw->spin);
+  return err;
+}
+#endif
+
+/*
+  Sort of the same as above, only return EBUSY if we'd have to wait.
+*/
+#ifndef USE_FUTEX
+int
+rwlock_try_wlock(rwlock *rw, TCR *tcr)
+{
+  int ret = EBUSY;
+
+  LOCK_SPINLOCK(rw->spin,tcr);
+  if (rw->writer == tcr) {
+    rw->state++;
+    ret = 0;
+  } else {
+    if (rw->state == 0) {
+      rw->writer = tcr;
+      rw->state = 1;
+      ret = 0;
+    }
+  }
+  RELEASE_SPINLOCK(rw->spin);
+  return ret;
+}
+#else
+int
+rwlock_try_wlock(rwlock *rw, TCR *tcr)
+{
+  int ret = EBUSY;
+
+  lock_futex(&rw->spin);
+  if (rw->writer == tcr) {
+    rw->state++;
+    ret = 0;
+  } else {
+    if (rw->state == 0) {
+      rw->writer = tcr;
+      rw->state = 1;
+      ret = 0;
+    }
+  }
+  unlock_futex(&rw->spin);
+  return ret;
+}
+#endif
+
+#ifndef USE_FUTEX
+int
+rwlock_try_rlock(rwlock *rw, TCR *tcr)
+{
+  int ret = EBUSY;
+
+  LOCK_SPINLOCK(rw->spin,tcr);
+  if (rw->state <= 0) {
+    --rw->state;
+    ret = 0;
+  }
+  RELEASE_SPINLOCK(rw->spin);
+  return ret;
+}
+#else
+int
+rwlock_try_rlock(rwlock *rw, TCR *tcr)
+{
+  int ret = EBUSY;
+
+  lock_futex(&rw->spin);
+  if (rw->state <= 0) {
+    --rw->state;
+    ret = 0;
+  }
+  unlock_futex(&rw->spin);
+  return ret;
+}
+#endif
+
+
+
+#ifndef USE_FUTEX
+int
+rwlock_unlock(rwlock *rw, TCR *tcr)
+{
+
+  int err = 0;
+  natural blocked_readers = 0;
+
+  LOCK_SPINLOCK(rw->spin,tcr);
+  if (rw->state > 0) {
+    if (rw->writer != tcr) {
+      err = EINVAL;
+    } else {
+      --rw->state;
+      if (rw->state == 0) {
+        rw->writer = NULL;
+      }
+    }
+  } else {
+    if (rw->state < 0) {
+      ++rw->state;
+    } else {
+      err = EINVAL;
+    }
+  }
+  if (err) {
+    RELEASE_SPINLOCK(rw->spin);
+    return err;
+  }
+  
+  if (rw->state == 0) {
+    if (rw->blocked_writers) {
+      SEM_RAISE(rw->writer_signal);
+    } else {
+      blocked_readers = rw->blocked_readers;
+      if (blocked_readers) {
+        SEM_BROADCAST(rw->reader_signal, blocked_readers);
+      }
+    }
+  }
+  RELEASE_SPINLOCK(rw->spin);
+  return 0;
+}
+#else
+int
+rwlock_unlock(rwlock *rw, TCR *tcr)
+{
+
+  int err = 0;
+
+  lock_futex(&rw->spin);
+  if (rw->state > 0) {
+    if (rw->writer != tcr) {
+      err = EINVAL;
+    } else {
+      --rw->state;
+      if (rw->state == 0) {
+        rw->writer = NULL;
+      }
+    }
+  } else {
+    if (rw->state < 0) {
+      ++rw->state;
+    } else {
+      err = EINVAL;
+    }
+  }
+  if (err) {
+    unlock_futex(&rw->spin);
+    return err;
+  }
+  
+  if (rw->state == 0) {
+    if (rw->blocked_writers) {
+      ++rw->writer_signal;
+      unlock_futex(&rw->spin);
+      futex_wake(&rw->writer_signal,1);
+      return 0;
+    }
+    if (rw->blocked_readers) {
+      ++rw->reader_signal;
+      unlock_futex(&rw->spin);
+      futex_wake(&rw->reader_signal, INT_MAX);
+      return 0;
+    }
+  }
+  unlock_futex(&rw->spin);
+  return 0;
+}
+#endif
+
+        
+void
+rwlock_destroy(rwlock *rw)
+{
+#ifndef USE_FUTEX
+  destroy_semaphore((void **)&rw->reader_signal);
+  destroy_semaphore((void **)&rw->writer_signal);
+#endif
+  postGCfree((void *)(rw->malloced_ptr));
+}
+
+
+
Index: /branches/arm/lisp-kernel/unix-calls.c
===================================================================
--- /branches/arm/lisp-kernel/unix-calls.c	(revision 13357)
+++ /branches/arm/lisp-kernel/unix-calls.c	(revision 13357)
@@ -0,0 +1,143 @@
+/*
+   Copyright (C) 2008-2009, Clozure Associates and contributors
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+/* Provide wrappers around some standard C library functions that
+   can't easily be called from CCL's FFI for some reason (or where
+   we want to override/extend the function's default behavior.)
+ 
+   Functions in this file should be referenced via the kernel
+   imports table.
+
+   Callers should generally expect standard C library error-handling
+   conventions (e.g., return -1 or NULL and set errno on error.)
+*/
+
+#define _LARGEFILE64_SOURCE
+#include <errno.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <stdint.h>
+#include <signal.h>
+
+ssize_t
+lisp_read(int fd, void *buf, size_t count)
+{
+  return read(fd,buf,count);
+}
+
+ssize_t
+lisp_write(int fd, void *buf, size_t count)
+{
+  return write(fd,buf,count);
+}
+
+int
+lisp_open(char *path, int flags, mode_t mode)
+{
+  return open(path,flags,mode);
+}
+
+int
+lisp_fchmod(int fd, mode_t mode)
+{
+  return fchmod(fd,mode);
+}
+
+int64_t
+lisp_lseek(int fd, int64_t offset, int whence)
+{
+#ifdef LINUX
+  return lseek64(fd,offset,whence);
+#else
+  return lseek(fd,offset,whence);
+#endif
+}
+
+int
+lisp_close(int fd)
+{
+  return close(fd);
+}
+
+int
+lisp_ftruncate(int fd, off_t length)
+{
+  return ftruncate(fd,length);
+}
+
+int
+lisp_stat(char *path, void *buf)
+{
+  return stat(path,buf);
+}
+
+int
+lisp_fstat(int fd, void *buf)
+{
+  return fstat(fd,buf);
+}
+
+
+int
+lisp_futex(int *uaddr, int op, int val, void *timeout, int *uaddr2, int val3)
+{
+#ifdef LINUX
+  return syscall(SYS_futex,uaddr,op,val,timeout,uaddr2,val3);
+#else
+  errno = ENOSYS;
+  return -1;
+#endif
+}
+
+DIR *
+lisp_opendir(char *path)
+{
+  return opendir(path);
+}
+
+struct dirent *
+lisp_readdir(DIR *dir)
+{
+  return readdir(dir);
+}
+
+int
+lisp_closedir(DIR *dir)
+{
+  return closedir(dir);
+}
+
+int
+lisp_pipe(int pipefd[2])
+{
+  return pipe(pipefd);
+}
+
+int
+lisp_gettimeofday(struct timeval *tp, void *tzp)
+{
+  return gettimeofday(tp, tzp);
+}
+
+int
+lisp_sigexit(int signum)
+{
+  signal(signum, SIG_DFL);
+  return kill(getpid(), signum);
+}
Index: /branches/arm/lisp-kernel/win32/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/win32/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/win32/.gdbinit	(revision 13357)
@@ -0,0 +1,51 @@
+directory lisp-kernel
+
+define pl
+  call print_lisp_object($arg0)
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x3001
+   set $car = *((LispObj *)($l+3))
+   set $l =  *((LispObj *)($l-1))
+   pl $car
+  end
+end
+
+
+define fn
+  pl $edi
+end
+
+define arg_y
+ pl $esi
+end
+
+define arg_z
+ pl $ebx
+end
+
+define offset
+ p (int)$pc-$edi
+end
+
+
+break Bug
+break FBug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIGUSR1 pass nostop noprint
+handle SIGUSR2 pass nostop noprint
+handle SIGEMT pass nostop noprint
+# Work around apparent Apple GDB bug
+handle SIGTTIN nopass nostop noprint
+# Work around Leopard bug du jour
+handle SIGSYS pass nostop noprint
+
Index: /branches/arm/lisp-kernel/win32/Makefile
===================================================================
--- /branches/arm/lisp-kernel/win32/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/win32/Makefile	(revision 13357)
@@ -0,0 +1,112 @@
+#
+#   Copyright (C) 2008 Clozure Associates
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ../
+RM = /bin/rm
+CC = gcc
+AS = as
+M4 = m4
+LD = ld
+ASFLAGS = -g --32
+M4FLAGS = -DWIN_32 -DWINDOWS -DX86 -DX8632 -DWIN32_ES_HACK
+CDEFINES = -DWIN_32 -DWINDOWS -D_REENTRANT -DX86 -DX8632 -D_GNU_SOURCE  -D__MSVCRT__ -D__MSVCRT_VERSION__=0x700 -D_WIN32_WINNT=0x0502 -DWIN32_ES_HACK
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+# If the linker supports a "--hash-style=" option, use traditional
+# SysV hash tables.  (If it doesn't support that option, assume
+# that traditional hash tables will be used by default.)
+ld_has_hash_style = $(shell $(LD) --help | grep "hash-style=")
+ifeq ($(ld_has_hash_style),)
+HASH_STYLE=
+else
+HASH_STYLE="-Wl,--hash-style=sysv"
+endif
+
+CRT2 = $(shell $(CC) -mno-cygwin -print-file-name=crt2.o)
+
+# There may be some confusion about whether or not C symbols have
+# leading underscores or not.  The assembler sources seem to
+# expect them to and mingw import libs seem to use them, but
+# it's not clear whether or not native win64 libraries use this
+# convention (and I'm not sure whether the Cygwin-hosted win64
+# toolchain behaves the same way as when hosted on Linux ...
+# The compiler default seems to be to use them; if we want to
+# suppress their use, uncomment the following:
+
+SUPPRESS_UNDERSCORES=#-fno-leading-underscore
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) ${SUPPRESS_UNDERSCORES} -mno-cygwin -o $@
+
+# order matters: x86-spjump32.o must be first.
+SPOBJ = x86-spjump32.o x86-spentry32.o x86-subprims32.o
+ASMOBJ = x86-asmutils32.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o windows-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils32.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants32.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants32.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../wx86cl.exe
+
+# Order matters: libs that provide definitions must follow libs that
+# reference them.  (It's legal to use -lfoo multiple times to try to
+# work around this.)
+LIBGCC = $(shell $(CC) -mno-cygwin -print-libgcc-file-name)
+OSLIBS =  -L/usr/lib/w32api -L/mingw/lib -L/usr/lib/mingw\
+	-lm -lpsapi -lws2_32 -lmingw32 $(LIBGCC) -lmoldname -lmingwex \
+	-lmsvcrt -luser32 -lkernel32 -ladvapi32 -lshell32  $(GCCLIB) \
+	-lmoldname -lmingwex -lmsvcrt
+
+
+
+IMAGE_BASE =--image-base=0x10000
+
+../../wx86cl.exe: $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile
+	ld -o ../../wx86cl.exe  $(IMAGE_BASE) --enable-auto-import \
+	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(CRT2) $(OSLIBS) $(LIBGCC) $(LATELIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../wx86cl.exe
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../wx86cl.exe
+	strip -g ../../wx86cl.exe
Index: /branches/arm/lisp-kernel/win32/pei-ia32.x
===================================================================
--- /branches/arm/lisp-kernel/win32/pei-ia32.x	(revision 13357)
+++ /branches/arm/lisp-kernel/win32/pei-ia32.x	(revision 13357)
@@ -0,0 +1,205 @@
+OUTPUT_FORMAT(pei-i386)
+SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SECTIONS
+{
+  . = SIZEOF_HEADERS;
+  . = ALIGN(__section_alignment__);
+  .spfoo  __image_base__ + __section_alignment__ :
+  {
+    __spfoo_start__ = . ;
+    . = __spfoo_start__ + 0x10000 ;
+    __spfoo_end__ = . ;
+  }
+  .text  BLOCK(__section_alignment__) :
+  {
+     *(.init)
+    *(.text)
+    *(SORT(.text$*))
+    *(.glue_7t)
+    *(.glue_7)
+     ___CTOR_LIST__ = .; __CTOR_LIST__ = . ;
+			LONG (-1);*(.ctors); *(.ctor); *(SORT(.ctors.*));  LONG (0);
+     ___DTOR_LIST__ = .; __DTOR_LIST__ = . ;
+			LONG (-1); *(.dtors); *(.dtor); *(SORT(.dtors.*));  LONG (0);
+     *(.fini)
+    /* ??? Why is .gcc_exc here?  */
+     *(.gcc_exc)
+    PROVIDE (etext = .);
+     *(.gcc_except_table)
+  }
+  /* The Cygwin32 library uses a section to avoid copying certain data
+     on fork.  This used to be named ".data".  The linker used
+     to include this between __data_start__ and __data_end__, but that
+     breaks building the cygwin32 dll.  Instead, we name the section
+     ".data_cygwin_nocopy" and explicitly include it after __data_end__. */
+  .data BLOCK(__section_alignment__) :
+  {
+    __data_start__ = . ;
+    *(.data)
+    *(.data2)
+    *(SORT(.data$*))
+    *(.jcr)
+    __data_end__ = . ;
+    *(.data_cygwin_nocopy)
+  }
+  .rdata BLOCK(__section_alignment__) :
+  {
+    *(.rdata)
+             *(SORT(.rdata$*))
+     *(.eh_frame)
+    ___RUNTIME_PSEUDO_RELOC_LIST__ = .;
+    __RUNTIME_PSEUDO_RELOC_LIST__ = .;
+    *(.rdata_runtime_pseudo_reloc)
+    ___RUNTIME_PSEUDO_RELOC_LIST_END__ = .;
+    __RUNTIME_PSEUDO_RELOC_LIST_END__ = .;
+  }
+  .pdata BLOCK(__section_alignment__) :
+  {
+    *(.pdata)
+  }
+  .bss BLOCK(__section_alignment__) :
+  {
+    __bss_start__ = . ;
+    *(.bss)
+    *(COMMON)
+    __bss_end__ = . ;
+  }
+  .edata BLOCK(__section_alignment__) :
+  {
+    *(.edata)
+  }
+  /DISCARD/ :
+  {
+    *(.debug$S)
+    *(.debug$T)
+    *(.debug$F)
+    *(.drectve)
+  }
+  .idata BLOCK(__section_alignment__) :
+  {
+    /* This cannot currently be handled with grouped sections.
+	See pe.em:sort_sections.  */
+    SORT(*)(.idata$2)
+    SORT(*)(.idata$3)
+    /* These zeroes mark the end of the import list.  */
+    LONG (0); LONG (0); LONG (0); LONG (0); LONG (0);
+    SORT(*)(.idata$4)
+    SORT(*)(.idata$5)
+    SORT(*)(.idata$6)
+    SORT(*)(.idata$7)
+  }
+  .CRT BLOCK(__section_alignment__) :
+  {
+    ___crt_xc_start__ = . ;
+    *(SORT(.CRT$XC*))  /* C initialization */
+    ___crt_xc_end__ = . ;
+    ___crt_xi_start__ = . ;
+    *(SORT(.CRT$XI*))  /* C++ initialization */
+    ___crt_xi_end__ = . ;
+    ___crt_xl_start__ = . ;
+    *(SORT(.CRT$XL*))  /* TLS callbacks */
+    /* ___crt_xl_end__ is defined in the TLS Directory support code */
+    ___crt_xp_start__ = . ;
+    *(SORT(.CRT$XP*))  /* Pre-termination */
+    ___crt_xp_end__ = . ;
+    ___crt_xt_start__ = . ;
+    *(SORT(.CRT$XT*))  /* Termination */
+    ___crt_xt_end__ = . ;
+  }
+  .tls BLOCK(__section_alignment__) :
+  {
+    ___tls_start__ = . ;
+    *(.tls)
+    *(.tls$)
+    *(SORT(.tls$*))
+    ___tls_end__ = . ;
+  }
+  .endjunk BLOCK(__section_alignment__) :
+  {
+    /* end is deprecated, don't use it */
+    PROVIDE (end = .);
+    PROVIDE ( _end = .);
+     __end__ = .;
+  }
+  .rsrc BLOCK(__section_alignment__) :
+  {
+    *(.rsrc)
+    *(SORT(.rsrc$*))
+  }
+  .reloc BLOCK(__section_alignment__) :
+  {
+    *(.reloc)
+  }
+  .stab BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.stab)
+  }
+  .stabstr BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.stabstr)
+  }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section.  Unlike other targets that fake this by putting the
+     section VMA at 0, the PE format will not allow it.  */
+  /* DWARF 1.1 and DWARF 2.  */
+  .debug_aranges BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_aranges)
+  }
+  .debug_pubnames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_pubnames)
+  }
+  /* DWARF 2.  */
+  .debug_info BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_info) *(.gnu.linkonce.wi.*)
+  }
+  .debug_abbrev BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_abbrev)
+  }
+  .debug_line BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_line)
+  }
+  .debug_frame BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_frame)
+  }
+  .debug_str BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_str)
+  }
+  .debug_loc BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_loc)
+  }
+  .debug_macinfo BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_macinfo)
+  }
+  /* SGI/MIPS DWARF 2 extensions.  */
+  .debug_weaknames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_weaknames)
+  }
+  .debug_funcnames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_funcnames)
+  }
+  .debug_typenames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_typenames)
+  }
+  .debug_varnames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_varnames)
+  }
+  /* DWARF 3.  */
+  .debug_ranges BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_ranges)
+  }
+}
Index: /branches/arm/lisp-kernel/win64/.gdbinit
===================================================================
--- /branches/arm/lisp-kernel/win64/.gdbinit	(revision 13357)
+++ /branches/arm/lisp-kernel/win64/.gdbinit	(revision 13357)
@@ -0,0 +1,85 @@
+directory lisp-kernel
+
+define x86_lisp_string
+x/s $arg0-5
+end
+
+define gtra
+br *$r10
+cont
+end
+
+define x86pname
+set $temp=*((long *)((long)($arg0-6)))
+x86_lisp_string $temp
+end
+
+
+define pname
+ x86pname $arg0
+end
+
+define l
+ call print_lisp_object($arg0)
+end
+
+define lw
+ l $r13
+end
+
+define clobber_breakpoint
+  set *(short *)($pc-2)=0x9090
+end
+
+define arg_z
+ l $rsi
+end
+
+define arg_y
+ l $rdi
+end
+
+define arg_x
+ l $r8
+end
+
+define bx
+ l $rbx
+end
+
+define showlist
+  set $l=$arg0
+  while $l != 0x200b
+   set $car = *((LispObj *)($l+5))
+   set $l =  *((LispObj *)($l-3))
+   l $car
+  end
+end
+
+define lbt
+ call plbt_sp($rbp)
+end
+
+define ada
+ p/x *(all_areas->succ)
+end
+
+define lregs
+ call debug_lisp_registers($arg0,0,0)
+end
+
+break Bug
+
+display/i $pc
+
+handle SIGKILL pass nostop noprint
+handle SIGILL pass nostop noprint
+handle SIGSEGV pass nostop noprint
+handle SIGBUS pass nostop noprint
+handle SIGFPE pass nostop noprint
+handle SIG40 pass nostop noprint
+handle SIG41 pass nostop noprint
+handle SIG42 pass nostop noprint
+handle SIGPWR pass nostop noprint
+handle SIGQUIT pass nostop noprint
+
Index: /branches/arm/lisp-kernel/win64/Makefile
===================================================================
--- /branches/arm/lisp-kernel/win64/Makefile	(revision 13357)
+++ /branches/arm/lisp-kernel/win64/Makefile	(revision 13357)
@@ -0,0 +1,102 @@
+#
+#   Copyright (C) 2007 Clozure Associates
+#   This file is part of Clozure CL.  
+#
+#   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with Clozure CL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with Clozure CL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   Clozure CL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+VPATH = ../
+RM = /bin/rm
+# gcc64, as64: until there's a real win64 gcc, assume that gcc and gas
+# are installed under these names
+CC = x86_64-w64-mingw32-gcc
+AS = x86_64-w64-mingw32-as
+M4 = m4
+LD = x86_64-w64-mingw32-ld
+ASFLAGS = -g --64
+M4FLAGS = -DWIN_64 -DWINDOWS -DX86 -DX8664 -DHAVE_TLS -DEMUTLS -DTCR_IN_GPR
+CDEFINES = -DWIN_64 -DWINDOWS -D_REENTRANT -DX86 -DX8664 -D_GNU_SOURCE -DHAVE_TLS -DEMUTLS -DTCR_IN_GPR
+CDEBUG = -g
+COPT = -O2
+# Once in a while, -Wformat says something useful.  The odds are against that,
+# however.
+WFORMAT = -Wno-format
+
+
+# If the linker supports a "--hash-style=" option, use traditional
+# SysV hash tables.  (If it doesn't support that option, assume
+# that traditional hash tables will be used by default.)
+ld_has_hash_style = $(shell $(LD) --help | grep "hash-style=")
+ifeq ($(ld_has_hash_style),)
+HASH_STYLE=
+else
+HASH_STYLE="-Wl,--hash-style=sysv"
+endif
+
+# There may be some confusion about whether or not C symbols have
+# leading underscores or not.  The assembler sources seem to
+# expect them to and mingw import libs seem to use them, but
+# it's not clear whether or not native win64 libraries use this
+# convention (and I'm not sure whether the Cygwin-hosted win64
+# toolchain behaves the same way as when hosted on Linux ...
+# The compiler default seems to be to use them; if we want to
+# suppress their use, uncomment the following:
+
+SUPPRESS_UNDERSCORES=#-fno-leading-underscore
+
+.s.o:
+	$(M4) $(M4FLAGS) -I../ $< | $(AS)  $(ASFLAGS) -o $@
+.c.o:
+	$(CC) -c $< $(CDEFINES) $(CDEBUG) $(COPT) $(WFORMAT) ${SUPPRESS_UNDERSCORES} -m64 -o $@
+
+SPOBJ = pad.o x86-spjump64.o x86-spentry64.o x86-subprims64.o
+ASMOBJ = x86-asmutils64.o imports.o
+
+COBJ  = pmcl-kernel.o gc-common.o x86-gc.o bits.o  x86-exceptions.o \
+	image.o thread_manager.o lisp-debug.o memory.o windows-calls.o
+
+DEBUGOBJ = lispdcmd.o plprint.o plsym.o xlbt.o x86_print.o
+KERNELOBJ= $(COBJ) x86-asmutils64.o  imports.o
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants64.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants64.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	../../wx86cl64.exe
+
+
+OSLIBS = -lpsapi -lws2_32
+
+
+../../wx86cl64.exe:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile pei-x86-64.x
+	$(CC) -Wl,--image-base=0x10000 -Wl,-script=pei-x86-64.x -m64 $(CDEBUG)  -Wl,--export-dynamic $(HASH_STYLE) -o $@ $(USE_LINK_MAP) $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) -f $(KERNELOBJ) $(DEBUGOBJ) ../../wx86cl64.exe
+
+clean:	cclean
+	$(RM) -f $(SPOBJ)
+
+strip:	../../wx86cl64.exe
+	strip -g ../../wx86cl64.exe
Index: /branches/arm/lisp-kernel/win64/Makefile.nmake
===================================================================
--- /branches/arm/lisp-kernel/win64/Makefile.nmake	(revision 13357)
+++ /branches/arm/lisp-kernel/win64/Makefile.nmake	(revision 13357)
@@ -0,0 +1,83 @@
+#
+#   Copyright (C) 2008 Clozure Associates
+#   This file is part of OpenMCL.  
+#
+#   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
+#   License , known as the LLGPL and distributed with OpenMCL as the
+#   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+#   which is distributed with OpenMCL as the file "LGPL".  Where these
+#   conflict, the preamble takes precedence.  
+#
+#   OpenMCL is referenced in the preamble as the "LIBRARY."
+#
+#   The LLGPL is also available online at
+#   http://opensource.franz.com/preamble.html
+
+
+# With nmake, VPATH is broken.  Invoke from the directory above:
+#
+# Z:\ccl-source\lisp-kernel> nmake /f win64/Makefile.nmake
+
+RM = del
+CC = cl
+M4 = m4
+MV = move
+LD = link
+ASFLAGS = -f win64 -g cv8 -p gas
+AS = yasm
+M4FLAGS = -DWIN64 -DWINDOWS -DX86 -DX8664 -DHAVE_TLS -DEMUTLS
+CDEFINES = /I.. /DWIN64 /DWINDOWS /D_REENTRANT /DX86 /DX8664 /D_GNU_SOURCE /DHAVE_TLS /DEMUTLS /DVC #-DDISABLE_EGC
+CDEBUG = /Zi
+COPT =
+LDFLAGS =
+
+.SUFFIXES : .exe .obj .c .asm .s
+
+.s.asm:
+	$(M4) $(M4FLAGS) -I. $< > $@.temp && mv $@.temp $@
+.asm.obj:
+	$(AS) $(ASFLAGS) -o $@ $<
+.c.obj:
+	$(CC) /c $< $(CDEFINES) $(CDEBUG) $(COPT)
+
+SPOBJ = pad.obj x86-spjump64.obj x86-spentry64.obj x86-subprims64.obj
+ASMOBJ = x86-asmutils64.obj imports.obj
+
+COBJ  = pmcl-kernel.obj gc-common.obj x86-gc.obj bits.obj  x86-exceptions.obj \
+	image.obj thread_manager.obj lisp-debug.obj memory.obj
+
+DEBUGOBJ = lispdcmd.obj plprint.obj plsym.obj xlbt.obj x86_print.obj
+KERNELOBJ= $(COBJ) x86-asmutils64.obj  imports.obj
+
+SPINC =	lisp.s m4macros.m4 x86-constants.s x86-macros.s errors.s x86-uuo.s \
+	x86-constants64.s
+
+CHEADERS = area.h bits.h x86-constants.h lisp-errors.h gc.h lisp.h \
+	lisp-exceptions.h lisp_globals.h macros.h memprotect.h image.h \
+	Threads.h x86-constants64.h x86-exceptions.h lisptypes.h
+
+
+KSPOBJ = $(SPOBJ)
+all:	..\wx86cl64
+
+OSLIBS = bufferoverflowu.lib
+
+..\wx86cl64:	$(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) Makefile 
+	$(LD) $(LDFLAGS) /out:$@ $(KSPOBJ) $(KERNELOBJ) $(DEBUGOBJ) $(OSLIBS)
+
+cobjs: $(COBJ)
+
+$(SPOBJ): $(SPINC)
+$(ASMOBJ): $(SPINC)
+$(COBJ): $(CHEADERS)
+$(DEBUGOBJ): $(CHEADERS) lispdcmd.h
+
+
+cclean:
+	$(RM) $(KERNELOBJ) $(DEBUGOBJ) ../../wx86cl64
+
+clean:	cclean
+	$(RM) $(SPOBJ)
+
+strip:	../../wx86cl64
+	strip -g ../../wx86cl64
Index: /branches/arm/lisp-kernel/win64/pei-x86-64.x
===================================================================
--- /branches/arm/lisp-kernel/win64/pei-x86-64.x	(revision 13357)
+++ /branches/arm/lisp-kernel/win64/pei-x86-64.x	(revision 13357)
@@ -0,0 +1,205 @@
+OUTPUT_FORMAT(pei-x86-64)
+SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib"); SEARCH_DIR("=/usr/lib");
+SECTIONS
+{
+  . = SIZEOF_HEADERS;
+  . = ALIGN(__section_alignment__);
+  .spfoo  __image_base__ + __section_alignment__ :
+  {
+    __spfoo_start__ = . ;
+    . = __spfoo_start__ + 0x10000 ;
+    __spfoo_end__ = . ;
+  }
+  .text  BLOCK(__section_alignment__) :
+  {
+     *(.init)
+    *(.text)
+    *(SORT(.text$*))
+    *(.glue_7t)
+    *(.glue_7)
+     ___CTOR_LIST__ = .; __CTOR_LIST__ = . ;
+			LONG (-1); LONG (-1);*(.ctors); *(.ctor); *(SORT(.ctors.*));  LONG (0); LONG (0);
+     ___DTOR_LIST__ = .; __DTOR_LIST__ = . ;
+			LONG (-1); LONG (-1); *(.dtors); *(.dtor); *(SORT(.dtors.*));  LONG (0); LONG (0);
+     *(.fini)
+    /* ??? Why is .gcc_exc here?  */
+     *(.gcc_exc)
+    PROVIDE (etext = .);
+     *(.gcc_except_table)
+  }
+  /* The Cygwin32 library uses a section to avoid copying certain data
+     on fork.  This used to be named ".data".  The linker used
+     to include this between __data_start__ and __data_end__, but that
+     breaks building the cygwin32 dll.  Instead, we name the section
+     ".data_cygwin_nocopy" and explicitly include it after __data_end__. */
+  .data BLOCK(__section_alignment__) :
+  {
+    __data_start__ = . ;
+    *(.data)
+    *(.data2)
+    *(SORT(.data$*))
+    *(.jcr)
+    __data_end__ = . ;
+    *(.data_cygwin_nocopy)
+  }
+  .rdata BLOCK(__section_alignment__) :
+  {
+    *(.rdata)
+             *(SORT(.rdata$*))
+     *(.eh_frame)
+    ___RUNTIME_PSEUDO_RELOC_LIST__ = .;
+    __RUNTIME_PSEUDO_RELOC_LIST__ = .;
+    *(.rdata_runtime_pseudo_reloc)
+    ___RUNTIME_PSEUDO_RELOC_LIST_END__ = .;
+    __RUNTIME_PSEUDO_RELOC_LIST_END__ = .;
+  }
+  .pdata BLOCK(__section_alignment__) :
+  {
+    *(.pdata)
+  }
+  .bss BLOCK(__section_alignment__) :
+  {
+    __bss_start__ = . ;
+    *(.bss)
+    *(COMMON)
+    __bss_end__ = . ;
+  }
+  .edata BLOCK(__section_alignment__) :
+  {
+    *(.edata)
+  }
+  /DISCARD/ :
+  {
+    *(.debug$S)
+    *(.debug$T)
+    *(.debug$F)
+    *(.drectve)
+  }
+  .idata BLOCK(__section_alignment__) :
+  {
+    /* This cannot currently be handled with grouped sections.
+	See pep.em:sort_sections.  */
+    SORT(*)(.idata$2)
+    SORT(*)(.idata$3)
+    /* These zeroes mark the end of the import list.  */
+    LONG (0); LONG (0); LONG (0); LONG (0); LONG (0);
+    SORT(*)(.idata$4)
+    SORT(*)(.idata$5)
+    SORT(*)(.idata$6)
+    SORT(*)(.idata$7)
+  }
+  .CRT BLOCK(__section_alignment__) :
+  {
+    ___crt_xc_start__ = . ;
+    *(SORT(.CRT$XC*))  /* C initialization */
+    ___crt_xc_end__ = . ;
+    ___crt_xi_start__ = . ;
+    *(SORT(.CRT$XI*))  /* C++ initialization */
+    ___crt_xi_end__ = . ;
+    ___crt_xl_start__ = . ;
+    *(SORT(.CRT$XL*))  /* TLS callbacks */
+    /* ___crt_xl_end__ is defined in the TLS Directory support code */
+    ___crt_xp_start__ = . ;
+    *(SORT(.CRT$XP*))  /* Pre-termination */
+    ___crt_xp_end__ = . ;
+    ___crt_xt_start__ = . ;
+    *(SORT(.CRT$XT*))  /* Termination */
+    ___crt_xt_end__ = . ;
+  }
+  .tls BLOCK(__section_alignment__) :
+  {
+    ___tls_start__ = . ;
+    *(.tls)
+    *(.tls$)
+    *(SORT(.tls$*))
+    ___tls_end__ = . ;
+  }
+  .endjunk BLOCK(__section_alignment__) :
+  {
+    /* end is deprecated, don't use it */
+    PROVIDE (end = .);
+    PROVIDE ( _end = .);
+     __end__ = .;
+  }
+  .rsrc BLOCK(__section_alignment__) :
+  {
+    *(.rsrc)
+    *(SORT(.rsrc$*))
+  }
+  .reloc BLOCK(__section_alignment__) :
+  {
+    *(.reloc)
+  }
+  .stab BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.stab)
+  }
+  .stabstr BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.stabstr)
+  }
+  /* DWARF debug sections.
+     Symbols in the DWARF debugging sections are relative to the beginning
+     of the section.  Unlike other targets that fake this by putting the
+     section VMA at 0, the PE format will not allow it.  */
+  /* DWARF 1.1 and DWARF 2.  */
+  .debug_aranges BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_aranges)
+  }
+  .debug_pubnames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_pubnames)
+  }
+  /* DWARF 2.  */
+  .debug_info BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_info) *(.gnu.linkonce.wi.*)
+  }
+  .debug_abbrev BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_abbrev)
+  }
+  .debug_line BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_line)
+  }
+  .debug_frame BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_frame)
+  }
+  .debug_str BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_str)
+  }
+  .debug_loc BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_loc)
+  }
+  .debug_macinfo BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_macinfo)
+  }
+  /* SGI/MIPS DWARF 2 extensions.  */
+  .debug_weaknames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_weaknames)
+  }
+  .debug_funcnames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_funcnames)
+  }
+  .debug_typenames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_typenames)
+  }
+  .debug_varnames BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_varnames)
+  }
+  /* DWARF 3.  */
+  .debug_ranges BLOCK(__section_alignment__) (NOLOAD) :
+  {
+    *(.debug_ranges)
+  }
+}
Index: /branches/arm/lisp-kernel/win64/yasm-redefinition.patch
===================================================================
--- /branches/arm/lisp-kernel/win64/yasm-redefinition.patch	(revision 13357)
+++ /branches/arm/lisp-kernel/win64/yasm-redefinition.patch	(revision 13357)
@@ -0,0 +1,22 @@
+Index: libyasm/symrec.c
+===================================================================
+--- libyasm/symrec.c	(revision 2037)
++++ libyasm/symrec.c	(working copy)
+@@ -281,10 +281,15 @@
+ yasm_symtab_define_equ(yasm_symtab *symtab, const char *name, yasm_expr *e,
+                        unsigned long line)
+ {
+-    yasm_symrec *rec = symtab_define(symtab, name, SYM_EQU, 1, line);
++    yasm_symrec *rec = yasm_symtab_get(symtab, name);
++	if (rec) {
++		/* redefinition. Emit warning here. */
++	} else {
++		rec = symtab_define(symtab, name, SYM_EQU, 1, line);
++	}
+     if (yasm_error_occurred())
+         return rec;
+-    rec->value.expn = e;
++    rec->value.expn = yasm_expr_simplify(e, 1);
+     rec->status |= YASM_SYM_VALUED;
+     return rec;
+ }
Index: /branches/arm/lisp-kernel/windows-calls.c
===================================================================
--- /branches/arm/lisp-kernel/windows-calls.c	(revision 13357)
+++ /branches/arm/lisp-kernel/windows-calls.c	(revision 13357)
@@ -0,0 +1,1015 @@
+/*
+   Copyright (C) 2008-2009, Clozure Associates and contributors,
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "x86-exceptions.h"
+#include <io.h>
+#include <unistd.h>
+#include <sys/fcntl.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <windows.h>
+#include <psapi.h>
+#include <dirent.h>
+#include <signal.h>
+#undef __argv
+#include <stdio.h>
+#include <math.h>
+
+#ifndef WIN_32
+#define _dosmaperr mingw_dosmaperr
+#else
+/*
+  Translate a Windows error code (as returned by GetLastError())
+  into the nearest errno value and store it in errno.  The win32 CRT
+  doesn't export _dosmaperr(), so we provide our own; on win64 the
+  mingw version is used instead (see the #define above).  Codes with
+  no obvious mapping become EINVAL.
+*/
+void
+_dosmaperr(unsigned long oserrno)
+{
+  switch(oserrno) {
+  case  ERROR_INVALID_FUNCTION:
+    errno = EINVAL;
+    break;
+  case ERROR_FILE_NOT_FOUND:
+    errno = ENOENT;
+    break;
+  case ERROR_PATH_NOT_FOUND:
+    errno = ENOENT;
+    break;
+  case  ERROR_TOO_MANY_OPEN_FILES:
+    errno = EMFILE;
+    break;
+  case  ERROR_ACCESS_DENIED:
+    errno = EACCES;
+    break;
+  case  ERROR_ARENA_TRASHED:
+    errno = ENOMEM;
+    break;
+  case  ERROR_NOT_ENOUGH_MEMORY:
+    errno = ENOMEM;
+    break;
+  case  ERROR_INVALID_BLOCK:
+    errno = ENOMEM;
+    break;
+  case  ERROR_BAD_ENVIRONMENT:
+    errno = E2BIG;
+    break;
+  case  ERROR_BAD_FORMAT:
+    errno = ENOEXEC;
+    break;
+  case  ERROR_INVALID_ACCESS:
+    errno = EINVAL;
+    break;
+  case  ERROR_INVALID_DATA:
+    errno = EINVAL;
+    break;
+  case  ERROR_INVALID_DRIVE:
+    errno = ENOENT;
+    break;
+  case  ERROR_CURRENT_DIRECTORY:
+    errno = EACCES;
+    break;
+  case  ERROR_NOT_SAME_DEVICE:
+    errno = EXDEV;
+    break;
+  case  ERROR_NO_MORE_FILES:
+    errno = ENOENT;
+    break;
+  case  ERROR_LOCK_VIOLATION:
+    errno = EACCES;
+    break;
+  case  ERROR_BAD_NETPATH:
+    errno = ENOENT;
+    break;
+  case  ERROR_NETWORK_ACCESS_DENIED:
+    errno = EACCES;
+    break;
+  case  ERROR_BAD_NET_NAME:
+    errno = ENOENT;
+    break;
+  case  ERROR_FILE_EXISTS:
+    errno = EEXIST;
+    break;
+  case  ERROR_CANNOT_MAKE:
+    errno = EACCES;
+    break;
+  case  ERROR_FAIL_I24:
+    errno = EACCES;
+    break;
+  case  ERROR_INVALID_PARAMETER:
+    errno = EINVAL;
+    break;
+  case  ERROR_NO_PROC_SLOTS:
+    errno = EAGAIN;
+    break;
+  case  ERROR_DRIVE_LOCKED:
+    errno = EACCES;
+    break;
+  case  ERROR_BROKEN_PIPE:
+    errno = EPIPE;
+    break;
+  case  ERROR_DISK_FULL:
+    errno = ENOSPC;
+    break;
+  case  ERROR_INVALID_TARGET_HANDLE:
+    errno = EBADF;
+    break;
+  case  ERROR_INVALID_HANDLE:
+    errno = EINVAL;
+    break;
+  case  ERROR_WAIT_NO_CHILDREN:
+    errno = ECHILD;
+    break;
+  case  ERROR_CHILD_NOT_COMPLETE:
+    errno = ECHILD;
+    break;
+  case  ERROR_DIRECT_ACCESS_HANDLE:
+    errno = EBADF;
+    break;
+  case  ERROR_NEGATIVE_SEEK:
+    errno = EINVAL;
+    break;
+  case  ERROR_SEEK_ON_DEVICE:   
+    errno = EACCES;
+    break;
+  case  ERROR_DIR_NOT_EMPTY:
+    errno = ENOTEMPTY;
+    break;
+  case  ERROR_NOT_LOCKED:
+    errno = EACCES;
+    break;
+  case  ERROR_BAD_PATHNAME:
+    errno = ENOENT;
+    break;
+  case  ERROR_MAX_THRDS_REACHED:
+    errno = EAGAIN;
+    break;
+  case  ERROR_LOCK_FAILED:
+    errno = EACCES;
+    break;
+  case  ERROR_ALREADY_EXISTS:
+    errno = EEXIST;
+    break;
+  case  ERROR_FILENAME_EXCED_RANGE:
+    errno = ENOENT;
+    break;
+  case  ERROR_NESTING_NOT_ALLOWED:
+    errno = EAGAIN;
+    break;
+  case  ERROR_NOT_ENOUGH_QUOTA:
+    errno = ENOMEM;
+    break;
+  case ERROR_OPERATION_ABORTED:
+    errno = EINTR;
+    break;
+  default:
+    errno = EINVAL;
+    break;
+  }
+}
+    
+#endif
+
+#define MAX_FD 32
+
+/*
+  open() analogue: open PATH via CreateFileW(), translating the
+  CRT-style FLAG bits (O_RDONLY/O_WRONLY/O_RDWR, O_CREAT, O_EXCL,
+  O_TRUNC, O_APPEND, O_RANDOM, O_SEQUENTIAL, O_TEMPORARY,
+  O_SHORT_LIVED, O_NOINHERIT) into CreateFile's access mode,
+  creation disposition, and flags.  MODE is accepted for open()
+  compatibility but is unused.  Returns the new HANDLE, or
+  (HANDLE)-1 with errno set via _dosmaperr().
+*/
+HANDLE
+lisp_open(wchar_t *path, int flag, int mode)
+{
+  HANDLE hfile;
+  DWORD dwDesiredAccess = 0;
+  DWORD dwShareMode = 0;
+  DWORD dwCreationDistribution = 0;
+  DWORD dwFlagsAndAttributes = 0;
+  SECURITY_ATTRIBUTES sa = {sizeof(SECURITY_ATTRIBUTES), NULL, TRUE};
+
+  dwShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+
+  if ((flag & _O_WRONLY) == _O_WRONLY) {
+    dwDesiredAccess |= GENERIC_WRITE;
+  } else if ((flag & _O_RDWR) == _O_RDWR) {
+    dwDesiredAccess |= GENERIC_WRITE|GENERIC_READ;
+  } else {
+    dwDesiredAccess |= GENERIC_READ;
+  }
+
+  if ((flag & (_O_CREAT | _O_EXCL)) == (_O_CREAT | _O_EXCL)) {
+    dwCreationDistribution |= CREATE_NEW;
+  } else if ((flag &  O_TRUNC) == O_TRUNC) {
+    if ((flag &  O_CREAT) ==  O_CREAT) {
+      dwCreationDistribution |= CREATE_ALWAYS;
+    } else if ((flag & (_O_WRONLY | _O_RDWR)) != 0) {
+      /* O_RDONLY is 0, so the previous test
+         "(flag & O_RDONLY) != O_RDONLY" was always false and
+         O_TRUNC without O_CREAT never truncated.  Test the writable
+         access bits instead (TRUNCATE_EXISTING also requires
+         GENERIC_WRITE, which is set in that case). */
+      dwCreationDistribution |= TRUNCATE_EXISTING;
+    }
+  } else if ((flag & _O_APPEND) == _O_APPEND) {
+    dwCreationDistribution |= OPEN_EXISTING;
+  } else if ((flag &  _O_CREAT) == _O_CREAT) {
+    dwCreationDistribution |= OPEN_ALWAYS;
+  } else {
+    dwCreationDistribution |= OPEN_EXISTING;
+  }
+  if ((flag &  _O_RANDOM) == _O_RANDOM) {
+    dwFlagsAndAttributes |= FILE_FLAG_RANDOM_ACCESS;
+  }
+  if ((flag &  _O_SEQUENTIAL) == _O_SEQUENTIAL) {
+    dwFlagsAndAttributes |= FILE_FLAG_SEQUENTIAL_SCAN;
+  }
+
+  if ((flag &  _O_TEMPORARY) == _O_TEMPORARY) {
+    dwFlagsAndAttributes |= FILE_FLAG_DELETE_ON_CLOSE;
+  }
+
+  if ((flag &  _O_SHORT_LIVED) == _O_SHORT_LIVED) {
+    dwFlagsAndAttributes |= FILE_FLAG_DELETE_ON_CLOSE;
+  }
+
+  if (flag & _O_NOINHERIT) {
+    sa.bInheritHandle = FALSE;
+  }
+
+  hfile = CreateFileW(path,
+                      dwDesiredAccess,
+                      dwShareMode,
+                      &sa,
+                      dwCreationDistribution,
+                      dwFlagsAndAttributes,
+                      NULL);
+  if (hfile == ((HANDLE)-1)) {
+    _dosmaperr(GetLastError());
+    return (HANDLE)-1;
+  }
+  return hfile;
+}
+
+/* Like _wopen(): open PATH and return a CRT file descriptor for it,
+   or -1 (with errno set) on failure. */
+int
+wopen(wchar_t *path, int flag, int mode)
+{
+  HANDLE hfile;
+
+  hfile = lisp_open(path, flag, mode);
+  if (hfile != (HANDLE)-1) {
+    return _open_osfhandle((intptr_t)hfile, 0);
+  }
+  return -1;                    /* lisp_open() already set errno */
+}
+
+/* Close HFILE, which may name either a socket or an ordinary
+   kernel object.  Returns 0 on success, -1 with errno set. */
+int
+lisp_close(HANDLE hfile)
+{
+  int werr;
+
+  /* Try the socket path first; closesocket() fails with
+     WSAENOTSOCK when the handle isn't actually a socket. */
+  if (closesocket((SOCKET)hfile) == 0) {
+    return 0;
+  }
+  werr = WSAGetLastError();
+  if (werr == WSAENOTSOCK) {
+    if (CloseHandle(hfile)) {
+      return 0;
+    }
+    werr = GetLastError();
+  }
+  _dosmaperr(werr);
+  return -1;
+}
+
+extern TCR *get_tcr(int);
+
+/*
+  Overlapped read from HFILE.  Disk files need an explicit file
+  position in the OVERLAPPED structure, so the handle's current
+  position is captured first.  The pending operation is published in
+  the calling thread's TCR (pending_io_info) -- presumably so other
+  threads can locate and cancel it when interrupting this thread;
+  confirm against the interrupt/exception code.  The wait is
+  alertable: if an APC runs, the I/O is cancelled and the call fails
+  with EINTR.  Returns bytes read, 0 at EOF, or -1 with errno set.
+*/
+ssize_t
+lisp_standard_read(HANDLE hfile, void *buf, unsigned int count)
+{
+  HANDLE hevent;
+  OVERLAPPED overlapped;
+  DWORD err, nread, wait_result;
+  pending_io pending;
+  TCR *tcr;
+  
+  
+  memset(&overlapped,0,sizeof(overlapped));
+
+  if (GetFileType(hfile) == FILE_TYPE_DISK) {
+    /* Overlapped reads on disk files ignore the handle's seek
+       position; supply it explicitly. */
+    overlapped.Offset = SetFilePointer(hfile, 0, &(overlapped.OffsetHigh), FILE_CURRENT);
+  }
+
+  tcr = (TCR *)get_tcr(1);
+  pending.h = hfile;
+  pending.o = &overlapped;
+  tcr->pending_io_info = &pending;
+  hevent = (HANDLE)(tcr->io_datum);
+  overlapped.hEvent = hevent;
+  ResetEvent(hevent);
+  if (ReadFile(hfile, buf, count, &nread, &overlapped)) {
+    /* Completed synchronously. */
+    tcr->pending_io_info = NULL;
+    return nread;
+  }
+
+  err = GetLastError();
+  
+  if (err == ERROR_HANDLE_EOF) {
+    tcr->pending_io_info = NULL;
+    return 0;
+  }
+
+  if (err != ERROR_IO_PENDING) {
+    _dosmaperr(err);
+    tcr->pending_io_info = NULL;
+    return -1;
+  }
+  
+  err = 0;
+  
+  /* We block here */    
+  wait_result = WaitForSingleObjectEx(hevent, INFINITE, true);
+
+
+
+  tcr->pending_io_info = NULL;
+  if (wait_result == WAIT_OBJECT_0) {
+    /* overlapped.Internal holds the completion status,
+       InternalHigh the transfer count. */
+    err = overlapped.Internal;
+    if (err == ERROR_HANDLE_EOF) {
+      return 0;
+    }
+    if (err) {
+      _dosmaperr(err);
+      return -1;
+    }
+    return overlapped.InternalHigh;
+  }
+
+  if (wait_result == WAIT_IO_COMPLETION) {
+    /* An APC ran while we were waiting: cancel the read and
+       report an interrupted call. */
+    CancelIo(hfile);
+    errno = EINTR;
+    return -1;
+  }
+  err = GetLastError();
+  
+
+  switch (err) {
+  case ERROR_HANDLE_EOF: 
+    return 0;
+  default:
+    _dosmaperr(err);
+    return -1;
+  }
+}
+
+/*
+  Read from a pipe handle.  Poll with PeekNamedPipe() until data is
+  available, sleeping alertably in 50ms increments so that queued
+  APCs (used to interrupt the thread) can run, then do the real
+  read.  Returns bytes read, 0 at EOF, or -1 with errno set.
+*/
+ssize_t
+pipe_read(HANDLE hfile, void *buf, unsigned int count)
+{
+  DWORD navail, err;
+
+  do {
+    navail = 0;
+    if (PeekNamedPipe(hfile, NULL, 0, NULL, &navail, NULL) == 0) {
+      err = GetLastError();
+      /* This previously read "if (err = ERROR_HANDLE_EOF)" -- an
+         assignment -- so every PeekNamedPipe() failure was
+         misreported as EOF.  A pipe whose write end has closed
+         fails with ERROR_BROKEN_PIPE, which is EOF for our
+         purposes. */
+      if ((err == ERROR_HANDLE_EOF) || (err == ERROR_BROKEN_PIPE)) {
+        return 0;
+      } else {
+        _dosmaperr(err);
+        return -1;
+      }
+    }
+    if (navail != 0) {
+      return lisp_standard_read(hfile, buf, count);
+    }
+    /* Sleep alertably; WAIT_IO_COMPLETION means an APC ran, which
+       we treat as an interrupted system call. */
+    if (SleepEx(50, TRUE) == WAIT_IO_COMPLETION) {
+      errno = EINTR;
+      return -1;
+    }
+  } while (1);
+}
+
+/*
+  Read from a console handle.  Wait (alertably) until console input
+  is available, discard any queued non-keyboard input records
+  (mouse, window-resize, etc.), and do the real read once a key
+  event is at the head of the queue.  An APC running during the
+  wait turns the call into -1/EINTR.
+*/
+ssize_t
+console_read(HANDLE hfile, void *buf, unsigned int count)
+{
+  DWORD err, eventcount, i, n;
+  INPUT_RECORD ir;
+
+  do {
+    err = WaitForSingleObjectEx(hfile, INFINITE, TRUE);
+    switch (err) {
+    case WAIT_OBJECT_0:
+      eventcount = 0;
+      GetNumberOfConsoleInputEvents(hfile, &eventcount);
+      for (i = 0; i < eventcount; i++) {
+        /* Peek doesn't consume; only non-key events are consumed
+           (and dropped) below. */
+        PeekConsoleInput(hfile, &ir, 1, &n);
+        if (ir.EventType == KEY_EVENT) {
+          return lisp_standard_read(hfile, buf, count);
+        } else {
+          ReadConsoleInput(hfile, &ir, 1, &n);
+        }
+      }
+      break;
+    case WAIT_IO_COMPLETION:
+      errno = EINTR;
+      return -1;
+      break;
+    case WAIT_FAILED:
+      _dosmaperr(GetLastError());
+      return -1;
+      break;
+    }
+  } while (1);
+}
+
+/*
+  Dispatch a read on HFILE to the handler appropriate for its type:
+  console input, pipe (polling) input, or a plain overlapped read
+  for disk files and sockets.  Returns bytes read, 0 at EOF, or -1
+  with errno set (EBADF for unrecognized handle types).
+*/
+ssize_t
+lisp_read(HANDLE hfile, void *buf, unsigned int count) {
+  switch(GetFileType(hfile)) {
+  case FILE_TYPE_CHAR:
+    return console_read(hfile, buf, count);
+    break;
+
+  case FILE_TYPE_PIPE:          /* pipe or one of these newfangled socket things */
+    {
+      /* getsockopt() fails with WSAENOTSOCK exactly when the handle
+         isn't really a socket. */
+      int socktype, optlen = sizeof(int);
+      if ((getsockopt((SOCKET)hfile, SOL_SOCKET, SO_TYPE, (char *)&socktype, &optlen) != 0) && (GetLastError() == WSAENOTSOCK)) {
+        return pipe_read(hfile, buf, count);
+      }
+    }
+    /* It's a socket, fall through */
+    
+  case FILE_TYPE_DISK:
+    return lisp_standard_read(hfile, buf, count);
+    break;
+
+  default:
+    errno = EBADF;
+    return -1;
+  }
+}
+
+
+
+/*
+  Overlapped write to HFILE.  The pseudo-handles 1 and 2 are mapped
+  to the process's standard output/error handles.  As in
+  lisp_standard_read(), the pending operation is published in the
+  TCR's pending_io_info and the wait is alertable, so an APC cancels
+  the write and the call fails with EINTR.  Returns the number of
+  bytes written, or -1 with errno set.
+*/
+ssize_t
+lisp_write(HANDLE hfile, void *buf, ssize_t count)
+{
+  HANDLE hevent;
+  OVERLAPPED overlapped;
+  DWORD err, nwritten, wait_result;
+  pending_io pending;
+  TCR *tcr = (TCR *)get_tcr(1);
+
+  hevent = (HANDLE)tcr->io_datum;
+  if (hfile == (HANDLE)1) {
+    hfile = GetStdHandle(STD_OUTPUT_HANDLE);
+  } else if (hfile == (HANDLE) 2) {
+    hfile = GetStdHandle(STD_ERROR_HANDLE);
+  }
+
+
+  memset(&overlapped,0,sizeof(overlapped));
+
+  if (GetFileType(hfile) == FILE_TYPE_DISK) {
+    /* Overlapped writes on disk files need an explicit position. */
+    overlapped.Offset = SetFilePointer(hfile, 0, &(overlapped.OffsetHigh), FILE_CURRENT);
+  }
+
+
+  pending.h = hfile;
+  pending.o = &overlapped;
+  tcr->pending_io_info = &pending;
+  overlapped.hEvent = hevent;
+  ResetEvent(hevent);
+  if (WriteFile(hfile, buf, count, &nwritten, &overlapped)) {
+    /* Completed synchronously. */
+    tcr->pending_io_info = NULL;
+    return nwritten;
+  }
+  
+  err = GetLastError();
+  if (err != ERROR_IO_PENDING) {
+    _dosmaperr(err);
+    tcr->pending_io_info = NULL;
+    return -1;
+  }
+  err = 0;
+  wait_result = WaitForSingleObjectEx(hevent, INFINITE, true);
+  tcr->pending_io_info = NULL;
+  if (wait_result == WAIT_OBJECT_0) {
+    /* Internal = completion status, InternalHigh = bytes written. */
+    err = overlapped.Internal;
+    if (err) {
+      _dosmaperr(err);
+      return -1;
+    }
+    return overlapped.InternalHigh;
+  }
+  if (wait_result == WAIT_IO_COMPLETION) {
+    /* Interrupted by an APC. */
+    CancelIo(hfile);
+    errno = EINTR;
+    return -1;
+  }
+  err = GetLastError();
+  _dosmaperr(err);
+  return -1;
+}
+
+/*
+  fchmod() stub: not implemented on Windows; always fails with
+  ENOSYS.
+*/
+int
+lisp_fchmod(HANDLE hfile, int mode)
+{
+  errno = ENOSYS;
+  return -1;
+}
+
+/*
+  lseek() analogue for a 64-bit offset.  Returns the new absolute
+  file position, or -1 with errno set.
+*/
+__int64
+lisp_lseek(HANDLE hfile, __int64 offset, int whence)
+{
+  DWORD high, low, err;
+
+  high = (DWORD)(offset >> 32);
+  low = (DWORD)(offset & 0xffffffff);
+  SetLastError(NO_ERROR);
+  low = SetFilePointer(hfile, (LONG)low, (PLONG)&high, whence);
+  if (low == INVALID_SET_FILE_POINTER) {
+    /* 0xffffffff is a legal low word of a 64-bit file position, so
+       it only indicates failure if GetLastError() also reports an
+       error (the documented SetFilePointer protocol). */
+    err = GetLastError();
+    if (err != NO_ERROR) {
+      _dosmaperr(err);
+      return -1;
+    }
+  }
+  return ((((__int64)high)<<32)|low);
+}
+
+/* Replicate the owner permission bits into the group and world
+   positions (e.g. 0400 -> 0444).  The third term previously used an
+   unparenthesized "f" ("(f >> 6)"), a latent macro-hygiene bug for
+   unparenthesized compound arguments. */
+#define ALL_USERS(f) ((f) | ((f) >> 3) | ((f) >> 6))
+#define STAT_READONLY ALL_USERS(_S_IREAD)
+#define STAT_READWRITE ALL_USERS((_S_IREAD|_S_IWRITE))
+/* stat() a wide-character pathname into BUF. */
+int
+lisp_stat(wchar_t *path, struct __stat64 *buf)
+{
+  return _wstat64(path,buf);
+}
+
+#define UNIX_EPOCH_IN_WINDOWS_EPOCH  116444736000000000LL
+
+/*
+  Convert a Windows FILETIME (100ns ticks since 1601-01-01 UTC) to
+  Unix time (seconds since 1970-01-01 UTC).
+*/
+__time64_t
+filetime_to_unix_time(FILETIME *ft)
+{
+  __time64_t then;
+
+  /* FILETIME is a pair of 32-bit fields and need not be aligned for
+     a 64-bit access; copying also avoids the strict-aliasing
+     violation in "*(__time64_t *)ft". */
+  memcpy(&then, ft, sizeof(then));
+  then -= UNIX_EPOCH_IN_WINDOWS_EPOCH;
+  return then/10000000;
+}
+
+/*
+  fstat() for a HANDLE.  Character and pipe handles get only a mode
+  (_S_IFCHR/_S_IFIFO); disk files are filled in from
+  GetFileInformationByHandle(): permission bits (the read-only
+  attribute replicated across user/group/other), size, and
+  timestamps.  st_nlink is always reported as 1.  Returns 0, or -1
+  with errno set (EBADF for unknown handle types).
+*/
+int
+lisp_fstat(HANDLE hfile, struct __stat64 *buf)
+{
+  int filetype;
+
+  /* Mask off FILE_TYPE_REMOTE so remote handles classify by their
+     base type. */
+  filetype = GetFileType(hfile) & ~FILE_TYPE_REMOTE;
+
+  if (filetype == FILE_TYPE_UNKNOWN) {
+    errno = EBADF;
+    return -1;
+  }
+
+  memset(buf, 0, sizeof(*buf));
+  buf->st_nlink = 1;
+  
+  switch(filetype) {
+  case FILE_TYPE_CHAR:
+  case FILE_TYPE_PIPE:
+    if (filetype == FILE_TYPE_CHAR) {
+      buf->st_mode = _S_IFCHR;
+    } else {
+      buf->st_mode = _S_IFIFO;
+    }
+    break;
+  case FILE_TYPE_DISK:
+    {
+      BY_HANDLE_FILE_INFORMATION info;
+
+      if (!GetFileInformationByHandle(hfile, &info)) {
+        _dosmaperr(GetLastError());
+        return -1;
+      }
+
+      if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) {
+        buf->st_mode = STAT_READONLY;
+      } else {
+        buf->st_mode = STAT_READWRITE;
+      }
+      buf->st_mode |= _S_IFREG;
+      buf->st_size = ((((__int64)(info.nFileSizeHigh))<<32LL) |
+                      ((__int64)(info.nFileSizeLow)));
+      buf->st_mtime = filetime_to_unix_time(&info.ftLastWriteTime);
+      buf->st_atime = filetime_to_unix_time(&info.ftLastAccessTime);
+      buf->st_ctime = filetime_to_unix_time(&info.ftCreationTime);
+    }
+    break;
+  case FILE_TYPE_UNKNOWN:
+  default:
+    errno = EBADF;
+    return -1;
+  }
+  return 0;
+}
+
+/*
+  futex() is Linux-specific; always fails with ENOSYS on Windows.
+*/
+int
+lisp_futex(int *uaddr, int op, int val, void *timeout, int *uaddr2, int val3)
+{
+  errno = ENOSYS;
+  return -1;
+}
+
+
+/*
+  ftruncate() analogue.  When NEW_SIZE is larger than the current
+  size the file is extended by explicitly writing zeros from EOF;
+  otherwise SetEndOfFile() cuts it at NEW_SIZE.  The file position
+  is left at (at least) the new EOF.  Returns 0 on success, -1 with
+  errno set.
+*/
+__int64
+lisp_ftruncate(HANDLE hfile, off_t new_size)
+{
+  __int64 oldpos;
+
+  oldpos = lisp_lseek(hfile, 0, SEEK_END);
+  if (oldpos == -1) {
+    /* The seek failed; report the error (errno was set by
+       lisp_lseek) rather than claiming success. */
+    return -1;
+  }
+  if (oldpos < new_size) {
+    /* Extend by writing zeros from the current EOF. */
+    char buf[4096];
+    __int64 n = new_size-oldpos;
+    DWORD nwritten, to_write;
+
+    memset(buf,0,sizeof(buf));
+    while(n) {
+      if (n > 4096LL) {
+        to_write = 4096;
+      } else {
+        to_write = (DWORD)n;
+      }
+      if (!WriteFile(hfile,buf,to_write,&nwritten,NULL)) {
+        _dosmaperr(GetLastError());
+        return -1;
+      }
+      n -= nwritten;
+    }
+    return 0;
+  }
+  /* Shrinking (or unchanged size): set EOF at new_size. */
+  lisp_lseek(hfile, new_size, SEEK_SET);
+  if (SetEndOfFile(hfile)) {
+    return 0;
+  }
+  _dosmaperr(GetLastError());
+  return -1;
+}
+
+
+/* Thin wrapper around _wopendir() (wide-character opendir). */
+_WDIR *
+lisp_opendir(wchar_t *path)
+{
+  return _wopendir(path);
+}
+
+/* Thin wrapper around _wreaddir() (wide-character readdir). */
+struct _wdirent *
+lisp_readdir(_WDIR *dir)
+{
+  return _wreaddir(dir);
+}
+
+/* Thin wrapper around _wclosedir() (wide-character closedir). */
+__int64
+lisp_closedir(_WDIR *dir)
+{
+  return _wclosedir(dir);
+}
+
+/*
+  Create an anonymous pipe with inheritable handles.  Note that the
+  raw HANDLE values (not CRT file descriptors) are returned,
+  truncated to int: fd[0] is the read end, fd[1] the write end --
+  callers presumably treat these as handles; confirm on the Lisp
+  side.  Returns 0 on success, -1 (after wperror()) on failure.
+*/
+int
+lisp_pipe(int fd[2])
+{
+  HANDLE input, output;
+  SECURITY_ATTRIBUTES sa;
+
+  sa.nLength= sizeof(SECURITY_ATTRIBUTES);
+  sa.lpSecurityDescriptor = NULL;
+  sa.bInheritHandle = TRUE;
+
+  if (!CreatePipe(&input, &output, &sa, 0))
+    {
+      wperror("CreatePipe");
+      return -1;
+    }
+  fd[0] = (int) ((intptr_t)input);
+  fd[1] = (int) ((intptr_t)output);
+  return 0;
+}
+
+/*
+  gettimeofday() replacement: let the CRT's gettimeofday() fill in
+  the timezone-related information, then overwrite the time itself
+  from the system FILETIME, converted to seconds/microseconds since
+  the Unix epoch.  Always returns 0.
+*/
+int
+lisp_gettimeofday(struct timeval *tp, void *tzp)
+{
+  FILETIME ft;
+  __time64_t now;
+
+  gettimeofday(tp,tzp);       /* trust it to get time zone right, at least */
+  GetSystemTimeAsFileTime(&ft);
+  /* Copy instead of type-punning a __time64_t* to FILETIME*, which
+     violates strict aliasing. */
+  memcpy(&now, &ft, sizeof(now));
+  now -= UNIX_EPOCH_IN_WINDOWS_EPOCH;
+  now /= 10000;               /* convert time to milliseconds */
+  tp->tv_sec = now/1000LL;
+  tp->tv_usec = 1000 * (now%1000LL); /* convert milliseconds to microseconds */
+  return 0;
+}
+
+/*
+  Restore SIGNUM's default disposition and re-raise it, so the
+  process terminates with the conventional status for that signal.
+*/
+int
+lisp_sigexit(int signum)
+{
+  signal(signum, SIG_DFL);
+  return raise(signum);
+}
+
+#ifdef WIN_64
+
+/* Make sure that the lisp calls these functions, when they do something */
+/* This code is taken from the 32-bit mingw library and is in the
+   public domain */
+/* Double-precision acosh(): log(x + sqrt((x+1)(x-1))), with a
+   log(x) + ln(2) approximation once x is large enough that
+   sqrt(x*x - 1) == x.  Domain error (EDOM, NaN return) for x < 1;
+   NaN arguments propagate. */
+double
+acosh(double x)
+{
+  if (isnan (x)) 
+    return x;
+
+  if (x < 1.0)
+    {
+      errno = EDOM;
+      return nan("");
+    }
+
+  if (x > 0x1p32)
+    /*  Avoid overflow (and unnecessary calculation when
+        sqrt (x * x - 1) == x). GCC optimizes by replacing
+        the long double M_LN2 const with a fldln2 insn.  */ 
+    return log (x) + 6.9314718055994530941723E-1L;
+
+  /* Since  x >= 1, the arg to log will always be greater than
+     the fyl2xp1 limit (approx 0.29) so just use logl. */ 
+  return log (x + sqrt((x + 1.0) * (x - 1.0)));
+}
+
+/*
+  Single-precision acosh(); same algorithm as acosh() above.
+  Domain error (EDOM, NaN return) for x < 1; NaNs propagate.
+*/
+float
+acoshf(float x)
+{
+  if (isnan (x))
+    return x;
+  if (x < 1.0f)
+    {
+      errno = EDOM;
+      /* Use nanf() here (as atanhf() does), not nan(): the double
+         NaN was being converted to float on return anyway. */
+      return nanf("");
+    }
+
+  if (x > 0x1p32f)
+    /*  Avoid overflow (and unnecessary calculation when
+        sqrt (x * x - 1) == x).  */
+    return log (x) + 6.9314718055994530941723E-1L;
+
+  /* Since  x >= 1, the arg to log will always be greater than
+     the fyl2xp1 limit (approx 0.29) so just use logl. */
+  return log (x + sqrt((x + 1.0) * (x - 1.0)));
+}
+
+/* Double-precision asinh(), computed via log1p on |x| to avoid
+   cancellation for small arguments; the sign is restored at the
+   end.  Infinities and NaNs propagate unchanged. */
+double
+asinh(double x)
+{
+  double z;
+  if (!isfinite (x))
+    return x;
+  z = fabs (x);
+
+  /* Avoid setting FPU underflow exception flag in x * x. */
+#if 0
+  if ( z < 0x1p-32)
+    return x;
+#endif
+
+  /* Use log1p to avoid cancellation with small x. Put
+     x * x in denom, so overflow is harmless. 
+     asinh(x) = log1p (x + sqrt (x * x + 1.0) - 1.0)
+              = log1p (x + x * x / (sqrt (x * x + 1.0) + 1.0))  */
+
+  z = log1p (z + z * z / (sqrt (z * z + 1.0) + 1.0));
+
+  return ( x >= 0.0 ? z : -z);
+}
+
+/* Single-precision asinh(); same log1p-based scheme as asinh()
+   above.  NOTE(review): the intermediate arithmetic uses the
+   double-precision log1p/sqrt, which only affects precision, not
+   the contract. */
+float
+asinhf(float x)
+{
+  float z;
+  if (!isfinite (x))
+    return x;
+  z = fabsf (x);
+
+  /* Avoid setting FPU underflow exception flag in x * x. */
+#if 0
+  if ( z < 0x1p-32)
+    return x;
+#endif
+
+
+  /* Use log1p to avoid cancellation with small x. Put
+     x * x in denom, so overflow is harmless. 
+     asinh(x) = log1p (x + sqrt (x * x + 1.0) - 1.0)
+              = log1p (x + x * x / (sqrt (x * x + 1.0) + 1.0))  */
+
+  z = log1p (z + z * z / (sqrt (z * z + 1.0) + 1.0));
+
+  return ( x >= 0.0 ? z : -z);
+}
+
+/* Double-precision atanh(): 0.5*log1p(2z/(1-z)) on z = |x|, sign
+   restored at the end.  |x| == 1 is a pole (ERANGE, +/-infinity);
+   |x| > 1 is a domain error (EDOM, NaN); NaNs propagate. */
+double
+atanh(double x)
+{
+  double z;
+  if (isnan (x))
+    return x;
+  z = fabs (x);
+  if (z == 1.0)
+    {
+      errno  = ERANGE;
+      return (x > 0 ? INFINITY : -INFINITY);
+    }
+  if (z > 1.0)
+    {
+      errno = EDOM;
+      return nan("");
+    }
+  /* Rearrange formula to avoid precision loss for small x.
+
+  atanh(x) = 0.5 * log ((1.0 + x)/(1.0 - x))
+	   = 0.5 * log1p ((1.0 + x)/(1.0 - x) - 1.0)
+           = 0.5 * log1p ((1.0 + x - 1.0 + x) /(1.0 - x)) 
+           = 0.5 * log1p ((2.0 * x ) / (1.0 - x))  */
+  z = 0.5 * log1p ((z + z) / (1.0 - z));
+  return x >= 0 ? z : -z;
+}
+
+/* Single-precision atanh(); see atanh() above for the algorithm
+   and error behavior. */
+float
+atanhf(float x)
+{
+  float z;
+  if (isnan (x))
+    return x;
+  z = fabsf (x);
+  if (z == 1.0)
+    {
+      errno  = ERANGE;
+      return (x > 0 ? INFINITY : -INFINITY);
+    }
+  if ( z > 1.0)
+    {
+      errno = EDOM;
+      return nanf("");
+    }
+  /* Rearrange formula to avoid precision loss for small x.
+
+  atanh(x) = 0.5 * log ((1.0 + x)/(1.0 - x))
+	   = 0.5 * log1p ((1.0 + x)/(1.0 - x) - 1.0)
+           = 0.5 * log1p ((1.0 + x - 1.0 + x) /(1.0 - x)) 
+           = 0.5 * log1p ((2.0 * x ) / (1.0 - x))  */
+  z = 0.5 * log1p ((z + z) / (1.0 - z));
+  return x >= 0 ? z : -z;
+}
+
+#endif
+
+/* A name -> address mapping for C math functions. */
+typedef struct {
+  char *name;
+  void *addr;
+} math_fn_entry;
+
+
+/* Table searched by lookup_math_fn(), terminated by a NULL name.
+   Used as a fallback by windows_find_symbol() -- presumably because
+   some of these functions may not appear in any DLL's export table;
+   confirm. */
+math_fn_entry math_fn_entries [] = {
+  {"acos",acos},
+  {"acosf",acosf},
+  {"acosh",acosh},
+  {"acoshf",acoshf},
+  {"asin",asin},
+  {"asinf",asinf},
+  {"asinh",asinh},
+  {"asinhf",asinhf},
+  {"atan",atan},
+  {"atan2",atan2},
+  {"atan2f",atan2f},
+  {"atanf",atanf},
+  {"atanh",atanh},
+  {"atanhf",atanhf},
+  {"cos",cos},
+  {"cosf",cosf},
+  {"cosh",cosh},
+  {"coshf",coshf},
+  {"exp",exp},
+  {"expf",expf},
+  {"log",log},
+  {"logf",logf},
+  {"pow",pow},
+  {"powf",powf},
+  {"sin",sin},
+  {"sinf",sinf},
+  {"sinh",sinh},
+  {"sinhf",sinhf},
+  {"tan",tan},
+  {"tanf",tanf},
+  {"tanh",tanh},
+  {"tanhf",tanhf},
+  {NULL, 0}};
+
+/* Return the address of the named math function, or NULL if NAME
+   isn't in math_fn_entries[]. */
+void *
+lookup_math_fn(char *name)
+{
+  math_fn_entry *entry;
+
+  for (entry = math_fn_entries; entry->name != NULL; entry++) {
+    if (strcmp(name, entry->name) == 0) {
+      return entry->addr;
+    }
+  }
+  return NULL;
+}
+
+HMODULE *modules = NULL;
+DWORD cbmodules = 0;
+HANDLE find_symbol_lock = 0;
+
+/*
+  dlsym() analogue.  If HANDLE names a specific module, look the
+  symbol up there; the pseudo-handles (void*)-1 and (void*)-2 mean
+  "search every module in the process".  The process-wide search is
+  serialized by find_symbol_lock because the modules buffer is
+  shared.  Falls back to the static math-function table when the
+  export-table search fails.  Returns NULL if the symbol can't be
+  found.  (The previous version fell off the end without returning
+  in the specific-module case -- undefined behavior -- and left
+  addr uninitialized when no modules were enumerated.)
+*/
+void *
+windows_find_symbol(void *handle, char *name)
+{
+  void *addr;
+
+  if ((handle == ((void *)-2L)) ||
+      (handle == ((void *)-1L))) {
+    handle = NULL;
+  }
+  if (handle != NULL) {
+    /* Look up in the specified module only. */
+    return GetProcAddress(handle, name);
+  }
+  {
+    DWORD cbneeded, have, i;
+
+    addr = NULL;
+    WaitForSingleObject(find_symbol_lock,INFINITE);
+
+    if (cbmodules == 0) {
+      cbmodules = 16 * sizeof(HANDLE);
+      modules = malloc(cbmodules);
+    }
+
+    /* Grow the module buffer until it holds the whole list. */
+    while (1) {
+      EnumProcessModules(GetCurrentProcess(),modules,cbmodules,&cbneeded);
+      if (cbmodules >= cbneeded) {
+        break;
+      }
+      cbmodules = cbneeded;
+      modules = realloc(modules,cbmodules);
+    }
+    have = cbneeded/sizeof(HANDLE);
+
+    for (i = 0; i < have; i++) {
+      addr = GetProcAddress(modules[i],name);
+      if (addr) {
+        break;
+      }
+    }
+    ReleaseMutex(find_symbol_lock);
+    if (addr) {
+      return addr;
+    }
+    return lookup_math_fn(name);
+  }
+}
+
+/* Note that we're using 8-bit strings here */
+
+/*
+  dlopen() analogue for an ANSI (8-bit) pathname.  Returns the
+  HMODULE as a void *, or NULL if the library can't be loaded.
+*/
+void *
+windows_open_shared_library(char *path)
+{
+  HMODULE module = (HMODULE)0;
+
+  /* Try to open an existing module in a way that increments its
+     reference count without running any initialization code in
+     the dll. */
+  if (!GetModuleHandleExA(0,path,&module)) {
+    /* If that failed ... */
+    module = LoadLibraryA(path);
+  }
+  return (void *)module;
+}
+
+
+/*
+  One-time I/O-related initialization: on win32, set up the LDT
+  machinery (init_win32_ldt), and create the mutex that serializes
+  module enumeration in windows_find_symbol().
+*/
+void
+init_windows_io()
+{
+#ifdef WIN_32
+  extern void init_win32_ldt(void);
+  init_win32_ldt();
+#endif
+  find_symbol_lock = CreateMutex(NULL,false,NULL);
+}
+
+/*
+  Start up winsock, requesting version 2.2 ((2<<8)|2 is
+  MAKEWORD(2,2)).  NOTE(review): the WSAStartup() return value is
+  ignored.
+*/
+void
+init_winsock()
+{
+  WSADATA data;
+
+  WSAStartup((2<<8)|2,&data);
+}
+
Index: /branches/arm/lisp-kernel/x86-asmutils32.s
===================================================================
--- /branches/arm/lisp-kernel/x86-asmutils32.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-asmutils32.s	(revision 13357)
@@ -0,0 +1,285 @@
+/*   Copyright (C) 2005-2009 Clozure Associates */
+/*   This file is part of Clozure CL.   */
+ 
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence.   */
+ 
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+ 
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+	
+
+	include(lisp.s)
+
+	_beginfile
+
+_exportfn(C(current_stack_pointer))
+	__(movl %esp,%eax)
+	__(ret)
+_endfn
+                        
+/* int count_leading_zeros(int arg): number of leading zero bits. */
+/* NOTE(review): bsr leaves its destination undefined for a zero */
+/* argument; callers presumably guarantee arg != 0 - confirm. */
+_exportfn(C(count_leading_zeros))
+	__(bsr 4(%esp),%eax)
+	__(xor $31,%eax)
+	__(ret)
+_endfn
+
+_exportfn(C(noop))
+	__(ret)
+_endfn
+
+_exportfn(C(set_mxcsr))
+        __(ldmxcsr 4(%esp))
+        __(ret)
+_endfn
+	
+_exportfn(C(get_mxcsr))
+        __(push $0)
+        __(stmxcsr (%esp))
+        __(pop %eax)
+        __(ret)
+_endfn
+
+_exportfn(C(save_fp_context))
+_endfn
+        
+_exportfn(C(restore_fp_context))
+_endfn                        
+
+/*  Atomically store new in *p, if *p == old. */
+/*  Return actual old value. */
+/* natural store_conditional(natural *p, natural old, natural new) */
+/* NOTE(review): on failure cmpxchg leaves the memory's value in */
+/* %eax, but the cmovne then overwrites it with NEW; callers that */
+/* merely compare the result against OLD are unaffected - confirm */
+/* intent. */
+_exportfn(C(store_conditional))
+	__(movl 12(%esp),%edx)	/* new */
+	__(movl 8(%esp),%eax)	/* old */
+	__(movl 4(%esp),%ecx)	/* ptr */
+	__(lock)
+        __(cmpxchgl %edx,(%ecx))
+	__(cmovne %edx,%eax)
+	__(ret)
+_endfn
+
+/*	Atomically store val in *p; return previous *p */
+/*	of *%rdi. */
+/* signed_natural atomic_swap(signed_natural *p, signed_natural val) */
+/* (xchg with a memory operand is implicitly locked; the explicit */
+/* lock prefix is redundant but harmless.) */
+_exportfn(C(atomic_swap))
+	__(movl 8(%esp),%eax)
+	__(movl 4(%esp),%edx)
+	__(lock)
+        __(xchg %eax,(%edx))
+	__(ret)
+_endfn
+
+/*      Logior the value in *p with mask (presumably a */
+/*	bitmask with exactly 1 bit set.)  Return non-zero if any of */
+/*	the bits in that bitmask were already set. */
+/* natural atomic_ior(natural *p, natural mask) */
+/* (Retry loop: reload *p, OR in the mask, publish with lock */
+/* cmpxchg; on success %eax holds the previous value, which is */
+/* ANDed with the mask to form the result.) */
+_exportfn(C(atomic_ior))
+	__(movl 4(%esp),%edx)	/* ptr */
+0:	__(movl (%edx),%eax)
+	__(movl %eax,%ecx)
+	__(orl 8(%esp),%ecx)
+	__(lock)
+        __(cmpxchg %ecx,(%edx))
+        __(jnz 0b)
+	__(andl 8(%esp),%eax)
+	__(ret)
+_endfn
+        
+        
+/* Logand the value in *p with mask (presumably a bitmask with exactly 1 */
+/* bit set.)  Return the value now in *p (for some value of "now"). */
+/* natural atomic_and(natural *p, natural mask) */
+/* (Same lock-cmpxchg retry loop as atomic_ior; %ecx holds the */
+/* value that was successfully stored, which is what's returned.) */
+_exportfn(C(atomic_and))
+	__(movl 4(%esp),%edx)
+0:	__(movl (%edx),%eax)
+	__(movl %eax,%ecx)
+	__(and 8(%esp),%ecx)
+	__(lock)
+        __(cmpxchg %ecx,(%edx))
+        __(jnz 0b)
+	__(movl %ecx,%eax)
+	__(ret)
+_endfn
+
+
+        __ifdef(`DARWIN')
+_exportfn(C(pseudo_sigreturn))
+        __(hlt)
+        __(jmp C(pseudo_sigreturn))
+_endfn
+        __endif    
+
+/* int cpuid (int code, int *pebx, int *pecx, int *pedx)  */
+/* Returns %eax; the %ebx/%ecx/%edx results are stored through the */
+/* pointer arguments.  %ecx is zeroed first so subleaf-sensitive */
+/* leaves report subleaf 0. */
+_exportfn(C(cpuid))
+	__(push %ebx)		/* %ebx is non-volatile */
+	__(push %esi)		/* ditto here */
+	__(movl 12(%esp),%eax)
+        __(xorl %ecx,%ecx)
+	__(cpuid)
+	__(movl 16(%esp),%esi)
+	__(movl %ebx,(%esi))
+	__(movl 20(%esp),%esi)
+	__(movl %ecx,(%esi))
+	__(movl 24(%esp),%esi)
+	__(movl %edx,(%esi))
+	__(pop %esi)
+	__(pop %ebx)
+	__(ret)
+_endfn
+
+/* switch_to_foreign_stack(new_sp, func, arg_0, arg_1, arg_2)  */
+/*   Not fully general, but should get us off of the signal stack */
+/* Beware: on Darwin, GDB can get very confused by this code, and
+   doesn't really get unconfused until the target function - the
+   handler - has built its stack frame
+   The lone caller of this function passes 3 arguments (besides
+   the new stack pointer and the handler address.)
+   On platforms where the C stack must be 16-byte aligned, pushing
+   a 4th word helps make the stack aligned before the return
+   address is (re-)pushed.
+   On Linux, there are severe constraints on what the top of stack
+   can look like when rt_sigreturn (the code at the return address)
+   runs, and there aren't any constraints on stack alignment, so
+   we don't push the extra word on the new stack.*/
+_exportfn(C(switch_to_foreign_stack))
+        __(addl $4,%esp)        /* discard return address, on wrong stack */
+        __(pop %edi)            /* new esp */
+        __(pop %esi)            /* handler */
+        __(pop %eax)            /* arg_0 */
+        __(pop %ebx)            /* arg_1 */
+        __(pop %ecx)            /* arg_2 */
+        __(mov %edi,%esp)
+        __(pop %edi)            /* Return address pushed by caller */
+        __ifndef(`LINUX')
+        __(push $0)             /* For alignment. See comment above */
+        __endif
+        __(push %ecx)           /* arg_2 */
+        __(push %ebx)           /* arg_1 */
+        __(push %eax)           /* arg_0 */
+        __(push %edi)           /* return address */
+        __(jmp *%esi)           /* On some platforms, we don't really return */
+_endfn
+
+        __ifdef(`FREEBSD')
+        .globl C(sigreturn)
+_exportfn(C(freebsd_sigreturn))
+        __(jmp C(sigreturn))
+_endfn
+        __endif
+
+        __ifdef(`DARWIN')
+_exportfn(C(darwin_sigreturn))
+/* Need to set the sigreturn 'infostyle' argument, which is mostly
+   undocumented.  On x8632 Darwin, sigtramp() sets it to 0x1e, and
+   since we're trying to do what sigtramp() would do if we'd returned
+   to it ... */
+        __(movl $0x1e,8(%esp))
+	__(movl $0xb8,%eax)	/* SYS_sigreturn */
+	__(int $0x80)
+	__(ret)			/* shouldn't return */
+
+_endfn
+        __endif        
+		
+_exportfn(C(get_vector_registers))
+	__(ret)
+_endfn
+
+_exportfn(C(put_vector_registers))
+	__(ret)
+_endfn				
+
+        __ifdef(`WIN_32')
+_exportfn(C(restore_windows_context))
+Xrestore_windows_context_start:
+        __(movl 4(%esp),%ecx)   /* context */
+        __(movl 12(%esp),%edx)  /* old valence */
+        __(movl 8(%esp),%eax)   /* tcr */
+        __(movw tcr.ldt_selector(%eax), %rcontext_reg)
+        __(movl %edx,rcontext(tcr.valence))
+        __(movl $0,rcontext(tcr.pending_exception_context))
+        __(frstor win32_context.FloatSave(%ecx))
+        /* Windows doesn't bother to align the context, so use
+          'movupd' here */
+        __(movupd win32_context.Xmm0(%ecx),%xmm0)
+        __(movupd win32_context.Xmm1(%ecx),%xmm1)
+        __(movupd win32_context.Xmm2(%ecx),%xmm2)
+        __(movupd win32_context.Xmm3(%ecx),%xmm3)
+        __(movupd win32_context.Xmm4(%ecx),%xmm4)
+        __(movupd win32_context.Xmm5(%ecx),%xmm5)
+        __(movupd win32_context.Xmm6(%ecx),%xmm6)
+        __(movupd win32_context.Xmm7(%ecx),%xmm7)
+        __(ldmxcsr win32_context.MXCSR(%ecx))
+        __(movl win32_context.Ebp(%ecx),%ebp)
+        __(movl win32_context.Edi(%ecx),%edi)
+        __(movl win32_context.Esi(%ecx),%esi)
+        __(movl win32_context.Edx(%ecx),%edx)
+        __(movl win32_context.Ebx(%ecx),%ebx)
+        __(movl win32_context.Eax(%ecx),%eax)
+        __(movl win32_context.Esp(%ecx),%esp)
+        __(pushl win32_context.EFlags(%ecx))
+        __(pushl %cs)
+        __(pushl win32_context.Eip(%ecx))        
+        /* This must be the last thing before the iret, e.g., if we're
+        interrupted before the iret, the context we're returning to here
+        is still in %ecx.  If we're interrupted -at- the iret, then
+        everything but that which the iret will restore has been restored. */
+        __(movl win32_context.Ecx(%ecx),%ecx)
+Xrestore_windows_context_iret:            
+        __(iret)
+Xrestore_windows_context_end:             
+        __(nop)
+_endfn
+	
+_exportfn(C(windows_switch_to_foreign_stack))
+        __(pop %eax)
+        __(pop %ebx)            /* new %esp */
+        __(pop %ecx)            /* handler */
+        __(pop %edx)            /* arg */
+        __(movl %ebx,%esp)
+        __(subl $0x10,%esp)
+        __(movl %edx,(%esp))
+        __(push %eax)
+        __(jmp *%ecx)
+_endfn        
+
+        .data
+        .globl C(restore_windows_context_start)
+        .globl C(restore_windows_context_end)
+        .globl C(restore_windows_context_iret)
+C(restore_windows_context_start):  .long Xrestore_windows_context_start
+C(restore_windows_context_end): .long Xrestore_windows_context_end
+C(restore_windows_context_iret): .long Xrestore_windows_context_iret
+        .text
+        
+        __ifdef(`WIN32_ES_HACK')
+/* Something that we shouldn't return to */
+_exportfn(C(windows_halt))
+        __(hlt)
+_endfn         
+        __endif
+_exportfn(C(ensure_safe_for_string_operations))
+        __ifdef(`WIN32_ES_HACK')
+        __(movw %es,%ax)
+        __(movw %ds,%dx)
+        __(cmpw %ax,%dx)
+        __(jne 9f)
+0:      __(movw %dx,%es)
+        __endif
+        __(cld)        
+	__(ret)
+        __ifdef(`WIN32_ES_HACK')
+9:      __(hlt)
+        __(jmp 0b)
+        __endif
+_endfn                                       
+        __endif
+        _endfile
+
Index: /branches/arm/lisp-kernel/x86-asmutils64.s
===================================================================
--- /branches/arm/lisp-kernel/x86-asmutils64.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-asmutils64.s	(revision 13357)
@@ -0,0 +1,308 @@
+/*   Copyright (C) 2005-2009 Clozure Associates */
+/*   This file is part of Clozure CL.   */
+ 
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence.   */
+ 
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+ 
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+	
+
+	include(lisp.s)
+
+	_beginfile
+
+/* Flush %carg1 cache lines, starting at address in %carg0.  Each line is */
+/*   assumed to be %carg2 bytes wide. */
+_exportfn(C(flush_cache_lines))
+	__(cmpq $0,%carg1)	/* prime flags for the jg below: skip entirely if count <= 0 */
+	__(jmp 2f)
+1:	__(clflush (%carg0))	/* flush the line containing %carg0 */
+	__(addq %carg2,%carg0)	/* advance by the line size */
+	__(subq $1,%carg1)	/* decrement count; sets flags for jg */
+2:	__(jg 1b)	
+	__(repret)
+_endfn
+
+_exportfn(C(current_stack_pointer))
+	__(movq %rsp,%cret)	/* %rsp here = caller's %rsp - 8 (our return address is on the stack) */
+	__(ret)
+_endfn
+
+_exportfn(C(touch_page))        /* probe *%carg0 for writability; returns 1 on success */
+        __(movq %carg0,(%carg0))        /* faults here if the page isn't writable */
+        __(movq $0,(%carg0))            /* undo the scribble */
+        __(movl $1,%cret_l)
+        .globl C(touch_page_end)        /* exported so a fault handler can recognize a fault inside this probe — presumably; confirm */
+C(touch_page_end):	
+        __(ret)
+                        
+_exportfn(C(count_leading_zeros))
+	__(bsrq %carg0,%cret)	/* index of the highest set bit */
+	__(xorq $63,%cret)	/* 63 - index = number of leading zeros */
+	__(ret)	/* NB: result is undefined when %carg0 == 0 (bsr leaves dest unchanged) */
+_endfn
+
+_exportfn(C(noop))
+	__(retq)	/* intentionally does nothing */
+_endfn
+
+_exportfn(C(set_mxcsr))        /* load MXCSR from the value in %carg0 */
+        __(pushq %carg0)        /* ldmxcsr requires a memory operand */
+        __(ldmxcsr (%rsp))
+        __(addq $8,%rsp)
+        __(ret)
+_endfn
+	
+_exportfn(C(get_mxcsr))        /* return the current MXCSR */
+        __(pushq $0)            /* zeroed slot: stmxcsr writes only the low 32 bits */
+        __(stmxcsr (%rsp))
+        __(popq %cret)
+        __(ret)
+_endfn
+
+_exportfn(C(save_fp_context))   /* empty stub: no code, falls through — apparently unused on x8664; TODO confirm */
+_endfn
+        
+_exportfn(C(restore_fp_context))        /* empty stub: no code, falls through — apparently unused on x8664; TODO confirm */
+_endfn                        
+
+/*  Atomically store new value (%carg2) in *%carg0, if old value == %carg1. */
+/*  Return actual old value. */
+_exportfn(C(store_conditional))
+	__(mov %carg1,%cret)	/* cmpxchg compares %rax (= expected old value) with memory */
+	__(lock) 
+        __(cmpxchgq %carg2,(%carg0))
+	__(cmovne %carg2,%cret)	/* NOTE(review): on failure this returns %carg2 rather than the value cmpxchg left in %rax, so the "actual old value" claim above only holds on success; callers evidently just test result != %carg1 — confirm */
+	__(ret)	
+_endfn
+
+/*	Atomically store new_value(%carg1) in *%carg0 ;  return previous contents */
+/*	of *%carg0. */
+
+_exportfn(C(atomic_swap))
+	__(lock) 	/* redundant but harmless: xchg with a memory operand is implicitly locked */
+        __(xchg %carg1,(%carg0))
+	__(mov %carg1,%cret)	/* xchg left the previous contents in %carg1 */
+	__(ret)
+_endfn
+
+/*        Logior the value in *%carg0 with the value in %carg1 (presumably a */
+/*	bitmask with exactly 1 bit set.)  Return non-zero if any of */
+/*	the bits in that bitmask were already set. */
+_exportfn(C(atomic_ior))
+0:	__(movq (%carg0),%cret)	/* snapshot the old value into %rax for cmpxchg */
+	__(movq %cret,%carg2)
+	__(orq %carg1,%carg2)	/* proposed new value */
+	__(lock)
+        __(cmpxchg %carg2,(%carg0))
+        __(jnz 0b)	/* lost a race with another writer: retry */
+	__(andq %carg1,%cret)	/* old & mask: nonzero iff some mask bit was already set */
+	__(ret)
+_endfn
+        
+        
+/* Logand the value in *carg0 with the value in carg1 (presumably a bitmask with exactly 1 */
+/* bit set.)  Return the value now in *carg0 (for some value of "now" */
+
+_exportfn(C(atomic_and))
+0:	__(movq (%carg0),%cret)	/* snapshot the old value into %rax for cmpxchg */
+	__(movq %cret,%carg2)
+	__(and %carg1,%carg2)	/* proposed new value */
+	__(lock)
+        __(cmpxchg %carg2,(%carg0))
+        __(jnz 0b)	/* lost a race with another writer: retry */
+	__(movq %carg2,%cret)	/* return the value we stored */
+	__(ret)
+_endfn
+
+
+        __ifdef(`DARWIN')
+_exportfn(C(pseudo_sigreturn))
+        __(hlt)         /* privileged: traps; presumably the exception handler recognizes this PC and performs the sigreturn — confirm */
+        __(jmp C(pseudo_sigreturn))     /* loop in the (unexpected) event we get past the hlt */
+_endfn
+        __endif                        
+
+/* int cpuid (natural code, natural *pebx, natural *pecx, natural *pedx)  */
+_exportfn(C(cpuid))
+	__(pushq %carg2)	/* save pecx (popped last) */
+	__(pushq %carg3)	/* save pedx (popped first) */
+	__(movq %carg1, %ctemp0)	/* pebx */
+	__(pushq %rbx)		/* non-volatile reg, clobbered by CPUID */
+	__(movq %carg0, %rax)	/* leaf number */
+        __(xorq %rcx,%rcx)	/* subleaf 0 */
+	__(cpuid)
+	__(movq %rbx,(%ctemp0))	/* *pebx */
+	__(popq %rbx)
+	__(popq %ctemp0)           /* recover pedx */
+	__(movq %rdx,(%ctemp0))
+	__(popq %ctemp0)		/* recover pecx */
+	__(movq %rcx,(%ctemp0))
+	__(ret)	/* %rax still holds CPUID's EAX output */
+_endfn
+
+/* switch_to_foreign_stack(new_sp, func, arg_0, arg_1, arg_2, arg_3)  */
+/*   Not fully general, but should get us off of the signal stack */
+        __ifndef(`WINDOWS')
+_exportfn(C(switch_to_foreign_stack))
+	__(movq %rdi,%rsp)	/* adopt the new stack */
+	__(movq %rsi,%rax)	/* func */
+	__(movq %rdx,%rdi)	/* shift arg_0..arg_3 down into the SysV argument registers */
+	__(movq %rcx,%rsi)
+	__(movq %r8,%rdx)
+	__(movq %r9,%rcx)
+	__(jmp *%rax)	/* jump, not call: func inherits the new stack as-is */
+_endfn
+        __endif
+        
+_exportfn(C(freebsd_sigreturn))
+	__(movl $417,%eax)	/* SYS_sigreturn */
+	__(syscall)				/* does not return on success; NOTE(review): no _endfn/ret — falls into the next exportfn if the syscall ever fails */
+	
+_exportfn(C(get_vector_registers))      /* empty stub: apparently unimplemented on x8664 — TODO confirm */
+_endfn
+
+_exportfn(C(put_vector_registers))      /* empty stub: apparently unimplemented on x8664 — TODO confirm */
+_endfn				
+        
+	__ifdef(`DARWIN')
+_exportfn(C(darwin_sigreturn))
+        .globl C(sigreturn)
+/* Need to set the sigreturn 'infostyle' argument, which is mostly
+   undocumented.  On x8664 Darwin, sigtramp() sets it to 0x1e, and
+   since we're trying to do what sigtramp() would do if we'd returned
+   to it ... */
+        __(movl $0x1e,%esi)     /* infostyle */
+	__(movl $0x20000b8,%eax)	/* Unix syscall class | 184 (SYS_sigreturn) */
+	__(syscall)
+	__(ret)	/* not normally reached: sigreturn doesn't return */
+_endfn
+	__endif
+
+	
+        
+        __ifdef(`DARWIN_GS_HACK')
+/* Check (in an ugly, non-portable way) to see if %gs is addressing
+   pthreads data.  If it was, return 0; otherwise, assume that it's
+   addressing a lisp tcr and set %gs to point to the tcr's tcr.osid,
+   then return 1. */
+	
+thread_signature = 0x54485244 /* 'THRD' */
+	
+_exportfn(C(ensure_gs_pthread))
+        __(cmpl $thread_signature,%gs:0)        /* pthread data starts with 'THRD' */
+        __(movl $0,%eax)        /* return 0: %gs already addresses pthread data */
+        __(je 9f)
+        __(movq %gs:tcr.osid,%rdi)      /* %gs addressed a tcr: repoint it at tcr.osid */
+        __(movl $0x3000003,%eax)        /* machdep syscall 3 — presumably thread_fast_set_cthread_self; confirm */
+        __(syscall)
+        __(movl $1,%eax)        /* return 1: %gs was repointed */
+9:      __(repret)
+_endfn
+
+        /* Ensure that %gs addresses the linear address in %rdi */
+        /* This incidentally returns the segment selector. */
+_exportfn(C(set_gs_address))
+        __(movl $0x3000003,%eax)        /* machdep syscall 3 — presumably thread_fast_set_cthread_self(%rdi); confirm */
+        __(syscall)
+        __(ret)
+_endfn
+        __endif
+
+        __ifdef(`WIN_64')
+/* %rcx = CONTEXT, %rdx = tcr, %r8 = old_valence.  This pretty
+   much has to be uninterruptible */        
+_exportfn(C(restore_windows_context))
+Xrestore_windows_context_start: 	
+        __(subq $0x38,%rsp)     /* room for an iretq frame (5 quads) plus alignment */
+        __(xorl %eax,%eax)
+        __(movq %r8,tcr.valence(%rdx))  /* restore the previous valence */
+        __(movq %rax,tcr.pending_exception_context(%rdx))       /* clear pending exception context */
+        __(fxrstor win64_context.fpstate(%rcx))         /* x87/SSE state first */
+        __(movapd win64_context.Xmm0(%rcx),%xmm0)
+        __(movapd win64_context.Xmm1(%rcx),%xmm1)
+        __(movapd win64_context.Xmm2(%rcx),%xmm2)
+        __(movapd win64_context.Xmm3(%rcx),%xmm3)
+        __(movapd win64_context.Xmm4(%rcx),%xmm4)
+        __(movapd win64_context.Xmm5(%rcx),%xmm5)
+        __(movapd win64_context.Xmm6(%rcx),%xmm6)
+        __(movapd win64_context.Xmm7(%rcx),%xmm7)
+        __(movapd win64_context.Xmm8(%rcx),%xmm8)
+        __(movapd win64_context.Xmm9(%rcx),%xmm9)
+        __(movapd win64_context.Xmm10(%rcx),%xmm10)
+        __(movapd win64_context.Xmm11(%rcx),%xmm11)
+        __(movapd win64_context.Xmm12(%rcx),%xmm12)
+        __(movapd win64_context.Xmm13(%rcx),%xmm13)
+        __(movapd win64_context.Xmm14(%rcx),%xmm14)
+        __(movapd win64_context.Xmm15(%rcx),%xmm15)
+        __(ldmxcsr win64_context.MxCsr(%rcx))
+        __(movw win64_context.SegSs(%rcx),%ax)  /* build the iretq frame: SS,RSP,RFLAGS,CS,RIP at 0x20..0(%rsp) */
+        __(movw %ax,0x20(%rsp))
+        __(movq win64_context.Rsp(%rcx),%rax)
+        __(movq %rax,0x18(%rsp))
+        __(movl win64_context.EFlags(%rcx),%eax)
+        __(movl %eax,0x10(%rsp))
+        __(movw win64_context.SegCs(%rcx),%ax)
+        __(movw %ax,8(%rsp))
+        __(movq win64_context.Rip(%rcx),%rax)
+        __(movq %rax,(%rsp))
+        __(movq win64_context.Rax(%rcx),%rax)   /* now the GPRs (all but %rcx) */
+        __(movq win64_context.Rbx(%rcx),%rbx)
+        __(movq win64_context.Rdx(%rcx),%rdx)
+        __(movq win64_context.Rdi(%rcx),%rdi)
+        __(movq win64_context.Rsi(%rcx),%rsi)
+        __(movq win64_context.Rbp(%rcx),%rbp)
+        __(movq win64_context.R8(%rcx),%r8)
+        __(movq win64_context.R9(%rcx),%r9)
+        __(movq win64_context.R10(%rcx),%r10)
+        __(movq win64_context.R11(%rcx),%r11)
+        __(movq win64_context.R12(%rcx),%r12)
+        __(movq win64_context.R13(%rcx),%r13)
+        __(movq win64_context.R14(%rcx),%r14)
+        __(movq win64_context.R15(%rcx),%r15)
+        /* This must be the last thing before the iret, e.g., if we're
+        interrupted before the iret, the context we're returning to here
+        is still in %rcx.  If we're interrupted -at- the iret, then
+        everything but that which the iret will restore has been restored. */
+        __(movq win64_context.Rcx(%rcx),%rcx)
+Xrestore_windows_context_iret:            
+        __(iretq)
+Xrestore_windows_context_end:             
+        __(nop)
+_endfn
+	
+_exportfn(C(windows_switch_to_foreign_stack))   /* (%rcx = new sp, %rdx = handler, %r8 = arg) */
+        __(pop %rax)    /* return address */
+        __(lea -0x20(%rcx),%rsp)        /* new stack, with the Win64 32-byte home space reserved */
+        __(push %rax)   /* so handler's ret goes back to our caller */
+        __(movq %r8,%rcx)       /* arg -> first Win64 argument register */
+        __(jmp *%rdx)
+_endfn        
+
+        .data           /* C-visible pointers bounding the context-restore code above */
+        .globl C(restore_windows_context_start)
+        .globl C(restore_windows_context_end)
+        .globl C(restore_windows_context_iret)
+C(restore_windows_context_start):  .quad Xrestore_windows_context_start
+C(restore_windows_context_end): .quad Xrestore_windows_context_end
+C(restore_windows_context_iret): .quad Xrestore_windows_context_iret
+        .text
+
+/* Something that we shouldn't return to */
+_exportfn(C(windows_halt))
+        __(hlt)         /* privileged instruction: faults immediately if ever reached */
+_endfn         
+_exportfn(C(ensure_safe_for_string_operations))
+        __(cld)         /* clear DF: rep-string instructions must ascend */
+        __(ret)
+_endfn                                       
+        __endif
+	_endfile
Index: /branches/arm/lisp-kernel/x86-constants.h
===================================================================
--- /branches/arm/lisp-kernel/x86-constants.h	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-constants.h	(revision 13357)
@@ -0,0 +1,63 @@
+/*
+   Copyright (C) 2005-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef __x86_constants__
+#define __x86_constants__ 1
+
+#define TCR_FLAG_BIT_FOREIGN fixnumshift
+#define TCR_FLAG_BIT_AWAITING_PRESET (fixnumshift+1)
+#define TCR_FLAG_BIT_ALT_SUSPEND (fixnumshift+2)
+#define TCR_FLAG_BIT_PROPAGATE_EXCEPTION (fixnumshift+3)
+#define TCR_FLAG_BIT_SUSPEND_ACK_PENDING (fixnumshift+4)
+#define TCR_FLAG_BIT_PENDING_EXCEPTION (fixnumshift+5)
+#define TCR_FLAG_BIT_FOREIGN_EXCEPTION (fixnumshift+6)
+#define TCR_FLAG_BIT_PENDING_SUSPEND (fixnumshift+7)
+#define TCR_STATE_FOREIGN (1)
+#define TCR_STATE_LISP    (0)
+#define TCR_STATE_EXCEPTION_WAIT (2)
+#define TCR_STATE_EXCEPTION_RETURN (4)
+
+#ifdef X8664
+#include "x86-constants64.h"
+#else
+#include "x86-constants32.h"
+#endif
+
+#define dnode_size (node_size*2)
+#define dnode_shift (node_shift+1)
+
+#define INTERRUPT_LEVEL_BINDING_INDEX (1)
+
+/* FP exception mask bits */
+#define MXCSR_IM_BIT (7)        /* invalid masked when set */
+#define MXCSR_DM_BIT (8)        /* denormals masked when set */
+#define MXCSR_ZM_BIT (9)        /* divide-by-zero masked when set */
+#define MXCSR_OM_BIT (10)       /* overflow masked when set */
+#define MXCSR_UM_BIT (11)       /* underflow masked when set */
+#define MXCSR_PM_BIT (12)       /* precision masked when set */
+
+/* Bits in the xFLAGS register */
+#define X86_CARRY_FLAG_BIT (0)
+#define X86_PARITY_FLAG_BIT (2)
+#define X86_AUX_CARRY_FLAG_BIT (4)
+#define X86_ZERO_FLAG_BIT (6)
+#define X86_SIGN_FLAG_BIT (7)
+#define X86_DIRECTION_FLAG_BIT (10)
+#define X86_OVERFLOW_FLAG_BIT (11)
+
+
+#endif /* __x86_constants__ */
+
Index: /branches/arm/lisp-kernel/x86-constants.s
===================================================================
--- /branches/arm/lisp-kernel/x86-constants.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-constants.s	(revision 13357)
@@ -0,0 +1,140 @@
+/*   Copyright (C) 2005-2009 Clozure Associates  */
+/*   This file is part of Clozure CL.    */
+ 
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+ 
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+ 
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+
+
+        
+/* Indices in %builtin-functions%  */
+	
+_builtin_plus = 0	/* +-2   */
+_builtin_minus = 1	/* --2   */
+_builtin_times = 2	/* *-2   */
+_builtin_div = 3	/* /-2   */
+_builtin_eq = 4		/* =-2   */
+_builtin_ne = 5		/* /=-2   */
+_builtin_gt = 6		/* >-2   */
+_builtin_ge = 7		/* >=-2   */
+_builtin_lt = 8		/* <-2   */
+_builtin_le = 9		/* <=-2   */
+_builtin_eql = 10	/* eql   */
+_builtin_length = 11	/* length   */
+_builtin_seqtype = 12	/* sequence-type   */
+_builtin_assq = 13	/* assq   */
+_builtin_memq = 14	/* memq   */
+_builtin_logbitp = 15	/* logbitp   */
+_builtin_logior = 16	/* logior-2   */
+_builtin_logand = 17	/* logand-2   */
+_builtin_ash = 18	/* ash   */
+_builtin_negate = 19	/* %negate   */
+_builtin_logxor = 20	/* logxor-2   */
+_builtin_aref1 = 21	/* %aref1   */
+_builtin_aset1 = 22	/* %aset1   */
+	
+
+ifdef(`X8664',`
+	include(x86-constants64.s)
+',`
+	include(x86-constants32.s)
+')						
+
+/* registers, as used in destructuring-bind/macro-bind   */
+ifdef(`X8664',`
+define(`whole_reg',`temp1')
+define(`arg_reg',`temp0')
+define(`keyvect_reg',`arg_x')
+',`
+define(`arg_reg',`temp1')
+define(`arg_reg_b',`temp1_b')
+define(`keyvect_reg',`arg_y')
+')
+
+define(`initopt_bit',`24')
+define(`keyp_bit',`25') /*  note that keyp can be true even when 0 keys.   */
+define(`aok_bit',`26')
+define(`restp_bit',`27')
+define(`seen_aok_bit',`28')        
+        
+num_lisp_globals = 49		 /* MUST UPDATE THIS !!!   */
+	
+	_struct(lisp_globals,lisp_globals_limit-(num_lisp_globals*node_size))
+	 _node(weakvll)                 /* all populations as of last GC */
+	 _node(initial_tcr)	        /* initial thread tcr */
+	 _node(image_name)	        /* --image-name argument */
+	 _node(BADfpscr_save_high)      /* high word of FP reg used to save FPSCR */
+	 _node(unwind_resume)           /* _Unwind_Resume */
+	 _node(batch_flag)	        /* -b */
+	 _node(host_platform)	        /* for runtime platform-specific stuff   */
+	 _node(argv)			/* address of argv`0'   */
+	 _node(ref_base)                /* start of oldest pointer-bearing area */
+	 _node(tenured_area) 		/* the tenured_area   */
+	 _node(oldest_ephemeral) 	/* dword address of oldest ephemeral object or 0   */
+	 _node(lisp_exit_hook)		/* install foreign exception_handling   */
+	 _node(lisp_return_hook)	/* install lisp exception_handling   */
+	 _node(double_float_one) 	/* high half of 1.0d0   */
+	 _node(short_float_zero) 	/* low half of 1.0d0   */
+	 _node(objc2_end_catch) 	/* objc_end_catch()  */
+	 _node(metering_info) 		/* address of lisp_metering global   */
+	 _node(in_gc) 			/* non-zero when GC active   */
+	 _node(lexpr_return1v) 		/* simpler when &lexpr called for single value.   */
+	 _node(lexpr_return) 		/* magic &lexpr return code.   */
+	 _node(all_areas) 		/* doubly-linked list of all memory areas   */
+	 _node(kernel_path)	 	/* real executable name */
+	 _node(objc2_begin_catch)	/* objc_begin_catch   */
+	 _node(stack_size) 		/* from the command line */
+	 _node(statically_linked)	/* non-zero if -static   */
+	 _node(heap_end)                /* end of lisp heap   */
+	 _node(heap_start)              /* start of lisp heap   */
+	 _node(gcable_pointers)         /* linked-list of weak macptrs.   */
+	 _node(gc_num)                  /* fixnum: GC call count.   */
+	 _node(fwdnum)                  /* fixnum: GC "forwarder" call count.   */
+	 _node(altivec_present)         /* non-zero when AltiVec available   */
+	 _node(oldspace_dnode_count) 	/* dynamic dnodes older than g0 start   */
+	 _node(refbits) 		/* EGC refbits   */
+	 _node(gc_inhibit_count)
+	 _node(intflag) 		/* sigint pending   */
+	 _node(default_allocation_quantum)	/* for per-thread allocation   */
+	 _node(deleted_static_pairs) 		
+	 _node(exception_lock)
+	 _node(area_lock)
+	 _node(tcr_key) 		/* tsd key for per-thread tcr   */
+	 _node(ret1val_addr) 		/* address of "dynamic" subprims magic values return addr   */
+	 _node(subprims_base) 		/* address of dynamic subprims jump table   */
+	 _node(saveR13)			/* probably don't really need this   */
+	 _node(saveTOC)                 /* where the 68K emulator stores the  emulated regs   */
+	 _node(objc_2_personality)		/* exception "personality routine" address for ObjC 2.0 */
+	 _node(kernel_imports) 		/* some things we need imported for us   */
+	 _node(interrupt_signal)	/* signal used by PROCESS-INTERRUPT   */
+	 _node(tcr_count) 		/* tcr_id for next tcr   */
+	 _node(get_tcr) 		/* address of get_tcr()  */
+	_ends
+	
+	
+		
+define(`TCR_STATE_FOREIGN',1)
+define(`TCR_STATE_LISP',0)
+define(`TCR_STATE_EXCEPTION_WAIT',2)
+define(`TCR_STATE_EXCEPTION_RETURN',4)
+
+tstack_alloc_limit = 0xffff
+	
+mxcsr_ie_bit = 0                /* invalid */
+mxcsr_de_bit = 1                /* denorm */        
+mxcsr_ze_bit = 2
+mxcsr_oe_bit = 3
+mxcsr_ue_bit = 4
+mxcsr_pe_bit = 5
+num_mxcsr_exception_bits = 6
+        
+mxcsr_all_exceptions = ((1<<num_mxcsr_exception_bits)-1)
+        
Index: /branches/arm/lisp-kernel/x86-constants32.h
===================================================================
--- /branches/arm/lisp-kernel/x86-constants32.h	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-constants32.h	(revision 13357)
@@ -0,0 +1,499 @@
+/* offsets into uc_mcontext.ss */
+#ifdef DARWIN
+#define REG_EAX 0
+#define REG_EBX 1
+#define REG_ECX 2
+#define REG_EDX 3
+#define REG_EDI 4
+#define REG_ESI 5
+#define REG_EBP 6
+#define REG_ESP 7
+#define REG_EFL 9
+#define REG_EIP 10
+#endif
+
+#ifdef WINDOWS
+/* Offsets relative to _CONTEXT.Edi */
+#define REG_EDI 0
+#define REG_ESI 1
+#define REG_EBX 2
+#define REG_EDX 3
+#define REG_ECX 4
+#define REG_EAX 5
+#define REG_EBP 6
+#define REG_EIP 7
+#define REG_EFL 9
+#define REG_ESP 10
+#endif
+
+#ifdef FREEBSD
+#define REG_EDI 5
+#define REG_ESI 6
+#define REG_EBP 7
+#define REG_ISP 8
+#define REG_EBX 9
+#define REG_EDX 10
+#define REG_ECX 11
+#define REG_EAX 12
+#define REG_EIP 15
+#define REG_EFL 17
+#define REG_ESP 18
+#endif
+
+#ifdef SOLARIS
+#include <sys/regset.h>
+#include <limits.h>
+#define REG_EAX EAX
+#define REG_EBX EBX
+#define REG_ECX ECX
+#define REG_EDX EDX
+#define REG_ESI ESI
+#define REG_EDI EDI
+#define REG_EBP EBP
+#define REG_ESP UESP    /* Maybe ... ESP is often 0, but who knows why ? */
+#define REG_EFL EFL
+#define REG_EIP EIP
+#endif
+
+/* Indices of GPRs in the mcontext component of a ucontext */
+#define Iimm0  REG_EAX
+#define Iarg_z REG_EBX
+#define Itemp0 REG_ECX
+#define Itemp1 REG_EDX
+#define Ifn    REG_EDI
+#define Iarg_y REG_ESI
+#define Iesp   REG_ESP
+#define Iebp   REG_EBP
+#define Ieip   REG_EIP
+#define Iflags REG_EFL
+
+#define Isp Iesp
+#define Iip Ieip
+#define Iallocptr Itemp0
+#define Ira0 Itemp0
+#define Inargs Itemp1
+#define Ixfn Itemp1
+#define Ifp Iebp
+
+/* MMX register offsets from where mm0 is found in uc_mcontext.fs */
+#define Imm0 0
+#define Imm1 1
+
+#define nbits_in_word 32
+#define log2_nbits_in_word 5
+#define nbits_in_byte 8
+#define ntagbits 3
+#define nlisptagbits 2
+#define nfixnumtagbits 2
+#define num_subtag_bits 8
+#define fixnumshift 2
+#define fixnum_shift 2
+#define fulltagmask 7
+#define tagmask  3
+#define fixnummask 3
+#define subtagmask ((1<<num_subtag_bits)-1)
+#define ncharcodebits 8
+#define charcode_shift 8
+#define node_size 4
+#define node_shift 2
+#define nargregs 2
+
+#define tag_fixnum 0
+#define tag_list 1
+#define tag_misc 2
+#define tag_imm 3
+
+#define fulltag_even_fixnum 0
+#define fulltag_cons 1
+#define fulltag_nodeheader 2
+#define fulltag_imm 3
+#define fulltag_odd_fixnum 4
+#define fulltag_tra 5
+#define fulltag_misc 6
+#define fulltag_immheader 7
+
+#define SUBTAG(tag,subtag) ((tag) | ((subtag) << ntagbits))
+#define IMM_SUBTAG(subtag) SUBTAG(fulltag_immheader,(subtag))
+#define NODE_SUBTAG(subtag) SUBTAG(fulltag_nodeheader,(subtag))
+
+#define subtag_bignum IMM_SUBTAG(0)
+#define min_numeric_subtag subtag_bignum
+#define subtag_ratio NODE_SUBTAG(1)
+#define max_rational_subtag subtag_ratio
+#define subtag_single_float IMM_SUBTAG(1)
+#define subtag_double_float IMM_SUBTAG(2)
+#define min_float_subtag subtag_single_float
+#define max_float_subtag subtag_double_float
+#define max_real_subtag subtag_double_float
+#define subtag_complex NODE_SUBTAG(3)
+#define max_numeric_subtag subtag_complex
+
+#define subtag_bit_vector IMM_SUBTAG(31)
+#define subtag_double_float_vector IMM_SUBTAG(30)
+#define subtag_s16_vector IMM_SUBTAG(29)
+#define subtag_u16_vector IMM_SUBTAG(28)
+#define min_16_bit_ivector_subtag subtag_u16_vector
+#define max_16_bit_ivector_subtag subtag_s16_vector
+
+/* subtag 27 unused*/
+#define subtag_s8_vector IMM_SUBTAG(26)
+#define subtag_u8_vector IMM_SUBTAG(25)
+#define min_8_bit_ivector_subtag subtag_u8_vector
+#define max_8_bit_ivector_subtag IMM_SUBTAG(27)
+
+#define subtag_simple_base_string IMM_SUBTAG(24)
+#define subtag_fixnum_vector IMM_SUBTAG(23)
+#define subtag_s32_vector IMM_SUBTAG(22)
+#define subtag_u32_vector IMM_SUBTAG(21)
+#define subtag_single_float_vector IMM_SUBTAG(20)
+#define max_32_bit_ivector_subtag IMM_SUBTAG(24)
+#define min_cl_ivector_subtag subtag_single_float_vector
+
+#define subtag_vectorH NODE_SUBTAG(20)
+#define subtag_arrayH NODE_SUBTAG(19)
+#define subtag_simple_vector NODE_SUBTAG(21)    /*  Only one such subtag */
+#define min_vector_subtag subtag_vectorH
+#define min_array_subtag subtag_arrayH
+
+#define subtag_macptr IMM_SUBTAG(3)
+#define min_non_numeric_imm_subtag subtag_macptr
+
+#define subtag_dead_macptr IMM_SUBTAG(4)
+#define subtag_code_vector IMM_SUBTAG(5)
+#define subtag_creole IMM_SUBTAG(6)
+
+#define max_non_array_imm_subtag ((19<<ntagbits)|fulltag_immheader)
+
+#define subtag_catch_frame NODE_SUBTAG(4)
+#define subtag_function NODE_SUBTAG(5)
+#define subtag_basic_stream NODE_SUBTAG(6)
+#define subtag_symbol NODE_SUBTAG(7)
+#define subtag_lock NODE_SUBTAG(8)
+#define subtag_hash_vector NODE_SUBTAG(9)
+#define subtag_pool NODE_SUBTAG(10)
+#define subtag_weak NODE_SUBTAG(11)
+#define subtag_package NODE_SUBTAG(12)
+#define subtag_slot_vector NODE_SUBTAG(13)
+#define subtag_instance NODE_SUBTAG(14)
+#define subtag_struct NODE_SUBTAG(15)
+#define subtag_istruct NODE_SUBTAG(16)
+#define max_non_array_node_subtag ((19<<ntagbits)|fulltag_immheader)
+
+#define subtag_unbound SUBTAG(fulltag_imm, 6)
+#define unbound_marker subtag_unbound
+#define undefined subtag_unbound
+#define unbound subtag_unbound
+#define subtag_character SUBTAG(fulltag_imm, 9)
+#define slot_unbound SUBTAG(fulltag_imm, 10)
+#define slot_unbound_marker slot_unbound
+#define subtag_illegal SUBTAG(fulltag_imm,11)
+#define illegal_marker subtag_illegal
+#define subtag_forward_marker SUBTAG(fulltag_imm,28)
+#define subtag_reserved_frame  SUBTAG(fulltag_imm,29)
+#define reserved_frame_marker subtag_reserved_frame
+#define subtag_no_thread_local_binding SUBTAG(fulltag_imm,30)
+#define no_thread_local_binding_marker subtag_no_thread_local_binding
+#define subtag_function_boundary_marker SUBTAG(fulltag_imm,31)
+#define function_boundary_marker subtag_function_boundary_marker
+
+typedef struct cons {
+    LispObj cdr;                /* NB: cdr is at offset 0, car above it */
+    LispObj car;
+} cons;
+
+typedef struct lispsymbol {
+    LispObj header;
+    LispObj pname;
+    LispObj vcell;
+    LispObj fcell;
+    LispObj package_predicate;
+    LispObj flags;
+    LispObj plist;
+    LispObj binding_index;
+} lispsymbol;
+
+typedef struct ratio {
+    LispObj header;
+    LispObj numer;
+    LispObj denom;
+} ratio;
+
+typedef struct double_float {
+    LispObj header;
+    LispObj pad;                /* presumably keeps value_low/value_high 8-byte aligned — confirm */
+    LispObj value_low;
+    LispObj value_high;
+} double_float;
+
+typedef struct single_float {
+    LispObj header;
+    LispObj value;
+} single_float;
+
+typedef struct macptr {
+    LispObj header;
+    LispObj address;
+    LispObj class;
+    LispObj type;
+} macptr;
+
+typedef struct xmacptr {
+    LispObj header;
+    LispObj address;
+    LispObj class;
+    LispObj type;
+    LispObj flags;
+    LispObj link;
+} xmacptr;
+
+typedef struct special_binding {
+    struct special_binding *link;
+    struct lispsymbol *sym;
+    LispObj value;
+} special_binding;
+
+typedef struct lisp_frame {
+    struct lisp_frame *backlink;
+    LispObj tra;
+    LispObj xtra;		/* if tra is nvalretn */
+} lisp_frame;
+
+typedef struct exception_callback_frame {
+    struct lisp_frame *backlink;
+    LispObj tra;		/* ALWAYS 0 FOR AN XCF */
+    LispObj nominal_function;   /* the current function at the time of the exception */
+    LispObj relative_pc;        /* Boxed byte offset within actual function or absolute address */
+    LispObj containing_uvector;	/* the uvector that contains the relative PC or NIL */
+    LispObj xp;			/* exception context */
+    LispObj ra0;		/* value of ra0 from context */
+    LispObj foreign_sp;		/* foreign sp at the time that exception occurred */
+    LispObj prev_xframe;	/* so %apply-in-frame can unwind it */
+} xcf;
+
+/* The GC (at least) needs to know what a
+   package looks like, so that it can do GCTWA. */
+typedef struct package {
+    LispObj header;
+    LispObj itab; 		/* itab and etab look like (vector (fixnum . fixnum) */
+    LispObj etab;
+    LispObj used;
+    LispObj used_by;
+    LispObj names;
+    LispObj shadowed;
+} package;
+
+typedef struct catch_frame {
+    LispObj header;
+    LispObj catch_tag;
+    LispObj link;
+    LispObj mvflag;
+    LispObj esp;
+    LispObj ebp;
+    LispObj foreign_sp;
+    LispObj db_link;
+    LispObj xframe;
+    LispObj pc;
+} catch_frame;
+
+#define catch_frame_element_count ((sizeof(catch_frame)/sizeof(LispObj))-1)
+#define catch_frame_header make_header(subtag_catch_frame,catch_frame_element_count)
+
+/* 
+   All exception frames in a thread are linked together 
+ */
+typedef struct xframe_list {
+  ExceptionInformation *curr;
+  natural node_regs_mask;
+  struct xframe_list *prev;
+} xframe_list;
+
+#define fixnum_bitmask(n)  (1<<((n)+fixnumshift))
+
+/* 
+  The GC (at least) needs to know about hash-table-vectors and their flag bits.
+*/
+
+typedef struct hash_table_vector_header {
+  LispObj header;
+  LispObj link;                 /* If weak */
+  LispObj flags;                /* a fixnum; see below */
+  LispObj gc_count;             /* gc-count kernel global */
+  LispObj free_alist;           /* preallocated conses for finalization_alist */
+  LispObj finalization_alist;   /* key/value alist for finalization */
+  LispObj weak_deletions_count; /* incremented when GC deletes weak pair */
+  LispObj hash;                 /* backpointer to hash-table */
+  LispObj deleted_count;        /* number of deleted entries [not maintained if lock-free] */
+  LispObj count;                /* number of valid entries [not maintained if lock-free] */
+  LispObj cache_idx;            /* index of last cached pair */
+  LispObj cache_key;            /* value of last cached key */
+  LispObj cache_value;          /* last cached value */
+  LispObj size;                 /* number of entries in table */
+  LispObj size_reciprocal;      /* shifted reciprocal of size */
+} hash_table_vector_header;
+
+/*
+  Bits (masks) in hash_table_vector.flags:
+*/
+
+/* GC should track keys when addresses change */ 
+#define nhash_track_keys_mask fixnum_bitmask(28) 
+
+/* GC should set when nhash_track_keys_bit & addresses change */
+#define nhash_key_moved_mask  fixnum_bitmask(27) 
+
+/* weak on key or value (need new "weak both" encoding.) */
+#define nhash_weak_mask       fixnum_bitmask(12)
+
+/* weak on value */
+#define nhash_weak_value_mask fixnum_bitmask(11)
+
+/* finalizable */
+#define nhash_finalizable_mask fixnum_bitmask(10)
+
+/* keys frozen, i.e. don't clobber keys, only values */
+#define nhash_keys_frozen_mask fixnum_bitmask(9)
+
+/* Lfun bits */
+
+#define lfbits_nonnullenv_mask fixnum_bitmask(0)
+#define lfbits_keys_mask fixnum_bitmask(1)
+#define lfbits_restv_mask fixnum_bitmask(7)
+#define lfbits_optinit_mask fixnum_bitmask(14)
+#define lfbits_rest_mask fixnum_bitmask(15)
+#define lfbits_aok_mask fixnum_bitmask(16)
+#define lfbits_lap_mask fixnum_bitmask(23)
+#define lfbits_trampoline_mask fixnum_bitmask(24)
+#define lfbits_evaluated_mask fixnum_bitmask(25)
+#define lfbits_cm_mask fixnum_bitmask(26)         /* combined_method */
+#define lfbits_nextmeth_mask fixnum_bitmask(26)   /* or call_next_method with method_mask */
+#define lfbits_gfn_mask fixnum_bitmask(27)        /* generic_function */
+#define lfbits_nextmeth_with_args_mask fixnum_bitmask(27)   /* or call_next_method_with_args with method_mask */
+#define lfbits_method_mask fixnum_bitmask(28)     /* method function */
+/* PPC only but want it defined for xcompile */
+#define lfbits_noname_mask fixnum_bitmask(29)
+
+
+/* Creole */
+
+#define doh_quantum 400
+#define doh_block_slots ((doh_quantum >> 2) - 3)
+
+typedef struct doh_block {
+  struct doh_block *link;
+  unsigned size;
+  unsigned free;
+  LispObj data[doh_block_slots];
+} doh_block, *doh_block_ptr;
+
+#define population_weak_list (0<<fixnum_shift)
+#define population_weak_alist (1<<fixnum_shift)
+#define population_termination_bit (16+fixnum_shift)
+#define population_type_mask ((1<<population_termination_bit)-1)
+
+#define gc_retain_pages_bit fixnum_bitmask(0)
+#define gc_integrity_check_bit fixnum_bitmask(2)
+#define egc_verbose_bit fixnum_bitmask(3)
+#define gc_verbose_bit fixnum_bitmask(4)
+#define gc_allow_stack_overflows_bit fixnum_bitmask(5)
+#define gc_postgc_pending fixnum_bitmask(26)
+
+#include "lisp-errors.h"
+
+#ifdef DARWIN
+#include <architecture/i386/sel.h>
+#else
+typedef unsigned short sel_t;   /* for now */
+#endif
+
+#define TCR_BIAS 0
+
+/*
+ * bits correspond to reg encoding used in instructions
+ *   7   6   5   4   3   2   1   0
+ *  edi esi ebp esp ebx edx ecx eax
+ */
+
+#define X8632_DEFAULT_NODE_REGS_MASK 0xce
+
+typedef struct tcr {
+  struct tcr *next;
+  struct tcr *prev;
+  natural node_regs_mask; /* bit set means corresponding reg contains node */
+  struct tcr *linear;
+  /* this spill area must be 16-byte aligned */
+  LispObj save0;		/* spill area for node registers */
+  LispObj save1;
+  LispObj save2;
+  LispObj save3;
+  LispObj *save_fp;		/* EBP when in foreign code */
+  u32_t lisp_mxcsr;
+  u32_t foreign_mxcsr;
+  special_binding *db_link;     /* special binding chain head */
+  LispObj catch_top;            /* top catch frame */
+  LispObj *save_vsp;		  /* VSP when in foreign code */
+  LispObj *save_tsp;		  /* TSP when in foreign code */
+  LispObj *foreign_sp;
+  struct area *cs_area;		/* cstack area pointer */
+  struct area *vs_area;		/* vstack area pointer */
+  struct area *ts_area;		/* tstack area pointer */
+  LispObj cs_limit;			/* stack overflow limit */
+  natural bytes_allocated;
+  natural bytes_consed_high;
+  natural log2_allocation_quantum;      /* for per-thread consing */
+  signed_natural interrupt_pending;     /* pending interrupt flag */
+  xframe_list *xframe;	  /* exception-frame linked list */
+  int *errno_loc;               /* per-thread (?) errno location */
+  LispObj ffi_exception;        /* fpscr bits from ff-call */
+  LispObj osid;                 /* OS thread id */
+  signed_natural valence;	  /* odd when in foreign code */
+  signed_natural foreign_exception_status; /* non-zero -> call lisp_exit_hook */
+  void *native_thread_info;		     /* platform-dependent */
+  void *native_thread_id;	/* mach_thread_t, pid_t, etc. */
+  void *last_allocptr;
+  void *save_allocptr;
+  void *save_allocbase;
+  void *reset_completion;
+  void *activate;
+  signed_natural suspend_count;
+  ExceptionInformation *suspend_context;
+  ExceptionInformation *pending_exception_context;
+  void *suspend;                /* suspension semaphore */
+  void *resume;                 /* resumption semaphore */
+  natural flags;
+  ExceptionInformation *gc_context;
+  void *termination_semaphore;
+  signed_natural unwinding;
+  natural tlb_limit;
+  LispObj *tlb_pointer;
+  natural shutdown_count;
+  LispObj *next_tsp;
+  void *safe_ref_address;
+  sel_t ldt_selector;
+  natural scratch_mxcsr;
+  natural unboxed0;
+  natural unboxed1;
+  LispObj next_method_context; /* used in lieu of register */
+  natural save_eflags;
+  void *allocated;
+  void *pending_io_info;
+  void *io_datum;
+} TCR;
+
+#define nil_value ((0x13000 + (fulltag_cons))+(LOWMEM_BIAS))
+#define t_value ((0x13008 + (fulltag_misc))+(LOWMEM_BIAS))
+#define t_offset (t_value-nil_value)
+#define misc_header_offset -fulltag_misc
+#define misc_data_offset misc_header_offset + node_size
+
+typedef struct {
+  natural Eip;
+  natural Cs;                   /* in low 16 bits */
+  natural EFlags;
+} ia32_iret_frame;
+
+#define heap_segment_size 0x00010000
+#define log2_heap_segment_size 16
+
+#ifndef EFL_DF
+#define EFL_DF 1024
+#endif
Index: /branches/arm/lisp-kernel/x86-constants32.s
===================================================================
--- /branches/arm/lisp-kernel/x86-constants32.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-constants32.s	(revision 13357)
@@ -0,0 +1,626 @@
+define(`eax_l',`eax')
+define(`ecx_l',`ecx')
+define(`edx_l',`edx')
+define(`ebx_l',`ebx')
+define(`esi_l',`esi')
+define(`edi_l',`edi')
+
+define(`eax_b',`al')
+define(`ecx_b',`cl')
+define(`edx_b',`dl')
+define(`ebx_b',`bl')
+
+define(`imm0',`eax')
+	define(`imm0_l',`eax')
+	define(`imm0_w',`ax')
+	define(`imm0_b',`al')
+	define(`imm0_bh',`ah')
+	define(`Rimm0',`0')
+
+define(`temp0',`ecx')
+	define(`temp0_l',`ecx')
+	define(`temp0_w',`cx')
+	define(`temp0_b',`cl')
+	define(`temp0_bh',`ch')
+	define(`Rtemp0',`1')
+
+define(`temp1',`edx')
+	define(`temp1_l',`edx')
+	define(`temp1_w',`dx')
+	define(`temp1_b',`dl')
+	define(`temp1_bh',`dh')
+	define(`Rtemp1',`2')
+
+define(`arg_z',`ebx')
+	define(`arg_z_l',`ebx')
+	define(`arg_z_w',`bx')
+	define(`arg_z_b',`bl')
+	define(`arg_z_bh',`bh')
+	define(`Rarg_z',`3')
+
+define(`arg_y',`esi')
+	define(`Rarg_y',`6')
+
+define(`fn',`edi')
+	define(`Rfn',`7')
+
+define(`rcontext_reg',`fs')
+	
+        ifdef(`WINDOWS',`
+undefine(`rcontext_reg')        
+define(`rcontext_reg',`es')
+        ')
+                
+define(`rcontext',`%rcontext_reg:$1')
+
+define(`fname',`temp0')
+define(`allocptr',`temp0')
+
+define(`nargs',`temp1')
+define(`nargs_w',`temp1_w')
+
+define(`ra0',`temp0')
+define(`xfn',`temp1')
+
+define(`allocptr',`temp0')
+define(`stack_temp',`mm7')
+
+define(`fp0',`xmm0')		
+define(`fp1',`xmm1')		
+define(`fp2',`xmm2')		
+define(`fp3',`xmm3')		
+define(`fp4',`xmm4')		
+define(`fp5',`xmm5')		
+define(`fp6',`xmm6')		
+define(`fp7',`xmm7')		
+define(`fpzero',`fp7')
+
+nbits_in_word = 32
+nbits_in_byte = 8
+ntagbits = 3
+nlisptagbits = 2
+nfixnumtagbits = 2
+num_subtag_bits = 8
+subtag_shift = num_subtag_bits
+fixnumshift = 2
+fixnum_shift = 2
+fulltagmask = 7
+tagmask = 3
+fixnummask = 3
+ncharcodebits = 8
+charcode_shift = 8
+word_shift = 2
+node_size = 4
+dnode_size = 8
+dnode_align_bits = 3
+dnode_shift = dnode_align_bits        
+bitmap_shift = 5
+
+fixnumone = (1<<fixnumshift)
+fixnum_one = fixnumone
+fixnum1 = fixnumone
+
+nargregs = 2
+
+tag_fixnum = 0
+tag_list = 1
+tag_misc = 2
+tag_imm = 3
+
+fulltag_even_fixnum = 0
+fulltag_cons = 1
+fulltag_nodeheader = 2
+fulltag_imm = 3
+fulltag_odd_fixnum = 4
+fulltag_tra = 5
+fulltag_misc = 6
+fulltag_immheader = 7
+
+define(`define_subtag',`subtag_$1 = ($2 | ($3 << ntagbits))')
+define(`define_imm_subtag',`define_subtag($1,fulltag_immheader,$2)')
+define(`define_node_subtag',`define_subtag($1,fulltag_nodeheader,$2)')
+
+define_imm_subtag(bignum,0)
+min_numeric_subtag = subtag_bignum
+define_node_subtag(ratio,1)
+max_rational_subtag = subtag_ratio
+define_imm_subtag(single_float,1)
+define_imm_subtag(double_float,2)
+min_float_subtag = subtag_single_float
+max_float_subtag = subtag_double_float
+max_real_subtag = subtag_double_float
+define_node_subtag(complex,3)
+max_numeric_subtag = subtag_complex
+
+define_imm_subtag(bit_vector,31)
+define_imm_subtag(double_float_vector,30)
+define_imm_subtag(s16_vector,29)
+define_imm_subtag(u16_vector,28)
+min_16_bit_ivector_subtag = subtag_u16_vector
+max_16_bit_ivector_subtag = subtag_s16_vector
+define_imm_subtag(s8_vector,26)
+define_imm_subtag(u8_vector,25)
+min_8_bit_ivector_subtag = subtag_u8_vector
+max_8_bit_ivector_subtag = fulltag_immheader|(27<<ntagbits)
+define_imm_subtag(simple_base_string,24)
+define_imm_subtag(fixnum_vector,23)
+define_imm_subtag(s32_vector,22)
+define_imm_subtag(u32_vector,21)
+define_imm_subtag(single_float_vector,20)
+max_32_bit_ivector_subtag = fulltag_immheader|(24<<ntagbits)
+min_cl_ivector_subtag = subtag_single_float_vector
+
+define_node_subtag(arrayH,19)
+define_node_subtag(vectorH,20)
+define_node_subtag(simple_vector,21)
+min_vector_subtag = subtag_vectorH
+min_array_subtag = subtag_arrayH
+
+define_imm_subtag(macptr,3)
+min_non_numeric_imm_subtag = subtag_macptr
+define_imm_subtag(dead_macptr,4)
+define_imm_subtag(xcode_vector,7)
+
+define_subtag(unbound,fulltag_imm,6)
+unbound_marker = subtag_unbound
+undefined = unbound_marker
+define_subtag(character,fulltag_imm,9)
+define_subtag(slot_unbound,fulltag_imm,10)
+slot_unbound_marker = subtag_slot_unbound
+define_subtag(illegal,fulltag_imm,11)
+illegal = subtag_illegal
+define_subtag(reserved_frame,fulltag_imm,29)
+reserved_frame_marker = subtag_reserved_frame
+define_subtag(no_thread_local_binding,fulltag_imm,30)
+no_thread_local_binding_marker = subtag_no_thread_local_binding
+define_subtag(function_boundary_marker,fulltag_imm,31)
+function_boundary_marker = subtag_function_boundary_marker
+
+max_non_array_imm_subtag = (18<<ntagbits)|fulltag_immheader
+
+define_node_subtag(catch_frame,4)
+define_node_subtag(function,5)
+define_node_subtag(basic_stream,6)
+define_node_subtag(symbol,7)
+define_node_subtag(lock,8)
+define_node_subtag(hash_vector,9)
+define_node_subtag(pool,10)
+define_node_subtag(weak,11)
+define_node_subtag(package,12)
+define_node_subtag(slot_vector,13)
+define_node_subtag(instance,14)
+define_node_subtag(struct,15)
+define_node_subtag(istruct,16)
+define_node_subtag(value_cell,17)
+define_node_subtag(xfunction,18)
+
+max_non_array_node_subtag = (18<<ntagbits)|fulltag_immheader
+
+misc_header_offset = -fulltag_misc
+misc_subtag_offset = misc_header_offset
+misc_data_offset = misc_header_offset+node_size
+misc_dfloat_offset = misc_header_offset+8
+
+nil_value = ((0x13000 + fulltag_cons)+(LOWMEM_BIAS))
+t_value = ((0x13008 + fulltag_misc)+(LOWMEM_BIAS))
+t_offset = (t_value-nil_value)
+misc_bias = fulltag_misc
+cons_bias = fulltag_cons
+
+	_struct(cons,-cons_bias)
+         _node(cdr)
+         _node(car)
+        _ends
+
+        _structf(ratio)
+         _node(numer)
+         _node(denom)
+        _endstructf
+
+        _structf(single_float)
+         _word(value)
+        _endstructf
+
+        _structf(double_float)
+         _word(pad)
+         _dword(value)
+        _endstructf
+
+	_structf(macptr)
+         _node(address)
+         _node(domain)
+         _node(type)
+        _endstructf
+
+	_structf(catch_frame)
+	 _node(catch_tag)  /* #<unbound> -> unwind-protect, else catch */
+	 _node(link)	   /* backpointer to previous catch frame */
+	 _node(mvflag)     /* 0 if single-valued catch, fixnum 1 otherwise */
+	 _node(esp)	   /* saved lisp esp */
+	 _node(ebp)	   /* saved lisp ebp */
+	 _node(foreign_sp) /* necessary? */
+	 _node(db_link)	   /* head of special-binding chain */
+	 _node(xframe)	   /* exception frame chain */
+	 _node(pc)	   /* TRA of catch exit or cleanup form */
+	_endstructf
+
+	_struct(_function,-misc_bias)
+         _node(header)
+         _node(codevector)
+        _ends
+
+        _struct(tsp_frame,0)
+         _node(backlink)
+         _node(save_ebp)
+         _struct_label(fixed_overhead)
+         _struct_label(data_offset)
+        _ends
+
+	_struct(csp_frame,0)
+         _node(backlink)
+         _node(save_ebp)
+         _struct_label(fixed_overhead)
+         _struct_label(data_offset)
+        _ends
+
+        _structf(symbol)
+         _node(pname)
+         _node(vcell)
+         _node(fcell)
+         _node(package_predicate)
+         _node(flags)
+         _node(plist)
+         _node(binding_index)
+        _endstructf
+
+	_structf(vectorH)
+	 _node(logsize)
+	 _node(physsize)
+	 _node(data_vector)
+	 _node(displacement)
+	 _node(flags)
+	_endstructf	
+
+	_structf(arrayH)
+	 _node(rank)
+	 _node(physsize)
+	 _node(data_vector)
+	 _node(displacement)
+	 _node(flags)
+	 _struct_label(dim0)        
+	_endstructf	
+
+	_struct(lisp_frame,0)
+	 _node(backlink) 
+	 _node(savera0)	
+	_ends
+
+	_struct(vector,-fulltag_misc)
+	 _node(header)
+	 _struct_label(data)
+	_ends
+
+        _struct(binding,0)
+         _node(link)
+         _node(sym)
+         _node(val)
+        _ends
+
+symbol_extra = symbol.size-fulltag_misc
+
+	_struct(nrs,(0x13008+(LOWMEM_BIAS)))
+	 _struct_pad(fulltag_misc)
+	 _struct_label(tsym)
+	 _struct_pad(symbol_extra)	/* t */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(nilsym)
+         _struct_pad(symbol_extra)      /* nil */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(errdisp)
+         _struct_pad(symbol_extra)      /* %err-disp */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(cmain)
+         _struct_pad(symbol_extra)      /* cmain */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(eval)
+         _struct_pad(symbol_extra)      /* eval */
+ 
+         _struct_pad(fulltag_misc)
+         _struct_label(appevalfn)
+         _struct_pad(symbol_extra)      /* apply-evaluated-function */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(error)
+         _struct_pad(symbol_extra)      /* error */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(defun)
+         _struct_pad(symbol_extra)      /* %defun */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(defvar)
+         _struct_pad(symbol_extra)      /* %defvar */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(defconstant)
+         _struct_pad(symbol_extra)      /* %defconstant */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(macrosym)
+         _struct_pad(symbol_extra)      /* %macro */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(kernelrestart)
+         _struct_pad(symbol_extra)      /* %kernel-restart */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(package)
+         _struct_pad(symbol_extra)      /* *package* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(total_bytes_freed)
+         _struct_pad(symbol_extra)	/* *total-bytes-freed* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(kallowotherkeys)
+         _struct_pad(symbol_extra)      /* allow-other-keys */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(toplcatch)
+         _struct_pad(symbol_extra)      /* %toplevel-catch% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(toplfunc)
+         _struct_pad(symbol_extra)      /* %toplevel-function% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(callbacks)
+         _struct_pad(symbol_extra)      /* %pascal-functions% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(allmeteredfuns)
+         _struct_pad(symbol_extra)      /* *all-metered-functions* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(total_gc_microseconds)
+         _struct_pad(symbol_extra)  	/* *total-gc-microseconds* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(builtin_functions)
+         _struct_pad(symbol_extra)      /* %builtin-functions% */
+	
+         _struct_pad(fulltag_misc)
+         _struct_label(udf)
+         _struct_pad(symbol_extra)      /* %unbound-function% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(init_misc)
+         _struct_pad(symbol_extra)      /* %init-misc */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(macro_code)
+         _struct_pad(symbol_extra)      /* %macro-code% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(closure_code)
+         _struct_pad(symbol_extra)      /* %closure-code% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(new_gcable_ptr)
+         _struct_pad(symbol_extra)	/* %new-gcable-ptr */
+        
+         _struct_pad(fulltag_misc)
+         _struct_label(gc_event_status_bits)
+         _struct_pad(symbol_extra)      /* *gc-event-status-bits* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(post_gc_hook)
+         _struct_pad(symbol_extra)      /* *post-gc-hook* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(handlers)
+         _struct_pad(symbol_extra)      /* %handlers% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(all_packages)
+         _struct_pad(symbol_extra)      /* %all-packages% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(keyword_package)
+         _struct_pad(symbol_extra)      /* *keyword-package* */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(finalization_alist)
+         _struct_pad(symbol_extra)      /* %finalization-alist% */
+
+         _struct_pad(fulltag_misc)
+         _struct_label(foreign_thread_control)
+         _struct_pad(symbol_extra)      /* %foreign-thread-control */
+
+        _ends
+
+define(`def_header',`$1 = ($2<<num_subtag_bits)|$3')
+
+def_header(single_float_header,single_float.element_count,subtag_single_float)
+def_header(double_float_header,double_float.element_count,subtag_double_float)
+def_header(one_digit_bignum_header,1,subtag_bignum)
+def_header(two_digit_bignum_header,2,subtag_bignum)
+def_header(three_digit_bignum_header,3,subtag_bignum)
+def_header(symbol_header,symbol.element_count,subtag_symbol)
+def_header(value_cell_header,1,subtag_value_cell)
+def_header(macptr_header,macptr.element_count,subtag_macptr)
+def_header(vectorH_header,vectorH.element_count,subtag_vectorH)
+
+	include(errors.s)
+
+/* Symbol bits that we care about */
+sym_vbit_bound = (0+fixnum_shift)
+sym_vbit_bound_mask = (1<<sym_vbit_bound)
+sym_vbit_const = (1+fixnum_shift)
+sym_vbit_const_mask = (1<<sym_vbit_const)
+
+        _struct(area,0)
+         _node(pred) 
+         _node(succ) 
+         _node(low) 
+         _node(high) 
+         _node(active) 
+         _node(softlimit) 
+         _node(hardlimit) 
+         _node(code) 
+         _node(markbits) 
+         _node(ndwords) 
+         _node(older) 
+         _node(younger) 
+         _node(h) 
+         _node(sofprot) 
+         _node(hardprot) 
+         _node(owner) 
+         _node(refbits) 
+         _node(nextref) 
+        _ends
+
+TCR_BIAS = 0
+                
+/*  Thread context record.  */
+
+        _struct(tcr,TCR_BIAS)
+         _node(next)            /* in doubly-linked list */
+         _node(prev)            /* in doubly-linked list */
+         _word(node_regs_mask)
+         _node(linear)          /* our linear (non-segment-based) address. */
+	 _node(save0)		/* spill area for node registers (16-byte aligned ) */
+	 _node(save1)
+	 _node(save2)
+	 _node(save3)
+         _node(save_ebp)        /* lisp EBP when in foreign code */
+         _word(lisp_mxcsr)
+         _word(foreign_mxcsr)   
+         _node(db_link)         /* special binding chain head */
+         _node(catch_top)       /* top catch frame */
+         _node(save_vsp)        /* VSP when in foreign code */
+         _node(save_tsp)        /* TSP when in foreign code */
+         _node(foreign_sp)      /* Saved foreign SP when in lisp code */
+         _node(cs_area)         /* cstack area pointer */
+         _node(vs_area)         /* vstack area pointer */
+         _node(ts_area)         /* tstack area pointer */
+         _node(cs_limit)        /* cstack overflow limit */
+         _word(bytes_allocated)
+         _word(bytes_consed_high)
+         _node(log2_allocation_quantum)
+         _node(interrupt_pending)
+         _node(xframe)          /* per-thread exception frame list */
+         _node(errno_loc)       /* per-thread  errno location */
+         _node(ffi_exception)   /* mxcsr exception bits from ff-call */
+         _node(osid)            /* OS thread id */
+         _node(valence)         /* odd when in foreign code */
+         _node(foreign_exception_status)
+         _node(native_thread_info)
+         _node(native_thread_id)
+         _node(last_allocptr)
+         _node(save_allocptr)
+         _node(save_allocbase)
+         _node(reset_completion)
+         _node(activate)
+         _node(suspend_count)
+         _node(suspend_context)
+         _node(pending_exception_context)
+         _node(suspend)         /* semaphore for suspension notify */
+         _node(resume)          /* semaphore for resumption notify */
+         _node(flags)      
+         _node(gc_context)
+         _node(termination_semaphore)
+         _node(unwinding)
+         _node(tlb_limit)
+         _node(tlb_pointer)     /* Consider using tcr+N as tlb_pointer */
+         _node(shutdown_count)
+         _node(next_tsp)
+         _node(safe_ref_address)
+	 _word(ldt_selector)
+	 _word(scratch_mxcsr)
+	 _word(unboxed0)
+	 _word(unboxed1)
+	 _node(next_method_context)
+	 _word(save_eflags)
+         _word(allocated)
+         _word(pending_io_info)
+         _word(io_datum)
+        _ends
+
+        _struct(win32_context,0)
+	 _field(ContextFlags, 4)
+	 _field(Dr0, 4)
+	 _field(Dr1, 4)
+	 _field(Dr2, 4)
+	 _field(Dr3, 4)
+	 _field(Dr6, 4)
+	 _field(Dr7, 4)
+	 _struct_label(FloatSave)
+	 _field(ControlWord, 4);
+	 _field(StatusWord, 4)
+	 _field(TagWord, 4)
+	 _field(ErrorOffset, 4)
+	 _field(ErrorSelector, 4)
+	 _field(DataOffset, 4)
+	 _field(DataSelector, 4)
+         _field(RegisterArea, 80)
+	 _field(Cr0NpxState, 4)
+        
+	 _field(SegGs, 4)
+	 _field(SegFs, 4)
+	 _field(SegEs, 4)
+	 _field(SegDs, 4)
+	 _field(Edi, 4)
+	 _field(Esi, 4)
+	 _field(Ebx, 4)
+	 _field(Edx, 4)
+	 _field(Ecx, 4)
+	 _field(Eax, 4)
+	 _field(Ebp, 4)
+	 _field(Eip, 4)
+	 _field(SegCs, 4)
+	 _field(EFlags, 4)
+	 _field(Esp, 4)
+	 _field(SegSs, 4)
+         _struct_label(ExtendedRegisters)
+         _struct_pad(24)
+         _field(MXCSR,4)
+         _struct_pad(132) /* (- 160 28) */
+         _field(Xmm0,16)
+         _field(Xmm1,16)
+         _field(Xmm2,16)
+         _field(Xmm3,16)
+         _field(Xmm4,16)
+         _field(Xmm5,16)
+         _field(Xmm6,16)
+         _field(Xmm7,16)
+         _struct_pad(224)
+         _ends
+        
+TCR_FLAG_BIT_FOREIGN = fixnum_shift
+TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)	
+TCR_FLAG_BIT_ALT_SUSPEND = (fixnumshift+2)
+TCR_FLAG_BIT_PROPAGATE_EXCEPTION = (fixnumshift+3)
+TCR_FLAG_BIT_SUSPEND_ACK_PENDING = (fixnumshift+4)
+TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
+TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
+TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)
+
+target_most_positive_fixnum = 536870911
+target_most_negative_fixnum = -536870912
+call_arguments_limit = 8192
+
+lisp_globals_limit = (0x13000+(LOWMEM_BIAS))
+        
+INTERRUPT_LEVEL_BINDING_INDEX = fixnumone
+
+
+ifdef(`DARWIN',`
+c_stack_16_byte_aligned = 1
+',`
+c_stack_16_byte_aligned = 0
+')                
Index: /branches/arm/lisp-kernel/x86-constants64.h
===================================================================
--- /branches/arm/lisp-kernel/x86-constants64.h	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-constants64.h	(revision 13357)
@@ -0,0 +1,551 @@
+/*
+   Copyright (C) 2005-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifdef DARWIN
+#define REG_RAX 0
+#define REG_RBX 1
+#define REG_RCX 2
+#define REG_RDX 3
+#define REG_RDI 4
+#define REG_RSI 5
+#define REG_RBP 6
+#define REG_RSP 7
+#define REG_R8 8
+#define REG_R9 9
+#define REG_R10 10
+#define REG_R11 11
+#define REG_R12 12
+#define REG_R13 13
+#define REG_R14 14
+#define REG_R15 15
+#define REG_RIP 16
+#define REG_RFL 17
+#endif
+
+#ifdef FREEBSD
+#define REG_RDI 1
+#define REG_RSI 2
+#define REG_RDX 3
+#define REG_RCX 4
+#define REG_R8 5
+#define REG_R9 6
+#define REG_RAX 7
+#define REG_RBX 8
+#define REG_RBP 9
+#define REG_R10 10
+#define REG_R11 11
+#define REG_R12 12
+#define REG_R13 13
+#define REG_R14 14
+#define REG_R15 15
+#define REG_RIP 20
+#define REG_RFL 22
+#define REG_RSP 23
+#endif
+
+#ifdef WIN_64
+/* DWORD64 indices in &(CONTEXT->Rax) */
+#define REG_RAX     0
+#define REG_RCX     1
+#define REG_RDX     2
+#define REG_RBX     3
+#define REG_RSP     4
+#define REG_RBP     5
+#define REG_RSI     6
+#define REG_RDI     7
+#define REG_R8      8
+#define REG_R9      9
+#define REG_R10     10
+#define REG_R11     11
+#define REG_R12     12
+#define REG_R13     13
+#define REG_R14     14
+#define REG_R15     15
+#define REG_RIP     16
+#endif
+
+/* Define indices of the GPRs in the mcontext component of a ucontext */
+#define Itemp0      REG_RBX
+#define Iarg_y      REG_RDI
+#define Iarg_x      REG_R8
+#define Iarg_z      REG_RSI
+#define Isave3      REG_R11
+#define Isave2      REG_R12
+#define Isave1      REG_R14
+#define Isave0      REG_R15
+#define Itemp2        REG_R10
+#define Ifn         REG_R13
+#define Irbp        REG_RBP
+#define Iimm0       REG_RAX
+#define Iimm1       REG_RDX
+#define Iimm2       REG_RCX
+#define Itemp1      REG_R9
+#define Isp         REG_RSP
+#define Iip         REG_RIP
+#if defined(LINUX) || defined(WINDOWS)
+#define Iflags      REG_EFL
+#endif
+
+#if defined(SOLARIS) || defined(FREEBSD) || defined(DARWIN)
+#define Iflags      REG_RFL
+#endif
+
+
+#define Iallocptr Itemp0
+#define Ira0 Itemp2
+#define Inargs Iimm2
+#define Ixfn Itemp1
+#define Ifp Irbp
+
+
+#define nbits_in_word 64L
+#define log2_nbits_in_word 6L
+#define nbits_in_byte 8L
+#define ntagbits 4L
+#define nlisptagbits 3L
+#define nfixnumtagbits 2L
+#define num_subtag_bits 8L
+#define fixnumshift 3L
+#define fixnum_shift 3L
+#define fulltagmask 15L
+#define tagmask	 7L
+#define fixnummask 3
+#define subtagmask ((1L<<num_subtag_bits)-1L)
+#define ncharcodebits 8L
+#define charcode_shift 8L
+#define node_size 8L
+#define node_shift 3L
+#define nargregs 3L
+
+#define tag_fixnum 0L
+#define tag_imm_0 1L		/* subtag_single_float ONLY */
+#define tag_imm_1 2L		/* subtag_character, internal markers */
+#define tag_list 3L		/* subtag_cons or NIL */
+#define tag_tra 4L		/* tagged return_address */
+#define tag_misc 5L		/* random uvector */
+#define tag_symbol 6L	        /* non-null symbol */
+#define tag_function 7L	/* function entry point */
+
+#define fulltag_even_fixnum 0L
+#define fulltag_imm_0 1L
+#define fulltag_imm_1 2L
+#define fulltag_cons 3L
+#define fulltag_tra_0 4L
+#define fulltag_nodeheader_0 5L
+#define fulltag_nodeheader_1 6L
+#define fulltag_immheader_0 7L
+#define fulltag_odd_fixnum 8L
+#define fulltag_immheader_1 9L
+#define fulltag_immheader_2 10L
+#define fulltag_nil 11L
+#define fulltag_tra_1 12L
+#define fulltag_misc 13L
+#define fulltag_symbol 14L
+#define fulltag_function 15L
+
+#define SUBTAG(tag,subtag) ((tag) | ((subtag) << ntagbits))
+#define subtag_arrayH SUBTAG(fulltag_nodeheader_0,10L)
+#define subtag_vectorH SUBTAG(fulltag_nodeheader_1,10L)
+#define subtag_simple_vector SUBTAG(fulltag_nodeheader_1,11L)
+#define min_vector_subtag subtag_vectorH	
+
+#define ivector_class_64_bit fulltag_immheader_2
+#define ivector_class_32_bit fulltag_immheader_1
+#define ivector_class_other_bit fulltag_immheader_0
+
+
+#define subtag_fixnum_vector SUBTAG(ivector_class_64_bit,12L)
+#define subtag_s64_vector SUBTAG(ivector_class_64_bit,13L)
+#define subtag_u64_vector SUBTAG(ivector_class_64_bit,14L)
+#define subtag_double_float_vector SUBTAG(ivector_class_64_bit,15L)
+
+#define subtag_simple_base_string SUBTAG(ivector_class_32_bit,12L)
+#define subtag_s32_vector SUBTAG(ivector_class_32_bit,13L)
+#define subtag_u32_vector SUBTAG(ivector_class_32_bit,14L)
+#define subtag_single_float_vector SUBTAG(ivector_class_32_bit,15L)
+
+#define subtag_s16_vector SUBTAG(ivector_class_other_bit,10L)
+#define subtag_u16_vector SUBTAG(ivector_class_other_bit,11L)
+#define subtag_s8_vector SUBTAG(ivector_class_other_bit,13L)
+#define subtag_u8_vector SUBTAG(ivector_class_other_bit,14L)
+#define subtag_bit_vector SUBTAG(ivector_class_other_bit,15L)
+/* min_8_bit_ivector_subtag is the old 8-bit simple_base_string */
+#define min_8_bit_ivector_subtag SUBTAG(ivector_class_other_bit,12L)
+
+/* There's some room for expansion in non-array ivector space. */
+#define subtag_macptr SUBTAG(ivector_class_64_bit,1)
+#define subtag_dead_macptr SUBTAG(ivector_class_64_bit,2)
+#define subtag_bignum SUBTAG(ivector_class_32_bit,0)
+#define subtag_double_float SUBTAG(ivector_class_32_bit,1)
+#define subtag_xcode_vector SUBTAG(ivector_class_32_bit,2)
+
+/* Note the difference between (e.g) fulltag_function - which
+   defines what the low 4 bytes of a function pointer look like -
+   and subtag_function - which describes what the subtag byte
+   in a function header looks like.  (Likewise for fulltag_symbol
+   and subtag_symbol)
+*/		
+
+#define subtag_symbol SUBTAG(fulltag_nodeheader_0,1)
+#define subtag_catch_frame SUBTAG(fulltag_nodeheader_0,2)
+#define subtag_hash_vector SUBTAG(fulltag_nodeheader_0,3)
+#define subtag_pool SUBTAG(fulltag_nodeheader_0,4)
+#define subtag_weak SUBTAG(fulltag_nodeheader_0,5)
+#define subtag_package SUBTAG(fulltag_nodeheader_0,6)
+#define subtag_slot_vector SUBTAG(fulltag_nodeheader_0,7)
+#define subtag_basic_stream SUBTAG(fulltag_nodeheader_0,8)
+#define subtag_function SUBTAG(fulltag_nodeheader_0,9)
+
+#define subtag_ratio SUBTAG(fulltag_nodeheader_1,1)
+#define subtag_complex SUBTAG(fulltag_nodeheader_1,2)
+#define subtag_struct SUBTAG(fulltag_nodeheader_1,3)
+#define subtag_istruct SUBTAG(fulltag_nodeheader_1,4)
+#define subtag_value_cell SUBTAG(fulltag_nodeheader_1,5)
+#define subtag_xfunction SUBTAG(fulltag_nodeheader_1,6)
+#define subtag_lock SUBTAG(fulltag_nodeheader_1,7)
+#define subtag_instance SUBTAG(fulltag_nodeheader_1,8)
+
+
+
+#define nil_value ((0x13000+fulltag_nil)+(LOWMEM_BIAS))
+#define t_value ((0x13020+fulltag_symbol)+(LOWMEM_BIAS))
+#define misc_bias fulltag_misc
+#define cons_bias fulltag_cons
+
+	
+#define misc_header_offset -fulltag_misc
+#define misc_subtag_offset misc_header_offset       /* low byte of header */
+#define misc_data_offset misc_header_offset+node_size	/* first word of data */
+#define misc_dfloat_offset misc_header_offset		/* double-floats are doubleword-aligned */
+
+#define subtag_single_float SUBTAG(fulltag_imm_0,0)
+#define subtag_character SUBTAG(fulltag_imm_1,0)
+
+#define subtag_unbound SUBTAG(fulltag_imm_1,1)
+#define unbound_marker subtag_unbound
+#define undefined unbound_marker
+#define unbound unbound_marker
+#define subtag_slot_unbound SUBTAG(fulltag_imm_1,2)
+#define slot_unbound_marker subtag_slot_unbound
+#define slot_unbound slot_unbound_marker
+#define subtag_illegal SUBTAG(fulltag_imm_1,3)
+#define illegal_marker subtag_illegal
+#define subtag_no_thread_local_binding SUBTAG(fulltag_imm_1,4)
+#define no_thread_local_binding_marker subtag_no_thread_local_binding
+#define subtag_reserved_frame  SUBTAG(fulltag_imm_1,5)
+#define reserved_frame_marker subtag_reserved_frame
+#define subtag_forward_marker SUBTAG(fulltag_imm_1,6)
+
+#define function_boundary_marker SUBTAG(fulltag_imm_1,15)	
+
+/* The objects themselves look something like this: */
+
+/*  Order of CAR and CDR doesn't seem to matter much - there aren't */
+/*  too many tricks to be played with predecrement/preincrement addressing. */
+/*  Keep them in the confusing MCL 3.0 order, to avoid confusion. */
+
+/* A cons cell.  The cdr is stored at the lower address, before the car. */
+typedef struct cons {
+  LispObj cdr;                  /* stored first */
+  LispObj car;
+} cons;
+
+
+
+/* In-memory layout of a symbol uvector (mirrors the `symbol' _structf
+   in the assembler constants). */
+typedef struct lispsymbol {
+  LispObj header;               /* uvector header (subtag_symbol) */
+  LispObj pname;                /* print name */
+  LispObj vcell;                /* value cell */
+  LispObj fcell;                /* function cell */
+  LispObj package_predicate;
+  LispObj flags;                /* see sym_vbit_* bits */
+  LispObj plist;                /* property list */
+  LispObj binding_index;        /* presumably an index into the thread-local
+                                   binding vector (tcr.tlb_pointer) -- confirm */
+} lispsymbol;
+
+/* Ratio: a numerator/denominator pair. */
+typedef struct ratio {
+  LispObj header;
+  LispObj numer;                /* numerator */
+  LispObj denom;                /* denominator */
+} ratio;
+
+/* Boxed double-float.  On this 64-bit target (node_size == 8) the
+   IEEE-754 double occupies the single node-sized slot after the header. */
+typedef struct double_float {
+  LispObj header;
+  LispObj value;                /* raw 64-bit double bits */
+} double_float;
+
+
+/* Foreign-pointer (macptr) object. */
+typedef struct macptr {
+  LispObj header;
+  LispObj address;              /* the foreign address itself */
+  LispObj class;
+  LispObj type;
+} macptr;
+
+/* "Extended" (gcable) macptr: a macptr with an extra flags word and a
+   link slot so the GC can track and finalize it. */
+typedef struct xmacptr {
+  LispObj header;
+  LispObj address;
+  LispObj class;
+  LispObj type;
+  LispObj flags;                /* known flag values documented elsewhere */
+  LispObj link;                 /* chain of gcable pointers */
+} xmacptr;
+  
+
+
+/* Entry on a thread's special-variable binding chain (the db_link list). */
+typedef struct special_binding {
+  struct special_binding *link; /* next (older) binding */
+  struct lispsymbol *sym;       /* the symbol being bound */
+  LispObj value;                /* saved (shallow-bound) value */
+} special_binding;
+
+/* Frame linkage on the lisp stack. */
+typedef struct lisp_frame {
+  struct lisp_frame *backlink;  /* caller's frame */
+  LispObj tra;                  /* tagged return address (tag_tra) */
+  LispObj xtra;			/* if tra is nvalretn */
+} lisp_frame;
+
+/* These are created on the lisp stack by the exception callback mechanism,
+   but nothing ever returns to them.  (At the very least, nothing -should-
+   try to return to them ...).
+*/
+/* Exception callback frame ("xcf"): built on the lisp stack when an
+   exception is reflected into lisp; nothing ever returns through it. */
+typedef struct exception_callback_frame {
+  struct lisp_frame *backlink;
+  LispObj tra;                  /* ALWAYS 0 FOR AN XCF */
+  LispObj nominal_function;     /* the current function at the time of the exception */
+  LispObj relative_pc;          /* Boxed byte offset within actual
+                                   function or absolute address */
+  LispObj containing_uvector;   /* the uvector that contains the relative PC or NIL */
+  LispObj xp;                   /* exception context */
+  LispObj ra0;                  /* value of ra0 from context */
+  LispObj foreign_sp;           /* foreign sp at the time that exception occurred */
+  LispObj prev_xframe;          /* so %apply-in-frame can unwind it */
+} xcf;
+
+
+/* The GC (at least) needs to know what a
+   package looks like, so that it can do GCTWA. */
+typedef struct package {
+  LispObj header;
+  LispObj itab;			/* itab and etab look like (vector (fixnum . fixnum)) */
+  LispObj etab;
+  LispObj used;
+  LispObj used_by;
+  LispObj names;
+  LispObj shadowed;
+} package;
+
+/*
+  The GC also needs to know what a catch_frame looks like.
+*/
+
+typedef struct catch_frame {
+  LispObj header;
+  LispObj catch_tag;            /* #<unbound> -> unwind-protect, else catch */
+  LispObj link;                 /* backpointer to previous catch frame */
+  LispObj mvflag;               /* 0 if single-valued catch, fixnum 1 otherwise */
+  LispObj csp;                  /* saved control-stack pointer */
+  LispObj db_link;              /* head of special-binding chain at entry */
+  LispObj regs[4];              /* saved node registers -- presumably
+                                   save0..save3; confirm against kernel */
+  LispObj xframe;               /* exception frame chain */
+  LispObj tsp_segment;
+} catch_frame;
+
+#define catch_frame_element_count ((sizeof(catch_frame)/sizeof(LispObj))-1)
+#define catch_frame_header make_header(subtag_catch_frame,catch_frame_element_count)
+
+
+/* 
+  All exception frames in a thread are linked together 
+  */
+typedef struct xframe_list {
+  ExceptionInformation *curr;   /* context for this exception */
+  struct xframe_list *prev;     /* next-older list entry */
+} xframe_list;
+
+#define fixnum_bitmask(n)  (1LL<<((n)+fixnumshift))
+
+/* 
+  The GC (at least) needs to know about hash-table-vectors and their flag bits.
+*/
+
+typedef struct hash_table_vector_header {
+  LispObj header;               /* uvector header */
+  LispObj link;                 /* If weak */
+  LispObj flags;                /* a fixnum; see bit masks below */
+  LispObj gc_count;             /* gc-count kernel global */
+  LispObj free_alist;           /* preallocated conses for finalization_alist */
+  LispObj finalization_alist;   /* key/value alist for finalization */
+  LispObj weak_deletions_count; /* incremented when GC deletes weak pair */
+  LispObj hash;                 /* backpointer to hash-table */
+  LispObj deleted_count;        /* number of deleted entries [not maintained if lock-free] */
+  LispObj count;                /* number of valid entries [not maintained if lock-free] */
+  LispObj cache_idx;            /* index of last cached pair */
+  LispObj cache_key;            /* value of last cached key */
+  LispObj cache_value;          /* last cached value */
+  LispObj size;                 /* number of entries in table */
+  LispObj size_reciprocal;      /* shifted reciprocal of size */
+} hash_table_vector_header;
+
+/*
+  Bits (masks)  in hash_table_vector.flags:
+*/
+
+/* GC should track keys when addresses change */ 
+#define nhash_track_keys_mask fixnum_bitmask(28) 
+
+/* GC should set when nhash_track_keys_bit & addresses change */
+#define nhash_key_moved_mask  fixnum_bitmask(27) 
+
+/* weak on key or value (need new "weak both" encoding.) */
+#define nhash_weak_mask       fixnum_bitmask(12)
+
+/* weak on value */
+#define nhash_weak_value_mask fixnum_bitmask(11)
+
+/* finalizable */
+#define nhash_finalizable_mask fixnum_bitmask(10)
+
+/* keys frozen, i.e. don't clobber keys, only values */
+#define nhash_keys_frozen_mask fixnum_bitmask(9)
+
+/* Lfun bits */
+
+#define lfbits_nonnullenv_mask fixnum_bitmask(0)
+#define lfbits_keys_mask fixnum_bitmask(1)
+#define lfbits_restv_mask fixnum_bitmask(7)
+#define lfbits_optinit_mask fixnum_bitmask(14)
+#define lfbits_rest_mask fixnum_bitmask(15)
+#define lfbits_aok_mask fixnum_bitmask(16)
+#define lfbits_lap_mask fixnum_bitmask(23)
+#define lfbits_trampoline_mask fixnum_bitmask(24)
+#define lfbits_evaluated_mask fixnum_bitmask(25)
+#define lfbits_cm_mask fixnum_bitmask(26)         /* combined_method */
+#define lfbits_nextmeth_mask fixnum_bitmask(26)   /* or call_next_method with method_mask */
+#define lfbits_gfn_mask fixnum_bitmask(27)        /* generic_function */
+#define lfbits_nextmeth_with_args_mask fixnum_bitmask(27)   /* or call_next_method_with_args with method_mask */
+#define lfbits_method_mask fixnum_bitmask(28)     /* method function */
+/* PPC only but want it defined for xcompile */
+#define lfbits_noname_mask fixnum_bitmask(29)
+
+/*
+  known values of an "extended" (gcable) macptr's flags word:
+*/
+
+
+/* Creole */
+
+#define doh_quantum 400
+#define doh_block_slots ((doh_quantum >> 2) - 3)
+
+typedef struct doh_block {
+  struct doh_block *link;
+  unsigned size;
+  unsigned free;
+  LispObj data[doh_block_slots];
+} doh_block, *doh_block_ptr;
+
+
+#define population_weak_list (0<<fixnum_shift)
+#define population_weak_alist (1<<fixnum_shift)
+#define population_termination_bit (16+fixnum_shift)
+#define population_type_mask ((1<<population_termination_bit)-1)
+
+#define gc_retain_pages_bit fixnum_bitmask(0)
+#define gc_integrity_check_bit fixnum_bitmask(2)
+#define egc_verbose_bit fixnum_bitmask(3)
+#define gc_verbose_bit fixnum_bitmask(4)
+#define gc_allow_stack_overflows_bit fixnum_bitmask(5)
+#define gc_postgc_pending fixnum_bitmask(26)
+
+#include "lisp-errors.h"
+
+
+
+#define TCR_BIAS (0x0)
+
+typedef struct tcr {
+  struct tcr* next;
+  struct tcr* prev;
+  struct {
+    u32_t tag;
+    float f;
+  } single_float_convert;
+  struct tcr* linear;
+  LispObj *save_fp;            /* RBP when in foreign code */
+  u32_t lisp_mxcsr;
+  u32_t foreign_mxcsr;
+  special_binding* db_link;	/* special binding chain head */
+  LispObj catch_top;		/* top catch frame */
+  LispObj* save_vsp;  /* VSP when in foreign code */
+  LispObj* save_tsp;  /* TSP when in foreign code */
+  LispObj* foreign_sp;
+  struct area* cs_area; /* cstack area pointer */
+  struct area* vs_area; /* vstack area pointer */
+  struct area* ts_area; /* tstack area pointer */
+  LispObj cs_limit;		/* stack overflow limit */
+  natural bytes_allocated;
+  natural log2_allocation_quantum;      /* for per-thread consing */
+  signed_natural interrupt_pending;	/* pending interrupt flag */
+  xframe_list* xframe; /* exception-frame linked list */
+  int* errno_loc;		/* per-thread (?) errno location */
+  LispObj ffi_exception;	/* fpscr bits from ff-call */
+  LispObj osid;			/* OS thread id */
+  signed_natural valence;			/* odd when in foreign code */
+  signed_natural foreign_exception_status;	/* non-zero -> call lisp_exit_hook */
+  void* native_thread_info;	/* platform-dependent */
+  void* native_thread_id;	/* mach_thread_t, pid_t, etc. */
+  void* last_allocptr;
+  void* save_allocptr;
+  void* save_allocbase;
+  void* reset_completion;
+  void* activate;
+  signed_natural suspend_count;
+  ExceptionInformation* suspend_context;
+  ExceptionInformation* pending_exception_context;
+  void* suspend;		/* suspension semaphore */
+  void* resume;			/* resumption semaphore */
+  natural flags;
+  ExceptionInformation* gc_context;
+  void* termination_semaphore;
+  signed_natural unwinding;
+  natural tlb_limit;
+  LispObj* tlb_pointer;
+  natural shutdown_count;
+  LispObj* next_tsp;
+  void *safe_ref_address;
+  void *pending_io_info;
+  void *io_datum;
+} TCR;
+
+#define t_offset (t_value-nil_value)
+
+typedef struct {
+  natural Rip;
+  natural Cs;                   /* in low 16 bits */
+  natural Rflags;               /* in low 32 bits */
+  natural Rsp;
+  natural Ss;                   /* in low 16 bits*/
+} x64_iret_frame;
+
+/* 
+  These were previously global variables.  There are lots of implicit
+  assumptions about the size of a heap segment, so they might as well
+  be constants.
+*/
+
+#define heap_segment_size 0x00020000L
+#define log2_heap_segment_size 17L
+
Index: /branches/arm/lisp-kernel/x86-constants64.s
===================================================================
--- /branches/arm/lisp-kernel/x86-constants64.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-constants64.s	(revision 13357)
@@ -0,0 +1,1048 @@
+/*   Copyright (C) 2005-2009 Clozure Associates  */
+/*   This file is part of Clozure CL.    */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+ 
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+ 
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+
+/* Register usage.  This is certainly a little short of  */
+/* immediate registers; we can maybe use the low bits  */
+/* of mmx or xmm registers to hold immediate values and  */
+/* do some unboxed arithmetic.   */
+
+
+/*
+
+	Register usage in C calling conventions differ between
+	Darwin/Linux/FreeBSD (which use the AMD-defined ABI) and
+	Windows64 (which uses something else).  The good news is that
+	Win64 did away with the cdecl/stdcall/fastcall madness, there
+	is only one ABI left.  Here's a rundown.
+
+	AMD64^Wx86-64 ABI:
+	 * Integer and pointer function arguments passed (from left to
+	right) in RDI, RSI, RDX, RCX, R8 and R9
+	 * FP arguments are passed in XMM0..XMM7
+	 * rest is passed on stack
+	 * return value in RAX
+	 * Callee must preserve RBP, RBX, R12..R15, MXCSR control bits
+	 * On function entry, x87 mode and DF clear is assumed
+	 * `RSP'..`RSP-128' must not be touched by signal handlers
+
+	Win64 ABI:
+	 * Integer and pointers passed in RCX, RDX, R8, R9
+	 * FP passed in XMM0..XMM3
+	 * rest is passed on stack
+	 * Return value in RAX or XMM0
+	 * Caller (!) responsible for creating and cleaning stack space for
+	spilling integer registers
+	 * Callee must preserve RBP, RBX, RSI, RDI, R12..R15, XMM6..XMM15
+
+	Both want their stack pointers to be 16 byte aligned on call,
+	equivalent to 8 byte offset after call due to pushed return address.
+	
+	http://msdn2.microsoft.com/en-us/library/zthk2dkh(VS.80).aspx
+	http://www.tortall.net/projects/yasm/manual/html/objfmt-win64-exception.html
+	http://www.x86-64.org/documentation/abi.pdf
+
+
+	Lisp register usage:
+
+	Clozure CL renames the physical registers, giving them names
+	based on their usage. An overview:
+
+	imm0..imm2
+	temp0..temp2
+	save0..save3
+	arg_x, arg_y, arg_z
+	fn
+
+	On top of that, further mappings are defined:
+
+	fname, next_method_context: 	temp0
+        nargs:				imm2
+        ra0:				temp2
+        xfn:				temp1
+        allocptr:			temp0
+        stack_temp:			mm7	
+	
+	x86-64 ABI mapping:
+	
+	imm0..imm2:		RAX, RDX, RCX
+	temp0..temp2:		RBX, R9, R10
+	save0..save3:		R15, R14, R12, R11
+	arg_x, arg_y, arg_z:	R8, RDI, RSI
+        fn:			R13
+        rcontext_reg:		GS
+
+	Win64 specifics:
+        rcontext_reg:		R11
+	
+*/
+	
+
+/* Redefining these standard register names - with the same _l, _w, _b suffixes  */
+/*  used in lispy symbolic names - allows us to play Stupid M4 Tricks in macros  */
+			
+define(`rax_l',`eax')
+define(`rax_w',`ax')
+define(`rax_b',`al')
+define(`rbx_l',`ebx')
+define(`rbx_w',`bx')
+define(`rbx_b',`bl')
+define(`rcx_l',`ecx')
+define(`rcx_w',`cx')
+define(`rdx_l',`edx')
+define(`rdx_w',`dx')					
+define(`rdx_b',`dl')							
+define(`rsi_l',`esi')
+define(`rsi_w',`si')				
+define(`rsi_b',`sil')
+define(`rdi_l',`edi')
+define(`rdi_w',`di')				
+define(`rdi_b',`dil')
+define(`r8_l',`r8d')
+define(`r8_w',`r8w')					
+define(`r8_b',`r8b')							
+define(`r9_l',`r9d')
+define(`r9_w',`r9w')					
+define(`r9_b',`r9b')							
+define(`r10_l',`r10d')
+define(`r10_w',`r10w')					
+define(`r10_b',`r10b')							
+define(`r11_l',`r11d')
+define(`r11_w',`r11w')					
+define(`r11_b',`r11b')							
+define(`r12_l',`r12d')
+define(`r12_w',`r12w')					
+define(`r12_b',`r12b')							
+define(`r13_l',`r13d')
+define(`r13_w',`r13w')					
+define(`r13_b',`r13b')							
+define(`r14_l',`r14d')
+define(`r14_w',`r14w')					
+define(`r14_b',`r14b')							
+define(`r15_l',`r15d')
+define(`r15_w',`r15w')					
+define(`r15_b',`r15b')							
+
+/* Registers when using Lisp calling conventions */
+	
+define(`imm0',`rax') 
+	define(`imm0_l',`eax')
+	define(`imm0_w',`ax')
+	define(`imm0_b',`al')
+	define(`Rimm0',`0')
+	
+define(`temp0',`rbx')
+	define(`temp0_l',`ebx')
+	define(`temp0_w',`bx')
+	define(`temp0_b',`bl')
+	define(`Rtemp0',`3')
+
+define(`imm2',`rcx')
+	define(`imm2_l',`ecx')
+	define(`imm2_w',`cx')
+	define(`imm2_b',`cl')
+	define(`Rimm2',`1')
+	
+define(`imm1',`rdx')
+	define(`imm1_l',`edx')
+	define(`imm1_w',`dx')
+	define(`imm1_b',`dl')
+	define(`Rimm1',`2')
+	
+define(`arg_z',`rsi')
+	define(`arg_z_l',`esi')
+	define(`arg_z_w',`si')
+	define(`arg_z_b',`sil')
+	define(`Rarg_z',`6')
+
+define(`arg_y',`rdi')
+	define(`arg_y_l',`edi')
+	define(`arg_y_w',`di')
+	define(`arg_y_b',`dil')
+	define(`Rarg_y',`7')
+
+define(`arg_x',`r8')
+	define(`arg_x_l',`r8d')
+	define(`arg_x_w',`r8w')
+	define(`arg_x_b',`r8b')
+	define(`Rarg_x',`8')
+
+define(`temp1',`r9')
+	define(`temp1_l',`r9d')
+	define(`temp1_w',`r9w')
+	define(`temp1_b',`r9b')
+	define(`Rtemp1',`9')
+
+define(`temp2',`r10')
+	define(`temp2_l',`r10d')
+	define(`temp2_w',`r10w')
+	define(`temp2_b',`r10b')
+	define(`Rtemp2',`10')
+	
+define(`save3',`r11')		
+	define(`save3_l',`r11d')
+	define(`save3_w',`r11w')
+	define(`save3_b',`r11b')
+	define(`Rsave3',`11')
+	
+define(`save2',`r12')
+	define(`save2_l',`r12d')
+	define(`save2_w',`r12w')
+	define(`save2_b',`r12b')
+	define(`Rsave2',`12')
+	
+define(`fn',`r13')		/* some addressing restrictions   */
+	define(`fn_l',`r13d')
+	define(`fn_w',`r13w')
+	define(`fn_b',`r13b')
+	define(`Rfn',`13')
+	
+define(`save1',`r14')
+	define(`save1_l',`r14d')
+	define(`save1_w',`r14w')
+	define(`save1_b',`r14b')
+	define(`Rsave1',`14')
+		
+define(`save0',`r15')
+	define(`save0_l',`r15d')
+	define(`save0_w',`r15w')
+	define(`save0_b',`r15b')
+	define(`Rsave0',`15')	
+
+
+ifdef(`TCR_IN_GPR',`
+/* We keep the TCR pointer in r11 */
+	define(`rcontext_reg', r11)
+	define(`rcontext',`$1(%rcontext_reg)')
+',`
+/* The TCR can be accessed relative to %gs   */
+	define(`rcontext_reg',`gs')
+	define(`rcontext',`%rcontext_reg:$1')
+')
+define(`fname',`temp0')
+define(`next_method_context',`temp0')
+define(`nargs_b',`imm2_b')	
+define(`nargs_w',`imm2_w')
+define(`nargs_q',`imm2')
+define(`nargs',`imm2_l')
+define(`ra0',`temp2')        
+						
+define(`xfn',`temp1')
+
+define(`allocptr',`temp0')		
+define(`stack_temp',`mm7')
+
+		
+define(`fp0',`xmm0')		
+define(`fp1',`xmm1')		
+define(`fp2',`xmm2')		
+define(`fp3',`xmm3')		
+define(`fp4',`xmm4')		
+define(`fp5',`xmm5')		
+define(`fp6',`xmm6')		
+define(`fp7',`xmm7')		
+define(`fp8',`xmm8')		
+define(`fp9',`xmm9')		
+define(`fp10',`xmm10')		
+define(`fp11',`xmm11')		
+define(`fp12',`xmm12')		
+define(`fp13',`xmm13')		
+define(`fp14',`xmm14')		
+define(`fp15',`xmm15')		
+define(`fpzero',`fp15')
+
+/* Registers when running with native C calling conventions */
+
+define(`cret',`rax') 
+	define(`cret_l',`eax')
+	define(`cret_w',`ax')
+	define(`cret_b',`al')
+	define(`Rcret',`0')
+	
+define(`ctemp0',`r10')
+	define(`ctemp0_l',`r10d')
+	define(`ctemp0_w',`r10w')
+	define(`ctemp0_b',`r10b')
+	define(`Rctemp0',`10')
+	
+define(`ctemp1',`r11')		
+	define(`ctemp1_l',`r11d')
+	define(`ctemp1_w',`r11w')
+	define(`ctemp1_b',`r11b')
+	define(`Rctemp1',`11')
+	
+define(`csave0',`rbx')
+	define(`csave0_l',`ebx')
+	define(`csave0_w',`bx')
+	define(`csave0_b',`bl')
+	define(`Rcsave0',`3')
+
+define(`csave1',`r12')
+	define(`csave1_l',`r12d')
+	define(`csave1_w',`r12w')
+	define(`csave1_b',`r12b')
+	define(`Rcsave1',`12')
+	
+define(`csave2',`r13')
+	define(`csave2_l',`r13d')
+	define(`csave2_w',`r13w')
+	define(`csave2_b',`r13b')
+	define(`Rcsave2',`13')
+	
+define(`csave3',`r14')
+	define(`csave3_l',`r14d')
+	define(`csave3_w',`r14w')
+	define(`csave3_b',`r14b')
+	define(`Rcsave3',`14')
+		
+define(`csave4',`r15')
+	define(`csave4_l',`r15d')
+	define(`csave4_w',`r15w')
+	define(`csave4_b',`r15b')
+	define(`Rcsave4',`15')	
+
+ifdef(`WINDOWS',`
+
+define(`carg0',`rcx')
+	define(`carg0_l',`ecx')
+	define(`carg0_w',`cx')
+	define(`carg0_b',`cl')
+	define(`Rcarg0',`1')
+	
+define(`carg1',`rdx')
+	define(`carg1_l',`edx')
+	define(`carg1_w',`dx')
+	define(`carg1_b',`dl')
+	define(`Rcarg1',`2')
+	
+define(`carg2',`r8')
+	define(`carg2_l',`r8d')
+	define(`carg2_w',`r8w')
+	define(`carg2_b',`r8b')
+	define(`Rcarg2',`8')
+
+define(`carg3',`r9')
+	define(`carg3_l',`r9d')
+	define(`carg3_w',`r9w')
+	define(`carg3_b',`r9b')
+	define(`Rcarg3',`9')
+
+define(`csave5',`rsi')
+	define(`csave5_l',`esi')
+	define(`csave5_w',`si')
+	define(`csave5_b',`sil')
+	define(`Rcsave5',`6')
+
+define(`csave6',`rdi')
+	define(`csave6_l',`edi')
+	define(`csave6_w',`di')
+	define(`csave6_b',`dil')
+	define(`Rcsave6',`7')
+
+',`
+	
+define(`carg0',`rdi')
+	define(`carg0_l',`edi')
+	define(`carg0_w',`di')
+	define(`carg0_b',`dil')
+	define(`Rcarg0',`7')
+
+define(`carg1',`rsi')
+	define(`carg1_l',`esi')
+	define(`carg1_w',`si')
+	define(`carg1_b',`sil')
+	define(`Rcarg1',`6')
+
+define(`carg2',`rdx')
+	define(`carg2_l',`edx')
+	define(`carg2_w',`dx')
+	define(`carg2_b',`dl')
+	define(`Rcarg2',`2')
+	
+define(`carg3',`rcx')
+	define(`carg3_l',`ecx')
+	define(`carg3_w',`cx')
+	define(`carg3_b',`cl')
+	define(`Rcarg3',`1')
+	
+define(`carg4',`r8')
+	define(`carg4_l',`r8d')
+	define(`carg4_w',`r8w')
+	define(`carg4_b',`r8b')
+	define(`Rcarg4',`8')
+
+define(`carg5',`r9')
+	define(`carg5_l',`r9d')
+	define(`carg5_w',`r9w')
+	define(`carg5_b',`r9b')
+	define(`Rcarg5',`9')	
+')
+	
+nbits_in_word = 64
+nbits_in_byte = 8
+ntagbits = 4
+nlisptagbits = 3
+nfixnumtagbits = 3
+nlowtagbits = 2        
+num_subtag_bits = 8
+subtag_shift = num_subtag_bits	
+fixnumshift = 3
+fixnum_shift = 3
+fulltagmask = 15
+tagmask = 7
+fixnummask = 7
+ncharcodebits = 8
+charcode_shift = 8
+word_shift = 3
+node_size = 8
+dnode_size = 16
+dnode_align_bits = 4
+dnode_shift = dnode_align_bits        
+bitmap_shift = 6
+        
+fixnumone = (1<<fixnumshift)
+fixnum_one = fixnumone
+fixnum1 = fixnumone
+
+nargregs = 3
+nsaveregs = 4	
+                
+
+tag_fixnum = 0
+tag_imm_0 = 1		/* subtag_single_float ONLY   */
+tag_imm_1 = 2		/* subtag_character, internal markers   */
+tag_list = 3		/* fulltag_cons or NIL   */
+tag_tra = 4		/* tagged return_address   */
+tag_misc = 5		/* random uvector   */
+tag_symbol = 6	        /* non-null symbol   */
+tag_function = 7	/* function entry point   */
+
+tag_single_float = tag_imm_0
+		
+fulltag_even_fixnum = 0
+fulltag_imm_0 = 1		/* subtag_single_float (ONLY)   */
+fulltag_imm_1 = 2		/* subtag_character (mostly)   */
+fulltag_cons = 3
+fulltag_tra_0 = 4		/* tagged return address   */
+fulltag_nodeheader_0 = 5
+fulltag_nodeheader_1 = 6
+fulltag_immheader_0 = 7	
+fulltag_odd_fixnum = 8
+fulltag_immheader_1 = 9
+fulltag_immheader_2 = 10
+fulltag_nil = 11
+fulltag_tra_1 = 12
+fulltag_misc = 13
+fulltag_symbol = 14
+fulltag_function = 15
+
+define(`define_subtag',`
+subtag_$1 = ($2 | ($3 << ntagbits))
+')
+	
+
+define_subtag(arrayH,fulltag_nodeheader_0,10)
+define_subtag(vectorH,fulltag_nodeheader_1,10)
+define_subtag(simple_vector,fulltag_nodeheader_1,11)
+min_vector_subtag = subtag_vectorH
+min_array_subtag = subtag_arrayH
+        
+	
+ivector_class_64_bit = fulltag_immheader_2
+ivector_class_32_bit = fulltag_immheader_1
+ivector_class_other_bit = fulltag_immheader_0
+
+define_subtag(fixnum_vector,ivector_class_64_bit,12)
+define_subtag(s64_vector,ivector_class_64_bit,13)
+define_subtag(u64_vector,ivector_class_64_bit,14)
+define_subtag(double_float_vector,ivector_class_64_bit,15)
+
+define_subtag(simple_base_string,ivector_class_32_bit,12)
+define_subtag(s32_vector,ivector_class_32_bit,13)
+define_subtag(u32_vector,ivector_class_32_bit,14)
+define_subtag(single_float_vector,ivector_class_32_bit,15)
+	
+define_subtag(s16_vector,ivector_class_other_bit,10)
+define_subtag(u16_vector,ivector_class_other_bit,11)
+define_subtag(s8_vector,ivector_class_other_bit,13)
+define_subtag(u8_vector,ivector_class_other_bit,14)
+define_subtag(bit_vector,ivector_class_other_bit,15)
+
+
+/* There's some room for expansion in non-array ivector space.   */
+define_subtag(macptr,ivector_class_64_bit,1)
+define_subtag(dead_macptr,ivector_class_64_bit,2)
+define_subtag(bignum,ivector_class_32_bit,1)
+define_subtag(double_float,ivector_class_32_bit,2)
+define_subtag(xcode_vector,ivector_class_32_bit,3)
+
+        
+/* Note the difference between (e.g) fulltag_function - which  */
+/* defines what the low 4 bytes of a function pointer look like -  */
+/* and subtag_function - which describes what the subtag byte  */
+/* in a function header looks like.  (Likewise for fulltag_symbol  */
+/* and subtag_symbol)  */
+		
+
+define_subtag(symbol,fulltag_nodeheader_0,1)
+define_subtag(catch_frame,fulltag_nodeheader_0,2)
+define_subtag(hash_vector,fulltag_nodeheader_0,3)
+define_subtag(pool,fulltag_nodeheader_0,4)
+define_subtag(weak,fulltag_nodeheader_0,5)
+define_subtag(package,fulltag_nodeheader_0,6)
+define_subtag(slot_vector,fulltag_nodeheader_0,7)
+define_subtag(basic_stream,fulltag_nodeheader_0,8)
+define_subtag(function,fulltag_nodeheader_0,9)
+	
+define_subtag(ratio,fulltag_nodeheader_1,1)
+define_subtag(complex,fulltag_nodeheader_1,2)
+define_subtag(struct,fulltag_nodeheader_1,3)
+define_subtag(istruct,fulltag_nodeheader_1,4)
+define_subtag(value_cell,fulltag_nodeheader_1,5)
+define_subtag(xfunction,fulltag_nodeheader_1,6)
+define_subtag(lock,fulltag_nodeheader_1,7)
+define_subtag(instance,fulltag_nodeheader_1,8)
+	
+			
+nil_value = (0x13000+fulltag_nil)
+t_value = (0x13020+fulltag_symbol)
+misc_bias = fulltag_misc
+cons_bias = fulltag_cons
+define(`t_offset',(t_value-nil_value))
+	
+misc_header_offset = -fulltag_misc
+misc_data_offset = misc_header_offset+node_size /* first word of data    */
+misc_subtag_offset = misc_header_offset       /* low byte of header   */
+misc_dfloat_offset = misc_data_offset		/* double-floats are doubleword-aligned   */
+function_header_offset = -fulltag_function
+function_data_offset = function_header_offset+node_size	
+
+define_subtag(single_float,fulltag_imm_0,0)
+
+
+define_subtag(character,fulltag_imm_1,0)
+                	
+define_subtag(unbound,fulltag_imm_1,1)
+unbound_marker = subtag_unbound
+undefined = unbound_marker
+define_subtag(slot_unbound,fulltag_imm_1,2)
+slot_unbound_marker = subtag_slot_unbound
+define_subtag(illegal,fulltag_imm_1,3)
+illegal_marker = subtag_illegal
+define_subtag(no_thread_local_binding,fulltag_imm_1,4)
+no_thread_local_binding_marker = subtag_no_thread_local_binding
+define_subtag(reserved_frame,fulltag_imm_1,5)
+reserved_frame_marker = subtag_reserved_frame
+define_subtag(function_boundary_marker,fulltag_imm_1,15)                        
+
+	
+
+
+	
+/* The objects themselves look something like this:   */
+	
+/* Order of CAR and CDR doesn't seem to matter much - there aren't   */
+/* too many tricks to be played with predecrement/preincrement addressing.   */
+/* Keep them in the confusing MCL 3.0 order, to avoid confusion.   */
+	_struct(cons,-cons_bias)
+	 _node(cdr)
+	 _node(car)
+	_ends
+	
+	_structf(ratio)
+	 _node(numer)
+	 _node(denom)
+	_endstructf
+	
+	_structf(double_float)
+	 _word(value)
+         _word(val_low)
+	_endstructf
+	
+	_structf(macptr)
+	 _node(address)
+         _node(domain)
+         _node(type)
+	_endstructf
+	
+/* Functions are of (conceptually) unlimited size.  */
+	
+	_struct(_function,-misc_bias)
+	 _node(header)
+	 _node(codevector)
+	_ends
+
+	_struct(tsp_frame,0)
+	 _node(backlink)
+	 _node(save_rbp)
+	 _struct_label(fixed_overhead)
+	 _struct_label(data_offset)
+	_ends
+
+	_struct(csp_frame,0)
+	 _node(backlink)
+	 _node(save_rbp)
+	 _struct_label(fixed_overhead)
+	 _struct_label(data_offset)
+	_ends
+        
+
+
+	_structf(symbol,-fulltag_symbol)
+	 _node(pname)
+	 _node(vcell)
+	 _node(fcell)
+	 _node(package_predicate)
+	 _node(flags)
+         _node(plist)
+         _node(binding_index)
+	_endstructf
+
+	_structf(catch_frame)
+	 _node(catch_tag)	/* #<unbound> -> unwind-protect, else catch   */
+	 _node(link)		/* backpointer to previous catch frame   */
+	 _node(mvflag)		/* 0 if single-valued catch, fixnum 1 otherwise   */
+	 _node(rsp)		/* saved lisp sp   */
+	 _node(rbp)		/* saved lisp rbp   */
+	 _node(foreign_sp)      /* necessary ?    */
+	 _node(db_link)		/* head of special-binding chain   */
+	 _node(_save3)
+	 _node(_save2)
+	 _node(_save1)
+	 _node(_save0)
+	 _node(xframe)		/* exception frame chain   */
+	 _node(pc)		/* TRA of catch exit or cleanup form   */
+	_endstructf
+
+
+	_structf(vectorH)
+	 _node(logsize)
+	 _node(physsize)
+	 _node(data_vector)
+	 _node(displacement)
+	 _node(flags)
+	_endstructf	
+
+	_structf(arrayH)
+	 _node(rank)
+	 _node(physsize)
+	 _node(data_vector)
+	 _node(displacement)
+	 _node(flags)
+	 _struct_label(dim0)        
+	_endstructf	
+        	
+        
+	_struct(c_frame,0)	/* PowerOpen ABI C stack frame   */
+	 _node(backlink)
+	 _node(crsave)
+	 _node(savelr)
+	 _field(unused, 16)
+	 _node(savetoc)
+	 _struct_label(params)
+         _node(param0)
+         _node(param1)
+         _node(param2)
+         _node(param3)
+         _node(param4)
+         _node(param5)
+         _node(param6)
+         _node(param7)
+	 _struct_label(minsiz)
+	_ends
+
+
+	_struct(eabi_c_frame,0)
+	 _word(backlink) 
+	 _word(savelr)
+	 _word(param0)
+	 _word(param1)
+	 _word(param2)
+	 _word(param3)
+	 _word(param4)
+	 _word(param5)
+	 _word(param6)
+	 _word(param7)
+	 _struct_label(minsiz)
+	_ends
+
+	/* For entry to variable-argument-list functions   */
+	/* (e.g., via callback)   */
+	_struct(varargs_eabi_c_frame,0)
+	 _word(backlink)
+	 _word(savelr)
+	 _struct_label(va_list)
+	 _word(flags)		/* gpr count byte, fpr count byte, padding   */
+	 _word(overflow_arg_area)
+	 _word(reg_save_area)
+	 _field(padding,4)
+	 _struct_label(regsave)
+	 _field(gp_save,8*4)
+	 _field(fp_save,8*8)
+	 _word(old_backlink)
+	 _word(old_savelr)
+	 _struct_label(incoming_stack_args)
+	_ends
+        	
+	_struct(lisp_frame,0)
+	 _node(backlink) 
+	 _node(savera0)	
+	_ends
+
+	_struct(vector,-fulltag_misc)
+	 _node(header)
+	 _struct_label(data)
+	_ends
+
+        _struct(binding,0)
+         _node(link)
+         _node(sym)
+         _node(val)
+        _ends
+
+
+/* Nilreg-relative globals.  Talking the assembler into doing  */
+/* something reasonable here  */
+/* is surprisingly hard.   */
+
+symbol_extra = symbol.size-fulltag_symbol
+
+	
+	_struct(nrs,0x13020)
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(tsym)
+	 _struct_pad(symbol_extra)	/* t    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(nil)
+	 _struct_pad(symbol_extra)	/* nil    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(errdisp)
+	 _struct_pad(symbol_extra)	/* %err-disp    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(cmain)
+	 _struct_pad(symbol_extra)	/* cmain    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(eval)
+	 _struct_pad(symbol_extra)	/* eval    */
+ 
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(appevalfn)
+	 _struct_pad(symbol_extra)	/* apply-evaluated-function    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(error)
+	 _struct_pad(symbol_extra)	/* error    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(defun)
+	 _struct_pad(symbol_extra)	/* %defun    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(defvar)
+	 _struct_pad(symbol_extra)	/* %defvar    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(defconstant)
+	 _struct_pad(symbol_extra)	/* %defconstant    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(macrosym)
+	 _struct_pad(symbol_extra)	/* %macro    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(kernelrestart)
+	 _struct_pad(symbol_extra)	/* %kernel-restart    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(package)
+	 _struct_pad(symbol_extra)	/* *package*    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(total_bytes_freed)		/* *total-bytes-freed*   */
+	 _struct_pad(symbol_extra)
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(kallowotherkeys)
+	 _struct_pad(symbol_extra)	/* allow-other-keys    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(toplcatch)
+	 _struct_pad(symbol_extra)	/* %toplevel-catch%    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(toplfunc)
+	 _struct_pad(symbol_extra)	/* %toplevel-function%    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(callbacks)
+	 _struct_pad(symbol_extra)	/* %pascal-functions%    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(allmeteredfuns)
+	 _struct_pad(symbol_extra)	/* *all-metered-functions*    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(total_gc_microseconds)		/* *total-gc-microseconds*   */
+	 _struct_pad(symbol_extra)
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(builtin_functions)		/* %builtin-functions%   */
+	 _struct_pad(symbol_extra)                
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(udf)
+	 _struct_pad(symbol_extra)	/* %unbound-function%    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(init_misc)
+	 _struct_pad(symbol_extra)	/* %init-misc   */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(macro_code)
+	 _struct_pad(symbol_extra)	/* %macro-code%   */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(closure_code)
+	 _struct_pad(symbol_extra)      /* %closure-code%   */
+
+       	 _struct_pad(fulltag_symbol)
+	 _struct_label(new_gcable_ptr) /* %new-gcable-ptr   */
+	 _struct_pad(symbol_extra)
+	
+       	 _struct_pad(fulltag_symbol)
+	 _struct_label(gc_event_status_bits)
+	 _struct_pad(symbol_extra)	/* *gc-event-status-bits*    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(post_gc_hook)
+	 _struct_pad(symbol_extra)	/* *post-gc-hook*    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(handlers)
+	 _struct_pad(symbol_extra)	/* %handlers%    */
+
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(all_packages)
+	 _struct_pad(symbol_extra)	/* %all-packages%    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(keyword_package)
+	 _struct_pad(symbol_extra)	/* *keyword-package*    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(finalization_alist)
+	 _struct_pad(symbol_extra)	/* %finalization-alist%    */
+
+	 _struct_pad(fulltag_symbol)
+	 _struct_label(foreign_thread_control)
+	 _struct_pad(symbol_extra)	/* %foreign-thread-control    */
+
+	_ends
+
+define(`def_header',`
+$1 = ($2<<num_subtag_bits)|$3')
+
+	def_header(double_float_header,2,subtag_double_float)
+	def_header(two_digit_bignum_header,2,subtag_bignum)
+	def_header(three_digit_bignum_header,3,subtag_bignum)
+	def_header(four_digit_bignum_header,4,subtag_bignum)
+	def_header(five_digit_bignum_header,5,subtag_bignum)        
+	def_header(symbol_header,symbol.element_count,subtag_symbol)
+	def_header(value_cell_header,1,subtag_value_cell	)
+	def_header(macptr_header,macptr.element_count,subtag_macptr)
+	def_header(vectorH_header,vectorH.element_count,subtag_vectorH)
+
+	include(errors.s)
+
+/* Symbol bits that we care about  */
+	
+sym_vbit_bound = (0+fixnum_shift)
+sym_vbit_bound_mask = (1<<sym_vbit_bound)
+sym_vbit_const = (1+fixnum_shift)
+sym_vbit_const_mask = (1<<sym_vbit_const)
+
+	_struct(area,0)
+	 _node(pred) 
+	 _node(succ) 
+	 _node(low) 
+	 _node(high) 
+	 _node(active) 
+	 _node(softlimit) 
+	 _node(hardlimit) 
+	 _node(code) 
+	 _node(markbits) 
+	 _node(ndwords) 
+	 _node(older) 
+	 _node(younger) 
+	 _node(h) 
+	 _node(sofprot) 
+	 _node(hardprot) 
+	 _node(owner) 
+	 _node(refbits) 
+	 _node(nextref) 
+	_ends
+
+
+
+TCR_BIAS = 0
+		
+/*  Thread context record.  */
+
+	_struct(tcr,TCR_BIAS)
+	 _node(prev)		/* in doubly-linked list   */
+	 _node(next)		/* in doubly-linked list   */
+         _node(single_float_convert)
+	 _node(linear)		/* our linear (non-segment-based) address.   */
+         _node(save_rbp)        /* lisp RBP when in foreign code    */
+	 _word(lisp_mxcsr)
+	 _word(foreign_mxcsr)	
+	 _node(db_link)		/* special binding chain head   */
+	 _node(catch_top)	/* top catch frame   */
+	 _node(save_vsp)	/* VSP when in foreign code   */
+	 _node(save_tsp)	/* TSP when in foreign code   */
+	 _node(foreign_sp)	/* Saved foreign SP when in lisp code   */
+	 _node(cs_area)		/* cstack area pointer   */
+	 _node(vs_area)		/* vstack area pointer   */
+	 _node(ts_area)		/* tstack area pointer   */
+	 _node(cs_limit)	/* cstack overflow limit   */
+	 _word(bytes_consed_low)
+	 _word(bytes_consed_high)
+	 _node(log2_allocation_quantum)
+	 _node(interrupt_pending)
+	 _node(xframe)		/* per-thread exception frame list   */
+	 _node(errno_loc)	/* per-thread  errno location   */
+	 _node(ffi_exception)	/* mxcsr exception bits from ff-call   */
+	 _node(osid)		/* OS thread id   */
+         _node(valence)		/* odd when in foreign code 	  */
+	 _node(foreign_exception_status)
+	 _node(native_thread_info)
+	 _node(native_thread_id)
+	 _node(last_allocptr)
+	 _node(save_allocptr)
+	 _node(save_allocbase)
+	 _node(reset_completion)
+	 _node(activate)
+         _node(suspend_count)
+         _node(suspend_context)
+	 _node(pending_exception_context)
+	 _node(suspend)		/* semaphore for suspension notify   */
+	 _node(resume)		/* semaphore for resumption notify   */
+	 _node(flags)      
+	 _node(gc_context)
+         _node(termination_semaphore)
+         _node(unwinding)
+         _node(tlb_limit)
+         _node(tlb_pointer)     /* Consider using tcr+N as tlb_pointer   */
+	 _node(shutdown_count)
+         _node(next_tsp)
+         _node(safe_ref_address)
+         _node(pending_io_info)
+         _node(io_datum)
+	_ends
+
+        _struct(win64_context,0)
+         _field(P1Home, 8)
+         _field(P2Home, 8)
+         _field(P3Home, 8)
+         _field(P4Home, 8)
+         _field(P5Home, 8)
+         _field(P6Home, 8)
+         _field(ContextFlags, 4)
+         _field(MxCsr, 4)
+         _field(SegCs, 2)
+         _field(SegDs, 2)
+         _field(SegEs, 2)
+         _field(SegFs, 2)
+         _field(SegGs, 2)
+         _field(SegSs, 2)
+         _field(EFlags, 4)
+         _field(Dr0, 8)
+         _field(Dr1, 8)
+         _field(Dr2, 8)
+         _field(Dr3, 8)
+         _field(Dr6, 8)
+         _field(Dr7, 8)
+         _field(Rax, 8)
+         _field(Rcx, 8)
+         _field(Rdx, 8)
+         _field(Rbx, 8)
+         _field(Rsp, 8)
+         _field(Rbp, 8)
+         _field(Rsi, 8)
+         _field(Rdi, 8)
+         _field(R8, 8)
+         _field(R9, 8)
+         _field(R10, 8)
+         _field(R11, 8)
+         _field(R12, 8)
+         _field(R13, 8)
+         _field(R14, 8)
+         _field(R15, 8)
+         _field(Rip, 8)
+         _struct_label(fpstate)
+         _field(Header, 32)
+         _field(Legacy, 128)
+         _field(Xmm0, 16)
+         _field(Xmm1, 16)        
+         _field(Xmm2, 16)        
+         _field(Xmm3, 16)        
+         _field(Xmm4, 16)        
+         _field(Xmm5, 16)        
+         _field(Xmm6, 16)        
+         _field(Xmm7, 16)        
+         _field(Xmm8, 16)        
+         _field(Xmm9, 16)        
+         _field(Xmm10, 16)        
+         _field(Xmm11, 16)        
+         _field(Xmm12, 16)        
+         _field(Xmm13, 16)        
+         _field(Xmm14, 16)        
+         _field(Xmm15, 16)
+         _field(__pad, 96)
+         _field(VectorRegister, 416)
+         _field(VectorControl, 8)
+         _field(DebugControl, 8)
+         _field(LastBranchToRip, 8)
+         _field(LastBranchFromRip, 8)
+         _field(LastExceptionToRip, 8)
+         _field(LastExceptionFromRip, 8)
+ _ends
+
+	
+TCR_FLAG_BIT_FOREIGN = fixnum_shift
+TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)	
+TCR_FLAG_BIT_ALT_SUSPEND = (fixnumshift+2)
+TCR_FLAG_BIT_PROPAGATE_EXCEPTION = (fixnumshift+3)
+TCR_FLAG_BIT_SUSPEND_ACK_PENDING = (fixnumshift+4)
+TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
+TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
+TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)        
+	
+target_most_positive_fixnum = 1152921504606846975
+target_most_negative_fixnum = -1152921504606846976
+
+
+lisp_globals_limit = 0x13000
+        
+INTERRUPT_LEVEL_BINDING_INDEX = fixnumone
+
+c_stack_16_byte_aligned = 1
+        	
+		        
+                
Index: /branches/arm/lisp-kernel/x86-exceptions.c
===================================================================
--- /branches/arm/lisp-kernel/x86-exceptions.c	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-exceptions.c	(revision 13357)
@@ -0,0 +1,3813 @@
+/*
+   Copyright (C) 2005-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+#include "Threads.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#ifdef LINUX
+#include <strings.h>
+#include <sys/mman.h>
+#include <fpu_control.h>
+#include <linux/prctl.h>
+#endif
+#ifdef DARWIN
+#include <sysexits.h>
+#endif
+#ifndef WINDOWS
+#include <sys/syslog.h>
+#endif
+#ifdef WINDOWS
+#include <windows.h>
+#ifdef WIN_64
+#include <winternl.h>
+#include <ntstatus.h>
+#endif
+#ifndef EXCEPTION_WRITE_FAULT
+#define EXCEPTION_WRITE_FAULT 1
+#endif
+#endif
+
+/* VM page size in bytes.  Assumed, not queried from the OS. */
+int
+page_size = 4096;
+
+/* log2(page_size); used for page-alignment arithmetic below. */
+int
+log2_page_size = 12;
+
+
+/* Credit TCR with the bytes consed since the last segment was
+   handed out.  The mutator conses downward from last_allocptr, so
+   the amount consed is (last_allocptr - cur_allocptr).  Skipped
+   when save_allocbase is VOID_ALLOCPTR (no valid segment).
+   Clears last_allocptr so the same span isn't counted twice. */
+void
+update_bytes_allocated(TCR* tcr, void *cur_allocptr)
+{
+  BytePtr 
+    last = (BytePtr) tcr->last_allocptr, 
+    current = (BytePtr) cur_allocptr;
+  if (last && (tcr->save_allocbase != ((void *)VOID_ALLOCPTR))) {
+    tcr->bytes_allocated += last-current;
+  }
+  tcr->last_allocptr = 0;
+}
+
+
+
+//  This doesn't GC; it returns true if it made enough room, false
+//  otherwise.
+//  If "extend" is true, it can try to extend the dynamic area to
+//  satisfy the request.
+
+
+Boolean
+new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
+{
+  area *a;
+  natural newlimit, oldlimit;
+  natural log2_allocation_quantum = tcr->log2_allocation_quantum;
+
+  a  = active_dynamic_area;
+  oldlimit = (natural) a->active;
+  /* Round both the current frontier and the request up to the
+     thread's allocation quantum. */
+  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
+	      align_to_power_of_2(need, log2_allocation_quantum));
+  if (newlimit > (natural) (a->high)) {
+    if (extend) {
+      /* If GC is inhibited, extend by only what's needed; otherwise
+         over-extend by the gc threshold.  On failure, retry with
+         progressively smaller extensions, giving up below 4MB. */
+      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
+      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
+      do {
+        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
+          break;
+        }
+        extend_by = align_to_power_of_2(extend_by>>1,log2_allocation_quantum);
+        if (extend_by < 4<<20) {
+          return false;
+        }
+      } while (1);
+    } else {
+      return false;
+    }
+  }
+  /* Commit: the mutator will cons downward from newlimit (the new
+     allocptr) toward oldlimit (the new allocbase). */
+  a->active = (BytePtr) newlimit;
+  tcr->last_allocptr = (void *)newlimit;
+  tcr->save_allocptr = (void *)newlimit;
+  xpGPR(xp,Iallocptr) = (LispObj) newlimit;
+  tcr->save_allocbase = (void *) oldlimit;
+
+  return true;
+}
+
+/* Make room for an object of BYTES_NEEDED bytes, GCing if
+   necessary; on success, leave Iallocptr/save_allocptr pointing
+   DISP_FROM_ALLOCPTR bytes below the new segment limit.  Returns
+   false only if even a grown heap can't satisfy the request. */
+Boolean
+allocate_object(ExceptionInformation *xp,
+                natural bytes_needed, 
+                signed_natural disp_from_allocptr,
+		TCR *tcr)
+{
+  area *a = active_dynamic_area;
+
+  /* Maybe do an EGC */
+  if (a->older && lisp_global(OLDEST_EPHEMERAL)) {
+    if (((a->active)-(a->low)) >= a->threshold) {
+      gc_from_xp(xp, 0L);
+    }
+  }
+
+  /* Life is pretty simple if we can simply grab a segment
+     without extending the heap.
+  */
+  if (new_heap_segment(xp, bytes_needed, false, tcr)) {
+    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
+    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
+    return true;
+  }
+  
+  /* It doesn't make sense to try a full GC if the object
+     we're trying to allocate is larger than everything
+     allocated so far.
+  */
+  if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) {
+    untenure_from_area(tenured_area); /* force a full GC */
+    gc_from_xp(xp, 0L);
+  }
+  
+  /* Try again, growing the heap if necessary */
+  if (new_heap_segment(xp, bytes_needed, true, tcr)) {
+    xpGPR(xp, Iallocptr) -= disp_from_allocptr;
+    tcr->save_allocptr = (void *) (xpGPR(xp, Iallocptr));
+    return true;
+  }
+  
+  return false;
+}
+
+natural gc_deferred = 0, full_gc_deferred = 0;
+
+/* Per-thread hook passed to gc_like_from_xp() by the flash-freeze
+   trap; intentionally does nothing. */
+signed_natural
+flash_freeze(TCR *tcr, signed_natural param)
+{
+  return 0;
+}
+
+
+/* Dispatch a GC trap.  SELECTOR (from imm0) names the requested
+   operation - EGC control/configuration, heap-threshold
+   manipulation, freeze variants, or one of the GC flavors - and
+   ARG is its operand.  Results are returned to lisp in imm0 or
+   arg_z.  Always returns true (the trap is always "handled"). */
+Boolean
+handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
+{
+  LispObj selector = xpGPR(xp,Iimm0);
+#ifdef X8664
+  LispObj arg = xpGPR(xp,Iimm1);
+#else
+  LispObj arg = xpMMXreg(xp,Imm0);
+#endif
+  area *a = active_dynamic_area;
+  Boolean egc_was_enabled = (a->older != NULL);
+  
+  natural gc_previously_deferred = gc_deferred;
+
+  switch (selector) {
+  case GC_TRAP_FUNCTION_EGC_CONTROL:
+    /* arg != 0 enables the EGC; return the previous state in arg_z. */
+    egc_control(arg != 0, a->active);
+    xpGPR(xp,Iarg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
+    break;
+
+  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
+#ifdef X8664
+    a->threshold = unbox_fixnum(xpGPR(xp, Iarg_x));
+#else
+    a->threshold = unbox_fixnum(xpGPR(xp, Itemp0));
+#endif
+    g1_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_y));
+    g2_area->threshold = unbox_fixnum(xpGPR(xp, Iarg_z));
+    xpGPR(xp,Iarg_z) = lisp_nil+t_offset;
+    break;
+
+  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
+    if (((signed_natural) arg) > 0) {
+      lisp_heap_gc_threshold = 
+        align_to_power_of_2((arg-1) +
+                            (heap_segment_size - 1),
+                            log2_heap_segment_size);
+    }
+    /* fall through */
+  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
+    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
+    break;
+
+  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
+    /*  Try to put the current threshold in effect.  This may
+        need to disable/reenable the EGC. */
+    untenure_from_area(tenured_area);
+    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
+    if (egc_was_enabled) {
+      if ((a->high - a->active) >= a->threshold) {
+        tenure_to_area(tenured_area);
+      }
+    }
+    xpGPR(xp, Iimm0) = lisp_heap_gc_threshold;
+    break;
+
+  case GC_TRAP_FUNCTION_ENSURE_STATIC_CONSES:
+    ensure_static_conses(xp, tcr, 32768);
+    break;
+
+  case GC_TRAP_FUNCTION_FLASH_FREEZE: /* Like freeze below, but no GC */
+    untenure_from_area(tenured_area);
+    gc_like_from_xp(xp,flash_freeze,0);
+    a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
+    tenured_area->static_dnodes = area_dnode(a->active, a->low);
+    if (egc_was_enabled) {
+      tenure_to_area(tenured_area);
+    }
+    xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
+    break;
+
+  default:
+    /* All remaining selectors actually run a GC; account for
+       consing done since the last segment allocation first. */
+    update_bytes_allocated(tcr, (void *) tcr->save_allocptr);
+
+    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
+      if (!full_gc_deferred) {
+        gc_from_xp(xp, 0L);
+        break;
+      }
+      /* Tried to do a full GC when gc was disabled.  That failed,
+         so try full GC now */
+      selector = GC_TRAP_FUNCTION_GC;
+    }
+    
+    if (egc_was_enabled) {
+      egc_control(false, (BytePtr) a->active);
+    }
+    gc_from_xp(xp, 0L);
+    if (gc_deferred > gc_previously_deferred) {
+      full_gc_deferred = 1;
+    } else {
+      full_gc_deferred = 0;
+    }
+    /* Selectors above GC_TRAP_FUNCTION_GC are bit-masks naming
+       additional work (impurify/purify/save-application/freeze). */
+    if (selector > GC_TRAP_FUNCTION_GC) {
+      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
+        impurify_from_xp(xp, 0L);
+        /*        nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
+        lisp_global(OLDSPACE_DNODE_COUNT) = 0;
+        gc_from_xp(xp, 0L);
+      }
+      if (selector & GC_TRAP_FUNCTION_PURIFY) {
+        purify_from_xp(xp, 1);
+        lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active, managed_static_area->low);
+        gc_from_xp(xp, 0L);
+      }
+      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
+        OSErr err;
+        extern OSErr save_application(unsigned, Boolean);
+        area *vsarea = tcr->vs_area;
+
+#ifdef WINDOWS	
+        arg = _open_osfhandle(arg,0);
+#endif
+        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
+        err = save_application(arg, egc_was_enabled);
+        if (err == noErr) {
+          _exit(0);
+        }
+        fatal_oserr(": save_application", err);
+      }
+      switch (selector) {
+      case GC_TRAP_FUNCTION_FREEZE:
+        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
+        tenured_area->static_dnodes = area_dnode(a->active, a->low);
+        xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
+        break;
+      default:
+        break;
+      }
+    }
+    if (egc_was_enabled) {
+      egc_control(true, NULL);
+    }
+    break;
+  }
+  return true;
+}
+
+  
+
+
+
+/* Push VALUE onto the thread's lisp value stack (which grows
+   downward), updating the saved stack pointer in the context. */
+void
+push_on_lisp_stack(ExceptionInformation *xp, LispObj value)
+{
+  LispObj *stack_top = (LispObj *) xpGPR(xp, Isp);
+
+  *--stack_top = value;
+  xpGPR(xp, Isp) = (LispObj) stack_top;
+}
+
+
+/* Hard to know if or whether this is necessary in general.  For now,
+   do it when we get a "wrong number of arguments" trap.
+*/
+/* Emulate a lisp function's frame-building prologue after a "wrong
+   number of arguments" trap: pop the return address, then lay down
+   a stack frame and spill the register arguments exactly as the
+   function's own entry code would have done. */
+void
+finish_function_entry(ExceptionInformation *xp)
+{
+  natural nargs = xpGPR(xp,Inargs)>>fixnumshift;
+  signed_natural disp = nargs - nargregs;
+  LispObj *vsp =  (LispObj *) xpGPR(xp,Isp), ra = *vsp++;
+   
+  xpGPR(xp,Isp) = (LispObj) vsp;
+
+  if (disp > 0) {               /* implies that nargs > nargregs */
+    /* Some args were already pushed by the caller; build the frame
+       above them and spill the register args below it. */
+    vsp[disp] = xpGPR(xp,Ifp);
+    vsp[disp+1] = ra;
+    xpGPR(xp,Ifp) = (LispObj)(vsp+disp);
+#ifdef X8664
+    push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
+#endif
+    push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
+    push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
+  } else {
+    /* All args (if any) are in registers; push only as many as
+       were actually supplied. */
+    push_on_lisp_stack(xp,ra);
+    push_on_lisp_stack(xp,xpGPR(xp,Ifp));
+    xpGPR(xp,Ifp) = xpGPR(xp,Isp);
+#ifdef X8664
+    if (nargs == 3) {
+      push_on_lisp_stack(xp,xpGPR(xp,Iarg_x));
+    }
+#endif
+    if (nargs >= 2) {
+      push_on_lisp_stack(xp,xpGPR(xp,Iarg_y));
+    }
+    if (nargs >= 1) {
+      push_on_lisp_stack(xp,xpGPR(xp,Iarg_z));
+    }
+  }
+}
+
+/* Return true if ADDR lies within the uvector (or other
+   misc-tagged object) CONTAINER, i.e. between the object's tagged
+   address and the end of its elements. */
+Boolean
+object_contains_pc(LispObj container, LispObj addr)
+{
+  natural nelements;
+
+  if (fulltag_of(container) < fulltag_misc) {
+    return false;
+  }
+  nelements = header_element_count(header_of(container));
+  return ((addr >= container) &&
+          (addr < ((LispObj)&(deref(container, 1 + nelements)))));
+}
+
+/* Build an "exception callback frame" (xcf) on the lisp stack
+   describing where the exception occurred: the saved xframe link
+   and foreign sp, the return address, the raw context, the code
+   vector containing the PC (if one can be identified), a
+   fixnum-boxed PC relative to that vector, and the nominal
+   function.  Returns the address of the new frame (updated Isp). */
+LispObj
+create_exception_callback_frame(ExceptionInformation *xp, TCR *tcr)
+{
+  LispObj containing_uvector = 0, 
+    relative_pc, 
+    nominal_function = lisp_nil, 
+    f, tra, tra_f = 0, abs_pc;
+
+  f = xpGPR(xp,Ifn);
+  tra = *(LispObj*)(xpGPR(xp,Isp));
+
+#ifdef X8664
+  /* If the return address is a tagged TRA, try to recover the
+     function it belongs to from the "recover-fn-from-rip" idiom
+     that follows it. */
+  if (tag_of(tra) == tag_tra) {
+    if ((*((unsigned short *)tra) == RECOVER_FN_FROM_RIP_WORD0) &&
+        (*((unsigned char *)(tra+2)) == RECOVER_FN_FROM_RIP_BYTE2)) {
+      int sdisp = (*(int *) (tra+3));
+      tra_f = RECOVER_FN_FROM_RIP_LENGTH+tra+sdisp;
+    }
+    if (fulltag_of(tra_f) != fulltag_function) {
+      tra_f = 0;
+    }
+  } else {
+    tra = 0;
+  }
+#endif
+#ifdef X8632
+  if (fulltag_of(tra) == fulltag_tra) {
+    if (*(unsigned char *)tra == RECOVER_FN_OPCODE) {
+      tra_f = (LispObj)*(LispObj *)(tra + 1);
+    }
+    if (tra_f && header_subtag(header_of(tra_f)) != subtag_function) {
+      tra_f = 0;
+    }
+  } else {
+    tra = 0;
+  }
+#endif
+
+  abs_pc = (LispObj)xpPC(xp);
+
+  /* Prefer Ifn as the nominal function if it really is one;
+     otherwise fall back to the function recovered from the TRA. */
+#ifdef X8664
+  if (fulltag_of(f) == fulltag_function) 
+#else
+    if (fulltag_of(f) == fulltag_misc &&
+        header_subtag(header_of(f)) == subtag_function) 
+#endif
+      {
+        nominal_function = f;
+      } else {
+      if (tra_f) {
+        nominal_function = tra_f;
+      }
+    }
+  
+  /* Attribute the PC to a containing code object: try Ifn, then
+     Ixfn, then the TRA's function. */
+  f = xpGPR(xp,Ifn);
+  if (object_contains_pc(f, abs_pc)) {
+    containing_uvector = untag(f)+fulltag_misc;
+  } else {
+    f = xpGPR(xp,Ixfn);
+    if (object_contains_pc(f, abs_pc)) {
+      containing_uvector = untag(f)+fulltag_misc;
+    } else {
+      if (tra_f) {
+        f = tra_f;
+        if (object_contains_pc(f, abs_pc)) {
+          containing_uvector = untag(f)+fulltag_misc;
+          relative_pc = (abs_pc - f) << fixnumshift;
+        }
+      }
+    }
+  }
+  if (containing_uvector) {
+    relative_pc = (abs_pc - (LispObj)&(deref(containing_uvector,1))) << fixnumshift;
+  } else {
+    /* No containing object; record the absolute PC as a fixnum. */
+    containing_uvector = lisp_nil;
+    relative_pc = abs_pc << fixnumshift;
+  }
+  push_on_lisp_stack(xp,(LispObj)(tcr->xframe->prev));
+  push_on_lisp_stack(xp,(LispObj)(tcr->foreign_sp));
+  push_on_lisp_stack(xp,tra);
+  push_on_lisp_stack(xp,(LispObj)xp);
+  push_on_lisp_stack(xp,containing_uvector); 
+  push_on_lisp_stack(xp,relative_pc);
+  push_on_lisp_stack(xp,nominal_function);
+  push_on_lisp_stack(xp,0);
+  push_on_lisp_stack(xp,xpGPR(xp,Ifp));
+  xpGPR(xp,Ifp) = xpGPR(xp,Isp);
+  return xpGPR(xp,Isp);
+}
+
+/* Error code reported to the CMAIN callback for a failed heap
+   allocation. */
+#ifndef XMEMFULL
+#define XMEMFULL (76)
+#endif
+
+/* Report a failed lisp heap allocation to the CMAIN callback.
+   The thread's allocation pointers are voided first; the callback's
+   return value is the number of bytes to skip past the trap. */
+void
+lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed )
+{
+  LispObj xcf = create_exception_callback_frame(xp, tcr),
+    cmain = nrs_CMAIN.vcell;
+  int skip;
+    
+  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
+  xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
+
+  skip = callback_to_lisp(tcr, cmain, xp, xcf, -1, XMEMFULL, 0, 0);
+  xpPC(xp) += skip;
+}
+
+/*
+  Allocate a large list, where "large" means "large enough to
+  possibly trigger the EGC several times if this was done
+  by individually allocating each CONS."  The number of 
+  ocnses in question is in arg_z; on successful return,
+  the list will be in arg_z 
+*/
+
+Boolean
+allocate_list(ExceptionInformation *xp, TCR *tcr)
+{
+  natural 
+    nconses = (unbox_fixnum(xpGPR(xp,Iarg_z))),
+    bytes_needed = (nconses << dnode_shift);
+  LispObj
+    prev = lisp_nil,
+    current,
+    initial = xpGPR(xp,Iarg_y);
+
+  if (nconses == 0) {
+    /* Silly case */
+    xpGPR(xp,Iarg_z) = lisp_nil;
+    xpGPR(xp,Iallocptr) = lisp_nil;
+    return true;
+  }
+  update_bytes_allocated(tcr, (void *)tcr->save_allocptr);
+  if (allocate_object(xp,bytes_needed,bytes_needed-fulltag_cons,tcr)) {
+    /* NB: arithmetic on a void* (GCC extension). */
+    tcr->save_allocptr -= fulltag_cons;
+    /* Thread the conses together from the low end up: each cons's
+       cdr is the previously-built cons, each car is INITIAL. */
+    for (current = xpGPR(xp,Iallocptr);
+         nconses;
+         prev = current, current+= dnode_size, nconses--) {
+      deref(current,0) = prev;
+      deref(current,1) = initial;
+    }
+    xpGPR(xp,Iarg_z) = prev;
+  } else {
+    lisp_allocation_failure(xp,tcr,bytes_needed);
+  }
+  return true;
+}
+
+/* Handle an allocation trap: the thread exhausted its allocation
+   segment.  The pending object's size is recovered from the tagged
+   allocptr (plus imm1/imm0 for uvectors).  Always returns true;
+   failure is reported to lisp via lisp_allocation_failure. */
+Boolean
+handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
+{
+  natural cur_allocptr, bytes_needed;
+  unsigned allocptr_tag;
+  signed_natural disp;
+  
+  cur_allocptr = xpGPR(xp,Iallocptr);
+  allocptr_tag = fulltag_of(cur_allocptr);
+  if (allocptr_tag == fulltag_misc) {
+    /* A uvector allocation; the displacement was passed in an
+       immediate register. */
+#ifdef X8664
+    disp = xpGPR(xp,Iimm1);
+#else
+    disp = xpGPR(xp,Iimm0);
+#endif
+  } else {
+    /* A cons. */
+    disp = dnode_size-fulltag_cons;
+  }
+  bytes_needed = disp+allocptr_tag;
+
+  update_bytes_allocated(tcr,((BytePtr)(cur_allocptr+disp)));
+  if (allocate_object(xp, bytes_needed, disp, tcr)) {
+    return true;
+  }
+  
+  lisp_allocation_failure(xp,tcr,bytes_needed);
+
+  return true;
+}
+
+  
+/* Call the lisp callback whose foreign entry point is in the
+   macptr CALLBACK_MACPTR, passing XP and ARG1..ARG5.  The MXCSR is
+   forced to its default (0x1f80) for the duration and the
+   exception lock is released around the call.  On x8632, the
+   boxed "save" registers and next-method context are spilled to
+   the lisp stack across the call.  Returns the callback's int
+   result (typically a byte count used to advance the PC). */
+int
+callback_to_lisp (TCR * tcr, LispObj callback_macptr, ExceptionInformation *xp,
+                  natural arg1, natural arg2, natural arg3, natural arg4, natural arg5)
+{
+  natural  callback_ptr;
+  int delta;
+  unsigned old_mxcsr = get_mxcsr();
+#ifdef X8632
+  natural saved_node_regs_mask = tcr->node_regs_mask;
+  natural saved_unboxed0 = tcr->unboxed0;
+  natural saved_unboxed1 = tcr->unboxed1;
+  LispObj *vsp = (LispObj *)xpGPR(xp, Isp);
+#endif
+
+  set_mxcsr(0x1f80);
+
+  /* Put the active stack pointers where .SPcallback expects them */
+#ifdef X8632
+  tcr->node_regs_mask = X8632_DEFAULT_NODE_REGS_MASK;
+
+  *--vsp = tcr->save0;
+  *--vsp = tcr->save1;
+  *--vsp = tcr->save2;
+  *--vsp = tcr->save3;
+  *--vsp = tcr->next_method_context;
+  xpGPR(xp, Isp) = (LispObj)vsp;
+#endif
+  tcr->save_vsp = (LispObj *)xpGPR(xp, Isp);
+  tcr->save_fp = (LispObj *)xpGPR(xp, Ifp);
+
+  /* Call back.  The caller of this function may have modified stack/frame
+     pointers (and at least should have called prepare_for_callback()).
+  */
+  callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
+  UNLOCK(lisp_global(EXCEPTION_LOCK), tcr);
+  delta = ((int (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5);
+  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
+
+#ifdef X8632
+  /* Restore the spilled registers in reverse order. */
+  tcr->next_method_context = *vsp++;
+  tcr->save3 = *vsp++;
+  tcr->save2 = *vsp++;
+  tcr->save1 = *vsp++;
+  tcr->save0 = *vsp++;
+  xpGPR(xp, Isp) = (LispObj)vsp;
+
+  tcr->node_regs_mask = saved_node_regs_mask;
+  tcr->unboxed0 = saved_unboxed0;
+  tcr->unboxed1 = saved_unboxed1;
+#endif
+  set_mxcsr(old_mxcsr);
+  return delta;
+}
+
+/* Deliver a pending interrupt to lisp via the CMAIN callback,
+   preserving the stack/frame pointers - and the word just beyond
+   the vsp, which the callback frame apparently can clobber -
+   as well as errno, across the call. */
+void
+callback_for_interrupt(TCR *tcr, ExceptionInformation *xp)
+{
+  LispObj *save_vsp = (LispObj *)xpGPR(xp,Isp),
+    word_beyond_vsp = save_vsp[-1],
+    save_fp = xpGPR(xp,Ifp),
+    xcf = create_exception_callback_frame(xp, tcr);
+  int save_errno = errno;
+
+  callback_to_lisp(tcr, nrs_CMAIN.vcell,xp, xcf, 0, 0, 0, 0);
+  xpGPR(xp,Ifp) = save_fp;
+  xpGPR(xp,Isp) = (LispObj)save_vsp;
+  save_vsp[-1] = word_beyond_vsp;
+  errno = save_errno;
+}
+
+/* Handle an "int n" error trap (op0 == 0xcd is the INT opcode) by
+   calling the ERRDISP callback.  Returns false - leaving the trap
+   to the kernel debugger - if ERRDISP isn't set up as a macptr. */
+Boolean
+handle_error(TCR *tcr, ExceptionInformation *xp)
+{
+  pc program_counter = (pc)xpPC(xp);
+  unsigned char op0 = program_counter[0], op1 = program_counter[1];
+  LispObj rpc, errdisp = nrs_ERRDISP.vcell,
+    save_vsp = xpGPR(xp,Isp), xcf0,
+    save_fp = xpGPR(xp,Ifp);
+  int skip;
+
+  if ((fulltag_of(errdisp) == fulltag_misc) &&
+      (header_subtag(header_of(errdisp)) == subtag_macptr)) {
+
+    /* int 0xc0..0xc2: wrong-number-of-args traps; complete the
+       function's frame-building first. */
+    if ((op0 == 0xcd) && (op1 >= 0xc0) && (op1 <= 0xc2)) {
+      finish_function_entry(xp);
+    }
+    xcf0 = create_exception_callback_frame(xp, tcr);
+    skip = callback_to_lisp(tcr, errdisp, xp, xcf0, 0, 0, 0, 0);
+    if (skip == -1) {
+      /* The callback asked us to resume at the (possibly relocated)
+         PC recorded in the callback frame. */
+      xcf *xcf1 = (xcf *)xcf0;
+      LispObj container = xcf1->containing_uvector;
+      
+      rpc = xcf1->relative_pc >> fixnumshift;
+      if (container == lisp_nil) {
+        xpPC(xp) = rpc;
+      } else {
+        xpPC(xp) = (LispObj)(&(deref(container,
+#ifdef X8664
+                                     1
+#else
+                                     0
+#endif
+)))+rpc;
+      }
+        
+      skip = 0;
+    }
+    xpGPR(xp,Ifp) = save_fp;
+    xpGPR(xp,Isp) = save_vsp;
+    if ((op0 == 0xcd) && (op1 == 0xc7)) {
+      /* Continue after an undefined function call. The function
+         that had been undefined has already been called (in the
+         break loop), and a list of the values that it returned
+         is in the xp's %arg_z.  A function that returns those
+         values is in the xp's %fn; we just have to adjust the
+         stack (keeping the return address in the right place
+         and discarding any stack args/reserved stack frame),
+         then set nargs and the PC so that that function's
+         called when we resume.
+      */
+      LispObj *vsp =(LispObj *)save_vsp, ra = *vsp;
+      int nargs = xpGPR(xp, Inargs)>>fixnumshift;
+
+#ifdef X8664
+      if (nargs > 3) {
+        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 3)));
+        push_on_lisp_stack(xp,ra);
+      }
+#else
+      if (nargs > 2) {
+        xpGPR(xp,Isp)=(LispObj) (vsp + (1 + 2 + (nargs - 2)));
+        push_on_lisp_stack(xp,ra);
+      }
+#endif
+      xpPC(xp) = xpGPR(xp,Ifn);
+      xpGPR(xp,Inargs) = 1<<fixnumshift;
+    } else {
+      xpPC(xp) += skip;
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+/* Fault handlers indexed by the protected area's lisp_protection_kind
+   ("why" field); dispatched from handle_fault. */
+protection_handler
+* protection_handlers[] = {
+  do_spurious_wp_fault,
+  do_soft_stack_overflow,
+  do_soft_stack_overflow,
+  do_soft_stack_overflow,
+  do_hard_stack_overflow,    
+  do_hard_stack_overflow,
+  do_hard_stack_overflow,
+};
+
+
+/* Maybe this'll work someday.  We may have to do something to
+   make the thread look like it's not handling an exception */
+void
+reset_lisp_process(ExceptionInformation *xp)
+{
+  /* Intentionally empty; see the comment above. */
+}
+
+/* Hard guard page hit: there's no room left to recover, and
+   reset_lisp_process can't do anything yet, so decline to handle
+   the fault (the process will die). */
+Boolean
+do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
+{
+  reset_lisp_process(xp);
+  return false;
+}
+
+
+/* Write fault in an area we believed was write-protected for no
+   current reason; not handled. */
+Boolean
+do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
+{
+
+  return false;
+}
+
+Boolean
+do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr)
+{
+  /* Trying to write into a guard page on the vstack or tstack.
+     Allocate a new stack segment, emulate stwu and stwux for the TSP, and
+     signal an error_stack_overflow condition.
+      */
+  lisp_protection_kind which = prot_area->why;
+  Boolean on_TSP = (which == kTSPsoftguard);
+  LispObj save_fp = xpGPR(xp,Ifp);
+  LispObj save_vsp = xpGPR(xp,Isp), 
+    xcf,
+    cmain = nrs_CMAIN.vcell;
+  area *a;
+  protected_area_ptr soft;
+  TCR *tcr = get_tcr(false);
+  int skip;
+
+  if ((fulltag_of(cmain) == fulltag_misc) &&
+      (header_subtag(header_of(cmain)) == subtag_macptr)) {
+    if (on_TSP) {
+      a = tcr->ts_area;
+    } else {
+      a = tcr->vs_area;
+    }
+    /* Unprotect the soft guard page so the overflow handler has
+       room to run, then report the overflow to CMAIN. */
+    soft = a->softprot;
+    unprotect_area(soft);
+    xcf = create_exception_callback_frame(xp, tcr);
+    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, on_TSP, 0, 0);
+    xpGPR(xp,Ifp) = save_fp;
+    xpGPR(xp,Isp) = save_vsp;
+    xpPC(xp) += skip;
+    return true;
+  }
+  return false;
+}
+
+/* Return true if the memory fault described by XP/INFO was a write
+   access.  On the Unix ports this is bit 1 of the x86 page-fault
+   error code, exposed in a platform-specific context slot. */
+Boolean
+is_write_fault(ExceptionInformation *xp, siginfo_t *info)
+{
+#ifdef DARWIN
+  return (UC_MCONTEXT(xp)->__es.__err & 0x2) != 0;
+#endif
+#if defined(LINUX) || defined(SOLARIS)
+  return (xpGPR(xp,REG_ERR) & 0x2) != 0;
+#endif
+#ifdef FREEBSD
+  return (xp->uc_mcontext.mc_err & 0x2) != 0;
+#endif
+#ifdef WINDOWS
+  /* For an access violation, ExceptionInformation[0] is 0 for a
+     read and 1 (EXCEPTION_WRITE_FAULT) for a write; [1] is the
+     fault address (cf. handle_fault).  ExceptionFlags only records
+     whether the exception is continuable, so testing it here was
+     wrong. */
+  return (info->ExceptionInformation[0] == EXCEPTION_WRITE_FAULT);
+#endif
+}
+
+/* Handle SIGSEGV/SIGBUS: safe-reference probes, guard-page hits
+   (dispatched through protection_handlers), faults on the pure
+   (readonly) area, and writes to watched objects.  Anything else
+   is reported to CMAIN if the thread faulted while running lisp
+   code; returns false if the fault wasn't handled. */
+Boolean
+handle_fault(TCR *tcr, ExceptionInformation *xp, siginfo_t *info, int old_valence)
+{
+#ifdef FREEBSD
+#ifdef X8664
+  BytePtr addr = (BytePtr) xp->uc_mcontext.mc_addr;
+#else
+  BytePtr addr = (BytePtr) info->si_addr;
+#endif
+#else
+#ifdef WINDOWS
+  BytePtr addr = (BytePtr) info->ExceptionInformation[1];
+#else
+  BytePtr addr = (BytePtr) info->si_addr;
+#endif
+#endif
+  Boolean valid = IS_PAGE_FAULT(info,xp);
+
+  if (valid) {
+    /* A "safe reference" (%safe-get-ptr) that faulted: return 0 in
+       imm0 and resume at the saved return address. */
+    if (addr && (addr == tcr->safe_ref_address)) {
+      xpGPR(xp,Iimm0) = 0;
+      xpPC(xp) = xpGPR(xp,Ira0);
+      return true;
+    }
+    
+    {
+      protected_area *a = find_protected_area(addr);
+      protection_handler *handler;
+      
+      if (a) {
+        handler = protection_handlers[a->why];
+        return handler(xp, a, addr);
+      }
+    }
+
+    /* Writes to the readonly (pure) area: just unprotect the page. */
+    if ((addr >= readonly_area->low) &&
+	(addr < readonly_area->active)) {
+      UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
+		      page_size);
+      return true;
+    }
+
+    {
+      area *a = area_containing(addr);
+
+      if (a && a->code == AREA_WATCHED && addr < a->high) {
+	/* caught a write to a watched object */
+	LispObj *p = (LispObj *)a->low;
+	LispObj node = *p;
+	unsigned tag_n = fulltag_of(node);
+	LispObj cmain = nrs_CMAIN.vcell;
+	LispObj obj;
+
+	/* Reconstruct the watched object's tagged pointer from the
+	   header (or lack of one) at the start of the area. */
+	if (immheader_tag_p(tag_n) || nodeheader_tag_p(tag_n))
+	  obj = (LispObj)p + fulltag_misc;
+	else
+	  obj = (LispObj)p + fulltag_cons;
+
+	if ((fulltag_of(cmain) == fulltag_misc) &&
+	    (header_subtag(header_of(cmain)) == subtag_macptr)) {
+	  LispObj save_vsp = xpGPR(xp, Isp);
+	  LispObj save_fp = xpGPR(xp, Ifp);
+	  LispObj xcf;
+	  natural offset = (LispObj)addr - obj;
+	  int skip;
+
+	  push_on_lisp_stack(xp, obj);
+	  xcf = create_exception_callback_frame(xp, tcr);
+
+	  /* The magic 2 means this was a write to a watched object */
+	  skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGSEGV, 2,
+				  (natural)addr, offset);
+	  xpPC(xp) += skip;
+	  xpGPR(xp, Ifp) = save_fp;
+	  xpGPR(xp, Isp) = save_vsp;
+	  return true;
+	}
+      }
+    }
+  }
+
+  if (old_valence == TCR_STATE_LISP) {
+    LispObj cmain = nrs_CMAIN.vcell,
+      xcf;
+    if ((fulltag_of(cmain) == fulltag_misc) &&
+      (header_subtag(header_of(cmain)) == subtag_macptr)) {
+      xcf = create_exception_callback_frame(xp, tcr);
+      callback_to_lisp(tcr, cmain, xp, xcf, SIGBUS, valid ? is_write_fault(xp,info) : (natural)-1, valid ? (natural)addr : 0, 0);
+    }
+  }
+  return false;
+}
+
+/* Report SIGFPE to the CMAIN callback, restoring vsp/fp afterward
+   and advancing the PC by the callback's returned byte count.
+   Returns false if CMAIN isn't a macptr. */
+Boolean
+handle_floating_point_exception(TCR *tcr, ExceptionInformation *xp, siginfo_t *info)
+{
+  int code,skip;
+  LispObj  xcf, cmain = nrs_CMAIN.vcell,
+    save_vsp = xpGPR(xp,Isp),
+    save_fp = xpGPR(xp,Ifp);
+#ifdef WINDOWS
+  code = info->ExceptionCode;
+#else
+  code = info->si_code;
+#endif  
+
+  if ((fulltag_of(cmain) == fulltag_misc) &&
+      (header_subtag(header_of(cmain)) == subtag_macptr)) {
+    xcf = create_exception_callback_frame(xp, tcr);
+    skip = callback_to_lisp(tcr, cmain, xp, xcf, SIGFPE, code, 0, 0);
+    xpPC(xp) += skip;
+    xpGPR(xp,Ifp) = save_fp;
+    xpGPR(xp,Isp) = save_vsp;
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
+/* Grow the thread's thread-local-binding table so the index popped
+   from the lisp stack fits; new slots are initialized to the
+   "no thread-local binding" marker.  Returns false if realloc
+   fails (the old table remains valid in that case). */
+Boolean
+extend_tcr_tlb(TCR *tcr, ExceptionInformation *xp)
+{
+  LispObj index, old_limit = tcr->tlb_limit, new_limit, new_bytes;
+  LispObj *old_tlb = tcr->tlb_pointer, *new_tlb, *work, *tos;
+
+  tos = (LispObj*)(xpGPR(xp,Isp));
+  index = *tos++;
+  (xpGPR(xp,Isp))=(LispObj)tos;
+  
+  /* Round the new size up to a 4K boundary. */
+  new_limit = align_to_power_of_2(index+1,12);
+  new_bytes = new_limit-old_limit;
+  new_tlb = realloc(old_tlb, new_limit);
+
+  if (new_tlb == NULL) {
+    return false;
+  }
+  work = (LispObj *) ((BytePtr)new_tlb+old_limit);
+
+  while (new_bytes) {
+    *work++ = no_thread_local_binding_marker;
+    new_bytes -= sizeof(LispObj);
+  }
+  tcr->tlb_pointer = new_tlb;
+  tcr->tlb_limit = new_limit;
+  return true;
+}
+
+
+#if defined(FREEBSD) || defined(DARWIN)
+/* Map an MXCSR exception-status bit index (ie, de, ze, oe, ue, pe)
+   to the corresponding SIGFPE si_code value. */
+static
+char mxcsr_bit_to_fpe_code[] = {
+  FPE_FLTINV,                   /* ie */
+  0,                            /* de */
+  FPE_FLTDIV,                   /* ze */
+  FPE_FLTOVF,                   /* oe */
+  FPE_FLTUND,                   /* ue */
+  FPE_FLTRES                    /* pe */
+};
+
+void
+decode_vector_fp_exception(siginfo_t *info, uint32_t mxcsr)
+{
+  /* If the exception appears to be an XMM FP exception, try to
+     determine what it was by looking at bits in the mxcsr.
+  */
+  int xbit, maskbit;
+  
+  /* Report the first exception whose status bit is set and whose
+     corresponding mask bit is clear (i.e. the trap was enabled). */
+  for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
+    if ((mxcsr & (1 << xbit)) &&
+        !(mxcsr & (1 << maskbit))) {
+      info->si_code = mxcsr_bit_to_fpe_code[xbit];
+      return;
+    }
+  }
+}
+
+#ifdef FREEBSD
+/* FreeBSD often delivers SIGFPE for XMM exceptions with si_code 0;
+   recover the real cause from the saved MXCSR in the mcontext. */
+void
+freebsd_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
+{
+  if (info->si_code == 0) {
+#ifdef X8664
+    struct savefpu *fpu = (struct savefpu *) &(xp->uc_mcontext.mc_fpstate);
+#else
+    struct ccl_savexmm *fpu = (struct ccl_savexmm *) &(xp->uc_mcontext.mc_fpstate);
+#endif
+    uint32_t mxcsr = fpu->sv_env.en_mxcsr;
+
+    decode_vector_fp_exception(info, mxcsr);
+  }
+}
+#endif
+
+#ifdef DARWIN
+/* Darwin reports XMM exceptions as EXC_I386_SSEEXTERR without a
+   specific code; recover the real cause from the saved MXCSR. */
+void
+darwin_decode_vector_fp_exception(siginfo_t *info, ExceptionInformation *xp)
+{
+  if (info->si_code == EXC_I386_SSEEXTERR) {
+    uint32_t mxcsr = UC_MCONTEXT(xp)->__fs.__fpu_mxcsr;
+
+    decode_vector_fp_exception(info, mxcsr);
+  }
+}
+
+#endif
+
+#endif
+
+/* Copy up to MAX characters of a lisp string into C_STRING and
+   NUL-terminate it.  C_STRING must have room for MAX+1 bytes.
+   Only the low 8 bits of each character code are kept. */
+void
+get_lisp_string(LispObj lisp_string, char *c_string, natural max)
+{
+  lisp_char_code *src = (lisp_char_code *)  (ptr_from_lispobj(lisp_string + misc_data_offset));
+  natural i, n = header_element_count(header_of(lisp_string));
+
+  if (n > max) {
+    n = max;
+  }
+
+  for (i = 0; i < n; i++) {
+    c_string[i] = 0xff & (src[i]);
+  }
+  c_string[n] = 0;
+}
+
+/* Main exception dispatcher: decode SIGNUM and the instruction at
+   the PC into lisp traps ("int n" UUOs), memory faults, XUUOs
+   (illegal-instruction sequences used for thread operations), and
+   FP exceptions.  Returns true if the exception was handled and
+   execution can resume. */
+Boolean
+handle_exception(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
+{
+  pc program_counter = (pc)xpPC(context);
+
+  switch (signum) {
+  case SIGNUM_FOR_INTN_TRAP:
+    if (IS_MAYBE_INT_TRAP(info,context)) {
+      /* Something mapped to SIGSEGV/SIGBUS that has nothing to do with
+	 a memory fault.  On x86, an "int n" instruction that's
+         not otherwise implemented causes a "protection fault".  Of
+         course that has nothing to do with accessing protected
+         memory; of course, most Unices act as if it did.*/
+      if ((program_counter != NULL) &&
+          (*program_counter == INTN_OPCODE)) {
+        program_counter++;
+        /* The byte after the INT opcode selects the UUO. */
+        switch (*program_counter) {
+        case UUO_ALLOC_TRAP:
+          if (handle_alloc_trap(context, tcr)) {
+            xpPC(context) += 2;	/* we might have GCed. */
+            return true;
+          }
+          break;
+        case UUO_GC_TRAP:
+          if (handle_gc_trap(context, tcr)) {
+            xpPC(context) += 2;
+            return true;
+          }
+          break;
+	case UUO_WATCH_TRAP:
+	  /* add or remove watched object */
+	  if (handle_watch_trap(context, tcr)) {
+	    xpPC(context) += 2;
+	    return true;
+	  }
+	  break;
+        case UUO_DEBUG_TRAP:
+          xpPC(context) = (natural) (program_counter+1);
+          lisp_Debugger(context, info, debug_entry_dbg, false, "Lisp Breakpoint");
+          return true;
+            
+        case UUO_DEBUG_TRAP_WITH_STRING:
+          xpPC(context) = (natural) (program_counter+1);
+          {
+            char msg[512];
+
+            get_lisp_string(xpGPR(context,Iarg_z),msg, sizeof(msg)-1);
+            lisp_Debugger(context, info, debug_entry_dbg, false, msg);
+          }
+	  return true;
+          
+        default:
+          return handle_error(tcr, context);
+	}
+      } else {
+	return false;
+      }
+
+    } else {
+      return handle_fault(tcr, context, info, old_valence);
+    }
+    break;
+
+  case SIGNAL_FOR_PROCESS_INTERRUPT:
+    tcr->interrupt_pending = 0;
+    callback_for_interrupt(tcr, context);
+    return true;
+    break;
+
+
+  case SIGILL:
+    /* XUUOs: a reserved 3-byte illegal-instruction sequence whose
+       third byte selects a cross-thread operation on the TCR in
+       arg_z. */
+    if ((program_counter[0] == XUUO_OPCODE_0) &&
+	(program_counter[1] == XUUO_OPCODE_1)) {
+      TCR *target = (TCR *)xpGPR(context, Iarg_z);
+
+      switch (program_counter[2]) {
+      case XUUO_TLB_TOO_SMALL:
+        if (extend_tcr_tlb(tcr,context)) {
+          xpPC(context)+=3;
+          return true;
+        }
+	break;
+	
+      case XUUO_INTERRUPT_NOW:
+	callback_for_interrupt(tcr,context);
+	xpPC(context)+=3;
+	return true;
+
+      case XUUO_SUSPEND_NOW:
+	xpPC(context)+=3;
+	return true;
+
+      case XUUO_INTERRUPT:
+        raise_thread_interrupt(target);
+	xpPC(context)+=3;
+	return true;
+
+      case XUUO_SUSPEND:
+        xpGPR(context,Iimm0) = (LispObj) lisp_suspend_tcr(target);
+	xpPC(context)+=3;
+	return true;
+
+      case XUUO_SUSPEND_ALL:
+        lisp_suspend_other_threads();
+	xpPC(context)+=3;
+	return true;
+
+
+      case XUUO_RESUME:
+        xpGPR(context,Iimm0) = (LispObj) lisp_resume_tcr(target);
+	xpPC(context)+=3;
+	return true;
+        
+      case XUUO_RESUME_ALL:
+        lisp_resume_other_threads();
+	xpPC(context)+=3;
+	return true;
+	
+      case XUUO_KILL:
+        xpGPR(context,Iimm0) = (LispObj)kill_tcr(target);
+        xpPC(context)+=3;
+        return true;
+
+      case XUUO_ALLOCATE_LIST:
+        allocate_list(context,tcr);
+        xpPC(context)+=3;
+        return true;
+
+      default:
+	return false;
+      }
+    } else {
+      return false;
+    }
+    break;
+    
+  case SIGFPE:
+#ifdef FREEBSD
+    /* As of 6.1, FreeBSD/AMD64 doesn't seem real comfortable
+       with this newfangled XMM business (and therefore info->si_code
+       is often 0 on an XMM FP exception).
+       Try to figure out what really happened by decoding mxcsr
+       bits.
+    */
+    freebsd_decode_vector_fp_exception(info,context);
+#endif
+#ifdef DARWIN
+    /* Same general problem with Darwin as of 8.7.2 */
+    darwin_decode_vector_fp_exception(info,context);
+#endif
+
+    return handle_floating_point_exception(tcr, context, info);
+
+#if SIGBUS != SIGNUM_FOR_INTN_TRAP
+  case SIGBUS:
+    return handle_fault(tcr, context, info, old_valence);
+#endif
+    
+#if SIGSEGV != SIGNUM_FOR_INTN_TRAP
+  case SIGSEGV:
+    return handle_fault(tcr, context, info, old_valence);
+#endif    
+    
+  default:
+    return false;
+  }
+}
+
+
/* 
   Current thread has all signals masked.  Before unmasking them,
   make it appear that the current thread has been suspended.
   (This is to handle the case where another thread is trying
   to GC before this thread is able to seize the exception lock.)
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  /* Remember the valence we entered with; the caller restores it via
     exit_signal_handler(). */
  int old_valence = tcr->valence;

  /* Publish the interrupted context and mark this thread as waiting for
     the exception lock; other threads treat TCR_STATE_EXCEPTION_WAIT as
     "effectively suspended" for GC purposes. */
  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

#ifdef WINDOWS
  /* If another thread asked us to suspend before we got this far,
     acknowledge the suspend request and wait to be resumed. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    SEM_RAISE(tcr->suspend);
    SEM_WAIT_FOREVER(tcr->resume);
  }
#else
  /* Unmask signals in the interrupted context now that we appear
     suspended. */
  ALLOW_EXCEPTIONS(context);
#endif
  return old_valence;
}  
+
/*
  Seize the global exception lock, then push XF (recording the
  interrupted context) onto the tcr's chain of exception frames.
  While the lock is held the thread runs with TCR_STATE_FOREIGN
  valence; unlock_exception_lock_in_handler() undoes all of this.
*/
void
wait_for_exception_lock_in_handler(TCR *tcr, 
                                   ExceptionInformation *context,
                                   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#if 0
  fprintf(dbgout, "0x" LISP " has exception lock\n", tcr);
#endif
  xf->curr = context;
#ifdef X8632
  /* x8632 tracks which GPRs hold lisp objects in a bitmask; save it
     alongside the context. */
  xf->node_regs_mask = tcr->node_regs_mask;
#endif
  xf->prev = tcr->xframe;
  tcr->xframe =  xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN; 
}
+
/*
  Undo wait_for_exception_lock_in_handler(): pop the top exception
  frame (restoring pending_exception_context and, on x8632, the
  node-register mask), mark the thread as returning from an exception,
  and release the global exception lock.
*/
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
#ifdef X8632
  tcr->node_regs_mask = tcr->xframe->node_regs_mask;
#endif
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
#if 0
  fprintf(dbgout, "0x" LISP " released exception lock\n", tcr);
#endif
}
+
+/* 
+   If an interrupt is pending on exception exit, try to ensure
+   that the thread sees it as soon as it's able to run.
+*/
+#ifdef WINDOWS
/* No-op on Windows: pending interrupts are delivered by other means. */
void
raise_pending_interrupt(TCR *tcr)
{
}
/* No-op on Windows: handler exit goes through restore_windows_context(). */
void
exit_signal_handler(TCR *tcr, int old_valence)
{
}
/* Stub on Windows: exceptions arrive via windows_exception_handler(),
   not POSIX signals.  Note that tcr/old_valence are unconditional
   parameters here, unlike the POSIX variant's DARWIN-only extras. */
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context, TCR *tcr, int old_valence)
{
}
+#else
+void
+raise_pending_interrupt(TCR *tcr)
+{
+  if ((TCR_INTERRUPT_LEVEL(tcr) >= 0) &&
+      (tcr->interrupt_pending)) {
+    pthread_kill((pthread_t)(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT);
+  }
+}
+
+void
+exit_signal_handler(TCR *tcr, int old_valence)
+{
+  sigset_t mask;
+  sigfillset(&mask);
+#ifdef FREEBSD
+  sigdelset(&mask,SIGTRAP);
+#endif
+  
+  pthread_sigmask(SIG_SETMASK,&mask, NULL);
+  tcr->valence = old_valence;
+  tcr->pending_exception_context = NULL;
+}
+
/*
  Main POSIX signal handler for synchronous exceptions.  On Darwin the
  tcr and old_valence are supplied by the Mach-exception glue; elsewhere
  they're computed here.  Seizes the exception lock, dispatches to
  handle_exception(), and falls into the kernel debugger if the
  exception wasn't handled.  Normally exits via SIGRETURN(context).
*/
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context
#ifdef DARWIN
               , TCR *tcr, int old_valence
#endif
)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  xframe_list xframe_link;
#ifndef DARWIN
  TCR *tcr = get_tcr(false);

  int old_valence = prepare_to_wait_for_exception_lock(tcr, context);
#endif
  /* Honor a suspend request that arrived while we were masked: re-raise
     the suspend signal at ourselves before taking the lock. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }
  wait_for_exception_lock_in_handler(tcr,context, &xframe_link);


  if (! handle_exception(signum, info, context, tcr, old_valence)) {
    char msg[512];
    Boolean foreign = (old_valence != TCR_STATE_LISP);

    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x" LISP ", context->regs at #x" LISP "", signum, xpPC(context), (natural)xpGPRvector(context));
    
    if (lisp_Debugger(context, info, signum,  foreign, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);
#ifndef DARWIN_USE_PSEUDO_SIGRETURN
  exit_signal_handler(tcr, old_valence);
#endif
  /* raise_pending_interrupt(tcr); */
#ifdef DARWIN_GS_HACK
  if (gs_was_tcr) {
    set_gs_address(tcr);
  }
#endif
#ifndef DARWIN_USE_PSEUDO_SIGRETURN
  SIGRETURN(context);
#endif
}
+#endif
+
+
+
+
+#ifdef LINUX
+/* type of pointer to saved fp state */
+#ifdef X8664
+typedef fpregset_t FPREGS;
+#else
+typedef struct _fpstate *FPREGS;
+#endif
+LispObj *
+copy_fpregs(ExceptionInformation *xp, LispObj *current, FPREGS *destptr)
+{
+  FPREGS src = (FPREGS)(xp->uc_mcontext.fpregs), dest;
+  
+  if (src) {
+    dest = ((FPREGS)current)-1;
+    *dest = *src;
+    *destptr = dest;
+    current = (LispObj *) dest;
+  }
+  return current;
+}
+#endif
+
+#ifdef DARWIN
+LispObj *
+copy_darwin_mcontext(MCONTEXT_T context, 
+                     LispObj *current, 
+                     MCONTEXT_T *out)
+{
+  MCONTEXT_T dest = ((MCONTEXT_T)current)-1;
+  dest = (MCONTEXT_T) (((LispObj)dest) & ~15);
+
+  *dest = *context;
+  *out = dest;
+  return (LispObj *)dest;
+}
+#endif
+
+LispObj *
+copy_siginfo(siginfo_t *info, LispObj *current)
+{
+  siginfo_t *dest = ((siginfo_t *)current) - 1;
+#if !defined(LINUX) || !defined(X8632)
+  dest = (siginfo_t *) (((LispObj)dest)&~15);
+#endif
+  *dest = *info;
+  return (LispObj *)dest;
+}
+
+#ifdef LINUX
+typedef FPREGS copy_ucontext_last_arg_t;
+#else
+typedef void * copy_ucontext_last_arg_t;
+#endif
+
+#ifndef WINDOWS
/*
  Copy CONTEXT to a slot just below CURRENT (16-byte aligned except on
  x8632 Linux) and return the lowered stack pointer.  On Linux the copy
  is pointed at the FP state previously relocated by copy_fpregs(); the
  stack and link fields are cleared because they describe the original
  frame, not the copy.
*/
LispObj *
copy_ucontext(ExceptionInformation *context, LispObj *current, copy_ucontext_last_arg_t fp)
{
  ExceptionInformation *dest = ((ExceptionInformation *)current)-1;
#if !defined(LINUX) || !defined(X8632)
  dest = (ExceptionInformation *) (((LispObj)dest) & ~15);
#endif

  *dest = *context;
  /* Fix it up a little; where's the signal mask allocated, if indeed
     it is "allocated" ? */
#ifdef LINUX
  dest->uc_mcontext.fpregs = (fpregset_t)fp;
#endif
  dest->uc_stack.ss_sp = 0;
  dest->uc_stack.ss_size = 0;
  dest->uc_stack.ss_flags = 0;
  dest->uc_link = NULL;
  return (LispObj *)dest;
}
+#endif
+
+
+LispObj *
+tcr_frame_ptr(TCR *tcr)
+{
+  ExceptionInformation *xp;
+  LispObj *fp;
+
+  if (tcr->pending_exception_context)
+    xp = tcr->pending_exception_context;
+  else if (tcr->valence == TCR_STATE_LISP) {
+    xp = tcr->suspend_context;
+  } else {
+    xp = NULL;
+  }
+  if (xp) {
+    fp = (LispObj *)xpGPR(xp, Ifp);
+  } else {
+    fp = tcr->save_fp;
+  }
+  return fp;
+}
+
+
+LispObj *
+find_foreign_rsp(LispObj rsp, area *foreign_area, TCR *tcr)
+{
+
+  if (((BytePtr)rsp < foreign_area->low) ||
+      ((BytePtr)rsp > foreign_area->high)) {
+    rsp = (LispObj)(tcr->foreign_sp);
+  }
+  return (LispObj *) (((rsp-128) & ~15));
+}
+
+#ifdef X8632
+#ifdef LINUX
+/* This is here for debugging.  On entry to a signal handler that
+   receives info and context arguments, the stack should look exactly
+   like this.  The "pretcode field" of the structure is the address
+   of code that does an rt_sigreturn syscall, and rt_sigreturn expects
+   %esp at the time of that syscall to be pointing just past the
+   pretcode field.
+   handle_signal_on_foreign_stack() and helpers have to be very
+   careful to duplicate this "structure" exactly.
+   Note that on x8664 Linux, rt_sigreturn expects a ucontext to
+   be on top of the stack (with a siginfo_t underneath it.)
+   It sort of half-works to do sigreturn via setcontext() on 
+   x8632 Linux, but (a) it may not be available on some distributions
+   and (b) even a relatively modern version of it uses "fldenv" to
+   restore FP context, and "fldenv" isn't nearly good enough.
+*/
+
/* Mirror of the kernel's x8632 rt_sigframe layout described above;
   field order and types must match the kernel exactly. */
struct rt_sigframe {
	char *pretcode;
	int sig;
	siginfo_t  *pinfo;
	void  *puc;
	siginfo_t info;
	struct ucontext uc;
	struct _fpstate fpstate;
	char retcode[8];
};
/* Present only so the structure is visible to a debugger; never
   assigned by this code. */
struct rt_sigframe *rtsf = 0;
+
+#endif
+#endif
+
+
+#ifndef WINDOWS
+/* x8632 Linux requires that the stack-allocated siginfo is nearer
+   the top of stack than the stack-allocated ucontext.  If other
+   platforms care, they expect the ucontext to be nearer the top
+   of stack.
+*/
+
+#if defined(LINUX) && defined(X8632)
+#define UCONTEXT_ON_TOP_OF_STACK 0
+#else
+#define UCONTEXT_ON_TOP_OF_STACK 1
+#endif
/*
  Rebuild the signal frame (saved FP state, siginfo, ucontext, and a
  return address) on the thread's foreign stack, then switch to that
  stack and invoke HANDLER(signum, info_copy, xp) there.  Control does
  not return here; when the handler finishes it resumes at
  RETURN_ADDRESS.  The copy order is dictated by what the platform's
  sigreturn expects to find on the stack (see UCONTEXT_ON_TOP_OF_STACK).
*/
void
handle_signal_on_foreign_stack(TCR *tcr,
                               void *handler, 
                               int signum, 
                               siginfo_t *info, 
                               ExceptionInformation *context,
                               LispObj return_address
#ifdef DARWIN_GS_HACK
                               , Boolean gs_was_tcr
#endif
                               )
{
#ifdef LINUX
  FPREGS fpregs = NULL;
#else
  void *fpregs = NULL;
#endif
#ifdef DARWIN
  MCONTEXT_T mcontextp = NULL;
#endif
  siginfo_t *info_copy = NULL;
  ExceptionInformation *xp = NULL;
  LispObj *foreign_rsp = find_foreign_rsp(xpGPR(context,Isp), tcr->cs_area, tcr);

#ifdef LINUX
  foreign_rsp = copy_fpregs(context, foreign_rsp, &fpregs);
#endif
#ifdef DARWIN
  foreign_rsp = copy_darwin_mcontext(UC_MCONTEXT(context), foreign_rsp, &mcontextp);
#endif
#if UCONTEXT_ON_TOP_OF_STACK
  /* copy info first */
  foreign_rsp = copy_siginfo(info, foreign_rsp);
  info_copy = (siginfo_t *)foreign_rsp;
  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
  xp = (ExceptionInformation *)foreign_rsp;
#else
  /* x8632 Linux: the siginfo must sit nearer the top of stack than the
     ucontext (see comment above UCONTEXT_ON_TOP_OF_STACK). */
  foreign_rsp = copy_ucontext(context, foreign_rsp, fpregs);
  xp = (ExceptionInformation *)foreign_rsp;
  foreign_rsp = copy_siginfo(info, foreign_rsp);
  info_copy = (siginfo_t *)foreign_rsp;
#endif
#ifdef DARWIN
  UC_MCONTEXT(xp) = mcontextp;
#endif
  /* Push the address the handler should return to. */
  *--foreign_rsp = return_address;
#ifdef DARWIN_GS_HACK
  if (gs_was_tcr) {
    set_gs_address(tcr);
  }
#endif
  switch_to_foreign_stack(foreign_rsp,handler,signum,info_copy,xp);
}
+#endif
+
+
+#ifndef WINDOWS
+#ifndef USE_SIGALTSTACK
/*
  Trampoline used when sigaltstack isn't available: if the signal was
  delivered while running on the lisp value stack, rebuild the frame on
  the foreign (C) stack and run signal_handler() there; otherwise call
  it in place.
*/
void
arbstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
#if 1
  if (tcr->valence != TCR_STATE_LISP) {
    FBug(context, "exception in foreign context");
  }
#endif
  {
    area *vs = tcr->vs_area;
    BytePtr current_sp = (BytePtr) current_stack_pointer();


    if ((current_sp >= vs->low) &&
        (current_sp < vs->high)) {
      handle_signal_on_foreign_stack(tcr,
                                     signal_handler,
                                     signum,
                                     info,
                                     context,
                                     (LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                     , false
#endif

                                     );
    } else {
      /* Not on the value stack, so presumably already on a C stack. */
      signal_handler(signum, info, context, tcr, 0);
    }
  }
}
+
+#else
/*
  Trampoline used with sigaltstack: the signal is delivered on the
  alternate stack, so unconditionally rebuild the frame on the foreign
  stack and run signal_handler() there.
*/
void
altstack_signal_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
{
  TCR* tcr = get_tcr(true);
#if 1
  if (tcr->valence != TCR_STATE_LISP) {
    FBug(context, "exception in foreign context");
  }
#endif
  handle_signal_on_foreign_stack(tcr,signal_handler,signum,info,context,(LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                 , false
#endif
);
}
+#endif
+#endif
+
+Boolean
+stack_pointer_on_vstack_p(LispObj stack_pointer, TCR *tcr)
+{
+  area *a = tcr->vs_area;
+ 
+  return (((BytePtr)stack_pointer <= a->high) &&
+          ((BytePtr)stack_pointer > a->low));
+}
+
+
+#ifdef WINDOWS
+extern DWORD restore_windows_context(ExceptionInformation *, TCR *, int);
+#endif
+
/*
  Handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If the thread can't take an
  interrupt right now (interrupt level negative, not running lisp code,
  unwinding, or sp/fp off the value stack), set the high bit of
  tcr->interrupt_pending and return.  Otherwise, if the lisp callback in
  nrs_CMAIN is installed, tidy up any interrupted consing and temp-stack
  allocation and route the interrupt through handle_exception() under
  the exception lock.
*/
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR *tcr = get_interrupt_tcr(false);
  /* NOTE(review): tcr is dereferenced here before the NULL check
     below; presumably get_interrupt_tcr() never returns NULL on this
     path — confirm. */
  int old_valence = tcr->valence;

  if (tcr) {
    if ((TCR_INTERRUPT_LEVEL(tcr) < 0) ||
        (tcr->valence != TCR_STATE_LISP) ||
        (tcr->unwinding != 0) ||
        ! stack_pointer_on_vstack_p(xpGPR(context,Isp), tcr) ||
        ! stack_pointer_on_vstack_p(xpGPR(context,Ifp), tcr)) {
      /* Can't interrupt now; remember that one is pending (top bit). */
      tcr->interrupt_pending = (((natural) 1)<< (nbits_in_word - ((natural)1)));
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
          (header_subtag(header_of(cmain)) == subtag_macptr)) {
	/* 
	   This thread can (allegedly) take an interrupt now. 
        */

        xframe_list xframe_link;
        signed_natural alloc_displacement = 0;
        LispObj 
          *next_tsp = tcr->next_tsp,
          *save_tsp = tcr->save_tsp,
          *p,
          q;
        natural old_foreign_exception = tcr->flags & (1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);

        tcr->flags &= ~(1 << TCR_FLAG_BIT_FOREIGN_EXCEPTION);
            
        /* Pop any uncommitted temp-stack frame; it's reinstated (with
           zeroed contents) after the callback below. */
        if (next_tsp != save_tsp) {
          tcr->next_tsp = save_tsp;
        } else {
          next_tsp = NULL;
        }
        /* have to do this before allowing interrupts */
        pc_luser_xp(context, tcr, &alloc_displacement);
        old_valence = prepare_to_wait_for_exception_lock(tcr, context);
        wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
        handle_exception(signum, info, context, tcr, old_valence);
        if (alloc_displacement) {
          tcr->save_allocptr -= alloc_displacement;
        }
        if (next_tsp) {
          /* Reinstate the temp-stack frame, zero-filling it. */
          tcr->next_tsp = next_tsp;
          p = next_tsp;
          while (p != save_tsp) {
            *p++ = 0;
          }
          q = (LispObj)save_tsp;
          *next_tsp = q;
        }
        tcr->flags |= old_foreign_exception;
        unlock_exception_lock_in_handler(tcr);
#ifndef WINDOWS
        exit_signal_handler(tcr, old_valence);
#endif
      }
    }
  }
#ifdef DARWIN_GS_HACK
  if (gs_was_tcr) {
    set_gs_address(tcr);
  }
#endif
#ifdef WINDOWS
  restore_windows_context(context,tcr,old_valence);
#else
  SIGRETURN(context);
#endif
}
+
+
+#ifndef WINDOWS
+#ifndef USE_SIGALTSTACK
/*
  Trampoline for interrupt_handler when sigaltstack isn't used: if the
  signal arrived while running on the lisp value stack, move to the
  foreign stack first; otherwise run the handler in place.
*/
void
arbstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR *tcr = get_interrupt_tcr(false);
  area *vs = tcr->vs_area;
  BytePtr current_sp = (BytePtr) current_stack_pointer();

  if ((current_sp >= vs->low) &&
      (current_sp < vs->high)) {
    handle_signal_on_foreign_stack(tcr,
                                   interrupt_handler,
                                   signum,
                                   info,
                                   context,
                                   (LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                   ,gs_was_tcr
#endif
                                   );
  } else {
    /* If we're not on the value stack, we pretty much have to be on
       the C stack.  Just run the handler. */
#ifdef DARWIN_GS_HACK
    if (gs_was_tcr) {
      set_gs_address(tcr);
    }
#endif
    interrupt_handler(signum, info, context);
  }
}
+
+#else /* altstack works */
+  
/*
  Trampoline for interrupt_handler when sigaltstack is used: always
  rebuild the frame on the foreign stack and run the handler there.
*/
void
altstack_interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR *tcr = get_interrupt_tcr(false);
  handle_signal_on_foreign_stack(tcr,interrupt_handler,signum,info,context,(LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                 ,gs_was_tcr
#endif
                                 );
}
+
+#endif
+#endif
+
+#ifndef WINDOWS
+void
+install_signal_handler(int signo, void * handler)
+{
+  struct sigaction sa;
+  
+  sa.sa_sigaction = (void *)handler;
+  sigfillset(&sa.sa_mask);
+#ifdef FREEBSD
+  /* Strange FreeBSD behavior wrt synchronous signals */
+  sigdelset(&sa.sa_mask,SIGTRAP);  /* let GDB work */
+#endif
+  sa.sa_flags = 
+    0 /* SA_RESTART */
+#ifdef USE_SIGALTSTACK
+    | SA_ONSTACK
+#endif
+    | SA_SIGINFO;
+
+  sigaction(signo, &sa, NULL);
+}
+#endif
+
+#ifdef WINDOWS
/*
  Console control handler: on Ctrl-C, set the lisp INTFLAG global (as a
  fixnum) so the lisp side can process the interrupt; report all other
  events as unhandled so the default processing occurs.
*/
BOOL 
CALLBACK ControlEventHandler(DWORD event)
{
  switch(event) {
  case CTRL_C_EVENT:
    lisp_global(INTFLAG) = (1 << fixnumshift);
    return TRUE;
    break;
  default:
    return FALSE;
  }
}
+
/* Map MXCSR exception-flag bit positions (ie, de, ze, oe, ue, pe) to
   the corresponding Windows FP exception codes; the denormal slot is
   left 0 and never reported by the decoding loop below. */
static
DWORD mxcsr_bit_to_fpe_code[] = {
  EXCEPTION_FLT_INVALID_OPERATION, /* ie */
  0,                            /* de */
  EXCEPTION_FLT_DIVIDE_BY_ZERO, /* ze */
  EXCEPTION_FLT_OVERFLOW,       /* oe */
  EXCEPTION_FLT_UNDERFLOW,      /* ue */
  EXCEPTION_FLT_INEXACT_RESULT  /* pe */
};
+
+#ifndef STATUS_FLOAT_MULTIPLE_FAULTS
+#define STATUS_FLOAT_MULTIPLE_FAULTS 0xc00002b4
+#endif
+
+#ifndef STATUS_FLOAT_MULTIPLE_TRAPS
+#define  STATUS_FLOAT_MULTIPLE_TRAPS 0xc00002b5
+#endif
+
/*
  Translate a Windows exception CODE to the POSIX signal number the
  portable handlers expect, or -1 if unrecognized.  For the win32
  "multiple fault/trap" SSE cases, decode MXCSR to find the first
  raised-and-unmasked exception and rewrite info->ExceptionCode to the
  specific FP code before reporting SIGFPE.
*/
int
map_windows_exception_code_to_posix_signal(DWORD code, siginfo_t *info, ExceptionInformation *context)
{
  switch (code) {
#ifdef WIN_32
  case STATUS_FLOAT_MULTIPLE_FAULTS:
  case STATUS_FLOAT_MULTIPLE_TRAPS:
    {
      int xbit, maskbit;
      DWORD mxcsr = *(xpMXCSRptr(context));

      /* Exception flags occupy the low 6 MXCSR bits; the corresponding
         mask bits start at MXCSR_IM_BIT. */
      for (xbit = 0, maskbit = MXCSR_IM_BIT; xbit < 6; xbit++, maskbit++) {
        if ((mxcsr & (1 << xbit)) &&
            !(mxcsr & (1 << maskbit))) {
          info->ExceptionCode = mxcsr_bit_to_fpe_code[xbit];
          break;
        }
      }
    }
    return SIGFPE;
#endif
      
  case EXCEPTION_ACCESS_VIOLATION:
    return SIGSEGV;
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
  case EXCEPTION_INT_DIVIDE_BY_ZERO:
  case EXCEPTION_INT_OVERFLOW:
    return SIGFPE;
  case EXCEPTION_PRIV_INSTRUCTION:
  case EXCEPTION_ILLEGAL_INSTRUCTION:
    return SIGILL;
  case EXCEPTION_IN_PAGE_ERROR:
    return SIGBUS;
  default:
    return -1;
  }
}
+
+
/*
  Runs (already on the foreign stack) for a Windows exception: take the
  exception lock, translate the code to the POSIX signal number the
  portable machinery expects, and dispatch via handle_exception();
  drop into the kernel debugger if unhandled.  Finishes by resuming the
  (possibly modified) context via restore_windows_context().
*/
LONG
windows_exception_handler(EXCEPTION_POINTERS *exception_pointers, TCR *tcr)
{
  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;
  int old_valence, signal_number;
  ExceptionInformation *context = exception_pointers->ContextRecord;
  siginfo_t *info = exception_pointers->ExceptionRecord;
  xframe_list xframes;

  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  wait_for_exception_lock_in_handler(tcr, context, &xframes);

  signal_number = map_windows_exception_code_to_posix_signal(code, info, context);
  
  if (!handle_exception(signal_number, info, context, tcr, old_valence)) {
    char msg[512];
    Boolean foreign = (old_valence != TCR_STATE_LISP);

    snprintf(msg, sizeof(msg), "Unhandled exception %d (windows code 0x%x) at 0x%Ix, context->regs at 0x%Ix", signal_number, code, xpPC(context), (natural)xpGPRvector(context));
    
    if (lisp_Debugger(context, info, signal_number,  foreign, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }
  unlock_exception_lock_in_handler(tcr);
  return restore_windows_context(context, tcr, old_valence);
}
+
/*
  Rewrite CONTEXT so that resuming it calls handler(new_ep, tcr) with
  the stack pointer based at NEW_SP, using windows_halt as the fake
  return address (the handler is not expected to return through it).
  Win64 passes the arguments in registers (leaving the 32-byte ABI
  argument-save area); win32 passes them on the stack.
*/
void
setup_exception_handler_call(CONTEXT *context,
                             LispObj new_sp,
                             void *handler,
                             EXCEPTION_POINTERS *new_ep,
                             TCR *tcr)
{
  extern void windows_halt(void);
  LispObj *p = (LispObj *)new_sp;
#ifdef WIN_64
  p-=4;                         /* win64 abi argsave nonsense */
  *(--p) = (LispObj)windows_halt;
  context->Rsp = (DWORD64)p;
  context->Rip = (DWORD64)handler;
  context->Rcx = (DWORD64)new_ep;
  context->Rdx = (DWORD64)tcr;
#else
  p-=4;                          /* args on stack, stack aligned */
  p[0] = (LispObj)new_ep;
  p[1] = (LispObj)tcr;
  *(--p) = (LispObj)windows_halt;
  context->Esp = (DWORD)p;
  context->Eip = (DWORD)handler;
#ifdef WIN32_ES_HACK
  context->SegEs = context->SegDs;
#endif
#endif
  context->EFlags &= ~0x400;  /* clear direction flag */
}
+
/*
  Copy the interrupted CONTEXT and exception record — plus an
  EXCEPTION_POINTERS referencing the copies — onto the tcr's foreign
  stack (128 bytes below the saved foreign sp, each piece aligned down
  to 16 bytes), then point CONTEXT at HANDLER so that resuming the
  thread runs handler(new_ep, tcr) on the foreign stack.
*/
void
prepare_to_handle_windows_exception_on_foreign_stack(TCR *tcr,
                                                     CONTEXT *context,
                                                     void *handler,
                                                     EXCEPTION_POINTERS *original_ep)
{
  LispObj foreign_rsp = 
    (LispObj) (tcr->foreign_sp - 128) & ~15;
  CONTEXT *new_context;
  siginfo_t *new_info;
  EXCEPTION_POINTERS *new_ep;

  new_context = ((CONTEXT *)(foreign_rsp&~15))-1;
  *new_context = *context;
  foreign_rsp = (LispObj)new_context;
  new_info = ((siginfo_t *)(foreign_rsp&~15))-1;
  *new_info = *original_ep->ExceptionRecord;
  foreign_rsp = (LispObj)new_info;
  new_ep = ((EXCEPTION_POINTERS *)(foreign_rsp&~15))-1;
  foreign_rsp = (LispObj)new_ep & ~15;
  new_ep->ContextRecord = new_context;
  new_ep->ExceptionRecord = new_info;
  setup_exception_handler_call(context,foreign_rsp,handler,new_ep, tcr);
}
+
/*
  Vectored exception handler (first-chance): pass non-error codes (high
  bit clear) on to other handlers; for errors, arrange for
  windows_exception_handler() to run on the foreign stack and resume.
  An error that arrives while already running on the foreign (C) stack
  is unexpected and drops into the kernel debugger.
*/
LONG CALLBACK
windows_arbstack_exception_handler(EXCEPTION_POINTERS *exception_pointers)
{
  extern void ensure_safe_for_string_operations(void);
  DWORD code = exception_pointers->ExceptionRecord->ExceptionCode;


  
  if ((code & 0x80000000L) == 0) {
    return EXCEPTION_CONTINUE_SEARCH;
  } else {
    TCR *tcr = get_interrupt_tcr(false);
    area *cs = tcr->cs_area;
    BytePtr current_sp = (BytePtr) current_stack_pointer();
    CONTEXT *context = exception_pointers->ContextRecord;
    
    ensure_safe_for_string_operations();

    if ((current_sp >= cs->low) &&
        (current_sp < cs->high)) {
      debug_show_registers(context, exception_pointers->ExceptionRecord, 0);
      FBug(context, "Exception on foreign stack\n");
      return EXCEPTION_CONTINUE_EXECUTION;
    }

    prepare_to_handle_windows_exception_on_foreign_stack(tcr,
                                                         context,
                                                         windows_exception_handler,
                                                         exception_pointers);
    return EXCEPTION_CONTINUE_EXECUTION;
  }
}
+
+
/* Windows: route all exceptions through the vectored handler, called
   ahead of any frame-based (SEH) handlers. */
void
install_pmcl_exception_handlers()
{
  AddVectoredExceptionHandler(1,windows_arbstack_exception_handler);
}
+#else
+void
+install_pmcl_exception_handlers()
+{
+#ifndef DARWIN  
+  void *handler = (void *)
+#ifdef USE_SIGALTSTACK
+    altstack_signal_handler
+#else
+    arbstack_signal_handler;
+#endif
+  ;
+  install_signal_handler(SIGILL, handler);
+  
+  install_signal_handler(SIGBUS, handler);
+  install_signal_handler(SIGSEGV,handler);
+  install_signal_handler(SIGFPE, handler);
+#endif
+  
+  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
+#ifdef USE_SIGALTSTACK
+			 altstack_interrupt_handler
+#else
+                         arbstack_interrupt_handler
+#endif
+);
+  signal(SIGPIPE, SIG_IGN);
+}
+#endif
+
+#ifndef WINDOWS
+#ifndef USE_SIGALTSTACK
/*
  Trampoline for suspend_resume_handler when sigaltstack isn't used.
  When the signal arrives on the lisp value stack, the real handler is
  run on the foreign stack and this function returns right after it.
  In every other case — including a NULL tcr — control falls through
  to the direct suspend_resume_handler() call at the bottom.
*/
void
arbstack_suspend_resume_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr != NULL) {
    area *vs = tcr->vs_area;
    BytePtr current_sp = (BytePtr) current_stack_pointer();
    
    if ((current_sp >= vs->low) &&
        (current_sp < vs->high)) {
      return
        handle_signal_on_foreign_stack(tcr,
                                       suspend_resume_handler,
                                       signum,
                                       info,
                                       context,
                                       (LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                       ,gs_was_tcr
#endif
                                       );
    } else {
      /* If we're not on the value stack, we pretty much have to be on
         the C stack.  Just run the handler. */
#ifdef DARWIN_GS_HACK
      if (gs_was_tcr) {
        set_gs_address(tcr);
      }
#endif
    }
  }
  suspend_resume_handler(signum, info, context);
}
+
+
+#else /* altstack works */
/*
  Trampoline for suspend_resume_handler when sigaltstack is used:
  always rebuild the frame on the foreign stack and run the handler
  there.
*/
void
altstack_suspend_resume_handler(int signum, siginfo_t *info, ExceptionInformation  *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR* tcr = get_tcr(true);
  handle_signal_on_foreign_stack(tcr,
                                 suspend_resume_handler,
                                 signum,
                                 info,
                                 context,
                                 (LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                 ,gs_was_tcr
#endif
                                 );
}
+#endif
+#endif
+
+
+/* This should only be called when the tcr_area_lock is held */
+void
+empty_tcr_stacks(TCR *tcr)
+{
+  if (tcr) {
+    area *a;
+
+    tcr->valence = TCR_STATE_FOREIGN;
+    a = tcr->vs_area;
+    if (a) {
+      a->active = a->high;
+    }
+    a = tcr->ts_area;
+    if (a) {
+      a->active = a->high;
+    }
+    a = tcr->cs_area;
+    if (a) {
+      a->active = a->high;
+    }
+  }
+}
+
+#ifdef WINDOWS
/* No-op on Windows: thread termination doesn't use signals there. */
void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
}
+#else
/*
  Handler for thread_kill_signal: mark the tcr's stacks as empty (so
  stale frames aren't scanned), unblock all signals, and terminate the
  thread.  Never returns.
*/
void
thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *xp)
{
#ifdef DARWIN_GS_HACK
  /* NOTE(review): gs_was_tcr is unused here; ensure_gs_pthread() is
     called for its side effect only. */
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR *tcr = get_tcr(false);
  sigset_t mask;

  sigemptyset(&mask);

  empty_tcr_stacks(tcr);

  pthread_sigmask(SIG_SETMASK,&mask,NULL);
  pthread_exit(NULL);
}
+#endif
+
+#ifndef WINDOWS
+#ifndef USE_SIGALTSTACK
+void
+arbstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
+{
+#ifdef DARWIN_GS_HACK
+  Boolean gs_was_tcr = ensure_gs_pthread();
+#endif
+  TCR *tcr = get_interrupt_tcr(false);
+  area *vs = tcr->vs_area;
+  BytePtr current_sp = (BytePtr) current_stack_pointer();
+
+  if ((current_sp >= vs->low) &&
+      (current_sp < vs->high)) {
+    handle_signal_on_foreign_stack(tcr,
+                                   thread_kill_handler,
+                                   signum,
+                                   info,
+                                   context,
+                                   (LispObj)__builtin_return_address(0)
+#ifdef DARWIN_GS_HACK
+                                   ,gs_was_tcr
+#endif
+                                   );
+  } else {
+    /* If we're not on the value stack, we pretty much have to be on
+       the C stack.  Just run the handler. */
+#ifdef DARWIN_GS_HACK
+    if (gs_was_tcr) {
+      set_gs_address(tcr);
+    }
+#endif
+    thread_kill_handler(signum, info, context);
+  }
+}
+
+
+#else
/*
  Trampoline for thread_kill_handler when sigaltstack is used: always
  rebuild the frame on the foreign stack and run the handler there.
*/
void
altstack_thread_kill_handler(int signum, siginfo_t *info, ExceptionInformation *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR* tcr = get_tcr(true);
  handle_signal_on_foreign_stack(tcr,
                                 thread_kill_handler,
                                 signum,
                                 info,
                                 context,
                                 (LispObj)__builtin_return_address(0)
#ifdef DARWIN_GS_HACK
                                 ,gs_was_tcr
#endif
                                 );
}
+#endif
+#endif
+
+#ifdef USE_SIGALTSTACK
+#define SUSPEND_RESUME_HANDLER altstack_suspend_resume_handler
+#define THREAD_KILL_HANDLER altstack_thread_kill_handler
+#else
+#define SUSPEND_RESUME_HANDLER arbstack_suspend_resume_handler
+#define THREAD_KILL_HANDLER arbstack_thread_kill_handler
+#endif
+
+#ifdef WINDOWS
/* No-op on Windows: thread suspend/kill don't use signals there. */
void
thread_signal_setup()
{
}
+#else
/*
  Record which signal numbers are used for thread suspension and
  termination, and install their handlers (altstack or arbstack
  variants, per USE_SIGALTSTACK).
*/
void
thread_signal_setup()
{
  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_kill_signal = SIG_KILL_THREAD;

  install_signal_handler(thread_suspend_signal, (void *)SUSPEND_RESUME_HANDLER);
  install_signal_handler(thread_kill_signal, (void *)THREAD_KILL_HANDLER);
}
+#endif
+
/* Intentionally empty in this port; FP exception masks appear to be
   managed elsewhere — TODO(review): confirm nothing is needed here. */
void
enable_fp_exceptions()
{
}
+
/* One-time exception-subsystem initialization: install the host's
   exception/signal handlers. */
void
exception_init()
{
  install_pmcl_exception_handlers();
}
+
/* Advance (or, for negative DELTA, back up) the interrupted program
   counter recorded in XP by DELTA bytes. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += delta;
}
+
+/*
+  Lower (move toward 0) the "end" of the soft protected area associated
+  with a by a page, if we can.
+*/
+
+void
+
+adjust_soft_protection_limit(area *a)
+{
+  char *proposed_new_soft_limit = a->softlimit - 4096;
+  protected_area_ptr p = a->softprot;
+  
+  if (proposed_new_soft_limit >= (p->start+16384)) {
+    p->end = proposed_new_soft_limit;
+    p->protsize = p->end-p->start;
+    a->softlimit = proposed_new_soft_limit;
+  }
+  protect_area(p);
+}
+
+void
+restore_soft_stack_limit(unsigned restore_tsp)
+{
+  TCR *tcr = get_tcr(false);
+  area *a;
+ 
+  if (restore_tsp) {
+    a = tcr->ts_area;
+  } else {
+    a = tcr->vs_area;
+  }
+  adjust_soft_protection_limit(a);
+}
+
+
+#ifdef USE_SIGALTSTACK
+void
+setup_sigaltstack(area *a)
+{
+  stack_t stack;
+  stack.ss_sp = a->low;
+  a->low += SIGSTKSZ*8;
+  stack.ss_size = SIGSTKSZ*8;
+  stack.ss_flags = 0;
+  mmap(stack.ss_sp,stack.ss_size, PROT_READ|PROT_WRITE|PROT_EXEC,MAP_FIXED|MAP_ANON|MAP_PRIVATE,-1,0);
+  if (sigaltstack(&stack, NULL) != 0) {
+    perror("sigaltstack");
+    exit(-1);
+  }
+}
+#endif
+
+extern opcode egc_write_barrier_start, egc_write_barrier_end,
+  egc_set_hash_key_conditional, egc_set_hash_key_conditional_success_test,
+  egc_set_hash_key_conditional_retry,
+  egc_store_node_conditional_success_end, egc_store_node_conditional_retry,
+  egc_store_node_conditional_success_test,egc_store_node_conditional,
+  egc_set_hash_key, egc_gvset, egc_rplacd;
+
+/* We use (extremely) rigidly defined instruction sequences for consing,
+   mostly so that 'pc_luser_xp()' knows what to do if a thread is interrupted
+   while consing.
+
+   Note that we can usually identify which of these instructions is about
+   to be executed by a stopped thread without comparing all of the bytes
+   to those at the stopped program counter, but we generally need to
+   know the sizes of each of these instructions.
+*/
+
+#ifdef X8664
/* Byte images of the instructions making up the rigid x8664
   allocation sequence.  pc_luser_xp() uses their sizes (and
   recognize_alloc_instruction() their leading bytes) to identify and
   step over/back out of the sequence.  Per the array names, the
   0xd8/0xe0 displacements address the tcr's save_allocptr and
   save_allocbase fields (via %gs: or, with TCR_IN_GPR, the TCR
   register) — TODO confirm offsets against the TCR layout. */
opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
#ifdef TCR_IN_GPR
  {0x49,0x8b,0x9b,0xd8,0x00,0x00,0x00}
#else
  {0x65,0x48,0x8b,0x1c,0x25,0xd8,0x00,0x00,0x00}
#endif
;
opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
#ifdef TCR_IN_GPR
  {0x49,0x3b,0x9b,0xe0,0x00,0x00,0x00}
#else
  {0x65,0x48,0x3b,0x1c,0x25,0xe0,0x00,0x00,0x00}
#endif

;
opcode branch_around_alloc_trap_instruction[] =
  {0x77,0x02};                  /* ja .+2 */
opcode alloc_trap_instruction[] =
  {0xcd,0xc5};                  /* int $0xc5 */
opcode clear_tcr_save_allocptr_tag_instruction[] =
#ifdef TCR_IN_GPR
  {0x41,0x80,0xa3,0xd8,0x00,0x00,0x00,0xf0}
#else
  {0x65,0x80,0x24,0x25,0xd8,0x00,0x00,0x00,0xf0}
#endif
;
opcode set_allocptr_header_instruction[] =
  {0x48,0x89,0x43,0xf3};        /* movq %rax,-13(%rbx) */
+
+
/*
  Classify the allocation-sequence instruction at 'program_counter' by
  dispatching on its leading byte(s); the patterns match the opcode
  arrays above.  Returns ID_unrecognized_alloc_instruction when the
  bytes match none of them.
*/
alloc_instruction_id
recognize_alloc_instruction(pc program_counter)
{
  switch(program_counter[0]) {
  case 0xcd: return ID_alloc_trap_instruction;
  /* 0x7f is jg, which we used to use here instead of ja */
  case 0x7f:
  case 0x77: return ID_branch_around_alloc_trap_instruction;
  case 0x48: return ID_set_allocptr_header_instruction;
#ifdef TCR_IN_GPR
  case 0x41: return ID_clear_tcr_save_allocptr_tag_instruction;
  case 0x49:
    /* Falls through to default if the second byte doesn't match. */
    switch(program_counter[1]) {
    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
    }
#else
  case 0x65: 
    /* %gs prefix: distinguish by the following byte(s); falls through
       to default if nothing matches. */
    switch(program_counter[1]) {
    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
    case 0x48:
      switch(program_counter[2]) {
      case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
      case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
      }
    }
#endif
  default: break;
  }
  return ID_unrecognized_alloc_instruction;
}
+#endif
+#ifdef X8632
+/* The lisp assembler might use both a modrm byte and a sib byte to
+   encode a memory operand that contains a displacement but no
+   base or index.  Using the sib byte is necessary for 64-bit code,
+   since the sib-less form is used to indicate %rip-relative addressing
+   on x8664.  On x8632, it's not necessary, slightly suboptimal, and
+   doesn't match what we expect; until that's fixed, we may need to
+   account for this extra byte when adjusting the PC */
+#define LISP_ASSEMBLER_EXTRA_SIB_BYTE
+#ifdef WIN32_ES_HACK
+/* Win32 keeps the TCR in %es */
+#define TCR_SEG_PREFIX 0x26     /* %es: */
+#else
+/* Other platfroms use %fs */
+#define TCR_SEG_PREFIX 0x64     /* %fs: */
+#endif
/* Byte images of the x8632 allocation-sequence instructions.  Per the
   LISP_ASSEMBLER_EXTRA_SIB_BYTE note above, the memory-operand forms
   may be emitted one byte longer than these templates; PC adjustment
   code must account for that. */
opcode load_allocptr_reg_from_tcr_save_allocptr_instruction[] =
  {TCR_SEG_PREFIX,0x8b,0x0d,0x84,0x00,0x00,0x00};  /* may have extra SIB byte */
opcode compare_allocptr_reg_to_tcr_save_allocbase_instruction[] =
  {TCR_SEG_PREFIX,0x3b,0x0d,0x88,0x00,0x00,0x00};  /* may have extra SIB byte */
opcode branch_around_alloc_trap_instruction[] =
  {0x77,0x02};                  /* no SIB byte issue */
opcode alloc_trap_instruction[] =
  {0xcd,0xc5};                  /* no SIB byte issue */
opcode clear_tcr_save_allocptr_tag_instruction[] =
  {TCR_SEG_PREFIX,0x80,0x25,0x84,0x00,0x00,0x00,0xf8}; /* maybe SIB byte */
opcode set_allocptr_header_instruction[] =
  {0x0f,0x7e,0x41,0xfa};        /* no SIB byte issue */
+
+alloc_instruction_id
+recognize_alloc_instruction(pc program_counter)
+{
+  switch(program_counter[0]) {
+  case 0xcd: return ID_alloc_trap_instruction;
+  /* 0x7f is jg, which we used to use here instead of ja */
+  case 0x7f:
+  case 0x77: return ID_branch_around_alloc_trap_instruction;
+  case 0x0f: return ID_set_allocptr_header_instruction;
+  case TCR_SEG_PREFIX: 
+    switch(program_counter[1]) {
+    case 0x80: return ID_clear_tcr_save_allocptr_tag_instruction;
+    case 0x3b: return ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction;
+    case 0x8b: return ID_load_allocptr_reg_from_tcr_save_allocptr_instruction;
+    }
+  }
+  return ID_unrecognized_alloc_instruction;
+}
+#endif      
+
/*
  Fix up ("luser") a thread that's stopped - or being interrupted -
  in the middle of one of the rigid allocation instruction sequences
  or inside the EGC write barrier, so that GC/interrupt handling sees
  a consistent state: either finish the partially-completed operation
  or back it out so it can be retried.

  If interrupt_displacement is NULL we're acting on behalf of another
  thread (e.g. the GC) and may commit to the allocation trap;
  otherwise it receives the size of the interrupted allocation so the
  caller can resume the attempt later.
*/
void
pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *interrupt_displacement)
{
  pc program_counter = (pc)xpPC(xp);
  int allocptr_tag = fulltag_of((LispObj)(tcr->save_allocptr));

  /* A non-zero tag on tcr->save_allocptr means the thread is inside
     an allocation sequence. */
  if (allocptr_tag != 0) {
    alloc_instruction_id state = recognize_alloc_instruction(program_counter);
    /* Bytes being allocated: fixed for a cons, otherwise taken from
       the imm register that the allocation sequence loaded. */
    signed_natural 
      disp = (allocptr_tag == fulltag_cons) ?
      sizeof(cons) - fulltag_cons :
#ifdef X8664
      xpGPR(xp,Iimm1)
#else
      xpGPR(xp,Iimm0)
#endif
      ;
    LispObj new_vector;

    if ((state == ID_unrecognized_alloc_instruction) ||
        ((state == ID_set_allocptr_header_instruction) &&
         (allocptr_tag != fulltag_misc))) {
      Bug(xp, "Can't determine state of thread 0x" LISP ", interrupted during memory allocation", tcr);
    }
    switch(state) {
    case ID_set_allocptr_header_instruction:
      /* We were consing a vector and we won.  Set the header of the
         new vector (in the allocptr register) to the header in %rax
         (%mm0 on ia32) and skip over this instruction, then fall into
         the next case. */
      new_vector = xpGPR(xp,Iallocptr);
      deref(new_vector,0) = 
#ifdef X8664
        xpGPR(xp,Iimm0)
#else
        xpMMXreg(xp,Imm0)
#endif
        ;
      
      xpPC(xp) += sizeof(set_allocptr_header_instruction);

      /* Fall thru */
    case ID_clear_tcr_save_allocptr_tag_instruction:
      /* Allocation is effectively complete: strip the tag bits from
         tcr->save_allocptr and advance the PC past the clear
         instruction, as if it had executed. */
      tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
#ifdef LISP_ASSEMBLER_EXTRA_SIB_BYTE
      /* A 0x24 third byte is a SIB byte the assembler may have
         emitted; account for the extra instruction length. */
      if (((pc)(xpPC(xp)))[2] == 0x24) {
        xpPC(xp) += 1;
      }
#endif
      xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);

      break;
    case ID_alloc_trap_instruction:
      /* If we're looking at another thread, we're pretty much committed to
         taking the trap.  We don't want the allocptr register to be pointing
         into the heap, so make it point to (- VOID_ALLOCPTR disp), where 'disp'
         was determined above. 
      */
      if (interrupt_displacement == NULL) {
        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR - disp;
        tcr->save_allocptr = (void *)(VOID_ALLOCPTR - disp);
      } else {
        /* Back out, and tell the caller how to resume the allocation attempt */
        *interrupt_displacement = disp;
        xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
        tcr->save_allocptr += disp;
#ifdef LISP_ASSEMBLER_EXTRA_SIB_BYTE
        /* This assumes that TCR_SEG_PREFIX can't appear 
           anywhere but at the beginning of one of these
           magic allocation-sequence instructions. */
        xpPC(xp) -= (sizeof(branch_around_alloc_trap_instruction)+
                     sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction));
        if (*((pc)(xpPC(xp))) == TCR_SEG_PREFIX) {
          xpPC(xp) -= sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction);
        } else {
          /* +2: presumably one extra SIB byte in each of the compare
             and load instructions — see note above. */
          xpPC(xp) -= (sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction) + 2);
        }
        
#else
        xpPC(xp) -= (sizeof(branch_around_alloc_trap_instruction)+
                     sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
                     sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
#endif
      }
      break;
    case ID_branch_around_alloc_trap_instruction:
      /* If we'd take the branch - which is a "ja" - around the alloc trap,
         we might as well finish the allocation.  Otherwise, back out of the
         attempt. */
      {
        int flags = (int)eflags_register(xp);
        
        /* "ja" is taken when both ZF and CF are clear. */
        if ((!(flags & (1 << X86_ZERO_FLAG_BIT))) &&
	    (!(flags & (1 << X86_CARRY_FLAG_BIT)))) {
          /* The branch (ja) would have been taken.  Emulate taking it. */
          xpPC(xp) += (sizeof(branch_around_alloc_trap_instruction)+
                       sizeof(alloc_trap_instruction));
          if (allocptr_tag == fulltag_misc) {
            /* Slap the header on the new uvector */
            new_vector = xpGPR(xp,Iallocptr);
            deref(new_vector,0) = xpGPR(xp,Iimm0);
            xpPC(xp) += sizeof(set_allocptr_header_instruction);
          }
          tcr->save_allocptr = (void *)(((LispObj)tcr->save_allocptr) & ~fulltagmask);
#ifdef LISP_ASSEMBLER_EXTRA_SIB_BYTE
          if (((pc)xpPC(xp))[2] == 0x24) {
            xpPC(xp) += 1;
          }
#endif
          xpPC(xp) += sizeof(clear_tcr_save_allocptr_tag_instruction);
        } else {
          /* Back up */
          xpPC(xp) -= (sizeof(compare_allocptr_reg_to_tcr_save_allocbase_instruction) +
                       sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction));
#ifdef LISP_ASSEMBLER_EXTRA_SIB_BYTE
          if (*((pc)(xpPC(xp))) != TCR_SEG_PREFIX) {
            /* skipped two instructions with extra SIB byte */
            xpPC(xp) -= 2;
          }
#endif
          xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
          if (interrupt_displacement) {
            *interrupt_displacement = disp;
            tcr->save_allocptr += disp;
          } else {
            tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
          }
        }
      }
      break;
    case ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction:
      /* Interrupted before the trap decision: void the allocptr
         register and back the PC up over the load so the sequence
         restarts from there, then handle it like the load case. */
      xpGPR(xp,Iallocptr) = VOID_ALLOCPTR;
      xpPC(xp) -= sizeof(load_allocptr_reg_from_tcr_save_allocptr_instruction);
#ifdef LISP_ASSEMBLER_EXTRA_SIB_BYTE
      if (*((pc)xpPC(xp)) != TCR_SEG_PREFIX) {
        xpPC(xp) -= 1;
      }
#endif
      /* Fall through */
    case ID_load_allocptr_reg_from_tcr_save_allocptr_instruction:
      /* Record or apply the displacement so the allocation can be
         retried (or abandoned, if no caller wants to resume it). */
      if (interrupt_displacement) {
        tcr->save_allocptr += disp;
        *interrupt_displacement = disp;
      } else {
        tcr->save_allocptr = (void *)(VOID_ALLOCPTR-disp);
      }
      break;
    default: 
      break;
    }
    return;
  }
  /* Not allocating: check whether the PC is inside the EGC write
     barrier subprims and, if so, complete the store/memoization on
     the thread's behalf. */
  if ((program_counter >= &egc_write_barrier_start) &&
      (program_counter < &egc_write_barrier_end)) {
    LispObj *ea = 0, val, root = 0;
    bitvector refbits = (bitvector)(lisp_global(REFBITS));
    Boolean need_store = true, need_check_memo = true, need_memoize_root = false;

    if (program_counter >= &egc_set_hash_key_conditional) {
      /* Before the retry point: nothing visible has happened yet. */
      if (program_counter <= &egc_set_hash_key_conditional_retry) {
        return;
      }
      if ((program_counter < &egc_set_hash_key_conditional_success_test) ||
          ((program_counter == &egc_set_hash_key_conditional_success_test) &&
           !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT)))) {
        /* Back up the PC, try again */
        xpPC(xp) = (LispObj) &egc_set_hash_key_conditional_retry;
        return;
      }
      /* The conditional store succeeded.  Set the refbit, return to ra0 */
      val = xpGPR(xp,Iarg_z);
#ifdef X8664
      root = xpGPR(xp,Iarg_x);
      ea = (LispObj*)(root + (unbox_fixnum((signed_natural) xpGPR(xp,Itemp0))));
#else
      root = xpGPR(xp,Itemp1);
      ea = (LispObj *)(root + misc_data_offset + xpGPR(xp,Itemp0));
#endif
      need_memoize_root = true;
      need_store = false;
      xpGPR(xp,Iarg_z) = t_value;
    } else if (program_counter >= &egc_store_node_conditional) {
      if (program_counter <= &egc_store_node_conditional_retry) {
        return;
      }
      if ((program_counter < &egc_store_node_conditional_success_test) ||
          ((program_counter == &egc_store_node_conditional_success_test) &&
           !(eflags_register(xp) & (1 << X86_ZERO_FLAG_BIT)))) {
        /* Back up the PC, try again */
        xpPC(xp) = (LispObj) &egc_store_node_conditional_retry;
        return;
      }
      if (program_counter >= &egc_store_node_conditional_success_end) {
        return;
      }

      /* The conditional store succeeded.  Set the refbit, return to ra0 */
      val = xpGPR(xp,Iarg_z);
#ifdef X8664
      ea = (LispObj*)(xpGPR(xp,Iarg_x) + (unbox_fixnum((signed_natural)
                                                       xpGPR(xp,Itemp0))));
#else
      ea = (LispObj *)(misc_data_offset + xpGPR(xp,Itemp1) + xpGPR(xp,Itemp0));
#endif
      xpGPR(xp,Iarg_z) = t_value;
      need_store = false;
    } else if (program_counter >= &egc_set_hash_key) {
#ifdef X8664
      root = xpGPR(xp,Iarg_x);
#else
      root = xpGPR(xp,Itemp0);
#endif
      ea = (LispObj *) (root+xpGPR(xp,Iarg_y)+misc_data_offset);
      val = xpGPR(xp,Iarg_z);
      need_memoize_root = true;
    } else if (program_counter >= &egc_gvset) {
#ifdef X8664
      ea = (LispObj *) (xpGPR(xp,Iarg_x)+xpGPR(xp,Iarg_y)+misc_data_offset);
#else
      ea = (LispObj *) (xpGPR(xp,Itemp0)+xpGPR(xp,Iarg_y)+misc_data_offset);
#endif
      val = xpGPR(xp,Iarg_z);
    } else if (program_counter >= &egc_rplacd) {
      ea = (LispObj *) untag(xpGPR(xp,Iarg_y));
      val = xpGPR(xp,Iarg_z);
    } else {                      /* egc_rplaca */
      ea =  ((LispObj *) untag(xpGPR(xp,Iarg_y)))+1;
      val = xpGPR(xp,Iarg_z);
    }
    /* Complete the store the barrier was in the middle of (unless a
       conditional-store path already did it). */
    if (need_store) {
      *ea = val;
    }
    /* Memoize an old->young reference if the stored-into dnode is in
       oldspace and the target is younger. */
    if (need_check_memo) {
      natural  bitnumber = area_dnode(ea, lisp_global(REF_BASE));
      if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) &&
          ((LispObj)ea < val)) {
        atomic_set_bit(refbits, bitnumber);
        if (need_memoize_root) {
          bitnumber = area_dnode(root, lisp_global(REF_BASE));
          atomic_set_bit(refbits, bitnumber);
        }
      }
    }
    {
      /* These subprimitives are called via CALL/RET; need
         to pop the return address off the stack and set
         the PC there. */
      LispObj *sp = (LispObj *)xpGPR(xp,Isp), ra = *sp++;
      xpPC(xp) = ra;
      xpGPR(xp,Isp)=(LispObj)sp;
    }
    return;
  }
}
+
+
/*
  Make a thread's TCR and stack areas consistent before a GC-like
  operation examines them: set the value/temp stack areas' active
  pointers (from the exception context if one is available, else from
  the values saved in the TCR), account for bytes consed since the
  last update, and reset the allocation state.

  is_other_tcr is true when normalizing a thread other than the one
  driving the operation; in that case any in-progress allocation or
  write barrier recorded in xp is first completed or backed out via
  pc_luser_xp().  xp may be NULL for a thread suspended in an ff-call.
*/
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = (void *)(tcr->save_allocptr);
  LispObj lisprsp;
  area *a;

  if (xp) {
    if (is_other_tcr) {
      pc_luser_xp(xp, tcr, NULL);
    }
    /* Use the context's stack pointer if it lies within the value
       stack area; otherwise fall back on the TCR's saved vsp. */
    a = tcr->vs_area;
    lisprsp = xpGPR(xp, Isp);
    if (((BytePtr)lisprsp >= a->low) &&
	((BytePtr)lisprsp < a->high)) {
      a->active = (BytePtr)lisprsp;
    } else {
      a->active = (BytePtr) tcr->save_vsp;
    }
    a = tcr->ts_area;
    a->active = (BytePtr) tcr->save_tsp;
  } else {
    /* In ff-call; get area active pointers from tcr */
    tcr->vs_area->active = (BytePtr) tcr->save_vsp;
    tcr->ts_area->active = (BytePtr) tcr->save_tsp;
  }
  if (cur_allocptr) {
    update_bytes_allocated(tcr, cur_allocptr);
  }
  tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  /* A zero tag means no allocation is in progress; void the pointer. */
  if (fulltag_of((LispObj)(tcr->save_allocptr)) == 0) {
    tcr->save_allocptr = (void *)VOID_ALLOCPTR;
  }
}
+
+
+/* Suspend and "normalize" other tcrs, then call a gc-like function
+   in that context.  Resume the other tcrs, then return what the
+   function returned */
+
/* TCR of the thread currently running a gc-like operation; set/cleared
   by gc_like_from_xp(), NULL when no such operation is in progress. */
TCR *gc_tcr = NULL;
+
+
/*
  Run 'fun' (the GC, purify, or impurify) with the world stopped.
  Suspends all other threads, honors GC_INHIBIT_COUNT (deferring the
  operation when GC is inhibited), normalizes every thread's TCR so
  'fun' sees consistent stacks and allocation state, runs fun(tcr,
  param), then clears the per-thread gc contexts and resumes the
  world.  Returns fun's result, or 0 if the operation was deferred.
*/
signed_natural
gc_like_from_xp(ExceptionInformation *xp, 
                signed_natural(*fun)(TCR *, signed_natural), 
                signed_natural param)
{
  TCR *tcr = get_tcr(false), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* GC inhibited: negate a positive count to record that a GC was
       requested while inhibited, then defer. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  /* This is generally necessary if the current thread invoked the GC
     via an alloc trap, and harmless if the GC was invoked via a GC
     trap.  (It's necessary in the first case because the "allocptr"
     register - %rbx - may be pointing into the middle of something
     below tcr->save_allocbase, and we wouldn't want the GC to see
     that bogus pointer.) */
  xpGPR(xp, Iallocptr) = VOID_ALLOCPTR; 

  normalize_tcr(xp, tcr, false);


  /* Pick a usable context for each other (suspended) thread and
     normalize it. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
	 must have executed a synchronous ff-call. 
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }
    


  result = fun(tcr, param);

  /* Clear every thread's gc context (including our own). */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
+
/* Run purify() with all other threads suspended; returns its result. */
signed_natural
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}
+
/* Run impurify() with all other threads suspended; returns its result. */
signed_natural
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}
+
+/* Returns #bytes freed by invoking GC */
+
+signed_natural
+gc_from_tcr(TCR *tcr, signed_natural param)
+{
+  area *a;
+  BytePtr oldfree, newfree;
+  BytePtr oldend, newend;
+
+#if 0
+  fprintf(dbgout, "Start GC  in 0x" LISP "\n", tcr);
+#endif
+  a = active_dynamic_area;
+  oldend = a->high;
+  oldfree = a->active;
+  gc(tcr, param);
+  newfree = a->active;
+  newend = a->high;
+#if 0
+  fprintf(dbgout, "End GC  in 0x" LISP "\n", tcr);
+#endif
+  return ((oldfree-newfree)+(newend-oldend));
+}
+
+signed_natural
+gc_from_xp(ExceptionInformation *xp, signed_natural param)
+{
+  signed_natural status = gc_like_from_xp(xp, gc_from_tcr, param);
+
+  freeGCptrs();
+  return status;
+}
+
+#ifdef DARWIN
+
+#define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p))
+#define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr)))
+
+extern void pseudo_sigreturn(void);
+
+
+
+#define LISP_EXCEPTIONS_HANDLED_MASK \
+ (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC)
+
+/* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */
+#define NUM_LISP_EXCEPTIONS_HANDLED 4 
+
/* Record of pre-existing ("foreign") Mach exception-port settings.
   The parallel arrays mirror the out-parameters of
   thread_get_exception_ports(); NOTE(review): the code that fills
   and uses this is outside this chunk — presumably it saves a
   thread's prior handlers so they can be restored or forwarded to. */
typedef struct {
  int foreign_exception_port_count;   /* number of valid entries below */
  exception_mask_t         masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t              ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t  flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;
+
+
+
+
+/*
+  Mach's exception mechanism works a little better than its signal
+  mechanism (and, not incidentally, it gets along with GDB a lot
+  better.
+
+  Initially, we install an exception handler to handle each native
+  thread's exceptions.  This process involves creating a distinguished
+  thread which listens for kernel exception messages on a set of
+  0 or more thread exception ports.  As threads are created, they're
+  added to that port set; a thread's exception port is destroyed
+  (and therefore removed from the port set) when the thread exits.
+
+  A few exceptions can be handled directly in the handler thread;
+  others require that we resume the user thread (and that the
+  exception thread resumes listening for exceptions.)  The user
+  thread might eventually want to return to the original context
+  (possibly modified somewhat.)
+
+  As it turns out, the simplest way to force the faulting user
+  thread to handle its own exceptions is to do pretty much what
+  signal() does: the exception handlng thread sets up a sigcontext
+  on the user thread's stack and forces the user thread to resume
+  execution as if a signal handler had been called with that
+  context as an argument.  We can use a distinguished UUO at a
+  distinguished address to do something like sigreturn(); that'll
+  have the effect of resuming the user thread's execution in
+  the (pseudo-) signal context.
+
+  Since:
+    a) we have miles of code in C and in Lisp that knows how to
+    deal with Linux sigcontexts
+    b) Linux sigcontexts contain a little more useful information
+    (the DAR, DSISR, etc.) than their Darwin counterparts
+    c) we have to create a sigcontext ourselves when calling out
+    to the user thread: we aren't really generating a signal, just
+    leveraging existing signal-handling code.
+
+  we create a Linux sigcontext struct.
+
+  Simple ?  Hopefully from the outside it is ...
+
+  We want the process of passing a thread's own context to it to
+  appear to be atomic: in particular, we don't want the GC to suspend
+  a thread that's had an exception but has not yet had its user-level
+  exception handler called, and we don't want the thread's exception
+  context to be modified by a GC while the Mach handler thread is
+  copying it around.  On Linux (and on Jaguar), we avoid this issue
+  because (a) the kernel sets up the user-level signal handler and
+  (b) the signal handler blocks signals (including the signal used
+  by the GC to suspend threads) until tcr->xframe is set up.
+
+  The GC and the Mach server thread therefore contend for the lock
+  "mach_exception_lock".  The Mach server thread holds the lock
+  when copying exception information between the kernel and the
+  user thread; the GC holds this lock during most of its execution
+  (delaying exception processing until it can be done without
+  GC interference.)
+
+*/
+
+#ifdef PPC64
+#define	C_REDZONE_LEN		320
+#define	C_STK_ALIGN             32
+#else
+#define	C_REDZONE_LEN		224
+#define	C_STK_ALIGN		16
+#endif
+#define C_PARAMSAVE_LEN		64
+#define	C_LINKAGE_LEN		48
+
+#define TRUNC_DOWN(a,b,c)  (((((natural)a)-(b))/(c)) * (c))
+
+void
+fatal_mach_error(char *format, ...);
+
/* Abort via fatal_mach_error() if a Mach call's result 'x' isn't
   KERN_SUCCESS.  Wrapped in do/while(0) so the macro expands to a
   single statement (safe in unbraced if/else); note 'x' is evaluated
   twice, so pass a variable, not an expression with side effects. */
#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);} } while (0)
+
+
/*
  Copy the machine state saved in 'pseudosigcontext' back into the
  Mach thread 'thread', so that when the kernel resumes it the thread
  continues from that context.  The FP state is restored first, then
  the general-purpose/thread state; a failure of either call is fatal
  (via MACH_CHECK_ERROR).
*/
void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
#if WORD_SIZE == 64
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);
#else
  mcontext_t mc = UC_MCONTEXT(pseudosigcontext);
#endif

  /* Set the thread's FP state from the pseudosigcontext */
#if WORD_SIZE == 64
  kret = thread_set_state(thread,
                          x86_FLOAT_STATE64,
                          (thread_state_t)&(mc->__fs),
                          x86_FLOAT_STATE64_COUNT);
#else
  kret = thread_set_state(thread,
                          x86_FLOAT_STATE32,
                          (thread_state_t)&(mc->__fs),
                          x86_FLOAT_STATE32_COUNT);
#endif
  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
#if WORD_SIZE == 64
  kret = thread_set_state(thread,
                          x86_THREAD_STATE64,
                          (thread_state_t)&(mc->__ss),
                          x86_THREAD_STATE64_COUNT);
#else
  kret = thread_set_state(thread, 
                          x86_THREAD_STATE32,
                          (thread_state_t)&(mc->__ss),
                          x86_THREAD_STATE32_COUNT);
#endif
  MACH_CHECK_ERROR("setting thread state", kret);
}  
+
+/* This code runs in the exception handling thread, in response
+   to an attempt to execute the UU0 at "pseudo_sigreturn" (e.g.,
+   in response to a call to pseudo_sigreturn() from the specified
+   user thread.
+   Find that context (the user thread's R3 points to it), then
+   use that context to set the user thread's state.  When this
+   function's caller returns, the Mach kernel will resume the
+   user thread.
+*/
+
+kern_return_t
+do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
+{
+  ExceptionInformation *xp;
+
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "doing pseudo_sigreturn for 0x%x\n",tcr);
+#endif
+  xp = tcr->pending_exception_context;
+  if (xp) {
+    tcr->pending_exception_context = NULL;
+    tcr->valence = TCR_STATE_LISP;
+    restore_mach_thread_state(thread, xp);
+    raise_pending_interrupt(tcr);
+  } else {
+    Bug(NULL, "no xp here!\n");
+  }
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "did pseudo_sigreturn for 0x%x\n",tcr);
+#endif
+  return KERN_SUCCESS;
+}  
+
/*
  Build, on the user thread's foreign stack, the structures needed for
  a pseudo-signal: a siginfo_t, a ucontext (ExceptionInformation), and
  a machine context whose thread state (__ss) is copied from 'ts' and
  whose FP and exception state are fetched from the thread itself.

  On return, *info_ptr (if non-NULL) points at the siginfo_t and
  *new_stack_top (if non-NULL) at the lowest stack address used; the
  returned ucontext's mcontext pointer is already set.
*/
ExceptionInformation *
create_thread_context_frame(mach_port_t thread, 
			    natural *new_stack_top,
                            siginfo_t **info_ptr,
                            TCR *tcr,
#ifdef X8664
                            x86_thread_state64_t *ts
#else
                            x86_thread_state32_t *ts
#endif
                            )
{
  mach_msg_type_number_t thread_state_count;
  ExceptionInformation *pseudosigcontext;
#ifdef X8664
  MCONTEXT_T mc;
#else
  mcontext_t mc;
#endif
  natural stackp;

  /* Find a usable spot on the foreign stack, below any red zone. */
#ifdef X8664  
  stackp = (LispObj) find_foreign_rsp(ts->__rsp,tcr->cs_area,tcr);
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
#else
  stackp = (LispObj) find_foreign_rsp(ts->__esp, tcr->cs_area, tcr);
#endif
  /* Allocate the siginfo, the ucontext, then the mcontext, working
     down the stack with each aligned to C_STK_ALIGN. */
  stackp = TRUNC_DOWN(stackp, sizeof(siginfo_t), C_STK_ALIGN);
  if (info_ptr) {
    *info_ptr = (siginfo_t *)stackp;
  }
  stackp = TRUNC_DOWN(stackp,sizeof(*pseudosigcontext), C_STK_ALIGN);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  /* NOTE(review): the cast is MCONTEXT_T even on x8632, where mc is
     declared mcontext_t — presumably the two are compatible there;
     confirm. */
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  
  memmove(&(mc->__ss),ts,sizeof(*ts));

  /* Fill in FP and exception state directly from the thread. */
#ifdef X8664
  thread_state_count = x86_FLOAT_STATE64_COUNT;
  thread_get_state(thread,
		   x86_FLOAT_STATE64,
		   (thread_state_t)&(mc->__fs),
		   &thread_state_count);

  thread_state_count = x86_EXCEPTION_STATE64_COUNT;
  thread_get_state(thread,
                   x86_EXCEPTION_STATE64,
		   (thread_state_t)&(mc->__es),
		   &thread_state_count);
#else
  thread_state_count = x86_FLOAT_STATE32_COUNT;
  thread_get_state(thread,
		   x86_FLOAT_STATE32,
		   (thread_state_t)&(mc->__fs),
		   &thread_state_count);

  thread_state_count = x86_EXCEPTION_STATE32_COUNT;
  thread_get_state(thread,
                   x86_EXCEPTION_STATE32,
		   (thread_state_t)&(mc->__es),
		   &thread_state_count);
#endif


  UC_MCONTEXT(pseudosigcontext) = mc;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
+
+/*
+  This code sets up the user thread so that it executes a "pseudo-signal
+  handler" function when it resumes.  Create a fake ucontext struct
+  on the thread's stack and pass it as an argument to the pseudo-signal
+  handler.
+
+  Things are set up so that the handler "returns to" pseudo_sigreturn(),
+  which will restore the thread's context.
+
+  If the handler invokes code that throws (or otherwise never sigreturn()'s
+  to the context), that's fine.
+
+  Actually, check that: throw (and variants) may need to be careful and
+  pop the tcr's xframe list until it's younger than any frame being
+  entered.
+*/
+
/*
  Arrange for the (suspended) user thread to run a "pseudo signal
  handler" when it resumes: build a siginfo_t and pseudo-sigcontext on
  the thread's stack via create_thread_context_frame(), record the
  context in the TCR, push pseudo_sigreturn as the handler's return
  address, and point the thread at handler_address with the
  (signum, info, context, tcr, old_valence) arguments passed per the
  platform ABI.  Always returns 0.
*/
int
setup_signal_frame(mach_port_t thread,
		   void *handler_address,
		   int signum,
                   int code,
		   TCR *tcr,
#ifdef X8664
                   x86_thread_state64_t *ts
#else
                   x86_thread_state32_t *ts
#endif
                   )
{
#ifdef X8664
  x86_thread_state64_t new_ts;
#else
  x86_thread_state32_t new_ts;
#endif
  ExceptionInformation *pseudosigcontext;
  int  old_valence = tcr->valence;
  natural stackp, *stackpp;
  siginfo_t *info;

#ifdef DEBUG_MACH_EXCEPTIONS
  /* NOTE(review): "%x" with pointer arguments is unportable on
     64-bit; %p would be correct here and below. */
  fprintf(dbgout,"Setting up exception handling for 0x%x\n", tcr);
#endif
  /* Build the fake signal frame (context + siginfo) on the thread's
     foreign stack. */
  pseudosigcontext = create_thread_context_frame(thread, &stackp, &info, tcr,  ts);
  bzero(info, sizeof(*info));
  info->si_code = code;
  info->si_addr = (void *)(UC_MCONTEXT(pseudosigcontext)->__es.__faultvaddr);
  info->si_signo = signum;
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  pseudosigcontext->uc_stack.ss_sp = 0;
  pseudosigcontext->uc_stack.ss_size = 0;
  pseudosigcontext->uc_stack.ss_flags = 0;
  pseudosigcontext->uc_link = NULL;
  pseudosigcontext->uc_mcsize = sizeof(*UC_MCONTEXT(pseudosigcontext));
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;
  

  /* 
     It seems like we've created a  sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */

#ifdef X8664
  /* x8664 ABI: arguments in registers, pseudo_sigreturn pushed as the
     handler's return address. */
  new_ts.__rip = (natural) handler_address;
  stackpp = (natural *)stackp;
  *--stackpp = (natural)pseudo_sigreturn;
  stackp = (natural)stackpp;
  new_ts.__rdi = signum;
  new_ts.__rsi = (natural)info;
  new_ts.__rdx = (natural)pseudosigcontext;
  new_ts.__rcx = (natural)tcr;
  new_ts.__r8 = (natural)old_valence;
  new_ts.__rsp = stackp;
  new_ts.__rflags = ts->__rflags;
#else
#define USER_CS 0x17
#define USER_DS 0x1f
  /* ia32 ABI: arguments pushed on the stack (last first), then
     pseudo_sigreturn as the return address. */
  bzero(&new_ts, sizeof(new_ts));
  new_ts.__cs = ts->__cs;
  new_ts.__ss = ts->__ss;
  new_ts.__ds = ts->__ds;
  new_ts.__es = ts->__es;
  new_ts.__fs = ts->__fs;
  new_ts.__gs = ts->__gs;

  new_ts.__eip = (natural)handler_address;
  stackpp = (natural *)stackp;
  *--stackpp = 0;		/* alignment */
  *--stackpp = 0;
  *--stackpp = 0;
  *--stackpp = (natural)old_valence;
  *--stackpp = (natural)tcr;
  *--stackpp = (natural)pseudosigcontext;
  *--stackpp = (natural)info;
  *--stackpp = (natural)signum;
  *--stackpp = (natural)pseudo_sigreturn;
  stackp = (natural)stackpp;
  new_ts.__esp = stackp;
  new_ts.__eflags = ts->__eflags;
#endif

  /* Install the new register state; the thread runs the handler when
     it's resumed. */
#ifdef X8664
  thread_set_state(thread,
                   x86_THREAD_STATE64,
                   (thread_state_t)&new_ts,
                   x86_THREAD_STATE64_COUNT);
#else
  thread_set_state(thread, 
		   x86_THREAD_STATE32,
		   (thread_state_t)&new_ts,
		   x86_THREAD_STATE32_COUNT);
#endif
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(dbgout,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
+
+
+
+
+
+
+/*
+  This function runs in the exception handling thread.  It's
+  called (by this precise name) from the library function "exc_server()"
+  when the thread's exception ports are set up.  (exc_server() is called
+  via mach_msg_server(), which is a function that waits for and dispatches
+  on exception messages from the Mach kernel.)
+
+  This checks to see if the exception was caused by a pseudo_sigreturn()
+  UUO; if so, it arranges for the thread to have its state restored
+  from the specified context.
+
+  Otherwise, it tries to map the exception to a signal number and
+  arranges that the thread run a "pseudo signal handler" to handle
+  the exception.
+
+  Some exceptions could and should be handled here directly.
+*/
+
+/* We need the thread's state earlier on x86_64 than we did on PPC;
+   the PC won't fit in code_vector[1].  We shouldn't try to get it
+   lazily (via catch_exception_raise_state()); until we own the
+   exception lock, we shouldn't have it in userspace (since a GCing
+   thread wouldn't know that we had our hands on it.)
+*/
+
+#ifdef X8664
+#define ts_pc(t) t.__rip
+#else
+#define ts_pc(t) t.__eip
+#endif
+
+
+#define DARWIN_EXCEPTION_HANDLER signal_handler
+
+
+/*
+  Runs in the exception thread; called (by this exact name) from
+  exc_server().  Returns KERN_SUCCESS when the exception was handled
+  (the thread's state has been adjusted); any other value tells the
+  kernel we declined, so it can propagate the exception elsewhere.
+*/
+kern_return_t
+catch_exception_raise(mach_port_t exception_port,
+		      mach_port_t thread,
+		      mach_port_t task, 
+		      exception_type_t exception,
+		      exception_data_t code_vector,
+		      mach_msg_type_number_t code_count)
+{
+  int signum = 0, code = *code_vector;
+  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port); /* port name == TCR */
+  kern_return_t kret, call_kret;
+#ifdef X8664
+  x86_thread_state64_t ts;
+#else
+  x86_thread_state32_t ts;
+#endif
+  mach_msg_type_number_t thread_state_count;
+
+
+
+
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "obtaining Mach exception lock in exception thread\n");
+#endif
+
+
+  if (1) {
+#ifdef X8664
+    /* Fetch the faulting thread's register state eagerly (see the
+       comment above ts_pc); retry if the call was aborted. */
+    do {
+      thread_state_count = x86_THREAD_STATE64_COUNT;
+      call_kret = thread_get_state(thread,
+                                   x86_THREAD_STATE64,
+                                   (thread_state_t)&ts,
+                                   &thread_state_count);
+    } while (call_kret == KERN_ABORTED);
+  MACH_CHECK_ERROR("getting thread state",call_kret);
+#else
+    thread_state_count = x86_THREAD_STATE32_COUNT;
+    call_kret = thread_get_state(thread,
+				 x86_THREAD_STATE32,
+				 (thread_state_t)&ts,
+				 &thread_state_count);
+    MACH_CHECK_ERROR("getting thread state",call_kret);
+#endif
+    if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
+      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
+    } 
+    /* A GP fault with the PC at pseudo_sigreturn is our "return from
+       pseudo-signal handler" UUO: restore the saved context. */
+    if ((code == EXC_I386_GPFLT) &&
+        ((natural)(ts_pc(ts)) == (natural)pseudo_sigreturn)) {
+      kret = do_pseudo_sigreturn(thread, tcr);
+#if 0
+      fprintf(dbgout, "Exception return in 0x%x\n",tcr);
+#endif
+    } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
+      CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
+      kret = 17;		/* non-KERN_SUCCESS: decline, let it propagate */
+    } else {
+      /* Map the Mach exception type to a POSIX-style signal number
+         for the pseudo-signal handler. */
+      switch (exception) {
+      case EXC_BAD_ACCESS:
+        if (code == EXC_I386_GPFLT) {
+          signum = SIGSEGV;
+        } else {
+          signum = SIGBUS;
+        }
+        break;
+        
+      case EXC_BAD_INSTRUCTION:
+        if (code == EXC_I386_GPFLT) {
+          signum = SIGSEGV;
+        } else {
+          signum = SIGILL;
+        }
+        break;
+          
+      case EXC_SOFTWARE:
+        signum = SIGILL;
+        break;
+        
+      case EXC_ARITHMETIC:
+        signum = SIGFPE;
+        break;
+        
+      default:
+        break;
+      }
+      if (signum) {
+        /* Arrange for the thread to run signal_handler(signum, ...)
+           when it resumes. */
+        kret = setup_signal_frame(thread,
+                                  (void *)DARWIN_EXCEPTION_HANDLER,
+                                  signum,
+                                  code,
+                                  tcr, 
+                                  &ts);
+#if 0
+        fprintf(dbgout, "Setup pseudosignal handling in 0x%x\n",tcr);
+#endif
+        
+      } else {
+        kret = 17;		/* unrecognized exception: decline */
+      }
+    }
+  }
+  return kret;
+}
+
+
+
+
+static mach_port_t mach_exception_thread = (mach_port_t)0;
+
+
+/*
+  The initial function for an exception-handling thread.
+*/
+
+/*
+  Thread function for the dedicated exception-handling thread: record
+  our own Mach thread port, then serve exception messages forever on
+  the port (set) derived from ARG.
+*/
+void *
+exception_handler_proc(void *arg)
+{
+  extern boolean_t exc_server();
+  mach_port_t portset = TCR_TO_EXCEPTION_PORT(arg);
+
+  mach_exception_thread = pthread_mach_thread_np(pthread_self());
+  mach_msg_server(exc_server, 256, portset, 0);
+  abort();                      /* mach_msg_server() should never return */
+}
+
+
+
+/*
+  Explicitly kill the exception-handling thread at shutdown; it is
+  blocked inside mach_msg_server() and won't die on its own.
+*/
+void
+mach_exception_thread_shutdown()
+{
+  kern_return_t status;
+
+  fprintf(dbgout, "terminating Mach exception thread, 'cause exit can't\n");
+  status = thread_terminate(mach_exception_thread);
+  if (status == KERN_SUCCESS) {
+    return;
+  }
+  fprintf(dbgout, "Couldn't terminate exception thread, kret = %d\n",status);
+}
+
+
+/*
+  Return the process-wide exception port set, lazily creating it (and
+  spawning the handler thread that serves it) on first call.
+  NOTE(review): the lazy initialization is not guarded by a lock;
+  presumably all callers run early/serialized — confirm.
+*/
+mach_port_t
+mach_exception_port_set()
+{
+  static mach_port_t __exception_port_set = MACH_PORT_NULL;
+  kern_return_t kret;  
+  if (__exception_port_set == MACH_PORT_NULL) {
+
+    kret = mach_port_allocate(mach_task_self(),
+			      MACH_PORT_RIGHT_PORT_SET,
+			      &__exception_port_set);
+    MACH_CHECK_ERROR("allocating thread exception_ports",kret);
+    /* The handler thread receives the port set (as a word-sized value)
+       and serves it via exception_handler_proc(). */
+    create_system_thread(0,
+                         NULL,
+                         exception_handler_proc, 
+                         (void *)((natural)__exception_port_set));
+  }
+  return __exception_port_set;
+}
+
+/*
+  Setup a new thread to handle those exceptions specified by
+  the mask "which".  This involves creating a special Mach
+  message port, telling the Mach kernel to send exception
+  messages for the calling thread to that port, and setting
+  up a handler thread which listens for and responds to
+  those messages.
+
+*/
+
+/*
+  Establish the lisp thread's TCR as its exception port, and determine
+  whether any other ports have been established by foreign code for
+  exceptions that lisp cares about.
+
+  If this happens at all, it should happen on return from foreign
+  code and on entry to lisp code via a callback.
+
+  This is a lot of trouble (and overhead) to support Java, or other
+  embeddable systems that clobber their caller's thread exception ports.
+  
+*/
+/*
+  Swap in the lisp exception port (named by the TCR) for the exceptions
+  lisp handles on THREAD, saving any previously-installed ports into the
+  TCR's MACH_foreign_exception_state.  tcr->foreign_exception_status is
+  set to the mask of exceptions for which a genuine foreign handler was
+  displaced (so it can be restored around foreign calls).
+*/
+kern_return_t
+tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
+{
+  kern_return_t kret;
+  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
+  int i;
+  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
+  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
+  exception_mask_t mask = 0;
+
+  /* Install lisp_port; the old masks/ports/behaviors/flavors land in fxs. */
+  kret = thread_swap_exception_ports(thread,
+				     LISP_EXCEPTIONS_HANDLED_MASK,
+				     lisp_port,
+				     EXCEPTION_DEFAULT,
+				     THREAD_STATE_NONE,
+				     fxs->masks,
+				     &n,
+				     fxs->ports,
+				     fxs->behaviors,
+				     fxs->flavors);
+  if (kret == KERN_SUCCESS) {
+    fxs->foreign_exception_port_count = n;
+    /* Accumulate the mask of exceptions that had a real (non-null,
+       non-lisp) handler installed by foreign code. */
+    for (i = 0; i < n; i ++) {
+      foreign_port = fxs->ports[i];
+
+      if ((foreign_port != lisp_port) &&
+	  (foreign_port != MACH_PORT_NULL)) {
+	mask |= fxs->masks[i];
+      }
+    }
+    tcr->foreign_exception_status = (int) mask;
+  }
+  return kret;
+}
+
+/*
+  Convenience wrapper: establish the lisp exception port on the TCR's
+  own native (Mach) thread.
+*/
+kern_return_t
+tcr_establish_lisp_exception_port(TCR *tcr)
+{
+  mach_port_t self_thread = (mach_port_t)((natural)tcr->native_thread_id);
+
+  return tcr_establish_exception_port(tcr, self_thread);
+}
+
+/*
+  Do this when calling out to or returning from foreign code, if
+  any conflicting foreign exception ports were established when we
+  last entered lisp code.
+*/
+/*
+  Reinstall any foreign exception ports that were displaced when lisp
+  last established its own port (see tcr_establish_exception_port).
+  Returns the status of the last port-restoration attempt, or
+  KERN_SUCCESS if nothing needed restoring.  (The original version fell
+  off the end of a kern_return_t function — undefined behavior if the
+  caller uses the result.)
+*/
+kern_return_t
+restore_foreign_exception_ports(TCR *tcr)
+{
+  exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status;
+  kern_return_t kret = KERN_SUCCESS;
+  
+  if (m) {
+    MACH_foreign_exception_state *fxs  = 
+      (MACH_foreign_exception_state *) tcr->native_thread_info;
+    int i, n = fxs->foreign_exception_port_count;
+    exception_mask_t tm;
+
+    for (i = 0; i < n; i++) {
+      /* Only restore entries whose mask overlaps the displaced set. */
+      if ((tm = fxs->masks[i]) & m) {
+	kret = thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id),
+					  tm,
+					  fxs->ports[i],
+					  fxs->behaviors[i],
+					  fxs->flavors[i]);
+      }
+    }
+  }
+  return kret;
+}
+				   
+
+/*
+  This assumes that a Mach port (to be used as the thread's exception port) whose
+  "name" matches the TCR's 32-bit address has already been allocated.
+*/
+
+/*
+  Give the thread's exception port (whose Mach name is the TCR address;
+  see the comment above) a send right, install it as the thread's
+  exception port for the exceptions lisp handles, and add it to the
+  global port set served by the exception-handler thread.
+*/
+kern_return_t
+setup_mach_exception_handling(TCR *tcr)
+{
+  mach_port_t 
+    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
+    task_self = mach_task_self();
+  kern_return_t kret;
+
+  kret = mach_port_insert_right(task_self,
+				thread_exception_port,
+				thread_exception_port,
+				MACH_MSG_TYPE_MAKE_SEND);
+  MACH_CHECK_ERROR("adding send right to exception_port",kret);
+
+  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
+  if (kret == KERN_SUCCESS) {
+    /* Membership in the port set is what routes this thread's
+       exception messages to the handler thread. */
+    mach_port_t exception_port_set = mach_exception_port_set();
+
+    kret = mach_port_move_member(task_self,
+				 thread_exception_port,
+				 exception_port_set);
+  }
+  return kret;
+}
+
+/*
+  Per-thread Mach exception setup: allocate the foreign-exception-state
+  record and install the thread's exception port.  Terminates lisp on
+  failure (either allocation or port setup), since exceptions could not
+  be handled for this thread otherwise.
+*/
+void
+darwin_exception_init(TCR *tcr)
+{
+  void tcr_monitor_exception_handling(TCR*, Boolean);
+  kern_return_t kret;
+  MACH_foreign_exception_state *fxs = 
+    calloc(1, sizeof(MACH_foreign_exception_state));
+
+  /* The original stored/used fxs without checking for allocation
+     failure; a NULL here would be dereferenced in
+     tcr_establish_exception_port(). */
+  if (fxs == NULL) {
+    fprintf(dbgout, "Couldn't allocate foreign exception state\n");
+    terminate_lisp();
+  }
+  tcr->native_thread_info = (void *) fxs;
+
+  if ((kret = setup_mach_exception_handling(tcr))
+      != KERN_SUCCESS) {
+    fprintf(dbgout, "Couldn't setup exception handler - error = %d\n", kret);
+    terminate_lisp();
+  }
+}
+
+/*
+  The tcr is the "name" of the corresponding thread's exception port.
+  Destroying the port should remove it from all port sets of which it's
+  a member (notably, the exception port set.)
+*/
+/*
+  Per-thread Mach exception teardown: free the foreign-exception-state
+  record and tear down the thread's exception port.  Destroying the
+  port removes it from the exception port set (see comment above).
+*/
+void
+darwin_exception_cleanup(TCR *tcr)
+{
+  void *fxs = tcr->native_thread_info;
+  extern Boolean use_mach_exception_handling;
+
+  if (fxs) {
+    /* Clear the pointer before freeing to avoid a dangling reference. */
+    tcr->native_thread_info = NULL;
+    free(fxs);
+  }
+  if (use_mach_exception_handling) {
+    /* Drop the send right added in setup_mach_exception_handling(),
+       then destroy the receive right itself. */
+    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
+    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
+  }
+}
+
+
+/*
+  Suspend MACH_THREAD and abort any interruptible system call it is
+  blocked in.  If the abort fails, resume and retry from scratch.
+  Returns false only if the thread can't be suspended at all.
+*/
+Boolean
+suspend_mach_thread(mach_port_t mach_thread)
+{
+  kern_return_t status;
+
+  for (;;) {
+    status = thread_suspend(mach_thread);
+    if (status != KERN_SUCCESS) {
+      return false;
+    }
+    status = thread_abort_safely(mach_thread);
+    if (status == KERN_SUCCESS) {
+      return true;
+    }
+    fprintf(dbgout, "abort failed on thread = 0x%x\n",mach_thread);
+    thread_resume(mach_thread);
+  }
+}
+
+/*
+  Only do this if pthread_kill indicated that the pthread isn't
+  listening to signals anymore, as can happen as soon as pthread_exit()
+  is called on Darwin.  The thread could still call out to lisp as it
+  is exiting, so we need another way to suspend it in this case.
+*/
+/*
+  Suspend the Mach thread underlying TCR, capture its register state,
+  and build a pseudo-signal context frame so the suspended thread's
+  state can be inspected/modified; the frame becomes
+  tcr->suspend_context.  Returns false if the thread couldn't be
+  suspended.
+*/
+Boolean
+mach_suspend_tcr(TCR *tcr)
+{
+  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
+  ExceptionInformation *pseudosigcontext;
+  Boolean result = false;
+  
+  result = suspend_mach_thread(mach_thread);
+  if (result) {
+    mach_msg_type_number_t thread_state_count;
+#ifdef X8664
+    x86_thread_state64_t ts;
+    thread_state_count = x86_THREAD_STATE64_COUNT;
+    thread_get_state(mach_thread,
+                     x86_THREAD_STATE64,
+                     (thread_state_t)&ts,
+                     &thread_state_count);
+#else
+    x86_thread_state32_t ts;
+    /* Use the 32-bit-specific flavor and count: the generic
+       x86_THREAD_STATE flavor (used by the original code) writes a
+       larger structure (state header + union) and would overflow "ts".
+       This also matches the flavor used everywhere else in this file
+       (e.g. catch_exception_raise). */
+    thread_state_count = x86_THREAD_STATE32_COUNT;
+    thread_get_state(mach_thread,
+                     x86_THREAD_STATE32,
+                     (thread_state_t)&ts,
+                     &thread_state_count);
+#endif
+
+    pseudosigcontext = create_thread_context_frame(mach_thread, NULL, NULL,tcr, &ts);
+    pseudosigcontext->uc_onstack = 0;
+    pseudosigcontext->uc_sigmask = (sigset_t) 0;
+    tcr->suspend_context = pseudosigcontext;
+  }
+  return result;
+}
+
+/*
+  Undo mach_suspend_tcr(): restore the thread's register state from the
+  saved suspend context and let it run again.
+*/
+void
+mach_resume_tcr(TCR *tcr)
+{
+  ExceptionInformation *xp;
+  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));
+  
+  xp = tcr->suspend_context;
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
+          tcr, tcr->pending_exception_context);
+#endif
+  /* Clear the context before resuming; it lives on the suspended
+     thread's stack and is invalid once the thread runs. */
+  tcr->suspend_context = NULL;
+  restore_mach_thread_state(mach_thread, xp);
+#ifdef DEBUG_MACH_EXCEPTIONS
+  fprintf(dbgout, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
+          tcr, tcr->pending_exception_context);
+#endif
+  thread_resume(mach_thread);
+}
+
+/*
+  printf-style fatal-error reporter for Mach failures: format the
+  message into a bounded local buffer and die via Fatal().
+*/
+void
+fatal_mach_error(char *format, ...)
+{
+  char message[512];
+  va_list ap;
+
+  va_start(ap, format);
+  vsnprintf(message, sizeof(message), format, ap);
+  va_end(ap);
+
+  Fatal("Mach error", message);
+}
+
+
+
+
+#endif
+
+/* watchpoint stuff */
+
+/*
+  Map SIZE bytes of fresh RWX memory and wrap it in an AREA_WATCHED
+  area.  Dies via allocation_failure() if the mapping fails.
+*/
+area *
+new_watched_area(natural size)
+{
+  char *p;                      /* char *, so p + size is standard C
+                                   (void * arithmetic is a GNU extension) */
+
+  p = MapMemory(NULL, size, MEMPROTECT_RWX);
+  if ((signed_natural)p == -1) {
+    allocation_failure(true, size);
+  }
+  return new_area(p, p + size, AREA_WATCHED);
+}
+
+/*
+  Remove watched area A from the area list and release its pages
+  entirely.  TCR is currently unused.
+*/
+void
+delete_watched_area(area *a, TCR *tcr)
+{
+  natural nbytes = a->high - a->low;
+  char *base = a->low;
+
+  condemn_area_holding_area_lock(a);
+
+  if (nbytes) {
+    int err;
+
+/* can't use UnMapMemory() because it only uses MEM_DECOMMIT */
+#ifdef WINDOWS
+    /* VirtualFree() returns nonzero on SUCCESS — the opposite sense of
+       munmap() — so the original "if (err != 0) Fatal(...)" fired on
+       the success path.  Normalize to the munmap convention here. */
+    err = VirtualFree(base, nbytes, MEM_RELEASE) ? 0 : -1;
+#else
+    err = munmap(base, nbytes);
+#endif
+    if (err != 0)
+      Fatal("munmap in delete_watched_area", "");
+  }
+}
+
+/*
+  Return the total heap footprint (header + data, rounded up to
+  allocation granularity: 8 bytes on x8632, 16 on x8664) of the uvector
+  whose header word is at *U.
+*/
+natural
+uvector_total_size_in_bytes(LispObj *u)
+{
+  LispObj header = header_of(u);
+  natural header_tag = fulltag_of(header);
+  natural subtag = header_subtag(header);
+  natural element_count = header_element_count(header);
+  natural nbytes = 0;
+
+#ifdef X8632
+  /* Subtag ranges are ordered by element width, so these tests must
+     stay in this order. */
+  if ((nodeheader_tag_p(header_tag)) ||
+      (subtag <= max_32_bit_ivector_subtag)) {
+    nbytes = element_count << 2;
+  } else if (subtag <= max_8_bit_ivector_subtag) {
+    nbytes = element_count;
+  } else if (subtag <= max_16_bit_ivector_subtag) {
+    nbytes = element_count << 1;
+  } else if (subtag == subtag_double_float_vector) {
+    nbytes = element_count << 3;
+  } else {
+    /* bit vector: one bit per element */
+    nbytes = (element_count + 7) >> 3;
+  }
+  /* add 4 byte header and round up to multiple of 8 bytes */
+  return ~7 & (4 + nbytes + 7);
+#endif
+#ifdef X8664
+  if ((nodeheader_tag_p(header_tag)) || (header_tag == ivector_class_64_bit)) {
+    nbytes = element_count << 3;
+  } else if (header_tag == ivector_class_32_bit) {
+    nbytes = element_count << 2;
+  } else {
+    /* ivector_class_other_bit contains 8, 16-bit arrays & bit vector */
+    if (subtag == subtag_bit_vector) {
+      nbytes = (element_count + 7) >> 3;
+    } else if (subtag >= min_8_bit_ivector_subtag) {
+      nbytes = element_count;
+    } else {
+      nbytes = element_count << 1;
+    }
+  }
+  /* add 8 byte header and round up to multiple of 16 bytes */
+  return ~15 & (8 + nbytes + 15);
+#endif
+}
+
+extern void wp_update_references(TCR *, LispObj, LispObj);
+
+/*
+ * Other threads are suspended and pc-lusered.
+ *
+ * param contains a tagged pointer to a uvector or a cons cell
+ */
+/*
+  Move the object denoted by PARAM (a tagged LispObj; cons or uvector)
+  out of the dynamic area into a freshly-created, write-protected
+  watched area, then rewrite all references to point at the copy.
+  Returns 1 on success, 0 if the object isn't in the dynamic area.
+  Runs with other threads suspended (see comment above).
+*/
+signed_natural
+watch_object(TCR *tcr, signed_natural param)
+{
+  LispObj object = (LispObj)param;
+  unsigned tag = fulltag_of(object);
+  LispObj *noderef = (LispObj *)untag(object);
+  area *object_area = area_containing((BytePtr)noderef);
+  natural size;
+
+  if (tag == fulltag_cons)
+    size = 2 * node_size;
+  else
+    size = uvector_total_size_in_bytes(noderef);
+
+  if (object_area && object_area->code == AREA_DYNAMIC) {
+    area *a = new_watched_area(size);
+    LispObj old = object;
+    /* New tagged reference: same tag, new (watched-area) address. */
+    LispObj new = (LispObj)((natural)a->low + tag);
+
+    add_area_holding_area_lock(a);
+
+    /* move object to watched area */
+    memcpy(a->low, noderef, size);
+    ProtectMemory(a->low, size);
+    /* Zero the old copy so stale data isn't mistaken for live objects. */
+    memset(noderef, 0, size);
+    wp_update_references(tcr, old, new);
+    check_all_areas(tcr);
+    return 1;
+  }
+  return 0;
+}
+
+/*
+ * We expect the watched object in arg_y, and the new uninitialized
+ * object (which is just zeroed) in arg_z.
+ */
+/*
+  Reverse of watch_object: copy the watched object (arg_y) into the new
+  uninitialized object (arg_z), delete the watched area, rewrite all
+  references, and repair refbits that wp_update_references doesn't
+  maintain.  Always returns 0; arg_z in the exception context is set to
+  the new reference on success.
+*/
+signed_natural
+unwatch_object(TCR *tcr, signed_natural param)
+{
+  ExceptionInformation *xp = tcr->xframe->curr;
+  LispObj old = xpGPR(xp, Iarg_y);
+  unsigned tag = fulltag_of(old);
+  LispObj new = xpGPR(xp, Iarg_z);
+  LispObj *oldnode = (LispObj *)untag(old);
+  LispObj *newnode = (LispObj *)untag(new);
+  /* NOTE(review): this passes the TAGGED pointer, unlike watch_object
+     which passes the untagged one; harmless if areas are larger than a
+     tag offset, but confirm the asymmetry is intentional. */
+  area *a = area_containing((BytePtr)old);
+
+  if (a && a->code == AREA_WATCHED) {
+    natural size;
+
+    if (tag == fulltag_cons)
+      size = 2 * node_size;
+    else
+      size = uvector_total_size_in_bytes(oldnode);
+
+    memcpy(newnode, oldnode, size);
+    delete_watched_area(a, tcr);
+    wp_update_references(tcr, old, new);
+    /* because wp_update_references doesn't update refbits */
+    tenure_to_area(tenured_area);
+    /* Unwatching can (re-)introduce managed_static->dynamic references */
+    zero_bits(managed_static_area->refbits,managed_static_area->ndnodes);
+    update_managed_refs(managed_static_area, low_markable_address, area_dnode(active_dynamic_area->active, low_markable_address));
+    check_all_areas(tcr);
+    xpGPR(xp, Iarg_z) = new;
+  }
+  return 0;
+}
+
+/*
+  Dispatch a watchpoint UUO: the Iimm0 register selects watch vs.
+  unwatch, arg_z holds the (tagged) object.  The operation runs in a
+  GC-like context via gc_like_from_xp().  Always reports the trap as
+  handled.
+*/
+Boolean
+handle_watch_trap(ExceptionInformation *xp, TCR *tcr)
+{
+  LispObj op = xpGPR(xp,Iimm0);
+  LispObj obj = xpGPR(xp, Iarg_z);
+
+  if (op == WATCH_TRAP_FUNCTION_WATCH) {
+    /* watch_object returns 0 if the object couldn't be watched;
+       report that to lisp by returning NIL in arg_z. */
+    if (gc_like_from_xp(xp, watch_object, obj) == 0) {
+      xpGPR(xp,Iarg_z) = lisp_nil;
+    }
+  } else if (op == WATCH_TRAP_FUNCTION_UNWATCH) {
+    gc_like_from_xp(xp, unwatch_object, 0);
+  }
+  return true;
+}
+
Index: /branches/arm/lisp-kernel/x86-exceptions.h
===================================================================
--- /branches/arm/lisp-kernel/x86-exceptions.h	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-exceptions.h	(revision 13357)
@@ -0,0 +1,269 @@
+/*
+   Copyright (C) 2005-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#ifndef X86_EXCEPTIONS_H
+#define X86_EXCEPTIONS_H 1
+
+typedef u8_t opcode, *pc;
+
+#ifdef LINUX
+#define xpGPRvector(x) ((natural *)(&((x)->uc_mcontext.gregs)))
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define set_xpGPR(x,gpr,new) xpGPR((x),(gpr)) = (natural)(new)
+#define xpPC(x) (xpGPR(x,Iip))
+#define xpMMXreg(x,n)  *((natural *)(&((x)->uc_mcontext.fpregs->_st[n])))
+#define eflags_register(xp) xpGPR(xp,Iflags)
+#endif
+
+#ifdef DARWIN
+#define DARWIN_USE_PSEUDO_SIGRETURN 1
+#include <sys/syscall.h>
+#define DarwinSigReturn(context) do {\
+    darwin_sigreturn(context);\
+    Bug(context,"sigreturn returned");\
+  } while (0)
+
+#define xpGPRvector(x) ((natural *)(&(UC_MCONTEXT(x)->__ss)))
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define set_xpGPR(x,gpr,new) xpGPR((x),(gpr)) = (natural)(new)
+#define xpPC(x) (xpGPR(x,Iip))
+#define eflags_register(xp) xpGPR(xp,Iflags)
+#define xpFPRvector(x) ((natural *)(&(UC_MCONTEXT(x)->__fs.__fpu_xmm0)))
+#define xpMMXvector(x) (&(UC_MCONTEXT(x)->__fs.__fpu_stmm0))
+/* Note that this yields only the lower half of the MMX reg on x8632 */
+#define xpMMXreg(x,n) *(natural *)&(xpMMXvector(x)[n])
+
+#include <mach/mach.h>
+#include <mach/mach_error.h>
+#include <mach/machine/thread_state.h>
+#include <mach/machine/thread_status.h>
+
+pthread_mutex_t *mach_exception_lock;
+
+#endif
+
+#ifdef FREEBSD
+#ifdef X8664
+#include <machine/fpu.h>
+#else
+#include "freebsdx8632/fpu.h"
+#endif
+#define xpGPRvector(x) ((natural *)(&((x)->uc_mcontext)))
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define set_xpGPR(x,gpr,new) xpGPR((x),(gpr)) = (natural)(new)
+#define eflags_register(xp) xpGPR(xp,Iflags)
+#define xpPC(x) xpGPR(x,Iip)
+#ifdef X8664
+#define xpMMXreg(x,n) *((natural *)(&(((struct savefpu *)(&(x)->uc_mcontext.mc_fpstate))->sv_fp[n])))
+#define xpXMMregs(x)(&(((struct savefpu *)(&(x)->uc_mcontext.mc_fpstate))->sv_xmm[0]))
+#else
+#define xpMMXreg(x,n) *((natural *)(&(((struct ccl_savexmm *)(&(x)->uc_mcontext.mc_fpstate))->sv_fp[n])))
+#define xpXMMregs(x)(&(((struct ccl_savexmm *)(&(x)->uc_mcontext.mc_fpstate))->sv_xmm[0]))
+#endif
+#endif
+
+#ifdef SOLARIS
+#define xpGPRvector(x) ((x)->uc_mcontext.gregs)
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define set_xpGPR(x,gpr,new) xpGPR((x),(gpr)) = (natural)(new)
+#define xpPC(x) xpGPR(x,Iip)
+#define eflags_register(xp) xpGPR(xp,Iflags)
+#define xpXMMregs(x)(&((x)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[0]))
+#ifdef X8632
+#define xpMMXreg(x,n)*(natural *)(&(((struct fnsave_state *)(&(((x)->uc_mcontext.fpregs))))->f_st[n]))
+#endif
+#endif
+
+#ifdef WINDOWS
+#ifdef X8664
+#define xpGPRvector(x) ((DWORD64 *)(&(x)->Rax))
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define xpPC(x) xpGPR(x,Iip)
+#define eflags_register(xp) xp->EFlags
+#define xpMXCSRptr(x) (DWORD *)(&(x->MxCsr))
+#else
+#define xpGPRvector(x) ((DWORD *)(&(x)->Edi))
+#define xpGPR(x,gprno) (xpGPRvector(x)[gprno])
+#define xpPC(x) xpGPR(x,Iip)
+#define eflags_register(xp) xp->EFlags
+#define xpFPRvector(x) ((natural *)(&(x->ExtendedRegisters[10*16])))
+#define xpMMXreg(x,n)  (*((u64_t *)(&(x->FloatSave.RegisterArea[10*(n)]))))
+#define xpMXCSRptr(x) (DWORD *)(&(x->ExtendedRegisters[24]))
+#endif
+#endif
+
+#ifdef DARWIN
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGUSR1
+#endif
+#ifdef LINUX
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGPWR
+#endif
+#ifdef FREEBSD
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGEMT
+#endif
+#ifdef SOLARIS
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGUSR1
+#endif
+#ifdef WINDOWS
+#define SIGNAL_FOR_PROCESS_INTERRUPT SIGINT
+#ifndef SIGBUS
+#define SIGBUS 10
+#endif
+#ifndef CONTEXT_ALL
+#define CONTEXT_ALL (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS | CONTEXT_EXTENDED_REGISTERS)
+#endif
+#endif
+
+
+
+void switch_to_foreign_stack(void*, ...);
+
+#define INTN_OPCODE 0xcd
+
+#define UUO_GC_TRAP    0xc4
+#define UUO_ALLOC_TRAP 0xc5
+#define UUO_DEBUG_TRAP 0xca
+#define UUO_DEBUG_TRAP_WITH_STRING 0xcd
+#define UUO_WATCH_TRAP 0xce
+  #define WATCH_TRAP_FUNCTION_WATCH 0
+  #define WATCH_TRAP_FUNCTION_UNWATCH 1
+
+#define XUUO_OPCODE_0 0x0f
+#define XUUO_OPCODE_1 0x0b
+
+#define XUUO_TLB_TOO_SMALL 1
+#define XUUO_INTERRUPT_NOW 2
+#define XUUO_SUSPEND_NOW 3
+#define XUUO_INTERRUPT 4
+#define XUUO_SUSPEND 5
+#define XUUO_SUSPEND_ALL 6
+#define XUUO_RESUME 7
+#define XUUO_RESUME_ALL 8
+#define XUUO_KILL 9
+#define XUUO_ALLOCATE_LIST 10
+
+void
+pc_luser_xp(ExceptionInformation*, TCR*, signed_natural*);
+
+
+typedef enum {
+  ID_unrecognized_alloc_instruction,
+  ID_load_allocptr_reg_from_tcr_save_allocptr_instruction,
+  ID_compare_allocptr_reg_to_tcr_save_allocbase_instruction,
+  ID_branch_around_alloc_trap_instruction,
+  ID_alloc_trap_instruction,
+  ID_set_allocptr_header_instruction,
+  ID_clear_tcr_save_allocptr_tag_instruction
+} alloc_instruction_id;
+
+#ifdef LINUX
+#define SIGNUM_FOR_INTN_TRAP SIGSEGV
+#define IS_MAYBE_INT_TRAP(info,xp) ((xpGPR(xp,REG_TRAPNO)==0xd)&&((xpGPR(xp,REG_ERR)&7)==2))
+#define IS_PAGE_FAULT(info,xp) (xpGPR(xp,REG_TRAPNO)==0xe)
+#define SIGRETURN(context)
+#endif
+
+#ifdef FREEBSD
+extern void freebsd_sigreturn(ExceptionInformation *);
+#define SIGNUM_FOR_INTN_TRAP SIGBUS
+#define IS_MAYBE_INT_TRAP(info,xp) ((xp->uc_mcontext.mc_trapno == T_PROTFLT) && ((xp->uc_mcontext.mc_err & 7) == 2))
+#define IS_PAGE_FAULT(info,xp) (xp->uc_mcontext.mc_trapno == T_PAGEFLT)
+#define SIGRETURN(context) freebsd_sigreturn(context)
+#endif
+
+#ifdef DARWIN
+#define SIGNUM_FOR_INTN_TRAP SIGSEGV /* Not really, but our Mach handler fakes that */
+#define IS_MAYBE_INT_TRAP(info,xp) ((UC_MCONTEXT(xp)->__es.__trapno == 0xd) && (((UC_MCONTEXT(xp)->__es.__err)&7)==2))
+#define IS_PAGE_FAULT(info,xp) (UC_MCONTEXT(xp)->__es.__trapno == 0xe)
+/* The x86 version of sigreturn just needs the context argument; the
+   hidden, magic "flavor" argument that sigtramp uses is ignored. */
+#define SIGRETURN(context) DarwinSigReturn(context)
+#endif
+
+#ifdef SOLARIS
+#define SIGNUM_FOR_INTN_TRAP SIGSEGV
+#ifdef X8664
+#define IS_MAYBE_INT_TRAP(info,xp) ((xpGPR(xp,REG_TRAPNO)==0xd)&&((xpGPR(xp,REG_ERR)&7)==2))
+#define IS_PAGE_FAULT(info,xp) (xpGPR(xp,REG_TRAPNO)==0xe)
+#else
+#define IS_MAYBE_INT_TRAP(info,xp) ((xpGPR(xp,TRAPNO)==0xd)&&((xpGPR(xp,ERR)&7)==2))
+#define IS_PAGE_FAULT(info,xp) (xpGPR(xp,TRAPNO)==0xe)
+#endif
+#define SIGRETURN(context) setcontext(context)
+#endif
+
+#ifdef WINDOWS
+#define SIGNUM_FOR_INTN_TRAP SIGSEGV /* Also fake */
+#define IS_MAYBE_INT_TRAP(info,xp) \
+  ((info->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) &&       \
+   (info->ExceptionInformation[0]==0) &&                       \
+   (info->ExceptionInformation[1]==(ULONG_PTR)(-1L)))
+#define IS_PAGE_FAULT(info,xp) (1)
+#define SIGRETURN(context)      /* for now */
+#endif
+
+/* Please go away. */
+#ifdef DARWIN_GS_HACK
+extern Boolean ensure_gs_pthread(void);
+extern void set_gs_address(void *);
+#endif
+
+
+/* sigaltstack isn't thread-specific on The World's Most Advanced OS */
+#ifdef DARWIN
+#undef USE_SIGALTSTACK
+#else
+#ifdef WINDOWS
+#undef USE_SIGALTSTACK
+#else
+#define USE_SIGALTSTACK 1
+#endif
+#endif
+
+#ifdef USE_SIGALTSTACK
+void setup_sigaltstack(area *);
+#endif
+
+/* recognizing the function associated with a tagged return address */
+/* now involves recognizing an "(lea (@ disp (% rip)) (% rn))" */
+/* instruction at the tra */
+
+#define RECOVER_FN_FROM_RIP_LENGTH 7 /* the instruction is 7 bytes long */
+#define RECOVER_FN_FROM_RIP_DISP_OFFSET 3 /* displacement word is 3 bytes in */
+#define RECOVER_FN_FROM_RIP_WORD0 0x8d4c /* 0x4c 0x8d, little-endian */
+#define RECOVER_FN_FROM_RIP_BYTE2 0x2d  /* third byte of opcode */
+
+extern natural get_mxcsr();
+extern void set_mxcsr(natural);
+
+#ifdef WINDOWS
+typedef struct {
+  HANDLE h;
+  OVERLAPPED *o;
+} pending_io;
+#endif
+
+#ifdef X8632
+/* The 32-bit immediate value in the instruction
+ * "(mov ($ 0x12345678) (% fn))" at a tagged return address
+ * refers to the associated function.
+ */
+#define RECOVER_FN_OPCODE 0xbf
+#define RECOVER_FN_LENGTH 5
+#endif
+
+#endif /* X86_EXCEPTIONS_H */
+
Index: /branches/arm/lisp-kernel/x86-gc.c
===================================================================
--- /branches/arm/lisp-kernel/x86-gc.c	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-gc.c	(revision 13357)
@@ -0,0 +1,3375 @@
+/*
+   Copyright (C) 2009 Clozure Associates
+   Copyright (C) 1994-2001 Digitool, Inc
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lisp.h"
+#include "lisp_globals.h"
+#include "bits.h"
+#include "gc.h"
+#include "area.h"
+#include "Threads.h"
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+
+#ifdef X8632
+/* Return the number of immediate (non-node) words at the front of the
+   function vector FN.  The 16-bit word immediately preceding FN either
+   encodes the count directly, or (if bit 15 is set) encodes the number
+   of constants, from which the immediate word count is derived. */
+inline natural
+imm_word_count(LispObj fn)
+{
+  natural w = ((unsigned short *)fn)[-1];
+
+  if (w & 0x8000) {
+    /* 
+     * The low 15 bits encode the number of constants.
+     * Compute and return the immediate word count.
+     */
+    LispObj header = header_of(fn);
+    natural element_count = header_element_count(header);
+
+    return element_count - (w & 0x7fff);
+  } else {
+    /* The immediate word count is encoded directly. */
+    return w;
+  }
+}
+#endif
+
+/* Heap sanity checking. */
+
+/* Sanity-check a single lisp node N: immediates pass trivially; tagged
+   pointers must point into a known heap area (not free space) and the
+   object they reference must have a plausible header.  Calls Bug() on
+   any inconsistency. */
+void
+check_node(LispObj n)
+{
+  int tag = fulltag_of(n), header_tag;
+  area *a;
+  LispObj header;
+
+  /* Small immediate (fits in the low byte): nothing to check. */
+  if (n == (n & 0xff)) {
+    return;
+  }
+
+  switch (tag) {
+  case fulltag_even_fixnum:
+  case fulltag_odd_fixnum:
+#ifdef X8632
+  case fulltag_imm:
+#endif
+#ifdef X8664
+  case fulltag_imm_0:
+  case fulltag_imm_1:
+#endif
+    return;
+
+#ifdef X8664
+  case fulltag_nil:
+    if (n != lisp_nil) {
+      Bug(NULL,"Object tagged as nil, not nil : " LISP, n);
+    }
+    return;
+#endif
+
+#ifdef X8632
+  case fulltag_nodeheader:
+  case fulltag_immheader:
+#endif
+#ifdef X8664
+  case fulltag_nodeheader_0: 
+  case fulltag_nodeheader_1: 
+  case fulltag_immheader_0: 
+  case fulltag_immheader_1: 
+  case fulltag_immheader_2: 
+#endif
+    /* A header word is never a valid node value. */
+    Bug(NULL, "Header not expected : 0x" LISP, n);
+    return;
+
+#ifdef X8632
+  case fulltag_tra:
+#endif
+#ifdef X8664
+  case fulltag_tra_0:
+  case fulltag_tra_1:
+#endif
+    a = heap_area_containing((BytePtr)ptr_from_lispobj(n));
+    if (a == NULL) {
+      /* TRA outside the heap (e.g. on a stack) is acceptable, but it
+         must not point into the dynamic area's free space. */
+      a = active_dynamic_area;
+      if ((n > (ptr_to_lispobj(a->active))) &&
+          (n < (ptr_to_lispobj(a->high)))) {
+        Bug(NULL, "TRA points to heap free space: 0x" LISP, n);
+      }
+      return;
+    }
+    /* tra points into the heap.  Check displacement, then
+       check the function it (should) identify.
+    */
+#ifdef X8632
+    {
+      LispObj fun = 0;
+
+      /* The TRA should be a "mov imm32, %fn"; the imm32 is the function. */
+      if (*(unsigned char *)n == RECOVER_FN_OPCODE)
+	fun = *(LispObj *)(n + 1);
+      if (fun == 0 ||
+	 (header_subtag(header_of(fun)) != subtag_function) ||
+	 (heap_area_containing((BytePtr)ptr_from_lispobj(fun)) != a)) {
+	Bug(NULL, "TRA at 0x" LISP " has bad function address 0x" LISP "\n", n, fun);
+      }
+      n = fun;
+    }
+#endif
+#ifdef X8664
+    {
+      int disp = 0;
+      LispObj m = n;
+
+      /* The TRA should be a RIP-relative LEA whose displacement
+         recovers the function's address. */
+      if ((*((unsigned short *)n) == RECOVER_FN_FROM_RIP_WORD0) &&
+          (*((unsigned char *)(n+2)) == RECOVER_FN_FROM_RIP_BYTE2)) {
+        disp = (*(int *) (n+3));
+        n = RECOVER_FN_FROM_RIP_LENGTH+m+disp;
+      }
+      if ((disp == 0) ||
+          (fulltag_of(n) != fulltag_function) ||
+          (heap_area_containing((BytePtr)ptr_from_lispobj(n)) != a)) {
+        Bug(NULL, "TRA at 0x" LISP " has bad displacement %d\n", n, disp);
+      }
+    }
+#endif
+    /* Otherwise, fall through and check the header on the function
+       that the tra references */
+
+  case fulltag_misc:
+  case fulltag_cons:
+#ifdef X8664
+  case fulltag_symbol:
+  case fulltag_function:
+#endif
+    a = heap_area_containing((BytePtr)ptr_from_lispobj(n));
+    
+    if (a == NULL) {
+      /* Can't do as much sanity checking as we'd like to
+         if object is a defunct stack-consed object.
+         If a dangling reference to the heap, that's
+         bad .. */
+      a = active_dynamic_area;
+      if ((n > (ptr_to_lispobj(a->active))) &&
+          (n < (ptr_to_lispobj(a->high)))) {
+        Bug(NULL, "Node points to heap free space: 0x" LISP, n);
+      }
+      return;
+    }
+    break;
+  }
+  /* Node points to heap area, so check header/lack thereof. */
+  header = header_of(n);
+  header_tag = fulltag_of(header);
+  if (tag == fulltag_cons) {
+    /* A cons cell's car must not look like a vector header. */
+    if ((nodeheader_tag_p(header_tag)) ||
+        (immheader_tag_p(header_tag))) {
+      Bug(NULL, "Cons cell at 0x" LISP " has bogus header : 0x" LISP, n, header);
+    }
+    return;
+  }
+
+  if ((!nodeheader_tag_p(header_tag)) &&
+      (!immheader_tag_p(header_tag))) {
+    Bug(NULL,"Vector at 0x" LISP " has bogus header : 0x" LISP, n, header);
+  }
+  return;
+}
+
+/* Intentionally empty stub: mark-bit verification is not implemented
+   (or is compiled out) in this port. */
+void
+check_all_mark_bits(LispObj *nodepointer) 
+{
+}
+
+
+
+
+
+/* Walk the dnode-aligned range [start,end), checking every node with
+   check_node().  Ivectors are skipped wholesale; gvector headers cause
+   their element nodes to be checked (skipping a function's immediate
+   words).  If HEADER_ALLOWED is false, any header word is a bug
+   (used for stacks and the TLB, which hold only nodes). */
+void
+check_range(LispObj *start, LispObj *end, Boolean header_allowed)
+{
+  LispObj node, *current = start, *prev = NULL;
+  int tag;
+  natural elements;
+
+  while (current < end) {
+    prev = current;
+    node = *current++;
+    tag = fulltag_of(node);
+    if (immheader_tag_p(tag)) {
+      if (! header_allowed) {
+        Bug(NULL, "Header not expected at 0x" LISP "\n", prev);
+      }
+      /* Ivector contents aren't nodes; skip the whole object. */
+      current = (LispObj *)skip_over_ivector((natural)prev, node);
+    } else if (nodeheader_tag_p(tag)) {
+      if (! header_allowed) {
+        Bug(NULL, "Header not expected at 0x" LISP "\n", prev);
+      }
+      /* Element count rounded up to an odd number: gvectors are
+         padded to a whole number of dnodes. */
+      elements = header_element_count(node) | 1;
+      if (header_subtag(node) == subtag_function) {
+#ifdef X8632
+	int skip = *(unsigned short *)current;
+
+	/* XXX bootstrapping */
+	if (skip & 0x8000)
+	  skip = elements - (skip & 0x7fff);
+#else
+        int skip = *(int *)current;
+#endif
+        /* Skip the function's immediate (code) words. */
+        current += skip;
+        elements -= skip;
+      }
+      while (elements--) {
+        check_node(*current++);
+      }
+    } else {
+      /* Plain pair of nodes (one dnode). */
+      check_node(node);
+      check_node(*current++);
+    }
+  }
+
+  if (current != end) {
+    Bug(NULL, "Overran end of memory range: start = 0x%08x, end = 0x%08x, prev = 0x%08x, current = 0x%08x",
+        start, end, prev, current);
+  }
+}
+
+#ifdef X8632
+/* Check the node-holding GPRs in the exception context XP.
+   NODE_REGS_MASK has one bit per register (bit i => register i holds
+   a node and should be checked). */
+void
+check_xp(ExceptionInformation *xp, natural node_regs_mask)
+{
+  /* NOTE(review): dnode is declared but unused here. */
+  natural *regs = (natural *) xpGPRvector(xp), dnode;
+
+  if (node_regs_mask & (1<<0)) check_node(regs[REG_EAX]);
+  if (node_regs_mask & (1<<1)) check_node(regs[REG_ECX]);
+  if (regs[REG_EFL] & EFL_DF) {
+    /* DF set means EDX should be treated as an imm reg */
+    ;
+  } else
+    if (node_regs_mask & (1<<2)) check_node(regs[REG_EDX]);
+
+  if (node_regs_mask & (1<<3)) check_node(regs[REG_EBX]);
+  if (node_regs_mask & (1<<4)) check_node(regs[REG_ESP]);
+  if (node_regs_mask & (1<<5)) check_node(regs[REG_EBP]);
+  if (node_regs_mask & (1<<6)) check_node(regs[REG_ESI]);
+  if (node_regs_mask & (1<<7)) check_node(regs[REG_EDI]);
+}
+#else
+/* x8664: the set of node registers is fixed, so no mask is needed;
+   check each of them in the exception context XP. */
+void
+check_xp(ExceptionInformation *xp)
+{
+  /* NOTE(review): dnode is declared but unused here. */
+  natural *regs = (natural *) xpGPRvector(xp), dnode;
+
+  check_node(regs[Iarg_z]);
+  check_node(regs[Iarg_y]);
+  check_node(regs[Iarg_x]);
+  check_node(regs[Isave3]);
+  check_node(regs[Isave2]);
+  check_node(regs[Isave1]);
+  check_node(regs[Isave0]);
+  check_node(regs[Ifn]);
+  check_node(regs[Itemp0]);
+  check_node(regs[Itemp1]);
+  check_node(regs[Itemp2]);
+}
+#endif
+
+/* Check the per-thread GC roots of every TCR on the circular list
+   starting at FIRST: the saved exception context, any pushed xframes,
+   x8632 save registers, and the thread-local binding vector (TLB). */
+void
+check_tcrs(TCR *first)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+  
+  TCR *tcr = first;
+  LispObj *tlb_start,*tlb_end;
+
+  do {
+    xp = tcr->gc_context;
+    if (xp) {
+#ifdef X8632
+      check_xp(xp,tcr->node_regs_mask);
+#else
+      check_xp(xp);
+#endif
+    }
+#ifdef X8632
+    /* x8632 keeps some node values in the TCR itself. */
+    check_node(tcr->save0);
+    check_node(tcr->save1);
+    check_node(tcr->save2);
+    check_node(tcr->save3);
+    check_node(tcr->next_method_context);
+#endif
+    for (xframes = (xframe_list *) tcr->xframe; 
+         xframes; 
+         xframes = xframes->prev) {
+#ifndef X8632
+      check_xp(xframes->curr);
+#else
+      check_xp(xframes->curr, xframes->node_regs_mask);
+#endif
+    }
+    tlb_start = tcr->tlb_pointer;
+    if (tlb_start) {
+      /* tlb_limit is a byte count scaled like a fixnum; headers are
+         not allowed in the TLB. */
+      tlb_end = tlb_start + ((tcr->tlb_limit)>>fixnumshift);
+      check_range(tlb_start,tlb_end,false);
+    }
+    tcr = tcr->next;
+  } while (tcr != first);
+}
+
+  
+/* Run heap sanity checks over every memory area (dynamic/static heaps,
+   value stacks, temp stacks), then over the roots of every thread. */
+void
+check_all_areas(TCR *tcr)
+{
+  area *a = active_dynamic_area;
+  area_code code = a->code;
+
+  while (code != AREA_VOID) {
+    switch (code) {
+    case AREA_DYNAMIC:
+    case AREA_WATCHED:
+    case AREA_STATIC:
+    case AREA_MANAGED_STATIC:
+      /* Heap areas: live data is [low,active); headers allowed. */
+      check_range((LispObj *)a->low, (LispObj *)a->active, true);
+      break;
+
+    case AREA_VSTACK:
+      {
+        /* Value stack grows down: live data is [active,high).
+           If the live region starts misaligned (odd node), check
+           the single node first so the rest is dnode-aligned. */
+        LispObj* low = (LispObj *)a->active;
+        LispObj* high = (LispObj *)a->high;
+        
+        if (((natural)low) & node_size) {
+          check_node(*low++);
+        }
+        check_range(low, high, false);
+      }
+      break;
+
+    case AREA_TSTACK:
+      {
+        /* Temp stack: a chain of frames; each frame's first word
+           links to the next frame, and the two-word frame header is
+           skipped when checking contents. */
+        LispObj *current, *next,
+                *start = (LispObj *) a->active,
+                *end = start,
+                *limit = (LispObj *) a->high;
+                 
+        for (current = start;
+             end != limit;
+             current = next) {
+          next = ptr_from_lispobj(*current);
+          end = ((next >= start) && (next < limit)) ? next : limit;
+          check_range(current+2, end, true);
+        }
+      }
+      break;
+    }
+    a = a->succ;
+    code = (a->code);
+  }
+
+  check_tcrs(tcr);
+}
+
+
+
+
+
+
+
+
+/* Sooner or later, this probably wants to be in assembler */
+/* Mark the object referenced by the root N (if N is a node that points
+   into the GC'd area and isn't already marked): set its mark bit(s),
+   then recursively mark its contents via rmark().  Handles TRAs by
+   first recovering the function they reference, and gives hash
+   vectors, pools, weak vectors, and functions their special
+   treatment. */
+void
+mark_root(LispObj n)
+{
+  int tag_n = fulltag_of(n);
+  natural dnode, bits, *bitsp, mask;
+
+  if (!is_node_fulltag(tag_n)) {
+    return;
+  }
+
+  dnode = gc_area_dnode(n);
+  if (dnode >= GCndnodes_in_area) {
+    return;
+  }
+
+#ifdef X8632
+  /* Replace a tagged return address by the function it references. */
+  if (tag_n == fulltag_tra) {
+    if (*(unsigned char *)n == RECOVER_FN_OPCODE) {
+      n = *(LispObj *)(n + 1);
+      tag_n = fulltag_misc;
+      dnode = gc_area_dnode(n);
+    } else
+      return;
+  }
+#endif
+#ifdef X8664
+  /* Replace a tagged return address by the function it references,
+     via the RIP-relative LEA's displacement. */
+  if (tag_of(n) == tag_tra) {
+    if ((*((unsigned short *)n) == RECOVER_FN_FROM_RIP_WORD0) &&
+        (*((unsigned char *)(n+2)) == RECOVER_FN_FROM_RIP_BYTE2)) {
+      int sdisp = (*(int *) (n+3));
+      n = RECOVER_FN_FROM_RIP_LENGTH+n+sdisp;
+      tag_n = fulltag_function;
+      dnode = gc_area_dnode(n);
+    }
+    else {
+      return;
+    }
+  }
+#endif
+
+  /* Already marked?  Then done; otherwise set the mark bit. */
+  set_bits_vars(GCmarkbits,dnode,bitsp,bits,mask);
+  if (bits & mask) {
+    return;
+  }
+  *bitsp = (bits | mask);
+
+  if (tag_n == fulltag_cons) {
+    cons *c = (cons *) ptr_from_lispobj(untag(n));
+
+    rmark(c->car);
+    rmark(c->cdr);
+    return;
+  }
+  {
+    LispObj *base = (LispObj *) ptr_from_lispobj(untag(n));
+    natural
+      header = *((natural *) base),
+      subtag = header_subtag(header),
+      element_count = header_element_count(header),
+      total_size_in_bytes,      /* including 4/8-byte header */
+      suffix_dnodes;
+    natural prefix_nodes = 0;
+
+    tag_n = fulltag_of(header);
+
+    /* Compute the object's total size from its header so all of its
+       dnodes can be marked. */
+#ifdef X8664
+    if ((nodeheader_tag_p(tag_n)) ||
+        (tag_n == ivector_class_64_bit)) {
+      total_size_in_bytes = 8 + (element_count<<3);
+    } else if (tag_n == ivector_class_32_bit) {
+      total_size_in_bytes = 8 + (element_count<<2);
+    } else {
+      /* ivector_class_other_bit contains 8, 16-bit arrays & bitvector */
+      if (subtag == subtag_bit_vector) {
+        total_size_in_bytes = 8 + ((element_count+7)>>3);
+      } else if (subtag >= min_8_bit_ivector_subtag) {
+	total_size_in_bytes = 8 + element_count;
+      } else {
+        total_size_in_bytes = 8 + (element_count<<1);
+      }
+    }
+#endif
+#ifdef X8632
+    if ((tag_n == fulltag_nodeheader) ||
+        (subtag <= max_32_bit_ivector_subtag)) {
+      total_size_in_bytes = 4 + (element_count<<2);
+    } else if (subtag <= max_8_bit_ivector_subtag) {
+      total_size_in_bytes = 4 + element_count;
+    } else if (subtag <= max_16_bit_ivector_subtag) {
+      total_size_in_bytes = 4 + (element_count<<1);
+    } else if (subtag == subtag_double_float_vector) {
+      total_size_in_bytes = 8 + (element_count<<3);
+    } else {
+      total_size_in_bytes = 4 + ((element_count+7)>>3);
+    }
+#endif
+
+
+    suffix_dnodes = ((total_size_in_bytes+(dnode_size-1))>>dnode_shift) -1;
+
+    /* Mark the object's remaining dnodes (the first was marked above). */
+    if (suffix_dnodes) {
+      set_n_bits(GCmarkbits, dnode+1, suffix_dnodes);
+    }
+
+    if (nodeheader_tag_p(tag_n)) {
+      if (subtag == subtag_hash_vector) {
+        /* Don't invalidate the cache here.  It should get
+           invalidated on the lisp side, if/when we know
+           that rehashing is necessary. */
+        LispObj flags = ((hash_table_vector_header *) base)->flags;
+
+        if ((flags & nhash_keys_frozen_mask) &&
+            (((hash_table_vector_header *) base)->deleted_count > 0)) {
+          /* We're responsible for clearing out any deleted keys, since
+             lisp side can't do it without breaking the state machine
+          */
+          LispObj *pairp = base + hash_table_vector_header_count;
+          natural
+            npairs = (element_count - (hash_table_vector_header_count - 1)) >> 1;
+
+          while (npairs--) {
+            if ((pairp[1] == unbound) && (pairp[0] != unbound)) {
+              pairp[0] = slot_unbound;
+            }
+            pairp +=2;
+          }
+          ((hash_table_vector_header *) base)->deleted_count = 0;
+        }
+
+        if (flags & nhash_weak_mask) {
+          /* Weak table: clear the cached pair and defer to the weak
+             marking pass. */
+          ((hash_table_vector_header *) base)->cache_key = undefined;
+          ((hash_table_vector_header *) base)->cache_value = lisp_nil;
+          mark_weak_htabv(n);
+          return;
+        }
+      }
+
+      if (subtag == subtag_pool) {
+        /* Empty the pool's free list so pooled objects can be collected. */
+        deref(n, 1) = lisp_nil;
+      }
+      
+      if (subtag == subtag_weak) {
+        /* Don't mark the weak vector's population (last 1 or 2 elements). */
+        natural weak_type = (natural) base[2];
+        if (weak_type >> population_termination_bit) {
+          element_count -= 2;
+        } else {
+          element_count -= 1;
+        }
+      }
+
+      if (subtag == subtag_function) {
+        /* Skip the function's immediate (code) words; mark only
+           its constants. */
+#ifdef X8632
+	prefix_nodes = (natural) ((unsigned short) deref(base,1));
+
+	/* XXX bootstrapping */
+	if (prefix_nodes & 0x8000)
+	  prefix_nodes = element_count - (prefix_nodes & 0x7fff);
+#else
+	prefix_nodes = (natural) ((int) deref(base,1));
+#endif
+        if (prefix_nodes > element_count) {
+          Bug(NULL, "Function 0x" LISP " trashed",n);
+        }
+      }
+      base += (1+element_count);
+
+      element_count -= prefix_nodes;
+
+      /* Mark elements from last to first. */
+      while(element_count--) {
+        rmark(*--base);
+      }
+      if (subtag == subtag_weak) {
+        /* Splice the weak vector onto GCweakvll for later processing. */
+        deref(n, 1) = GCweakvll;
+        GCweakvll = untag(n);
+      }
+    }
+  }
+}
+
+
+/* 
+  This marks the node if it needs to; it returns true if the node
+  is either a hash table vector header or a cons/misc-tagged pointer
+  to ephemeral space.
+  Note that it  might be a pointer to ephemeral space even if it's
+  not pointing to the current generation.
+*/
+
+/* See comment above: marks N if it's a cons/misc pointer into
+   ephemeral space, and reports whether N needs memoization
+   (hash-vector header, or any node pointing into ephemeral space). */
+Boolean
+mark_ephemeral_root(LispObj n)
+{
+  int tag_n = fulltag_of(n);
+  natural eph_dnode;
+
+  if (nodeheader_tag_p(tag_n)) {
+    return (header_subtag(n) == subtag_hash_vector);
+  }
+ 
+  if (is_node_fulltag (tag_n)) {
+    eph_dnode = area_dnode(n, GCephemeral_low);
+    if (eph_dnode < GCn_ephemeral_dnodes) {
+      mark_root(n);             /* May or may not mark it */
+      return true;              /* but return true 'cause it's an ephemeral node */
+    }
+  }
+  return false;                 /* Not a heap pointer or not ephemeral */
+}
+  
+
+
+/* Sentinel fulltags used by rmark()'s link-inversion traversal to
+   record, in the reversed "prev" pointer, where to resume climbing. */
+#ifdef X8664
+#define RMARK_PREV_ROOT fulltag_imm_1 /* fulltag of 'undefined' value */
+#define RMARK_PREV_CAR fulltag_nil /* fulltag_cons + node_size. Coincidence ? I think not. */
+#else
+#define RMARK_PREV_ROOT fulltag_imm /* fulltag of 'undefined' value */
+#define RMARK_PREV_CAR fulltag_odd_fixnum 
+#endif
+
+
+/*
+  This wants to be in assembler even more than "mark_root" does.
+  For now, it does link-inversion: hard as that is to express in C,
+  reliable stack-overflow detection may be even harder ...
+*/
+/* Recursively mark the object referenced by N.  While C stack space
+   remains (current_stack_pointer() > GCstack_limit) it recurses
+   directly; once the stack runs low it switches to a link-inversion
+   (pointer-reversal) traversal that threads the "previous object"
+   pointer through the heap cells being visited, restoring each cell
+   as it climbs back out. */
+void
+rmark(LispObj n)
+{
+  int tag_n = fulltag_of(n);
+  bitvector markbits = GCmarkbits;
+  natural dnode, bits, *bitsp, mask;
+
+  if (!is_node_fulltag(tag_n)) {
+    return;
+  }
+
+  dnode = gc_area_dnode(n);
+  if (dnode >= GCndnodes_in_area) {
+    return;
+  }
+
+#ifdef X8632
+  /* Replace a tagged return address by the function it references. */
+  if (tag_n == fulltag_tra) {
+    if (*(unsigned char *)n == RECOVER_FN_OPCODE) {
+      n = *(LispObj *)(n + 1);
+      tag_n = fulltag_misc;
+      dnode = gc_area_dnode(n);
+    } else {
+      return;
+    }
+  }
+#endif
+#ifdef X8664
+  /* Replace a tagged return address by the function it references. */
+  if (tag_of(n) == tag_tra) {
+    if ((*((unsigned short *)n) == RECOVER_FN_FROM_RIP_WORD0) &&
+        (*((unsigned char *)(n+2)) == RECOVER_FN_FROM_RIP_BYTE2)) {
+      int sdisp = (*(int *) (n+3));
+      n = RECOVER_FN_FROM_RIP_LENGTH+n+sdisp;
+      tag_n = fulltag_function;
+      dnode = gc_area_dnode(n);
+    } else {
+      return;
+    }
+  }
+#endif
+
+  /* Already marked?  Then done; otherwise set the mark bit. */
+  set_bits_vars(markbits,dnode,bitsp,bits,mask);
+  if (bits & mask) {
+    return;
+  }
+  *bitsp = (bits | mask);
+
+  if (current_stack_pointer() > GCstack_limit) {
+    /* Plenty of C stack left: use plain recursion. */
+    if (tag_n == fulltag_cons) {
+      rmark(deref(n,1));
+      rmark(deref(n,0));
+    } else {
+      LispObj *base = (LispObj *) ptr_from_lispobj(untag(n));
+      natural
+        header = *((natural *) base),
+        subtag = header_subtag(header),
+        element_count = header_element_count(header),
+        total_size_in_bytes,
+        suffix_dnodes,
+	nmark;
+
+      tag_n = fulltag_of(header);
+
+#ifdef X8664
+      if ((nodeheader_tag_p(tag_n)) ||
+          (tag_n == ivector_class_64_bit)) {
+        total_size_in_bytes = 8 + (element_count<<3);
+      } else if (tag_n == ivector_class_32_bit) {
+        total_size_in_bytes = 8 + (element_count<<2);
+      } else {
+        /* ivector_class_other_bit contains 16-bit arrays & bitvector */
+        if (subtag == subtag_bit_vector) {
+          total_size_in_bytes = 8 + ((element_count+7)>>3);
+	} else if (subtag >= min_8_bit_ivector_subtag) {
+	  total_size_in_bytes = 8 + element_count;
+        } else {
+          total_size_in_bytes = 8 + (element_count<<1);
+        }
+      }
+#else
+      if ((tag_n == fulltag_nodeheader) ||
+	  (subtag <= max_32_bit_ivector_subtag)) {
+	total_size_in_bytes = 4 + (element_count<<2);
+      } else if (subtag <= max_8_bit_ivector_subtag) {
+	total_size_in_bytes = 4 + element_count;
+      } else if (subtag <= max_16_bit_ivector_subtag) {
+	total_size_in_bytes = 4 + (element_count<<1);
+      } else if (subtag == subtag_double_float_vector) {
+	total_size_in_bytes = 8 + (element_count<<3);
+      } else {
+	total_size_in_bytes = 4 + ((element_count+7)>>3);
+      }
+#endif
+
+      suffix_dnodes = ((total_size_in_bytes+(dnode_size-1))>>dnode_shift)-1;
+
+      /* Mark the object's remaining dnodes. */
+      if (suffix_dnodes) {
+        set_n_bits(GCmarkbits, dnode+1, suffix_dnodes);
+      }
+
+      /* Ivectors contain no nodes: nothing more to do. */
+      if (!nodeheader_tag_p(tag_n)) return;
+
+      if (subtag == subtag_hash_vector) {
+        /* Splice onto weakvll, then return */
+        /* In general, there's no reason to invalidate the cached
+           key/value pair here.  However, if the hash table's weak,
+           we don't want to retain an otherwise unreferenced key
+           or value simply because they're referenced from the
+           cache.  Clear the cached entries iff the hash table's
+           weak in some sense.
+        */
+        LispObj flags = ((hash_table_vector_header *) base)->flags;
+
+        if (flags & nhash_weak_mask) {
+          ((hash_table_vector_header *) base)->cache_key = undefined;
+          ((hash_table_vector_header *) base)->cache_value = lisp_nil;
+          mark_weak_htabv(n);
+          return;
+        }
+      }
+
+      if (subtag == subtag_pool) {
+        /* Empty the pool's free list. */
+        deref(n, 1) = lisp_nil;
+      }
+
+      if (subtag == subtag_weak) {
+        /* Don't mark the weak vector's population (last 1 or 2 elements). */
+        natural weak_type = (natural) base[2];
+        if (weak_type >> population_termination_bit)
+          element_count -= 2;
+        else
+          element_count -= 1;
+      }
+
+      nmark = element_count;
+
+      if (subtag == subtag_function) {
+        /* Skip the function's immediate (code) words. */
+#ifdef X8664
+	int code_words = (int)base[1];
+#else
+	int code_words = (unsigned short)base[1];
+
+	/* XXX bootstrapping */
+	if (code_words & 0x8000)
+	  code_words = element_count - (code_words & 0x7fff);
+#endif
+        if (code_words >= nmark) {
+          Bug(NULL,"Bad function at 0x" LISP,n);
+        }
+	nmark -= code_words;
+      }
+
+      /* Mark elements from last to first. */
+      while (nmark--) {
+        rmark(deref(n,element_count));
+        element_count--;
+      }
+
+      if (subtag == subtag_weak) {
+        /* Splice onto GCweakvll for the weak processing pass. */
+        deref(n, 1) = GCweakvll;
+        GCweakvll = untag(n);
+      }
+
+    }
+  } else {
+    /* This is all a bit more complicated than the PPC version:
+
+       - a symbol-vector can be referenced via either a FULLTAG-MISC
+       pointer or a FULLTAG-SYMBOL pointer.  When we've finished
+       marking the symbol-vector's elements, we need to know which tag
+       the object that pointed to the symbol-vector had originally.
+
+       - a function-vector can be referenced via either a FULLTAG-MISC
+       pointer or a FULLTAG-FUNCTION pointer.  That introduces pretty
+       much the same set of issues, but ...
+
+       - a function-vector can also be referenced via a TRA; the
+       offset from the TRA to the function header is arbitrary (though
+       we can probably put an upper bound on it, and it's certainly
+       not going to be more than 32 bits.)
+
+       - function-vectors contain a mixture of code and constants,
+       with a "boundary" word (that doesn't look like a valid
+       constant) in between them.  There are 56 unused bits in the
+       boundary word; the low 8 bits must be = to the constant
+       'function_boundary_marker'.  We can store the byte displacement
+       from the address of the object which references the function
+       (tagged fulltag_misc, fulltag_function, or tra) to the address
+       of the boundary marker when the function vector is first marked
+       and recover that offset when we've finished marking the
+       function vector.  (Note that the offset is signed; it's
+       probably simplest to keep it in the high 32 bits of the
+       boundary word.) 
+
+ So:
+
+       - while marking a CONS, the 'this' pointer has a 3-bit tag of
+       tag_list; the 4-bit fulltag indicates which cell is being
+       marked.
+
+       - while marking a gvector (other than a symbol-vector or
+       function-vector), the 'this' pointer is tagged tag_misc.
+       (Obviously, it alternates between fulltag_misc and
+       fulltag_nodeheader_0, arbitrarily.)  When we encounter the
+       gvector header when the 'this' pointer has been tagged as
+       fulltag_misc, we can restore 'this' to the header's address +
+       fulltag_misc and enter the 'climb' state.  (Note that this
+       value happens to be exactly what's in 'this' when the header's
+       encountered.)
+
+       - if we encounter a symbol-vector via the FULLTAG-MISC pointer
+       to the symbol (not very likely, but legal and possible), it's
+       treated exactly like the gvector case above.
+
+       - in the more likely case where a symbol-vector is referenced
+       via a FULLTAG-SYMBOL, we do the same loop as in the general
+       gvector case, backing up through the vector with 'this' tagged
+       as 'tag_symbol' (or fulltag_nodeheader_1); when we encounter
+       the symbol header, 'this' gets fulltag_symbol added to the
+       dnode-aligned address of the header, and we climb.
+
+       - if anything (fulltag_misc, fulltag_function, tra) references
+       an unmarked function vector, we store the byte offset
+       from the tagged reference to the address of the boundary word
+       in the high 32 bits of the boundary word, then we back up
+       through the function-vector's constants, with 'this' tagged
+       tag_function/ fulltag_immheader_0, until the (specially-tagged)
+       boundary word is encountered.  The displacement stored in the boundary
+       word is added to the aligned address of the  boundary word (restoring
+       the original 'this' pointer), and we climb.
+
+       Not that bad.
+    */
+       
+    LispObj prev = undefined, this = n, next, *base;
+    natural header, subtag, element_count, total_size_in_bytes, suffix_dnodes, *boundary;
+
+    if (tag_n == fulltag_cons) goto MarkCons;
+    goto MarkVector;
+
+  ClimbCdr:
+    /* Restore the cdr cell we reversed, then continue climbing. */
+    prev = deref(this,0);
+    deref(this,0) = next;
+
+  Climb:
+    /* Back up one level; prev's fulltag says what we were marking. */
+    next = this;
+    this = prev;
+    tag_n = fulltag_of(prev);
+    switch(tag_n) {
+    case tag_misc:
+    case fulltag_misc:
+#ifdef X8664
+    case tag_symbol:
+    case fulltag_symbol:
+    case tag_function:
+    case fulltag_function:
+#endif
+      goto ClimbVector;
+
+    case RMARK_PREV_ROOT:
+      /* prev is the 'undefined' sentinel: traversal complete. */
+      return;
+
+    case fulltag_cons:
+      goto ClimbCdr;
+
+    case RMARK_PREV_CAR:
+      goto ClimbCar;
+
+    default: abort();
+    }
+
+  DescendCons:
+    prev = this;
+    this = next;
+
+  MarkCons:
+    /* Mark the car; 'this' is retagged to note we're in the car. */
+    next = deref(this,1);
+#ifdef X8632
+    this += (RMARK_PREV_CAR-fulltag_cons);
+#else
+    this += node_size;
+#endif
+    tag_n = fulltag_of(next);
+    if (!is_node_fulltag(tag_n)) goto MarkCdr;
+    dnode = gc_area_dnode(next);
+    if (dnode >= GCndnodes_in_area) goto MarkCdr;
+    set_bits_vars(markbits,dnode,bitsp,bits,mask);
+    if (bits & mask) goto MarkCdr;
+    *bitsp = (bits | mask);
+    /* Reverse the link: car cell now points back at prev. */
+    deref(this,1) = prev;
+    if (tag_n == fulltag_cons) goto DescendCons;
+    goto DescendVector;
+
+  ClimbCar:
+    /* Restore the car cell we reversed, then mark the cdr. */
+    prev = deref(this,1);
+    deref(this,1) = next;
+
+  MarkCdr:
+    next = deref(this, 0);
+#ifdef X8632
+    this -= (RMARK_PREV_CAR-fulltag_cons);
+#else
+    this -= node_size;
+#endif
+    tag_n = fulltag_of(next);
+    if (!is_node_fulltag(tag_n)) goto Climb;
+    dnode = gc_area_dnode(next);
+    if (dnode >= GCndnodes_in_area) goto Climb;
+    set_bits_vars(markbits,dnode,bitsp,bits,mask);
+    if (bits & mask) goto Climb;
+    *bitsp = (bits | mask);
+    /* Reverse the link: cdr cell now points back at prev. */
+    deref(this, 0) = prev;
+    if (tag_n == fulltag_cons) goto DescendCons;
+    /* goto DescendVector; */
+
+  DescendVector:
+    prev = this;
+    this = next;
+
+  MarkVector:
+#ifdef X8664
+    if ((tag_n == fulltag_tra_0) ||
+        (tag_n == fulltag_tra_1)) {
+      /* TRA reference: recover the function, then record in the
+         boundary word the displacement back to the referencing cell. */
+      int disp = (*(int *) (n+3)) + RECOVER_FN_FROM_RIP_LENGTH;
+
+      base = (LispObj *) (untag(n-disp));
+      header = *((natural *) base);
+      subtag = header_subtag(header);
+      boundary = base + (int)(base[1]);
+      (((int *)boundary)[1]) = (int)(this-((LispObj)boundary));
+      this = (LispObj)(base)+fulltag_function;
+      /* Need to set the initial markbit here */
+      dnode = gc_area_dnode(this);
+      set_bit(markbits,dnode);
+    } else {
+      base = (LispObj *) ptr_from_lispobj(untag(this));
+      header = *((natural *) base);
+      subtag = header_subtag(header);
+      if (subtag == subtag_function) {
+        boundary = base + (int)(base[1]);
+        (((int *)boundary)[1]) = (int)(this-((LispObj)boundary));
+      }
+    }
+    element_count = header_element_count(header);
+    tag_n = fulltag_of(header);
+
+    if ((nodeheader_tag_p(tag_n)) ||
+        (tag_n == ivector_class_64_bit)) {
+      total_size_in_bytes = 8 + (element_count<<3);
+    } else if (tag_n == ivector_class_32_bit) {
+      total_size_in_bytes = 8 + (element_count<<2);
+    } else {
+      /* ivector_class_other_bit contains 16-bit arrays & bitvector */
+      if (subtag == subtag_bit_vector) {
+        total_size_in_bytes = 8 + ((element_count+7)>>3);
+      } else if (subtag >= min_8_bit_ivector_subtag) {
+        total_size_in_bytes = 8 + element_count;
+      } else {
+        total_size_in_bytes = 8 + (element_count<<1);
+      }
+    }
+#else
+    if (tag_n == fulltag_tra) {
+      LispObj fn = *(LispObj *)(n + 1);
+
+      base = (LispObj *)untag(fn);
+      header = *(natural *)base;
+      subtag = header_subtag(header);
+      boundary = base + imm_word_count(fn);
+
+      /*
+       * On x8632, the upper 24 bits of the boundary word are zero.
+       * Functions on x8632 can be no more than 2^16 words (or 2^24
+       * bytes) long (including the self-reference table but excluding
+       * any constants).  Therefore, we can do the same basic thing
+       * that the x8664 port does: namely, we keep the byte
+       * displacement from the address of the object (tagged tra or
+       * fulltag_misc) that references the function to the address of
+       * the boundary marker in those 24 bits, recovering it when
+       * we've finished marking the function vector.
+       */
+      *((int *)boundary) &= 0xff;
+      *((int *)boundary) |= ((this-(LispObj)boundary) << 8);
+      this = (LispObj)(base)+fulltag_misc;
+      dnode = gc_area_dnode(this);
+      set_bit(markbits,dnode);
+    } else {
+      base = (LispObj *) ptr_from_lispobj(untag(this));
+      header = *((natural *) base);
+      subtag = header_subtag(header);
+      if (subtag == subtag_function) {
+        boundary = base + imm_word_count(this);
+
+	*((int *)boundary) &= 0xff;
+        *((int *)boundary) |= ((this-((LispObj)boundary)) << 8);
+      }
+    }
+    element_count = header_element_count(header);
+    tag_n = fulltag_of(header);
+
+    if ((tag_n == fulltag_nodeheader) ||
+	(subtag <= max_32_bit_ivector_subtag)) {
+      total_size_in_bytes = 4 + (element_count<<2);
+    } else if (subtag <= max_8_bit_ivector_subtag) {
+      total_size_in_bytes = 4 + element_count;
+    } else if (subtag <= max_16_bit_ivector_subtag) {
+      total_size_in_bytes = 4 + (element_count<<1);
+    } else if (subtag == subtag_double_float_vector) {
+      total_size_in_bytes = 8 + (element_count<<3);
+    } else {
+      total_size_in_bytes = 4 + ((element_count+7)>>3);
+    }
+#endif
+
+    suffix_dnodes = ((total_size_in_bytes+(dnode_size-1))>>dnode_shift)-1;
+    
+    if (suffix_dnodes) {
+      set_n_bits(GCmarkbits, dnode+1, suffix_dnodes);
+    }
+    
+    /* Ivector: no nodes inside, climb back out. */
+    if (!nodeheader_tag_p(tag_n)) goto Climb;
+    
+    if (subtag == subtag_hash_vector) {
+      /* Splice onto weakvll, then climb */
+      LispObj flags = ((hash_table_vector_header *) base)->flags;
+      
+      if (flags & nhash_weak_mask) {
+        ((hash_table_vector_header *) base)->cache_key = undefined;
+        ((hash_table_vector_header *) base)->cache_value = lisp_nil;
+        dws_mark_weak_htabv(this);
+        element_count = hash_table_vector_header_count;
+      }
+    }
+
+    if (subtag == subtag_pool) {
+      deref(this, 1) = lisp_nil;
+    }
+
+    if (subtag == subtag_weak) {
+      natural weak_type = (natural) base[2];
+      if (weak_type >> population_termination_bit)
+        element_count -= 2;
+      else
+        element_count -= 1;
+    }
+
+    /* Point 'this' just past the last element; the loop below walks
+       backward toward the header. */
+    this = (LispObj)(base) + (tag_of(this))  + ((element_count+1) << node_shift);
+    goto MarkVectorLoop;
+
+  ClimbVector:
+    /* Restore the vector cell we reversed, then continue the loop. */
+    prev = indirect_node(this);
+    indirect_node(this) = next;
+
+  MarkVectorLoop:
+    this -= node_size;
+    next = indirect_node(this);
+#ifdef X8664
+    if ((tag_of(this) == tag_function) &&
+        (header_subtag(next) == function_boundary_marker)) goto MarkFunctionDone;
+#else
+    if ((tag_of(this) == tag_misc) &&
+        (header_subtag(next) == function_boundary_marker)) goto MarkFunctionDone;
+#endif
+
+    tag_n = fulltag_of(next);
+    if (nodeheader_tag_p(tag_n)) goto MarkVectorDone;
+    if (!is_node_fulltag(tag_n)) goto MarkVectorLoop;
+    dnode = gc_area_dnode(next);
+    if (dnode >= GCndnodes_in_area) goto MarkVectorLoop;
+    set_bits_vars(markbits,dnode,bitsp,bits,mask);
+    if (bits & mask) goto MarkVectorLoop;
+    *bitsp = (bits | mask);
+    /* Reverse the link: this cell now points back at prev. */
+    indirect_node(this) = prev;
+    if (tag_n == fulltag_cons) goto DescendCons;
+    goto DescendVector;
+
+  MarkVectorDone:
+    /* "next" is vector header; "this" tagged tag_misc or tag_symbol.
+       If  header subtag = subtag_weak_header, put it on weakvll */
+    this += node_size;          /* make it fulltag_misc/fulltag_symbol */
+
+    if (header_subtag(next) == subtag_weak) {
+      deref(this, 1) = GCweakvll;
+      GCweakvll = untag(this);
+    }
+    goto Climb;
+
+  MarkFunctionDone:
+    /* Recover the referencing cell from the displacement stashed in
+       the boundary word, then clear the stash. */
+    boundary = (LispObj *)(node_aligned(this));
+#ifdef X8664
+    this = ((LispObj)boundary) + (((int *)boundary)[1]);
+    (((int *)boundary)[1]) = 0;
+#else
+    this = ((LispObj)boundary) + ((*((int *)boundary)) >> 8);
+    ((int *)boundary)[0] &= 0xff;
+#endif
+    goto Climb;
+  }
+}
+
/* Return the address of the first object following the immediate
   (unboxed) vector whose tagged header is "header" and whose address
   is "start".  The physical size is computed from the header's
   element count and subtag, then the header word(s) are added and
   the total is rounded up to dnode alignment (16 bytes on x8664,
   8 bytes on x8632). */
LispObj *
skip_over_ivector(natural start, LispObj header)
{
  natural 
    element_count = header_element_count(header),
    subtag = header_subtag(header),
    nbytes;


#ifdef X8664
  /* On x8664, the element width is encoded in the header's fulltag
     class; the "other" class distinguishes bit, 8-bit, and 16-bit
     vectors by subtag. */
  switch (fulltag_of(header)) {
  case ivector_class_64_bit:
    nbytes = element_count << 3;
    break;
  case ivector_class_32_bit:
    nbytes = element_count << 2;
    break;
  case ivector_class_other_bit:
  default:
    if (subtag == subtag_bit_vector) {
      nbytes = (element_count+7)>>3;
    } else if (subtag >= min_8_bit_ivector_subtag) {
      nbytes = element_count;
    } else {
      nbytes = element_count << 1;
    }
  }
  /* 8 bytes of header, rounded up to a 16-byte boundary. */
  return ptr_from_lispobj(start+(~15 & (nbytes + 8 + 15)));
#else
  /* On x8632, subtags are ordered by element width, so ranges of
     subtag values select the width. */
  if (subtag <= max_32_bit_ivector_subtag) {
    nbytes = element_count << 2;
  } else if (subtag <= max_8_bit_ivector_subtag) {
    nbytes = element_count;
  } else if (subtag <= max_16_bit_ivector_subtag) {
    nbytes = element_count << 1;
  } else if (subtag == subtag_double_float_vector) {
    /* Header word plus alignment padding before the first double. */
    nbytes = 4 + (element_count << 3);
  } else {
    nbytes = (element_count+7) >> 3;
  }
  /* 4 bytes of header, rounded up to an 8-byte boundary. */
  return ptr_from_lispobj(start+(~7 & (nbytes + 4 + 7)));
#endif
}
+
+
+void
+check_refmap_consistency(LispObj *start, LispObj *end, bitvector refbits)
+{
+  LispObj x1, *base = start, *prev = start;
+  int tag;
+  natural ref_dnode, node_dnode;
+  Boolean intergen_ref;
+
+  while (start < end) {
+    x1 = *start;
+    prev = start;
+    tag = fulltag_of(x1);
+    if (immheader_tag_p(tag)) {
+      start = skip_over_ivector(ptr_to_lispobj(start), x1);
+    } else {
+      if (header_subtag(x1) == subtag_function) {
+#ifdef X8632
+	int skip = (unsigned short)deref(start,1);
+	/* XXX bootstrapping */
+	if (skip & 0x8000)
+	  skip = header_element_count(x1) - (skip & 0x7fff);
+#else
+        int skip = (int) deref(start,1);
+#endif
+        start += ((1+skip)&~1);
+        x1 = *start;
+        tag = fulltag_of(x1);
+      }
+      intergen_ref = false;
+      if (is_node_fulltag(tag)) {        
+        node_dnode = gc_area_dnode(x1);
+        if (node_dnode < GCndnodes_in_area) {
+          intergen_ref = true;
+        }
+      }
+      if (intergen_ref == false) {        
+        x1 = start[1];
+        tag = fulltag_of(x1);
+        if (is_node_fulltag(tag)) {        
+          node_dnode = gc_area_dnode(x1);
+          if (node_dnode < GCndnodes_in_area) {
+            intergen_ref = true;
+          }
+        }
+      }
+      if (intergen_ref) {
+        ref_dnode = area_dnode(start, base);
+        if (!ref_bit(refbits, ref_dnode)) {
+          Bug(NULL, "Missing memoization in doublenode at 0x" LISP "\n", start);
+          set_bit(refbits, ref_dnode);
+        }
+      }
+      start += 2;
+    }
+  }
+  if (start > end) {
+    Bug(NULL, "Overran end of range!");
+  }
+}
+
+
+
/* Mark the roots recorded in the area's reference bitmap: each set
   bit in a->refbits denotes a dnode that may contain an
   intergenerational reference.  Bits whose dnodes turn out to hold
   nothing worth remembering are cleared.  Weak hash vectors
   encountered in the range are handled according to
   lisp_global(WEAK_GC_METHOD). */
void
mark_memoized_area(area *a, natural num_memo_dnodes)
{
  bitvector refbits = a->refbits;
  LispObj *p = (LispObj *) a->low, x1, x2;
  natural inbits, outbits, bits, bitidx, *bitsp, nextbit, diff, memo_dnode = 0;
  Boolean keep_x1, keep_x2;
  natural hash_dnode_limit = 0;
  hash_table_vector_header *hashp = NULL;
  int mark_method = 3;          /* bit 0: mark x1; bit 1: mark x2 */

  if (GCDebug) {
    check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
  }

  /* The distinction between "inbits" and "outbits" is supposed to help us
     detect cases where "uninteresting" setfs have been memoized.  Storing
     NIL, fixnums, immediates (characters, etc.) or node pointers to static
     or readonly areas is definitely uninteresting, but other cases are
     more complicated (and some of these cases are hard to detect.)

     Some headers are "interesting", to the forwarder if not to us. 

     */

  /*
    We need to ensure that there are no bits set at or beyond
    "num_memo_dnodes" in the bitvector.  (This can happen as the EGC
    tenures/untenures things.)  We find bits by grabbing a fullword at
    a time and doing a cntlzw instruction; and don't want to have to
    check for (< memo_dnode num_memo_dnodes) in the loop.
    */

  {
    natural 
      bits_in_last_word = (num_memo_dnodes & bitmap_shift_count_mask),
      index_of_last_word = (num_memo_dnodes >> bitmap_shift);

    if (bits_in_last_word != 0) {
      /* Mask off any stray bits past num_memo_dnodes. */
      natural mask = ~((NATURAL1<<(nbits_in_word-bits_in_last_word))- NATURAL1);
      refbits[index_of_last_word] &= mask;
    }
  }
        
  set_bitidx_vars(refbits, 0, bitsp, bits, bitidx);
  inbits = outbits = bits;
  while (memo_dnode < num_memo_dnodes) {
    if (bits == 0) {
      /* No more set bits in this bitmap word: write back any bits we
         cleared, then advance to the next word. */
      int remain = nbits_in_word - bitidx;
      memo_dnode += remain;
      p += (remain+remain);
      if (outbits != inbits) {
        *bitsp = outbits;
      }
      bits = *++bitsp;
      inbits = outbits = bits;
      bitidx = 0;
    } else {
      /* Advance to the next set bit; each dnode covers two words. */
      nextbit = count_leading_zeros(bits);
      if ((diff = (nextbit - bitidx)) != 0) {
        memo_dnode += diff;
        bitidx = nextbit;
        p += (diff+diff);
      }
      x1 = *p++;
      x2 = *p++;
      bits &= ~(BIT0_MASK >> bitidx);

      if (hashp) {
        /* Currently walking through a weak hash vector. */
        Boolean force_x1 = false;
        if ((memo_dnode >= hash_dnode_limit) && (mark_method == 3)) {
          /* if vector_header_count is odd, x1 might be the last word of the header */
          force_x1 = (hash_table_vector_header_count & 1) && (memo_dnode == hash_dnode_limit);
          /* was marking header, switch to data */
          hash_dnode_limit = area_dnode(((LispObj *)hashp)
                                        + 1
                                        + header_element_count(hashp->header),
                                        a->low);
          /* In traditional weak method, don't mark vector entries at all. */
          /* Otherwise mark the non-weak elements only */
          mark_method = ((lisp_global(WEAK_GC_METHOD) == 0) ? 0 :
                         ((hashp->flags & nhash_weak_value_mask)
                          ? (1 + (hash_table_vector_header_count & 1))
                          : (2 - (hash_table_vector_header_count & 1))));
        }

        if (memo_dnode < hash_dnode_limit) {
          /* perhaps ignore one or both of the elements */
          if (!force_x1 && !(mark_method & 1)) x1 = 0;
          if (!(mark_method & 2)) x2 = 0;
        } else {
          hashp = NULL;
        }
      }

      if (header_subtag(x1) == subtag_hash_vector) {
        if (hashp) Bug(NULL, "header inside hash vector?");
        hash_table_vector_header *hp = (hash_table_vector_header *)(p - 2);
        if (hp->flags & nhash_weak_mask) {
          /* If header_count is odd, this cuts off the last header field */
          /* That case is handled specially above */
          hash_dnode_limit = memo_dnode + ((hash_table_vector_header_count) >>1);
          hashp = hp;
          mark_method = 3;
        }
      }

      keep_x1 = mark_ephemeral_root(x1);
      keep_x2 = mark_ephemeral_root(x2);
      if ((keep_x1 == false) && 
          (keep_x2 == false) &&
          (hashp == NULL)) {
        /* Neither word was an interesting reference: forget this
           memoized dnode. */
        outbits &= ~(BIT0_MASK >> bitidx);
      }
      memo_dnode++;
      bitidx++;
    }
  }
  if (GCDebug) {
    p = (LispObj *) a->low;
    check_refmap_consistency(p, p+(num_memo_dnodes << 1), refbits);
  }
}
+
+void
+mark_headerless_area_range(LispObj *start, LispObj *end)
+{
+  while (start < end) {
+    mark_root(*start++);
+  }
+}
+
/* Mark every object in [start, end).  The range contains a mixture
   of headered objects and conses: ivectors are skipped, gvector
   elements and cons cells are marked via mark_root().  Weak objects
   (weak hash vectors, pools, populations) are handled specially so
   that their weakly-held elements aren't marked here. */
void
mark_simple_area_range(LispObj *start, LispObj *end)
{
  LispObj x1, *base;
  int tag;

  while (start < end) {
    x1 = *start;
    tag = fulltag_of(x1);
    if (immheader_tag_p(tag)) {
      /* Unboxed vector: nothing inside to mark. */
      start = (LispObj *)ptr_from_lispobj(skip_over_ivector(ptr_to_lispobj(start), x1));
    } else if (!nodeheader_tag_p(tag)) {
      /* A cons cell: mark both words. */
      ++start;
      mark_root(x1);
      mark_root(*start++);
    } else {
      int subtag = header_subtag(x1);
      natural element_count = header_element_count(x1);
      /* header + elements, rounded up to an even number of words */
      natural size = (element_count+1 + 1) & ~1;

      if (subtag == subtag_hash_vector) {
        LispObj flags = ((hash_table_vector_header *) start)->flags;

        if (flags & nhash_weak_mask) {
          /* Weak hash vector: invalidate its lookup cache and defer
             marking of its contents to the weak-marking phase. */
          ((hash_table_vector_header *) start)->cache_key = undefined;
          ((hash_table_vector_header *) start)->cache_value = lisp_nil;
          mark_weak_htabv((LispObj)start);
	  element_count = 0;
	}
      } 
      if (subtag == subtag_pool) {
	/* Empty the pool: its first element is set to NIL. */
	start[1] = lisp_nil;
      }

      if (subtag == subtag_weak) {
	/* Don't mark the weakly-held element(s); splice the
	   population onto GCweakvll for later processing. */
	natural weak_type = (natural) start[2];
	if (weak_type >> population_termination_bit)
	  element_count -= 2;
	else
	  element_count -= 1; 
	start[1] = GCweakvll;
	GCweakvll = ptr_to_lispobj(start);
      }

      base = start + element_count + 1;
      if (subtag == subtag_function) {
	/* Don't mark the function's unboxed (immediate) words. */
#ifdef X8632
	natural skip = (unsigned short)start[1];

	/* XXX bootstrapping */
	if (skip & 0x8000)
	  skip = element_count - (skip & 0x7fff);

	element_count -= skip;

#else
	element_count -= (int)start[1];
#endif
      }
      /* Mark boxed elements from the last downward. */
      while(element_count--) {
	mark_root(*--base);
      }
      start += size;
    }
  }
}
+
+
+/* Mark a tstack area */
+void
+mark_tstack_area(area *a)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) (a->active),
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = (LispObj *) ptr_from_lispobj(*current);
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    mark_simple_area_range(current+2, end);
+  }
+}
+
+/*
+  It's really important that headers never wind up in tagged registers.
+  Those registers would (possibly) get pushed on the vstack and confuse
+  the hell out of this routine.
+
+  vstacks are just treated as a "simple area range", possibly with
+  an extra word at the top (where the area's active pointer points.)
+  */
+
+void
+mark_vstack_area(area *a)
+{
+  LispObj
+    *start = (LispObj *) a->active,
+    *end = (LispObj *) a->high;
+
+#if 0
+  fprintf(dbgout, "mark VSP range: 0x" LISP ":0x" LISP "\n", start, end);
+#endif
+  mark_headerless_area_range(start, end);
+}
+
/* No lisp objects on cstack on x86, at least x86-64 */
void
mark_cstack_area(area *a)
{
  /* Intentionally empty: the control stack holds no lisp nodes on
     x86, so there is nothing to mark. */
}
+
+
/* Mark the lisp objects in an exception frame: the GPRs that can
   hold tagged nodes, and (if possible) the function that the
   instruction pointer lies within. */
#ifdef X8664
void
mark_xp(ExceptionInformation *xp)
{
  natural *regs = (natural *) xpGPRvector(xp), dnode;
  LispObj rip;
    
  

  /* On x8664, these registers always contain tagged lisp values. */
  mark_root(regs[Iarg_z]);
  mark_root(regs[Iarg_y]);
  mark_root(regs[Iarg_x]);
  mark_root(regs[Isave3]);
  mark_root(regs[Isave2]);
  mark_root(regs[Isave1]);
  mark_root(regs[Isave0]);
  mark_root(regs[Ifn]);
  mark_root(regs[Itemp0]);
  mark_root(regs[Itemp1]);
  mark_root(regs[Itemp2]);
  /* If the RIP isn't pointing into a marked function,
     we can -maybe- recover from that if it's tagged as
     a TRA. */
  rip = regs[Iip];
  dnode = gc_area_dnode(rip);
  if ((dnode < GCndnodes_in_area) &&
      (! ref_bit(GCmarkbits,dnode))) {
    if (tag_of(rip) == tag_tra) {
      mark_root(rip);
    } else if ((fulltag_of(rip) == fulltag_function) &&
               (*((unsigned short *)rip) == RECOVER_FN_FROM_RIP_WORD0) &&
               (*((unsigned char *)(rip+2)) == RECOVER_FN_FROM_RIP_BYTE2) &&
               ((*(int *) (rip+3))) == -RECOVER_FN_FROM_RIP_LENGTH) {
      /* RIP is at a function's "recover fn from rip" instruction
         sequence; treat it as a reference to that function. */
      mark_root(rip);
    } else {
      Bug(NULL, "Can't find function for rip 0x%16lx",rip);
    }
  }
}
#else
void
mark_xp(ExceptionInformation *xp, natural node_regs_mask)
{
  natural *regs = (natural *) xpGPRvector(xp), dnode;
  LispObj eip;
  int i;   /* NOTE(review): unused here; kept for ABI/source symmetry */

  /* On x8632, only the registers flagged in node_regs_mask hold
     tagged lisp values. */
  if (node_regs_mask & (1<<0)) mark_root(regs[REG_EAX]);
  if (node_regs_mask & (1<<1)) mark_root(regs[REG_ECX]);
  if (regs[REG_EFL] & EFL_DF) {
    /* DF set means EDX should be treated as an imm reg */
    ;
  } else
    if (node_regs_mask & (1<<2)) mark_root(regs[REG_EDX]);

  if (node_regs_mask & (1<<3)) mark_root(regs[REG_EBX]);
  if (node_regs_mask & (1<<4)) mark_root(regs[REG_ESP]);
  if (node_regs_mask & (1<<5)) mark_root(regs[REG_EBP]);
  if (node_regs_mask & (1<<6)) mark_root(regs[REG_ESI]);
  if (node_regs_mask & (1<<7)) mark_root(regs[REG_EDI]);

  /* If the EIP isn't pointing into a marked function, we're probably
     in trouble.  We can -maybe- recover from that if it's tagged as a
     TRA. */
  eip = regs[Ieip];
  dnode = gc_area_dnode(eip);
  if ((dnode < GCndnodes_in_area) &&
      (! ref_bit(GCmarkbits,dnode))) {
    if (fulltag_of(eip) == fulltag_tra) {
      mark_root(eip);
    } else if ((fulltag_of(eip) == fulltag_misc) &&
               (header_subtag(header_of(eip)) == subtag_function) &&
               (*(unsigned char *)eip == RECOVER_FN_OPCODE) &&
	       (*(LispObj *)(eip + 1)) == eip) {
      /* EIP is at a function's self-referential "recover fn"
         sequence; treat it as a reference to that function. */
      mark_root(eip);
    } else {
      Bug(NULL, "Can't find function for eip 0x%4x", eip);
    }
  }
}
#endif
+
+/* A "pagelet" contains 32 doublewords.  The relocation table contains
+   a word for each pagelet which defines the lowest address to which
+   dnodes on that pagelet will be relocated.
+
+   The relocation address of a given pagelet is the sum of the relocation
+   address for the preceding pagelet and the number of bytes occupied by
+   marked objects on the preceding pagelet.
+*/
+
/* Fill in GCrelocptr: one entry per pagelet, giving the address to
   which the first marked dnode on that pagelet will be relocated.
   Returns the address corresponding to the first clear mark bit
   (nothing below that point will move), or the final relocation
   address if every dnode is marked. */
LispObj
calculate_relocation()
{
  LispObj *relocptr = GCrelocptr;
  LispObj current = GCareadynamiclow;
  bitvector 
    markbits = GCdynamic_markbits;
  qnode *q = (qnode *) markbits;
  natural npagelets = ((GCndynamic_dnodes_in_area+(nbits_in_word-1))>>bitmap_shift);
  natural thesebits;
  LispObj first = 0;

  if (npagelets) {
    do {
      *relocptr++ = current;
      thesebits = *markbits++;
      if (thesebits == ALL_ONES) {
        /* Fully-marked pagelet: everything on it survives. */
        current += nbits_in_word*dnode_size;
        q += 4; /* sic */
      } else {
        if (!first) {
          /* First pagelet containing a clear bit: locate the first
             unmarked dnode within it. */
          first = current;
          while (thesebits & BIT0_MASK) {
            first += dnode_size;
            thesebits += thesebits;   /* shift left one bit */
          }
        }
        /* We're counting bits in qnodes in the wrong order here, but
           that's OK.  I think ... */
        current += one_bits(*q++);
        current += one_bits(*q++);
        current += one_bits(*q++);
        current += one_bits(*q++);
      }
    } while(--npagelets);
  }
  /* Sentinel entry for the pagelet past the end. */
  *relocptr++ = current;
  return first ? first : current;
}
+
+
#if 0
/* Reference version, disabled in favor of the arch-specific
   versions below: the forwarding address of the object at "dnode"
   is its pagelet's relocation address plus the contribution of the
   marked dnodes which precede it on that pagelet, plus the tag. */
LispObj
dnode_forwarding_address(natural dnode, int tag_n)
{
  natural pagelet, nbits;
  unsigned int near_bits;
  LispObj new;

  if (GCDebug) {
    if (! ref_bit(GCdynamic_markbits, dnode)) {
      Bug(NULL, "unmarked object being forwarded!\n");
    }
  }

  pagelet = dnode >> bitmap_shift;
  nbits = dnode & bitmap_shift_count_mask;
  near_bits = ((unsigned int *)GCdynamic_markbits)[dnode>>(dnode_shift+1)];

  if (nbits < 32) {
    new = GCrelocptr[pagelet] + tag_n;
    /* Increment "new" by the count of 1 bits which precede the dnode */
    if (near_bits == 0xffffffff) {
      return (new + (nbits << 4));
    } else {
      near_bits &= (0xffffffff00000000 >> nbits);
      if (nbits > 15) {
        new += one_bits(near_bits & 0xffff);
      }
      return (new + (one_bits(near_bits >> 16))); 
    }
  } else {
    new = GCrelocptr[pagelet+1] + tag_n;
    nbits = 64-nbits;

    if (near_bits == 0xffffffff) {
      return (new - (nbits << 4));
    } else {
      near_bits &= (1<<nbits)-1;
      if (nbits > 15) {
        new -= one_bits(near_bits >> 16);
      }
      return (new -  one_bits(near_bits & 0xffff));
    }
  }
}
#else
#ifdef X8664
/* Quicker, dirtier: compute the forwarding address of the object at
   "dnode" by adding (via one_bits()) the contribution of the marked
   dnodes preceding it on its pagelet to the pagelet's relocation
   address, plus the tag. */
LispObj
dnode_forwarding_address(natural dnode, int tag_n)
{
  natural pagelet, nbits, marked;
  LispObj new;

  if (GCDebug) {
    if (! ref_bit(GCdynamic_markbits, dnode)) {
      Bug(NULL, "unmarked object being forwarded!\n");
    }
  }

  pagelet = dnode >> bitmap_shift;
  nbits = dnode & bitmap_shift_count_mask;
  new = GCrelocptr[pagelet] + tag_n;
  if (nbits) {
    /* Accumulate the preceding marked dnodes 16 bits at a time. */
    marked = (GCdynamic_markbits[dnode>>bitmap_shift]) >> (64-nbits);
    while (marked) {
      new += one_bits((qnode)marked);
      marked >>=16;
    }
  }
  return new;
}
#endif
#ifdef X8632
/* x8632 version: same idea as above, working in 32-bit bitmap words
   and 16-bit half-words. */
LispObj
dnode_forwarding_address(natural dnode, int tag_n)
{
  natural pagelet, nbits;
  unsigned short near_bits;
  LispObj new;

  if (GCDebug) {
    if (! ref_bit(GCdynamic_markbits, dnode)) {
      Bug(NULL, "unmarked object being forwarded!\n");
    }
  }

  pagelet = dnode >> 5;
  nbits = dnode & 0x1f;
  /* On little-endian x86, we have to flip the low bit of dnode>>4 to
     get the near_bits from the appropriate half-word. */
  near_bits = ((unsigned short *)GCdynamic_markbits)[(dnode>>4)^1];

  if (nbits < 16) {
    new = GCrelocptr[pagelet] + tag_n;
    /* Increment "new" by the count of 1 bits which precede the dnode */
    if (near_bits == 0xffff) {
      return (new + (nbits << 3));
    } else {
      near_bits &= (0xffff0000 >> nbits);
      if (nbits > 7) {
        new += one_bits(near_bits & 0xff);
      }
      return (new + (one_bits(near_bits >> 8))); 
    }
  } else {
    /* Work backward from the next pagelet's relocation address. */
    new = GCrelocptr[pagelet+1] + tag_n;
    nbits = 32-nbits;

    if (near_bits == 0xffff) {
      return (new - (nbits << 3));
    } else {
      near_bits &= (1<<nbits)-1;
      if (nbits > 7) {
        new -= one_bits(near_bits >> 8);
      }
      return (new - one_bits(near_bits & 0xff));
    }
  }
}
#endif
#endif
+
+LispObj
+locative_forwarding_address(LispObj obj)
+{
+  int tag_n = fulltag_of(obj);
+  natural dnode = gc_dynamic_area_dnode(obj);
+
+
+  if ((dnode >= GCndynamic_dnodes_in_area) ||
+      (obj < GCfirstunmarked)) {
+    return obj;
+  }
+
+  return dnode_forwarding_address(dnode, tag_n);
+}
+
+
+void
+forward_headerless_range(LispObj *range_start, LispObj *range_end)
+{
+  LispObj *p = range_start;
+
+  while (p < range_end) {
+    update_noderef(p);
+    p++;
+  }
+}
+
/* Forward (update) all node references in [range_start, range_end).
   Hash vectors with key tracking get nhash_key_moved_mask set in
   their flags if any key moves; functions' unboxed words are
   skipped; ivectors are skipped entirely. */
void
forward_range(LispObj *range_start, LispObj *range_end)
{
  LispObj *p = range_start, node, new;
  int tag_n;
  natural nwords;
  hash_table_vector_header *hashp;

  while (p < range_end) {
    node = *p;
    tag_n = fulltag_of(node);
    if (immheader_tag_p(tag_n)) {
      /* Unboxed vector: nothing to forward. */
      p = (LispObj *) skip_over_ivector((natural) p, node);
    } else if (nodeheader_tag_p(tag_n)) {
      nwords = header_element_count(node);
      /* Round the element count up to odd, so header + elements
         fills an even number of words. */
      nwords += (1 - (nwords&1));
      if ((header_subtag(node) == subtag_hash_vector) &&
          ((((hash_table_vector_header *)p)->flags) & nhash_track_keys_mask)) {
        /* Key-tracking hash vector: forward the header fields, then
           the key/value pairs, noting whether any key moves. */
        natural skip = hash_table_vector_header_count-1;
        hashp = (hash_table_vector_header *) p;
        p++;
        nwords -= skip;
        while(skip--) {
          update_noderef(p);
          p++;
        }
        /* "nwords" is odd at this point: there are (floor nwords 2)
           key/value pairs to look at, and then an extra word for
           alignment.  Process them two at a time, then bump "p"
           past the alignment word. */
        nwords >>= 1;
        while(nwords--) {
          if (update_noderef(p) && hashp) {
            /* A key moved: the table will need rehashing. */
            hashp->flags |= nhash_key_moved_mask;
            hashp = NULL;
          }
          p++;
          update_noderef(p);
          p++;
        }
        *p++ = 0;
      } else {
	if (header_subtag(node) == subtag_function) {
	  /* Skip the function's unboxed (immediate) words. */
#ifdef X8632
	  int skip = (unsigned short)(p[1]);

	  /* XXX bootstrapping */
	  if (skip & 0x8000)
	    skip = header_element_count(node) - (skip & 0x7fff);

#else
	  int skip = (int)(p[1]);
#endif
	  p += skip;
	  nwords -= skip;
	}
        p++;
        while(nwords--) {
          update_noderef(p);
          p++;
        }
      }
    } else {
      /* A cons cell: forward both words. */
      new = node_forwarding_address(node);
      if (new != node) {
        *p = new;
      }
      p++;
      update_noderef(p);
      p++;
    }
  }
}
+
+
+
+
+
+
+/* Forward a tstack area */
+void
+forward_tstack_area(area *a)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) a->active,
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = ptr_from_lispobj(*current);
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    forward_range(current+2, end);
+  }
+}
+
+/* Forward a vstack area */
+void
+forward_vstack_area(area *a)
+{
+  LispObj
+    *p = (LispObj *) a->active,
+    *q = (LispObj *) a->high;
+
+  forward_headerless_range(p, q);
+}
+
/* Nothing of interest on x86 cstack */
void
forward_cstack_area(area *a)
{
  /* Intentionally empty: the control stack holds no lisp nodes on
     x86, so there is nothing to forward. */
}
+
/* Forward the node registers (and the instruction pointer, as a
   locative) in an exception frame. */
#ifdef X8664
void
forward_xp(ExceptionInformation *xp)
{
  natural *regs = (natural *) xpGPRvector(xp);

  /* On x8664, these registers always contain tagged lisp values. */
  update_noderef(&(regs[Iarg_z]));
  update_noderef(&(regs[Iarg_y]));
  update_noderef(&(regs[Iarg_x]));
  update_noderef(&(regs[Isave3]));
  update_noderef(&(regs[Isave2]));
  update_noderef(&(regs[Isave1]));
  update_noderef(&(regs[Isave0]));
  update_noderef(&(regs[Ifn]));
  update_noderef(&(regs[Itemp0]));
  update_noderef(&(regs[Itemp1]));
  update_noderef(&(regs[Itemp2]));
  update_locref(&(regs[Iip]));
}
#else
void
forward_xp(ExceptionInformation *xp, natural node_regs_mask)
{
  natural *regs = (natural *) xpGPRvector(xp);

  /* On x8632, only the registers flagged in node_regs_mask hold
     tagged lisp values. */
  if (node_regs_mask & (1<<0)) update_noderef(&regs[REG_EAX]);
  if (node_regs_mask & (1<<1)) update_noderef(&regs[REG_ECX]);

  if (regs[REG_EFL] & EFL_DF) {
    /* then EDX is an imm reg */
    ;
  } else
    if (node_regs_mask & (1<<2)) update_noderef(&regs[REG_EDX]);

  if (node_regs_mask & (1<<3)) update_noderef(&regs[REG_EBX]);
  if (node_regs_mask & (1<<4)) update_noderef(&regs[REG_ESP]);
  if (node_regs_mask & (1<<5)) update_noderef(&regs[REG_EBP]);
  if (node_regs_mask & (1<<6)) update_noderef(&regs[REG_ESI]);
  if (node_regs_mask & (1<<7)) update_noderef(&regs[REG_EDI]);

  update_locref(&(regs[Iip]));
}
#endif
+
+
/* Forward the exception contexts belonging to a TCR: its current
   gc_context (if any), the TCR's saved node registers (x8632), and
   every frame on its xframe list. */
void
forward_tcr_xframes(TCR *tcr)
{
  xframe_list *xframes;
  ExceptionInformation *xp;

  xp = tcr->gc_context;
  if (xp) {
#ifdef X8664
    forward_xp(xp);
#else
    forward_xp(xp, tcr->node_regs_mask);

    update_noderef(&tcr->save0);
    update_noderef(&tcr->save1);
    update_noderef(&tcr->save2);
    update_noderef(&tcr->save3);
    update_noderef(&tcr->next_method_context);
#endif
  }
  for (xframes = tcr->xframe; xframes; xframes = xframes->prev) {
#ifdef X8664
    forward_xp(xframes->curr);
#else
    forward_xp(xframes->curr, xframes->node_regs_mask);
#endif
  }
}
+
+
#ifdef X8632
/* Rewrite a function's self-references so they point at the
   function's (possibly new) address "node".  The self-reference
   byte offsets appear to be stored as a 0-terminated sequence read
   backward from the function's last unboxed word.
   NOTE(review): offset-table layout inferred from this loop;
   confirm against the x8632 function-object layout. */
void
update_self_references(LispObj *node)
{
  LispObj fn = fulltag_misc + (LispObj)node;
  unsigned char *p = (unsigned char *)node;
  natural i = imm_word_count(fn);

  if (i) {
    natural offset = node[--i];

    while (offset) {
      *(LispObj *)(p + offset) = fn;
      offset = node[--i];
    }
  }    
}
#endif
+
+/*
+  Compact the dynamic heap (from GCfirstunmarked through its end.)
+  Return the doublenode address of the new freeptr.
+  */
+
+LispObj
+compact_dynamic_heap()
+{
+  LispObj *src = ptr_from_lispobj(GCfirstunmarked), *dest = src, node, new, *current,  *prev = NULL;
+  natural 
+    elements, 
+    dnode = gc_area_dnode(GCfirstunmarked), 
+    node_dnodes = 0, 
+    imm_dnodes = 0, 
+    bitidx, 
+    *bitsp, 
+    bits, 
+    nextbit, 
+    diff;
+  int tag;
+  bitvector markbits = GCmarkbits;
+
+  if (dnode < GCndnodes_in_area) {
+    lisp_global(FWDNUM) += (1<<fixnum_shift);
+  
+    set_bitidx_vars(markbits,dnode,bitsp,bits,bitidx);
+    while (dnode < GCndnodes_in_area) {
+      if (bits == 0) {
+        int remain = nbits_in_word - bitidx;
+        dnode += remain;
+        src += (remain+remain);
+        bits = *++bitsp;
+        bitidx = 0;
+      } else {
+        /* Have a non-zero markbits word; all bits more significant
+           than "bitidx" are 0.  Count leading zeros in "bits"
+           (there'll be at least "bitidx" of them.)  If there are more
+           than "bitidx" leading zeros, bump "dnode", "bitidx", and
+           "src" by the difference. */
+        nextbit = count_leading_zeros(bits);
+        if ((diff = (nextbit - bitidx)) != 0) {
+          dnode += diff;
+          bitidx = nextbit;
+          src += (diff+diff);
+        }
+        prev = current;
+        current = src;
+        if (GCDebug) {
+          if (dest != ptr_from_lispobj(locative_forwarding_address(ptr_to_lispobj(src)))) {
+            Bug(NULL, "Out of synch in heap compaction.  Forwarding from 0x" LISP " to 0x" LISP ",\n expected to go to 0x" LISP "\n", 
+                src, dest, locative_forwarding_address(ptr_to_lispobj(src)));
+          }
+        }
+
+        node = *src++;
+        tag = fulltag_of(node);
+        if (nodeheader_tag_p(tag)) {
+          elements = header_element_count(node);
+          node_dnodes = (elements+2)>>1;
+          dnode += node_dnodes;
+	  if (header_subtag(node) == subtag_function) {
+#ifdef X8632
+	    LispObj *f = dest;
+	    int skip = imm_word_count(fulltag_misc + (LispObj)current);
+#else
+	    int skip = *((int *)src);
+#endif
+	    *dest++ = node;
+            if (skip) {
+              elements -= skip;
+              while(skip--) {
+                *dest++ = *src++;
+              }
+#ifdef X8632
+              update_self_references(f);
+#endif
+            }
+	    while(elements--) {
+	      *dest++ = node_forwarding_address(*src++);
+	    }
+	    if (((LispObj)src) & node_size) {
+	      src++;
+	      *dest++ = 0;
+	    }
+	  } else {
+	    if ((header_subtag(node) == subtag_hash_vector) &&
+		(((hash_table_vector_header *) (src-1))->flags & nhash_track_keys_mask)) {
+	      hash_table_vector_header *hashp = (hash_table_vector_header *) dest;
+	      int skip = (sizeof(hash_table_vector_header)/sizeof(LispObj))-1;
+	      
+	      *dest++ = node;
+	      elements -= skip;
+	      while(skip--) {
+		*dest++ = node_forwarding_address(*src++);
+	      }
+	      /* There should be an even number of (key/value) pairs in elements;
+		 an extra alignment word follows. */
+	      elements >>= 1;
+	      while (elements--) {
+		if (hashp) {
+		  node = *src++;
+		  new = node_forwarding_address(node);
+		  if (new != node) {
+		    hashp->flags |= nhash_key_moved_mask;
+		    hashp = NULL;
+		    *dest++ = new;
+		  } else {
+		    *dest++ = node;
+		  }
+		} else {
+		  *dest++ = node_forwarding_address(*src++);
+		}
+		*dest++ = node_forwarding_address(*src++);
+	      }
+	      *dest++ = 0;
+	      src++;
+	    } else {
+	      *dest++ = node;
+	      *dest++ = node_forwarding_address(*src++);
+	      while(--node_dnodes) {
+		*dest++ = node_forwarding_address(*src++);
+		*dest++ = node_forwarding_address(*src++);
+	      }
+	    }
+          }
+          set_bitidx_vars(markbits,dnode,bitsp,bits,bitidx);
+        } else if (immheader_tag_p(tag)) {
+          *dest++ = node;
+          *dest++ = *src++;
+          elements = header_element_count(node);
+          tag = header_subtag(node);
+
+#ifdef X8664
+          switch(fulltag_of(tag)) {
+          case ivector_class_64_bit:
+            imm_dnodes = ((elements+1)+1)>>1;
+            break;
+          case ivector_class_32_bit:
+            imm_dnodes = (((elements+2)+3)>>2);
+            break;
+          case ivector_class_other_bit:
+            if (tag == subtag_bit_vector) {
+              imm_dnodes = (((elements+64)+127)>>7);
+	    } else if (tag >= min_8_bit_ivector_subtag) {
+	      imm_dnodes = (((elements+8)+15)>>4);
+            } else {
+              imm_dnodes = (((elements+4)+7)>>3);
+            }
+          }
+#endif
+#ifdef X8632
+          if (tag <= max_32_bit_ivector_subtag) {
+            imm_dnodes = (((elements+1)+1)>>1);
+          } else if (tag <= max_8_bit_ivector_subtag) {
+            imm_dnodes = (((elements+4)+7)>>3);
+          } else if (tag <= max_16_bit_ivector_subtag) {
+            imm_dnodes = (((elements+2)+3)>>2);
+          } else if (tag == subtag_bit_vector) {
+            imm_dnodes = (((elements+32)+63)>>6);
+          } else {
+            imm_dnodes = elements+1;
+          }
+#endif
+
+          dnode += imm_dnodes;
+          while (--imm_dnodes) {
+            *dest++ = *src++;
+            *dest++ = *src++;
+          }
+          set_bitidx_vars(markbits,dnode,bitsp,bits,bitidx);
+        } else {
+          *dest++ = node_forwarding_address(node);
+          *dest++ = node_forwarding_address(*src++);
+          bits &= ~(BIT0_MASK >> bitidx);
+          dnode++;
+          bitidx++;
+        }
+      }
+    }
+  }
+  return ptr_to_lispobj(dest);
+}
+
+
+#define PURIFY_IVECTORS (1<<0)
+#define PURIFY_FUNCTIONS (1<<1)
+#define PURIFY_ALL (-1)
+#define PURIFY_NOTHING (0)      /* update forwarding pointers, don't copy */
+
+
+
+/* Return true if THING is a function object whose lfbits mark it as
+   neither a combined method nor a generic function, i.e. a function
+   that can be treated as immutable and copied to read-only space. */
+Boolean
+immutable_function_p(LispObj thing)
+{
+  LispObj header = header_of(thing), lfbits;
+  if (header_subtag(header) == subtag_function) {
+    /* lfbits live in the last element of the function vector. */
+    lfbits = deref(thing,header_element_count(header));
+    if (((lfbits & (lfbits_cm_mask | lfbits_method_mask)) !=
+         lfbits_cm_mask) &&
+        ((lfbits & (lfbits_gfn_mask | lfbits_method_mask)) !=
+         lfbits_gfn_mask)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+    
+/*
+  Total the (physical) byte sizes of all ivectors in the indicated memory range
+*/
+
+/* Walk [start,end) and total the physical (header-included,
+   dnode-aligned) byte sizes of all ivectors; when include_functions
+   is true, also count immutable function objects.  Used to size the
+   read-only area before purification. */
+natural
+unboxed_bytes_in_range(LispObj *start, LispObj *end, Boolean include_functions)
+{
+  natural total=0, elements, tag, subtag, bytes;
+  LispObj header;
+
+  while (start < end) {
+    header = *start;
+    tag = fulltag_of(header);
+    
+    if ((nodeheader_tag_p(tag)) ||
+        (immheader_tag_p(tag))) {
+      elements = header_element_count(header);
+      if (nodeheader_tag_p(tag)) {
+        /* gvector: count it only if it's an immutable function. */
+        if (include_functions && immutable_function_p((LispObj)start)) {
+          total += (((elements+2)&~1)<<node_shift);
+        }
+        start += ((elements+2) & ~1);
+      } else {
+        subtag = header_subtag(header);
+
+        /* ivector: compute header + payload bytes by element width. */
+#ifdef X8664
+        switch(fulltag_of(header)) {
+        case ivector_class_64_bit:
+          bytes = 8 + (elements<<3);
+          break;
+        case ivector_class_32_bit:
+          bytes = 8 + (elements<<2);
+          break;
+        case ivector_class_other_bit:
+        default:
+          if (subtag == subtag_bit_vector) {
+            bytes = 8 + ((elements+7)>>3);
+	  } else if (subtag >= min_8_bit_ivector_subtag) {
+	    bytes = 8 + elements;
+          } else {
+            bytes = 8 + (elements<<1);
+          }
+        }
+#endif
+#ifdef X8632
+          if (subtag <= max_32_bit_ivector_subtag) {
+            bytes = 4 + (elements<<2);
+          } else if (subtag <= max_8_bit_ivector_subtag) {
+            bytes = 4 + elements;
+          } else if (subtag <= max_16_bit_ivector_subtag) {
+            bytes = 4 + (elements<<1);
+          } else if (subtag == subtag_double_float_vector) {
+            bytes = 8 + (elements<<3);
+          } else {
+            bytes = 4 + ((elements+7)>>3);
+          }
+#endif
+
+        /* Round to a whole number of dnodes, as the allocator does. */
+        bytes = (bytes+dnode_size-1) & ~(dnode_size-1);
+        total += bytes;
+        start += (bytes >> node_shift);
+      }
+    } else {
+      /* Not a header: a cons cell, one dnode (two words). */
+      start += 2;
+    }
+  }
+  return total;
+}
+
+
+/* Commit any additional pages needed so that NEED bytes past
+   TARGET->active are writable.  Page-aligns both limits; no-op when
+   the active page already covers the requested space. */
+void
+ensure_writable_space(area *target, natural need)
+{
+  BytePtr
+    oldlimit = (BytePtr)align_to_power_of_2(target->active,log2_page_size),
+    newlimit = (BytePtr)align_to_power_of_2(target->active+need,log2_page_size);
+  if (newlimit > oldlimit) {
+    CommitMemory(oldlimit,newlimit-oldlimit);
+  }
+}
+
+/* Copy OBJ (tagged) to the end of DEST, leaving forward_marker /
+   new-address pairs in each dnode of the old object so later
+   references can be resolved.  Returns the new (untagged base + DISP)
+   value; callers pass the original fulltag as DISP to retag it. */
+LispObj
+purify_displaced_object(LispObj obj, area *dest, natural disp)
+{
+  BytePtr 
+    free = dest->active,
+    *old = (BytePtr *) ptr_from_lispobj(untag(obj));
+  LispObj 
+    header = header_of(obj), 
+    new;
+  natural 
+    start = (natural)old,
+    physbytes;
+  int
+    header_tag = fulltag_of(header);
+#ifdef X8632
+  Boolean
+    is_function = (header_subtag(header)==subtag_function);
+#endif
+
+  /* Physical size: ivector (via skip_over_ivector), gvector, or cons. */
+  if (immheader_tag_p(header_tag)) {
+    physbytes = ((natural)(skip_over_ivector(start,header))) - start;
+  } else if (nodeheader_tag_p(header_tag)) {
+    physbytes = ((header_element_count(header)+2)&~1) << node_shift;
+  } else {
+    physbytes = dnode_size;
+  }
+  
+  ensure_writable_space(dest, physbytes);
+  dest->active += physbytes;
+
+  new = ptr_to_lispobj(free)+disp;
+
+  memcpy(free, (BytePtr)old, physbytes);
+
+#ifdef X8632
+  /* x8632 functions contain self-references that must track the move. */
+  if (is_function) {
+    update_self_references((LispObj *)free);
+  }
+#endif
+
+
+  /* Overwrite the old object with forwarding pairs, one per dnode. */
+  while(physbytes) {
+    *old++ = (BytePtr) forward_marker;
+    *old++ = (BytePtr) free;
+    free += dnode_size;
+    physbytes -= dnode_size;
+  }
+  return new;
+}
+
+/* Copy OBJ to DEST, preserving its fulltag in the returned value. */
+LispObj
+purify_object(LispObj obj, area *dest)
+{
+  return purify_displaced_object(obj, dest, fulltag_of(obj));
+}
+
+/* If *REF points into (LOW,HIGH), either resolve an existing
+   forwarding pointer or — when WHAT selects the referent's class
+   (ivectors other than macptrs, immutable functions, or everything
+   for PURIFY_ALL) — copy the referent to DEST and update *REF.
+   Returns true if *REF changed. */
+Boolean
+purify_locref(LispObj *ref,  BytePtr low, BytePtr high, area *dest, int what)
+{
+  LispObj obj = *ref, header, new;
+  natural tag = fulltag_of(obj), header_tag;
+  Boolean changed = false;
+
+  if ((((BytePtr)ptr_from_lispobj(obj)) > low) &&
+      (((BytePtr)ptr_from_lispobj(obj)) < high)) {
+    header = deref(obj, 0);
+    if (header == forward_marker) { /* already copied */
+      *ref = (untag(deref(obj,1)) + tag);
+      changed = true;
+    } else {
+      header_tag = fulltag_of(header);
+      if ((what == PURIFY_ALL) ||
+          ((what & PURIFY_IVECTORS) &&
+           immheader_tag_p(header_tag) &&
+           header_subtag(header) != subtag_macptr) ||
+          ((what & PURIFY_FUNCTIONS) &&
+           immutable_function_p(obj))) {
+        new = purify_object(obj, dest);
+        *ref = new;
+        changed = (new != obj);
+      }
+    }
+  }
+  return changed;
+}
+
+/* Purify the node reference at *REF.  Tagged return addresses (tra)
+   are never copied — WHAT is forced to PURIFY_NOTHING so forwarding
+   pointers are still resolved.  Non-node tags are ignored. */
+Boolean
+copy_reference(LispObj *ref, BytePtr low, BytePtr high, area *dest, int what)
+{
+  LispObj obj = *ref;
+  natural tag = fulltag_of(obj);
+
+  if (
+#ifdef X8664
+      (tag == fulltag_tra_0) || (tag == fulltag_tra_1)
+#endif
+#ifdef X8632
+      tag == fulltag_tra
+#endif
+      ) {
+    what = PURIFY_NOTHING;
+  }
+  if (is_node_fulltag(tag)) {
+    return purify_locref(ref,low,high,dest,what);
+  }
+  return false;
+}
+
+
+
+/* Walk the GCABLE_POINTERS chain of xmacptrs, purifying each link. */
+void
+purify_gcable_ptrs(BytePtr low, BytePtr high, area *to, int what)
+{
+  LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next;
+
+  while ((*prev) != (LispObj)NULL) {
+    copy_reference(prev, low, high, to, what);
+    /* Re-read *prev: copy_reference may have updated it. */
+    next = *prev;
+    prev = &(((xmacptr *)ptr_from_lispobj(untag(next)))->link);
+  }
+}
+
+/* Purify every word in [start,end); used for ranges (e.g. value
+   stacks) known to contain no headers. */
+void 
+purify_headerless_range(LispObj *start, LispObj *end, BytePtr low, BytePtr high, area *to, int what)
+{
+  while (start < end) { 
+    copy_reference(start, low, high, to, what);
+    start++;
+  }
+}
+   
+/* Purify all node references in the heap range [start,end), parsing
+   object headers as it goes: ivectors are skipped, hash vectors with
+   key tracking get nhash_key_moved_mask set when a key moves, and the
+   unboxed words of functions are skipped. */
+void
+purify_range(LispObj *start, LispObj *end, BytePtr low, BytePtr high, area *to, int what)
+{
+  LispObj header;
+  unsigned tag;
+  natural nwords;
+  hash_table_vector_header *hashp;
+
+  while (start < end) {
+    header = *start;
+    if (header == forward_marker) {
+      /* Already-forwarded dnode: step over the marker pair. */
+      start += 2;
+    } else {
+      tag = fulltag_of(header);
+      if (immheader_tag_p(tag)) {
+        start = (LispObj *)skip_over_ivector((natural)start, header);
+      } else if (nodeheader_tag_p(tag)) {
+        /* Round element count up to an odd number of data words so
+           header + data fill whole dnodes. */
+        nwords = header_element_count(header);
+        nwords += (1 - (nwords&1));
+        if ((header_subtag(header) == subtag_hash_vector) &&
+          ((((hash_table_vector_header *)start)->flags) & 
+           nhash_track_keys_mask)) {
+          natural skip = (sizeof(hash_table_vector_header)/sizeof(LispObj))-1;
+
+          hashp = (hash_table_vector_header *) start;
+          start++;
+          nwords -= skip;
+          /* Fixed header fields first. */
+          while(skip--) {
+            copy_reference(start, low, high, to, what);
+            start++;
+          }
+          /* "nwords" is odd at this point: there are (floor nwords 2)
+             key/value pairs to look at, and then an extra word for
+             alignment.  Process them two at a time, then bump "start"
+             past the alignment word. */
+          nwords >>= 1;
+          while(nwords--) {
+            if (copy_reference(start, low, high, to, what) && hashp) {
+              /* A key moved: tell the hash table to rehash. */
+              hashp->flags |= nhash_key_moved_mask;
+              hashp = NULL;
+            }
+            start++;
+            copy_reference(start, low, high, to, what);
+            start++;
+          }
+          *start++ = 0;
+        } else {
+          if (header_subtag(header) == subtag_function) {
+            /* Word 1 of a function counts its unboxed code words;
+               skip them so only the constants are scanned. */
+#ifdef X8632
+            int skip = (unsigned short)(start[1]);
+
+	    /* XXX bootstrapping */
+	    if (skip & 0x8000)
+	      skip = header_element_count(header) - (skip & 0x7fff);
+#else
+            int skip = (int)(start[1]);
+#endif
+            start += skip;
+            nwords -= skip;
+          }
+          start++;
+          while(nwords--) {
+            copy_reference(start, low, high, to, what);
+            start++;
+          }
+        }
+      } else {
+        /* Not a header, just a cons cell */
+        copy_reference(start, low, high, to, what);
+        start++;
+        copy_reference(start, low, high, to, what);
+        start++;
+      }
+    }
+  }
+}
+        
+/* Purify references from tstack areas */
+/* Each tstack frame begins with a link word pointing at the next
+   frame; the two link words are skipped and the frame body is
+   purified as a heap range. */
+void
+purify_tstack_area(area *a, BytePtr low, BytePtr high, area *to, int what)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) (a->active),
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = (LispObj *) ptr_from_lispobj(*current);
+    /* Clamp a link that escapes [start,limit) to the stack top. */
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    purify_range(current+2, end, low, high, to, what);
+  }
+}
+
+/* Purify a vstack area */
+/* The live portion of a vstack (active..high) is headerless nodes. */
+void
+purify_vstack_area(area *a, BytePtr low, BytePtr high, area *to, int what)
+{
+  LispObj
+    *p = (LispObj *) a->active,
+    *q = (LispObj *) a->high;
+  
+  purify_headerless_range(p, q, low, high, to, what);
+}
+
+
+/* Purify the node-holding GPRs of a suspended thread's exception
+   context.  On x8632, node_regs_mask says which registers hold nodes
+   (EDX only when the direction flag is clear). */
+void
+purify_xp(ExceptionInformation *xp, BytePtr low, BytePtr high, area *to, int what
+#ifdef X8632
+          ,natural node_regs_mask
+#endif
+)
+{
+  natural *regs = (natural *) xpGPRvector(xp);
+
+
+#ifdef X8664
+  copy_reference(&(regs[Iarg_z]), low, high, to, what);
+  copy_reference(&(regs[Iarg_y]), low, high, to, what);
+  copy_reference(&(regs[Iarg_x]), low, high, to, what);
+  copy_reference(&(regs[Isave3]), low, high, to, what);
+  copy_reference(&(regs[Isave2]), low, high, to, what);
+  copy_reference(&(regs[Isave1]), low, high, to, what);
+  copy_reference(&(regs[Isave0]), low, high, to, what);
+  copy_reference(&(regs[Ifn]), low, high, to, what);
+  copy_reference(&(regs[Itemp0]), low, high, to, what);
+  copy_reference(&(regs[Itemp1]), low, high, to, what);
+  copy_reference(&(regs[Itemp2]), low, high, to, what);
+
+  /* The instruction pointer is only ever forwarded, never copied. */
+  purify_locref(&(regs[Iip]), low, high, to, PURIFY_NOTHING);
+
+#else
+  if (node_regs_mask & (1<<0)) {
+    copy_reference(&(regs[REG_EAX]), low, high, to, what);
+  }
+  if (node_regs_mask & (1<<1)) {
+    copy_reference(&(regs[REG_ECX]), low, high, to, what);
+  }
+  if (! (regs[REG_EFL] & EFL_DF)) {
+    if (node_regs_mask & (1<<2)) {
+      copy_reference(&(regs[REG_EDX]), low, high, to, what);
+    }
+  }
+  if (node_regs_mask & (1<<3)) {
+    copy_reference(&(regs[REG_EBX]), low, high, to, what);
+  }
+  if (node_regs_mask & (1<<4)) {
+    copy_reference(&(regs[REG_ESP]), low, high, to, what);
+  }
+  if (node_regs_mask & (1<<5)) {
+    copy_reference(&(regs[REG_EBP]), low, high, to, what);
+  }
+  if (node_regs_mask & (1<<6)) {
+    copy_reference(&(regs[REG_ESI]), low, high, to, what);
+  }
+  if (node_regs_mask & (1<<7)) {
+    copy_reference(&(regs[REG_EDI]), low, high, to, what);
+  }
+  /* NOTE(review): passes `what` here where the x8664 branch passes
+     PURIFY_NOTHING for the ip — confirm the asymmetry is intended. */
+  purify_locref(&regs[REG_EIP], low, high, to, what);
+#endif
+}
+
+/* Purify a thread's special-variable binding table (tlb_limit is a
+   byte count). */
+void
+purify_tcr_tlb(TCR *tcr, BytePtr low, BytePtr high, area *to, int what)
+{
+  natural n = tcr->tlb_limit;
+  LispObj *start = tcr->tlb_pointer, *end = (LispObj *) ((BytePtr)start+n);
+
+  purify_range(start, end, low, high, to, what);
+}
+
+/* Purify a thread's saved exception contexts: the gc_context (if the
+   thread was suspended in a handler), the x8632 TCR save slots, and
+   the whole xframe chain. */
+void
+purify_tcr_xframes(TCR *tcr, BytePtr low, BytePtr high, area *to, int what)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+  
+  xp = tcr->gc_context;
+  if (xp) {
+#ifdef X8632
+    purify_xp(xp, low, high, to, what, tcr->node_regs_mask);
+#else
+    purify_xp(xp, low, high, to, what);
+#endif
+  }
+#ifdef X8632
+  copy_reference(&tcr->save0, low, high, to, what);
+  copy_reference(&tcr->save1, low, high, to, what);
+  copy_reference(&tcr->save2, low, high, to, what);
+  copy_reference(&tcr->save3, low, high, to, what);
+  copy_reference(&tcr->next_method_context, low, high, to, what);
+#endif
+
+  for (xframes = tcr->xframe; xframes; xframes = xframes->prev) {
+    purify_xp(xframes->curr, low, high, to, what
+#ifdef X8632
+              , xframes->node_regs_mask
+#endif
+              );
+  }
+}
+
+
+/* Purify every GC-visible area, dispatching on the area's code.
+   C stacks contain no lisp nodes and are skipped. */
+void
+purify_areas(BytePtr low, BytePtr high, area *target, int what)
+{
+  area *next_area;
+  area_code code;
+      
+  for (next_area = active_dynamic_area; (code = next_area->code) != AREA_VOID; next_area = next_area->succ) {
+    switch (code) {
+    case AREA_TSTACK:
+      purify_tstack_area(next_area, low, high, target, what);
+      break;
+      
+    case AREA_VSTACK:
+      purify_vstack_area(next_area, low, high, target, what);
+      break;
+      
+    case AREA_CSTACK:
+      break;
+      
+    case AREA_STATIC:
+    case AREA_DYNAMIC:
+    case AREA_MANAGED_STATIC:
+      purify_range((LispObj *) next_area->low, (LispObj *) next_area->active, low, high, target, what);
+      break;
+      
+    default:
+      break;
+    }
+  }
+}
+
+/* Rebuild area A's refbits: walk A dnode by dnode and set the refbit
+   for every dnode containing a node reference into the first
+   ndynamic_dnodes of dynamic space (starting at low_dynamic_address).
+   Functions' unboxed code words are skipped. */
+void
+update_managed_refs(area *a, BytePtr low_dynamic_address, natural ndynamic_dnodes)
+{
+  LispObj 
+    *start = (LispObj *)a->low,
+    *end = (LispObj *)a->active,
+    x1, 
+    *base = start, *prev = start;
+  int tag;
+  bitvector refbits = a->refbits;
+  natural ref_dnode, node_dnode;
+  Boolean intergen_ref;
+
+  while (start < end) {
+    x1 = *start;
+    prev = start;
+    tag = fulltag_of(x1);
+    if (immheader_tag_p(tag)) {
+      start = skip_over_ivector(ptr_to_lispobj(start), x1);
+    } else {
+      if (header_subtag(x1) == subtag_function) {
+#ifdef X8632
+	int skip = (unsigned short)deref(start,1);
+	/* XXX bootstrapping */
+	if (skip & 0x8000)
+	  skip = header_element_count(x1) - (skip & 0x7fff);
+#else
+        int skip = (int) deref(start,1);
+#endif
+        /* Skip the code words (dnode-aligned) and re-read the word. */
+        start += ((1+skip)&~1);
+        x1 = *start;
+        tag = fulltag_of(x1);
+      }
+      /* Check both words of the dnode for a reference into the
+         dynamic range. */
+      intergen_ref = false;
+      if (is_node_fulltag(tag)) {        
+        node_dnode = area_dnode(x1, low_dynamic_address);
+        if (node_dnode < ndynamic_dnodes) {
+          intergen_ref = true;
+        }
+      }
+      if (intergen_ref == false) {        
+        x1 = start[1];
+        tag = fulltag_of(x1);
+        if (is_node_fulltag(tag)) {        
+          node_dnode = area_dnode(x1, low_dynamic_address);
+          if (node_dnode < ndynamic_dnodes) {
+            intergen_ref = true;
+          }
+        }
+      }
+      if (intergen_ref) {
+        ref_dnode = area_dnode(start, base);
+        set_bit(refbits, ref_dnode);
+      }
+      start += 2;
+    }
+  }
+  if (start > end) {
+    Bug(NULL, "Overran end of range!");
+  }
+}
+
+/*
+  So far, this is mostly for save_application's benefit.
+  We -should- be able to return to lisp code after doing this,
+  however.
+
+*/
+
+
+/* Copy immutable data (ivectors; also immutable functions when PARAM
+   is non-zero) from the non-static part of the dynamic area into the
+   read-only area, then fix up all references from areas, TCRs, and
+   gcable pointers.  Returns 0 on success, -1 if the read-only area
+   could not be extended. */
+signed_natural
+purify(TCR *tcr, signed_natural param)
+{
+  extern area *extend_readonly_area(natural);
+  area 
+    *a = active_dynamic_area,
+    *pure_area;
+
+  TCR  *other_tcr;
+  natural max_pure_size;
+  BytePtr new_pure_start,
+    low = (a->low + (static_dnodes_for_area(a) << dnode_shift)),
+    high = a->active;
+  Boolean purify_functions = (param != 0);
+  int flags = PURIFY_IVECTORS | (purify_functions ? PURIFY_FUNCTIONS : 0);
+
+  /* Worst-case size of everything we might copy. */
+  max_pure_size = unboxed_bytes_in_range((LispObj *) low, (LispObj *) high, purify_functions);
+  pure_area = extend_readonly_area(max_pure_size);
+  if (pure_area) {
+    new_pure_start = pure_area->active;
+    lisp_global(IN_GC) = (1<<fixnumshift);
+
+    /* 
+      Caller will typically GC again (and that should recover quite a bit of
+      the dynamic heap.)
+      */
+
+
+    
+    purify_areas(low, high, pure_area, flags);
+    
+    other_tcr = tcr;
+    do {
+      purify_tcr_xframes(other_tcr, low, high, pure_area, flags);
+      purify_tcr_tlb(other_tcr, low, high, pure_area, flags);
+      other_tcr = other_tcr->next;
+    } while (other_tcr != tcr);
+
+    purify_gcable_ptrs(low, high, pure_area, flags);
+    if (purify_functions) {
+      /* We're likely to copy a lot of symbols to the managed static
+         area.  Lots of symbols will have incidental references to
+         a relatively small number of things that happen to initialy
+         be in dynamic space: the UNDEFINED-FUNCTION object, packages,
+         etc.  Doing a shallow copy of those things to the managed-static
+         area will reduce the number of static->dynamic references. */
+      LispObj package_list;
+
+      copy_reference(&nrs_UDF.vcell,low,high,managed_static_area,PURIFY_ALL);
+      for (package_list = nrs_ALL_PACKAGES.vcell;
+           package_list != lisp_nil;
+           package_list = deref(package_list,0)) {
+        copy_reference(&(deref(package_list,1)),low,high,managed_static_area,PURIFY_ALL);
+      }
+
+        
+
+      /* Do a shallow copy of the constants of all purified functions
+         from the dynamic area to the managed static area */
+      purify_range((LispObj*)(pure_area->low),
+                   (LispObj*)(pure_area->active),
+                   low,
+                   high,
+                   managed_static_area,
+                   PURIFY_ALL);
+      /* Go back through all areas, resolving forwarding pointers
+         (but without copying anything.) */
+      purify_areas(low, high, NULL, PURIFY_NOTHING);
+      other_tcr = tcr;
+      do {
+        purify_tcr_xframes(other_tcr, low, high, NULL, PURIFY_NOTHING);
+        purify_tcr_tlb(other_tcr, low, high, NULL, PURIFY_NOTHING);
+        other_tcr = other_tcr->next;
+      } while (other_tcr != tcr);
+      
+      purify_gcable_ptrs(low, high, NULL, PURIFY_NOTHING);
+
+      /* Update refbits for managed static area */
+      {
+        natural 
+          managed_dnodes = area_dnode(managed_static_area->active,
+                                      managed_static_area->low),
+          refbytes = align_to_power_of_2((managed_dnodes+7)>>3,log2_page_size);
+        
+        managed_static_area->ndnodes = managed_dnodes;
+        CommitMemory(managed_static_area->refbits, refbytes); /* zeros them */
+        update_managed_refs(managed_static_area, low_markable_address, area_dnode(a->active,low_markable_address));
+      }
+    }
+    /* The copied data is immutable from here on. */
+    ProtectMemory(pure_area->low,
+		  align_to_power_of_2(pure_area->active-pure_area->low,
+				      log2_page_size));
+    lisp_global(IN_GC) = 0;
+    just_purified_p = true;
+    return 0;
+  }
+  return -1;
+}
+
+/* If *P lies in [low,high), slide it by DELTA (the distance the
+   purified space is being moved back into the dynamic heap).
+   Unconditional on tag — used for raw locatives like saved PCs. */
+Boolean
+impurify_locref(LispObj *p, LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj q = *p;
+
+  if ((q >= low) && 
+      (q < high)) {
+    *p = (q+delta);
+    return true;
+  }
+  return false;
+}
+  
+/* Like impurify_locref, but only adjusts *P when it carries a node
+   fulltag — immediates and raw words are left alone. */
+Boolean
+impurify_noderef(LispObj *p, LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj q = *p;
+  
+  if (is_node_fulltag(fulltag_of(q)) &&
+      (q >= low) && 
+      (q < high)) {
+    *p = (q+delta);
+    return true;
+  }
+  return false;
+}
+  
+
+/* Slide every link in the GCABLE_POINTERS xmacptr chain by DELTA. */
+void
+impurify_gcable_ptrs(LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next;
+
+  while ((*prev) != (LispObj)NULL) {
+    impurify_noderef(prev, low, high, delta);
+    /* Re-read *prev: it may have just been adjusted. */
+    next = *prev;
+    prev = &(((xmacptr *)ptr_from_lispobj(untag(next)))->link);
+  }
+}
+
+
+/* Adjust the node-holding GPRs (and the instruction pointer, as a
+   raw locative) of an exception context by DELTA.  Mirrors
+   purify_xp's register set. */
+void
+impurify_xp(ExceptionInformation *xp, LispObj low, LispObj high, signed_natural delta
+#ifdef X8632
+            ,natural node_regs_mask
+#endif
+)
+{
+  natural *regs = (natural *) xpGPRvector(xp);
+
+
+#ifdef X8664
+  impurify_noderef(&(regs[Iarg_z]), low, high, delta);
+  impurify_noderef(&(regs[Iarg_y]), low, high, delta);
+  impurify_noderef(&(regs[Iarg_x]), low, high, delta);
+#ifndef TCR_IN_GPR
+  impurify_noderef(&(regs[Isave3]), low, high, delta);
+#endif
+  impurify_noderef(&(regs[Isave2]), low, high, delta);
+  impurify_noderef(&(regs[Isave1]), low, high, delta);
+  impurify_noderef(&(regs[Isave0]), low, high, delta);
+  impurify_noderef(&(regs[Ifn]), low, high, delta);
+  impurify_noderef(&(regs[Itemp0]), low, high, delta);
+  impurify_noderef(&(regs[Itemp1]), low, high, delta);
+
+  impurify_locref(&(regs[Iip]), low, high, delta);
+#else
+  if (node_regs_mask & (1<<0)) {
+    impurify_noderef(&(regs[REG_EAX]), low, high, delta);
+  }
+  if (node_regs_mask & (1<<1)) {
+    impurify_noderef(&(regs[REG_ECX]), low, high, delta);
+  }
+  /* EDX holds a node only when the direction flag is clear. */
+  if (! (regs[REG_EFL] & EFL_DF)) {
+    if (node_regs_mask & (1<<2)) {
+      impurify_noderef(&(regs[REG_EDX]), low, high, delta);
+    }
+  }
+  if (node_regs_mask & (1<<3)) {
+    impurify_noderef(&(regs[REG_EBX]), low, high, delta);
+  }
+  if (node_regs_mask & (1<<4)) {
+    impurify_noderef(&(regs[REG_ESP]), low, high, delta);
+  }
+  if (node_regs_mask & (1<<5)) {
+    impurify_noderef(&(regs[REG_EBP]), low, high, delta);
+  }
+  if (node_regs_mask & (1<<6)) {
+    impurify_noderef(&(regs[REG_ESI]), low, high, delta);
+  }
+  if (node_regs_mask & (1<<7)) {
+    impurify_noderef(&(regs[REG_EDI]), low, high, delta);
+  }
+  impurify_locref(&(regs[REG_EIP]), low, high, delta);
+
+#endif
+
+}
+
+/* Adjust every node word in a headerless range by DELTA. */
+void
+impurify_headerless_range(LispObj *start, LispObj *end, LispObj low, LispObj high, signed_natural delta)
+{
+  while (start < end) {
+    impurify_noderef(start, low, high, delta);
+    start++;
+  }
+}
+
+
+/* Adjust all node references in heap range [start,end) by DELTA,
+   parsing headers exactly as purify_range does (hash vectors with key
+   tracking, function code-word skipping, conses). */
+void
+impurify_range(LispObj *start, LispObj *end, LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj header;
+  unsigned tag;
+  natural nwords;
+  hash_table_vector_header *hashp;
+
+  while (start < end) {
+    header = *start;
+    if (header == forward_marker) {
+      start += 2;
+    } else {
+      tag = fulltag_of(header);
+      if (immheader_tag_p(tag)) {
+        start = (LispObj *)skip_over_ivector((natural)start, header);
+      } else if (nodeheader_tag_p(tag)) {
+        nwords = header_element_count(header);
+        nwords += (1 - (nwords&1));
+        if ((header_subtag(header) == subtag_hash_vector) &&
+          ((((hash_table_vector_header *)start)->flags) & 
+           nhash_track_keys_mask)) {
+          natural skip = (sizeof(hash_table_vector_header)/sizeof(LispObj))-1;
+
+          hashp = (hash_table_vector_header *) start;
+          start++;
+          nwords -= skip;
+          while(skip--) {
+            impurify_noderef(start, low, high, delta);
+            start++;
+          }
+          /* "nwords" is odd at this point: there are (floor nwords 2)
+             key/value pairs to look at, and then an extra word for
+             alignment.  Process them two at a time, then bump "start"
+             past the alignment word. */
+          nwords >>= 1;
+          while(nwords--) {
+            if (impurify_noderef(start, low, high, delta) && hashp) {
+              /* A key address changed: force a rehash. */
+              hashp->flags |= nhash_key_moved_mask;
+              hashp = NULL;
+            }
+            start++;
+            impurify_noderef(start, low, high, delta);
+            start++;
+          }
+          *start++ = 0;
+        } else {
+          if (header_subtag(header) == subtag_function) {
+#ifdef X8632
+	    int skip = (unsigned short)start[1];
+#else
+            int skip = (int)(start[1]);
+#endif
+            start += skip;
+            nwords -= skip;
+          }
+          start++;
+          while(nwords--) {
+            impurify_noderef(start, low, high, delta);
+            start++;
+          }
+        }
+      } else {
+        /* Not a header, just a cons cell */
+        impurify_noderef(start, low, high, delta);
+        start++;
+        impurify_noderef(start, low, high, delta);
+        start++;
+      }
+    }
+  }
+}
+
+
+
+
+/* Adjust a thread's special-variable binding table (tlb_limit is a
+   byte count). */
+void
+impurify_tcr_tlb(TCR *tcr,  LispObj low, LispObj high, signed_natural delta)
+{
+  unsigned n = tcr->tlb_limit;
+  LispObj *start = tcr->tlb_pointer, *end = (LispObj *) ((BytePtr)start+n);
+  
+  impurify_range(start, end, low, high, delta);
+}
+
+/* Adjust a thread's saved exception contexts, x8632 TCR save slots,
+   and xframe chain — the impurify twin of purify_tcr_xframes. */
+void
+impurify_tcr_xframes(TCR *tcr, LispObj low, LispObj high, signed_natural delta)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+  
+  xp = tcr->gc_context;
+  if (xp) {
+#ifdef X8632
+    impurify_xp(xp, low, high, delta, tcr->node_regs_mask);
+#else
+    impurify_xp(xp, low, high, delta);
+#endif
+  }
+
+#ifdef X8632
+  impurify_noderef(&tcr->save0, low, high, delta);
+  impurify_noderef(&tcr->save1, low, high, delta);
+  impurify_noderef(&tcr->save2, low, high, delta);
+  impurify_noderef(&tcr->save3, low, high, delta);
+  impurify_noderef(&tcr->next_method_context, low, high, delta);
+#endif
+
+  for (xframes = tcr->xframe; xframes; xframes = xframes->prev) {
+    impurify_xp(xframes->curr, low, high, delta
+#ifdef X8632
+                ,xframes->node_regs_mask
+#endif
+);
+  }
+}
+
+/* Adjust references in each tstack frame body (link words skipped),
+   mirroring purify_tstack_area's frame walk. */
+void
+impurify_tstack_area(area *a, LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj
+    *current,
+    *next,
+    *start = (LispObj *) (a->active),
+    *end = start,
+    *limit = (LispObj *) (a->high);
+
+  for (current = start;
+       end != limit;
+       current = next) {
+    next = (LispObj *) ptr_from_lispobj(*current);
+    end = ((next >= start) && (next < limit)) ? next : limit;
+    impurify_range(current+2, end, low, high, delta);
+  }
+}
+/* Adjust the headerless live portion (active..high) of a vstack. */
+void
+impurify_vstack_area(area *a, LispObj low, LispObj high, signed_natural delta)
+{
+  LispObj
+    *p = (LispObj *) a->active,
+    *q = (LispObj *) a->high;
+
+  impurify_headerless_range(p, q, low, high, delta);
+}
+
+
+/* Adjust every GC-visible area by DELTA, dispatching on area code;
+   C stacks are skipped. */
+void
+impurify_areas(LispObj low, LispObj high, signed_natural delta)
+{
+  area *next_area;
+  area_code code;
+      
+  for (next_area = active_dynamic_area; (code = next_area->code) != AREA_VOID; next_area = next_area->succ) {
+    switch (code) {
+    case AREA_TSTACK:
+      impurify_tstack_area(next_area, low, high, delta);
+      break;
+      
+    case AREA_VSTACK:
+      impurify_vstack_area(next_area, low, high, delta);
+      break;
+      
+    case AREA_CSTACK:
+      break;
+      
+    case AREA_STATIC:
+    case AREA_DYNAMIC:
+    case AREA_MANAGED_STATIC:
+      impurify_range((LispObj *) next_area->low, (LispObj *) next_area->active, low, high, delta);
+      break;
+      
+    default:
+      break;
+    }
+  }
+}
+
+/* Move the live contents of SRC (readonly or managed-static area)
+   back to the end of the active dynamic area, growing the heap if
+   necessary, then slide every reference into SRC's old range by
+   delta = (new base - old base). */
+void
+impurify_from_area(TCR *tcr, area *src)
+{
+  area *a = active_dynamic_area;
+  BytePtr base = src->low, limit = src->active, oldfree = a->active,
+    oldhigh = a->high, newhigh;
+  natural n = limit-base;
+  signed_natural delta = oldfree-base;
+  TCR *other_tcr;
+
+  newhigh = (BytePtr) (align_to_power_of_2(oldfree+n,
+                                           log2_heap_segment_size));
+  if (newhigh > oldhigh) {
+    grow_dynamic_area(newhigh-oldhigh);
+  }
+  a->active += n;
+  memmove(oldfree, base, n);
+  UnCommitMemory((void *)base, n);
+  /* NOTE(review): other call sites pass (high-address, low-address)
+     to area_dnode (e.g. area_dnode(managed_static_area->active,
+     managed_static_area->low) in purify); here the first argument is
+     the area struct pointer itself — confirm this is intended. */
+  a->ndnodes = area_dnode(a, a->active);
+  src->active = src->low;
+  if (src == readonly_area) {
+    pure_space_active = src->low;
+  }
+  src->ndnodes = 0;
+  
+  impurify_areas(ptr_to_lispobj(base), ptr_to_lispobj(limit), delta);
+  
+  other_tcr = tcr;
+  do {
+    impurify_tcr_xframes(other_tcr, ptr_to_lispobj(base), ptr_to_lispobj(limit), delta);
+    impurify_tcr_tlb(other_tcr, ptr_to_lispobj(base), ptr_to_lispobj(limit), delta);
+    other_tcr = other_tcr->next;
+  } while (other_tcr != tcr);
+  
+  impurify_gcable_ptrs(ptr_to_lispobj(base), ptr_to_lispobj(limit), delta);
+}
+
+/* Undo purification: move both the readonly and managed-static areas
+   back into the dynamic heap.  Always returns 0. */
+signed_natural
+impurify(TCR *tcr, signed_natural param)
+{
+  lisp_global(IN_GC)=1;
+  impurify_from_area(tcr, readonly_area);
+  impurify_from_area(tcr, managed_static_area);
+  lisp_global(IN_GC)=0;
+  return 0;
+}
+
+/*
+ * This stuff is all adapted from the forward_xxx functions for use by
+ * the watchpoint code.  It's a lot of duplicated code, and it would
+ * be nice to generalize it somehow.
+ */
+
+/* Replace *P with NEW if it currently equals OLD; report whether a
+   replacement happened. */
+static inline int
+wp_maybe_update(LispObj *p, LispObj old, LispObj new)
+{
+  if (*p == old) {
+    *p = new;
+    return true;
+  }
+  return false;
+}
+
+/* Rewrite OLD to NEW in every word of a headerless range. */
+static void
+wp_update_headerless_range(LispObj *start, LispObj *end,
+			   LispObj old, LispObj new)
+{
+  LispObj *p = start;
+
+  while (p < end) {
+    wp_maybe_update(p, old, new);
+    p++;
+  }
+}
+
+/* Rewrite OLD to NEW throughout heap range [start,end), with the
+   same header parsing as purify_range (ivector skip, tracked hash
+   vectors, function code-word skip, conses). */
+static void
+wp_update_range(LispObj *start, LispObj *end, LispObj old, LispObj new)
+{
+  LispObj *p = start, node;
+  int tag_n;
+  natural nwords;
+
+  while (p < end) {
+    node = *p;
+    tag_n = fulltag_of(node);
+
+    if (immheader_tag_p(tag_n)) {
+      p = (LispObj *)skip_over_ivector(ptr_to_lispobj(p), node);
+    } else if (nodeheader_tag_p(tag_n)) {
+      nwords = header_element_count(node);
+      nwords += 1 - (nwords & 1);
+
+      if ((header_subtag(node) == subtag_hash_vector) &&
+          ((((hash_table_vector_header *)p)->flags) & nhash_track_keys_mask)) {
+        natural skip = hash_table_vector_header_count - 1;
+	hash_table_vector_header *hashp = (hash_table_vector_header *)p;
+
+        p++;
+        nwords -= skip;
+        while(skip--) {
+	  wp_maybe_update(p, old, new);
+          p++;
+        }
+        /* "nwords" is odd at this point: there are (floor nwords 2)
+           key/value pairs to look at, and then an extra word for
+           alignment.  Process them two at a time, then bump "p"
+           past the alignment word. */
+        nwords >>= 1;
+        while(nwords--) {
+          if (wp_maybe_update(p, old, new) && hashp) {
+            /* A key identity changed: force a rehash. */
+            hashp->flags |= nhash_key_moved_mask;
+            hashp = NULL;
+          }
+          p++;
+	  wp_maybe_update(p, old, new);
+          p++;
+        }
+        *p++ = 0;
+      } else {
+	if (header_subtag(node) == subtag_function) {
+#ifdef X8632
+	  int skip = (unsigned short)(p[1]);
+
+	  /* XXX bootstrapping */
+	  if (skip & 0x8000)
+	    skip = header_element_count(node) - (skip & 0x7fff);
+
+#else
+	  int skip = (int)(p[1]);
+#endif
+	  p += skip;
+	  nwords -= skip;
+	}
+        p++;
+        while(nwords--) {
+	  wp_maybe_update(p, old, new);
+          p++;
+        }
+      }
+    } else {
+      /* a cons cell */
+      wp_maybe_update(p, old, new);
+      p++;
+      wp_maybe_update(p, old, new);
+      p++;
+    }
+  }
+}
+
+#ifdef X8664
+/* Rewrite OLD to NEW in the node-holding GPRs of an exception
+   context (x8664 variant). */
+static void
+wp_update_xp(ExceptionInformation *xp, LispObj old, LispObj new)
+{
+  natural *regs = (natural *)xpGPRvector(xp);
+
+  wp_maybe_update(&regs[Iarg_z], old, new);
+  wp_maybe_update(&regs[Iarg_y], old, new);
+  wp_maybe_update(&regs[Iarg_x], old, new);
+  wp_maybe_update(&regs[Isave3], old, new);
+  wp_maybe_update(&regs[Isave2], old, new);
+  wp_maybe_update(&regs[Isave1], old, new);
+  wp_maybe_update(&regs[Isave0], old, new);
+  wp_maybe_update(&regs[Ifn], old, new);
+  wp_maybe_update(&regs[Itemp0], old, new);
+  wp_maybe_update(&regs[Itemp1], old, new);
+  wp_maybe_update(&regs[Itemp2], old, new);
+
+#if 0
+  /* 
+   * We don't allow watching functions, so this presumably doesn't
+   * matter.
+   */
+  update_locref(&(regs[Iip]));
+#endif
+}
+#else
+/* x8632 variant: node_regs_mask selects which registers hold nodes. */
+static void
+wp_update_xp(ExceptionInformation *xp, LispObj old, LispObj new, natural node_regs_mask)
+{
+  natural *regs = (natural *)xpGPRvector(xp);
+
+  if (node_regs_mask & (1<<0)) wp_maybe_update(&regs[REG_EAX], old, new);
+  if (node_regs_mask & (1<<1)) wp_maybe_update(&regs[REG_ECX], old, new);
+
+  if (regs[REG_EFL] & EFL_DF) {
+    /* then EDX is an imm reg */
+    ;
+  } else
+    if (node_regs_mask & (1<<2)) wp_maybe_update(&regs[REG_EDX], old, new);
+
+  if (node_regs_mask & (1<<3)) wp_maybe_update(&regs[REG_EBX], old, new);
+  if (node_regs_mask & (1<<4)) wp_maybe_update(&regs[REG_ESP], old, new);
+  if (node_regs_mask & (1<<5)) wp_maybe_update(&regs[REG_EBP], old, new);
+  if (node_regs_mask & (1<<6)) wp_maybe_update(&regs[REG_ESI], old, new);
+  if (node_regs_mask & (1<<7)) wp_maybe_update(&regs[REG_EDI], old, new);
+  /* we shouldn't watch functions, so no need to update PC */
+}
+#endif
+
+/* Rewrite OLD to NEW in a thread's gc_context, x8632 TCR save slots,
+   and xframe chain. */
+static void
+wp_update_tcr_xframes(TCR *tcr, LispObj old, LispObj new)
+{
+  xframe_list *xframes;
+  ExceptionInformation *xp;
+
+  xp = tcr->gc_context;
+  if (xp) {
+#ifdef X8664
+    wp_update_xp(xp, old, new);
+#else
+    wp_update_xp(xp, old, new, tcr->node_regs_mask);
+    wp_maybe_update(&tcr->save0, old, new);
+    wp_maybe_update(&tcr->save1, old, new);
+    wp_maybe_update(&tcr->save2, old, new);
+    wp_maybe_update(&tcr->save3, old, new);
+    wp_maybe_update(&tcr->next_method_context, old, new);
+#endif
+  }
+  for (xframes = tcr->xframe; xframes; xframes = xframes->prev) {
+#ifdef X8664
+    wp_update_xp(xframes->curr, old, new);
+#else
+    wp_update_xp(xframes->curr, old, new, xframes->node_regs_mask);
+#endif
+  }
+}
+
+/*
+ * Scan all pointer-bearing areas, updating all references to
+ * "old" to "new".
+ */
+static void
+wp_update_all_areas(LispObj old, LispObj new)
+{
+  area *a = active_dynamic_area;
+  natural code = a->code;
+
+  while (code != AREA_VOID) {
+    switch (code) {
+      case AREA_DYNAMIC:
+      case AREA_STATIC:
+      case AREA_MANAGED_STATIC:
+      case AREA_WATCHED:
+	wp_update_range((LispObj *)a->low, (LispObj *)a->active, old, new);
+	break;
+      case AREA_VSTACK:
+      {
+	LispObj *low = (LispObj *)a->active;
+	LispObj *high = (LispObj *)a->high;
+	
+	wp_update_headerless_range(low, high, old, new);
+      }
+      break;
+      case AREA_TSTACK:
+      {
+	/* Frame-by-frame walk, as in purify_tstack_area. */
+	LispObj *current, *next;
+	LispObj *start = (LispObj *)a->active, *end = start;
+	LispObj *limit = (LispObj *)a->high;
+	
+	for (current = start; end != limit; current = next) {
+	  next = ptr_from_lispobj(*current);
+	  end = ((next >= start) && (next < limit)) ? next : limit;
+	  wp_update_range(current+2, end, old, new);
+	}
+      break;
+      }
+      default:
+	break;
+    }
+    a = a->succ;
+    code = a->code;
+  }
+}
+
+/* Rewrite OLD to NEW in a thread's binding table.  tlb_limit is a
+   byte count; note the end is computed as start + (n>>fixnumshift)
+   LispObj elements. */
+static void
+wp_update_tcr_tlb(TCR *tcr, LispObj old, LispObj new)
+{
+  natural n = tcr->tlb_limit;
+  LispObj *start = tcr->tlb_pointer;
+  LispObj *end = start + (n >> fixnumshift);
+
+  while (start < end) {
+    wp_maybe_update(start, old, new);
+    start++;
+  }
+}
+
+/* Watchpoint support: replace every reference to OLD with NEW across
+   all threads' xframes and binding tables, then all areas (watched
+   areas are unprotected around the scan). */
+void
+wp_update_references(TCR *tcr, LispObj old, LispObj new)
+{
+  TCR *other_tcr = tcr;
+
+  do {
+    wp_update_tcr_xframes(other_tcr, old, new);
+    wp_update_tcr_tlb(other_tcr, old, new);
+    other_tcr = other_tcr->next;
+  } while (other_tcr != tcr);
+  unprotect_watched_areas();
+  wp_update_all_areas(old, new);
+  protect_watched_areas();
+}
Index: /branches/arm/lisp-kernel/x86-macros.s
===================================================================
--- /branches/arm/lisp-kernel/x86-macros.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-macros.s	(revision 13357)
@@ -0,0 +1,765 @@
+/*   Copyright (C) 2005-2009 Clozure Associates  */
+/*   This file is part of Clozure CL.    */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+
+/* Try to make macros follow GAS/ATT conventions, where source precedes  */
+/* destination.  */
+
+/* Name a kernel global cell; $1 is the field name. */
+define(`lisp_global',`lisp_globals.$1')
+                        		
+/* Load a kernel global into $2 / store $1 into a kernel global. */
+define(`ref_global',`
+	__(mov lisp_global($1),$2)
+')
+
+define(`set_global',`
+	__(mov $1,lisp_global($2))
+')
+
+/* Read or write the value cell of a nil-relative symbol. */
+define(`ref_nrs_value',`
+	__(mov nrs.$1+symbol.vcell,$2)
+')
+	
+define(`set_nrs_value',`
+	__(mov $1,nrs.$2+symbol.vcell)
+')
+							
+/* Untag a fixnum: copy to $2, then arithmetic right by the tag width. */
+define(`unbox_fixnum',`
+	__(mov $1,$2)
+	__(sar `$'fixnumshift,$2)
+')
+
+/* Tag a raw integer as a fixnum by scaling it up; sets flags. */
+define(`box_fixnum',`
+        __(imul `$'fixnumone,$1,$2)
+')	
+
+
+/* box_fixnum, with no effect on flags */
+define(`box_fixnum_no_flags',`
+        __(lea (,$1,fixnumone),$2)
+')
+
+
+/* Zero $3 bytes worth of dnodes, starting at offset $2 relative  */
+/* to the base register $1.  */
+
+
+/* Recursive assembler macro: store one dnode of float zeros at the */
+/* given displacement, then recur with the displacement advanced and */
+/* the byte count reduced, until the count reaches zero.  The two */
+/* branches differ only in assembler macro syntax (Darwin vs GNU as). */
+ifdef(`DarwinAssembler',`
+	.macro zero_dnodes
+	.if $2
+	ifdef(`X8664',`
+	__(movapd %fpzero,$1($0))
+	',`
+	__(movsd %fpzero,$1($0))
+	')
+	__(zero_dnodes $0,$1+dnode_size,$2-dnode_size)
+	.endif
+	.endmacro
+',`
+	.macro zero_dnodes base,disp,nbytes
+	.ifgt \nbytes
+	ifdef(`X8664',`
+        movapd %fpzero,\disp(\base)
+	',`
+	movsd %fpzero,\disp(\base)
+	')
+	zero_dnodes \base,"\disp+dnode_size","\nbytes-dnode_size"
+	.endif
+	.endm
+')	
+
+
+/* Allocate $1+dnode_size zeroed bytes on the tstack, using $2 as a temp  */
+/* reg.  */
+
+/* Fixed-size temp-stack frame: round $1 up (plus a link word) to a */
+/* whole number of dnodes, bump next_tsp down, zero the new frame, */
+/* store the old frame pointer in its first word, save the frame */
+/* pointer register, and publish the new frame as save_tsp. */
+ifdef(`X8632',`
+define(`TSP_Alloc_Fixed',`
+	define(`TSP_Alloc_Size',`((($1+node_size) & ~(dnode_size-1))+dnode_size)')
+	__(subl `$'TSP_Alloc_Size,rcontext(tcr.next_tsp))
+	__(movd rcontext(tcr.save_tsp),%stack_temp)
+	__(movl rcontext(tcr.next_tsp),$2)
+	zero_dnodes $2,0,TSP_Alloc_Size
+	__(movd %stack_temp,($2))
+	__(movl %ebp,tsp_frame.save_ebp($2))
+	__(movl $2,rcontext(tcr.save_tsp))
+	undefine(`TSP_Alloc_Size')
+')',`
+define(`TSP_Alloc_Fixed',`
+	define(`TSP_Alloc_Size',`((($1+node_size) & ~(dnode_size-1))+dnode_size)')
+	__(subq `$'TSP_Alloc_Size,rcontext(tcr.next_tsp))
+        __(movq rcontext(tcr.save_tsp),%stack_temp)
+        __(movq rcontext(tcr.next_tsp),$2)
+	zero_dnodes $2,0,TSP_Alloc_Size
+	__(movq %stack_temp,($2))
+        __(movq %rbp,tsp_frame.save_rbp($2))
+        __(movq $2,rcontext(tcr.save_tsp))
+	undefine(`TSP_Alloc_Size')
+')')
+
+/* $1 = size (dnode-aligned, including tsp overhead, $2 scratch.  */
+/* Modifies both $1 and $2; on exit, $2 = new_tsp+tsp_overhead, $1 = old tsp  */
+
+/* Variable-size temp-stack frame: $1 holds the dnode-aligned byte */
+/* count (overhead included).  Bump next_tsp down, zero the frame one */
+/* dnode at a time, link in the old frame pointer, save the frame */
+/* pointer register, and publish the frame.  On exit $1 holds the old */
+/* top-of-temp-stack and $2 points just past the frame overhead. */
+ifdef(`X8632',`
+define(`TSP_Alloc_Var',`
+        new_macro_labels()
+        __(subl $1,rcontext(tcr.next_tsp))
+        __(movd rcontext(tcr.save_tsp),%stack_temp)
+        __(movl rcontext(tcr.next_tsp),$2)
+        __(jmp macro_label(test))
+macro_label(loop):
+        __(movsd %fpzero,0($2))
+        __(addl $dnode_size,$2)
+macro_label(test):
+        __(subl $dnode_size,$1)
+        __(jge macro_label(loop))
+        __(movl rcontext(tcr.next_tsp),$2)
+        __(movd %stack_temp,$1)
+        __(movl $1,($2))
+	__(movl %ebp,tsp_frame.save_ebp($2))
+        __(movl $2,rcontext(tcr.save_tsp))
+        __(addl $dnode_size,$2)
+')',`
+define(`TSP_Alloc_Var',`
+	new_macro_labels()
+        /* Wrapped in the standard statement wrapper for consistency */
+        /* with the 32-bit branch and every sibling line. */
+        __(subq $1,rcontext(tcr.next_tsp))
+        __(movq rcontext(tcr.save_tsp),%stack_temp)
+        __(movq rcontext(tcr.next_tsp),$2)
+	__(jmp macro_label(test))
+macro_label(loop):
+	__(movapd %fpzero,0($2))
+	__(addq $dnode_size,$2)
+macro_label(test):	
+	__(subq $dnode_size,$1)
+	__(jge macro_label(loop))
+        __(movq rcontext(tcr.next_tsp),$2)
+	__(movd %stack_temp,$1)
+	__(movq $1,($2))
+        __(movq %rbp,tsp_frame.save_rbp($2))
+        __(movq $2,rcontext(tcr.save_tsp))
+	__(addq $dnode_size,$2)
+')')
+	
+	
+/* Allocate a catch frame on the temp stack, stamp its header word, */
+/* and leave a tagged pointer to it in $1. */
+ifdef(`X8632',`
+define(`Allocate_Catch_Frame',`
+        TSP_Alloc_Fixed(catch_frame.size,$1)
+        __(movl `$'(catch_frame.element_count<<subtag_shift)|subtag_catch_frame,dnode_size($1))
+        __(addl `$'dnode_size+fulltag_misc,$1)
+')',`
+define(`Allocate_Catch_Frame',`
+	TSP_Alloc_Fixed(catch_frame.size,$1)
+	__(movq `$'(catch_frame.element_count<<subtag_shift)|subtag_catch_frame,dnode_size($1))
+	__(addq `$'dnode_size+fulltag_misc,$1)
+')')
+
+/* %arg_z = tag,  %xfn = pc, $1 = mvflag 	  */
+
+/* Fill in a fresh catch frame (tag, mv flag, stack pointers, binding */
+/* and exception-frame state, saved registers on 64-bit, resume pc) */
+/* and push it onto the per-thread catch list. */
+ifdef(`X8632',`
+define(`Make_Catch',`
+	Allocate_Catch_Frame(%imm0)
+	__(movd rcontext(tcr.catch_top),%mm0)
+	__(movd rcontext(tcr.db_link),%mm1)
+	__(movl %arg_z,catch_frame.catch_tag(%imm0))
+	__(movd %mm0,catch_frame.link(%imm0))
+	__(movl `$'$1,catch_frame.mvflag(%imm0))
+	__(movd rcontext(tcr.xframe),%mm0)
+	__(movl %esp,catch_frame.esp(%imm0))
+	__(movl %ebp,catch_frame.ebp(%imm0))
+        __(movd rcontext(tcr.foreign_sp),%stack_temp)
+	__(movd %stack_temp,catch_frame.foreign_sp(%imm0))
+	__(movd %mm1,catch_frame.db_link(%imm0))
+	__(movd %mm0,catch_frame.xframe(%imm0))
+	__(movl %xfn,catch_frame.pc(%imm0))
+	__(movl %imm0,rcontext(tcr.catch_top))
+')',`
+define(`Make_Catch',`
+	Allocate_Catch_Frame(%imm2)
+	__(movq rcontext(tcr.catch_top),%imm0)
+	__(movq rcontext(tcr.db_link),%imm1)
+	__(movq %arg_z,catch_frame.catch_tag(%imm2))
+	__(movq %imm0,catch_frame.link(%imm2))
+	__(movq `$'$1,catch_frame.mvflag(%imm2))
+	__(movq rcontext(tcr.xframe),%imm0)
+	__(movq %rsp,catch_frame.rsp(%imm2))
+	__(movq %rbp,catch_frame.rbp(%imm2))
+        __(movq rcontext(tcr.foreign_sp),%stack_temp)
+	__(movq %imm1,catch_frame.db_link(%imm2))
+	__ifndef(`WINDOWS')
+	__(movq %save3,catch_frame._save3(%imm2))
+	__endif
+	__(movq %save2,catch_frame._save2(%imm2))
+	__(movq %save1,catch_frame._save1(%imm2))
+	__(movq %save0,catch_frame._save0(%imm2))
+	__(movq %imm0,catch_frame.xframe(%imm2))
+	__(movq %stack_temp,catch_frame.foreign_sp(%imm2))
+	__(movq %xfn,catch_frame.pc(%imm2))
+	__(movq %imm2,rcontext(tcr.catch_top))
+')')	
+
+/* Like the plain catch-frame builder above, but records a stack */
+/* pointer one node higher -- the caller has pushed one extra word */
+/* that should not survive a throw to this frame. */
+ifdef(`X8632',`
+define(`nMake_Catch',`
+	Allocate_Catch_Frame(%imm0)
+	__(movd rcontext(tcr.catch_top),%mm0)
+	__(movd rcontext(tcr.db_link),%mm1)
+	__(movl %arg_z,catch_frame.catch_tag(%imm0))
+	__(movd %mm0,catch_frame.link(%imm0))
+	__(movl %esp,catch_frame.esp(%imm0))
+	__(addl $node_size,catch_frame.esp(%imm0))
+	__(movl `$'$1,catch_frame.mvflag(%imm0))
+	__(movd rcontext(tcr.xframe),%mm0)
+	__(movl %ebp,catch_frame.ebp(%imm0))
+        __(movd rcontext(tcr.foreign_sp),%stack_temp)
+	__(movd %mm1,catch_frame.db_link(%imm0))
+	__(movd %mm0,catch_frame.xframe(%imm0))
+	__(movd %stack_temp,catch_frame.foreign_sp(%imm0))
+	__(movl %xfn,catch_frame.pc(%imm0))
+	__(movl %imm0,rcontext(tcr.catch_top))
+')',`	
+define(`nMake_Catch',`
+	Allocate_Catch_Frame(%imm2)
+	__(movq rcontext(tcr.catch_top),%imm0)
+	__(movq rcontext(tcr.db_link),%imm1)
+	__(movq %arg_z,catch_frame.catch_tag(%imm2))
+	__(movq %imm0,catch_frame.link(%imm2))
+        __(lea node_size(%rsp),%imm0)
+	__(movq `$'$1,catch_frame.mvflag(%imm2))
+	__(movq %imm0,catch_frame.rsp(%imm2))
+	__(movq rcontext(tcr.xframe),%imm0)
+	__(movq %rbp,catch_frame.rbp(%imm2))
+        __(movq rcontext(tcr.foreign_sp),%stack_temp)
+	__(movq %imm1,catch_frame.db_link(%imm2))
+	__ifndef(`WINDOWS')
+	__(movq %save3,catch_frame._save3(%imm2))
+	__endif
+	__(movq %save2,catch_frame._save2(%imm2))
+	__(movq %save1,catch_frame._save1(%imm2))
+	__(movq %save0,catch_frame._save0(%imm2))
+	__(movq %imm0,catch_frame.xframe(%imm2))
+	__(movq %stack_temp,catch_frame.foreign_sp(%imm2))
+	__(movq %xfn,catch_frame.pc(%imm2))
+	__(movq %imm2,rcontext(tcr.catch_top))
+')')	
+        	
+	
+/* Consing can get interrupted (either by PROCESS-INTERRUPT or by GC  */
+/* activity in some other thread; if it is interrupted, the interrupting  */
+/* process needs to be able to determine what is going on well enough  */
+/* to be able to either back out of the attempt or finish the job.  */
+/* That requires that we use easily recogninized instruction sequences  */
+/* and follow certain conventions when consing (either in the kernel  */
+/* or in compiled code.)  (One of those conventions involves using  */
+/* %allocptr = %temp0 as a freepointer; when consing, %temp0 can not  */
+/* contain a live value.)  */
+/* Making a CONS cell is a little simpler than making a uvector.  */
+
+/* $1=new_car,$2=new_cdr,$3=dest   */
+
+/* Allocate a CONS cell from the thread allocation region.  The */
+/* decrement of save_allocptr leaves it tagged; crossing allocbase */
+/* traps to the kernel to refill the region.  Untagging save_allocptr */
+/* ends the hard-to-interrupt window; $3, if given, receives the cell. */
+ifdef(`X8632',`
+define(`Cons',`
+	new_macro_labels()
+/* The instructions where tcr.save_allocptr is tagged are difficult  */
+/* to interrupt; the interrupting code has to recognize and possibly  */
+/* emulate the instructions in between   */
+        __(subl $cons.size-fulltag_cons,rcontext(tcr.save_allocptr))
+        __(movl rcontext(tcr.save_allocptr),%allocptr)
+        __(rcmpl(%allocptr,rcontext(tcr.save_allocbase)))
+        __(ja macro_label(no_trap))
+        uuo_alloc()
+macro_label(no_trap):
+        __(andb $~fulltagmask,rcontext(tcr.save_allocptr))
+/* Easy to interrupt now that tcr.save_allocptr is not tagged as a cons    */
+        __(movl $2,cons.cdr(%allocptr))
+        __(movl $1,cons.car(%allocptr))
+        ifelse($3,`',`',`
+         __(movl %allocptr,$3)
+        ')
+')',`
+
+define(`Cons',`
+	new_macro_labels()
+/* The instructions where tcr.save_allocptr is tagged are difficult  */
+/* to interrupt; the interrupting code has to recognize and possibly  */
+/* emulate the instructions in between   */
+	__(subq $cons.size-fulltag_cons,rcontext(tcr.save_allocptr))
+	__(movq rcontext(tcr.save_allocptr),%allocptr)
+	__(rcmpq(%allocptr,rcontext(tcr.save_allocbase)))
+	__(ja macro_label(no_trap))
+	uuo_alloc()
+macro_label(no_trap):	
+	__(andb $~fulltagmask,rcontext(tcr.save_allocptr))
+/* Easy to interrupt now that tcr.save_allocptr is not tagged as a cons    */
+	__(movq $2,cons.cdr(%allocptr))
+	__(movq $1,cons.car(%allocptr))
+	ifelse($3,`',`',`
+	 __(movq %allocptr,$3)
+	')
+')')
+
+/* Allocate a uvector: subtract the misc tag from the requested byte */
+/* count so the tagged allocation pointer comes out correctly, then */
+/* fall into the shared internal allocator. */
+ifdef(`X8632',`
+/* Header in %mm0, size in bytes in %imm0.  We bash %imm0. */
+define(`Misc_Alloc',`
+	__(sub `$'fulltag_misc,%imm0)
+	Misc_Alloc_Internal($1)
+')',`
+/* Header in %imm0, size in bytes in %imm1.  We bash %imm1. */
+define(`Misc_Alloc',`
+	__(subq `$'fulltag_misc,%imm1)
+	Misc_Alloc_Internal($1)
+')')
+
+/* Here Be Monsters: we have to treat some/all of this instruction   */
+/* sequence atomically, as soon as tcr.save_allocptr becomes tagged.  */
+                
+/* Shared uvector allocator: bump save_allocptr down (tagged), trap to */
+/* the kernel if the region is exhausted, store the header word, then */
+/* untag save_allocptr to close the hard-to-interrupt window.  The */
+/* 32-bit compare below has the same sense as the reversed-compare */
+/* helper used in the 64-bit branch. */
+ifdef(`X8632',`
+define(`Misc_Alloc_Internal',`                  
+        new_macro_labels()
+        __(subl %imm0,rcontext(tcr.save_allocptr))
+        __(movl rcontext(tcr.save_allocptr),%allocptr)
+        __(cmpl rcontext(tcr.save_allocbase),%allocptr)
+        __(ja macro_label(no_trap))
+        uuo_alloc()
+macro_label(no_trap):   
+        __(movd %mm0,misc_header_offset(%allocptr))
+        __(andb $~fulltagmask,rcontext(tcr.save_allocptr))
+/* Now that tcr.save_allocptr is untagged, it is easier to be interrupted   */
+        ifelse($1,`',`',`
+         __(mov %allocptr,$1)
+        ')
+')',`	
+define(`Misc_Alloc_Internal',`			
+	new_macro_labels()
+	__(subq %imm1,rcontext(tcr.save_allocptr))
+	__(movq rcontext(tcr.save_allocptr),%allocptr)
+	__(rcmpq(%allocptr,rcontext(tcr.save_allocbase)))
+	__(ja macro_label(no_trap))
+	uuo_alloc()
+macro_label(no_trap):	
+	__(movq %imm0,misc_header_offset(%allocptr))
+	__(andb $~fulltagmask,rcontext(tcr.save_allocptr))
+/* Now that tcr.save_allocptr is untagged, it is easier to be interrupted   */
+	ifelse($1,`',`',`
+	 __(mov %allocptr,$1)
+	')
+')')
+
+/* Allocate a uvector of compile-time-known size $2 (bytes); the header */
+/* register must already be loaded.  Result, if wanted, goes to $1. */
+ifdef(`X8632',`
+define(`Misc_Alloc_Fixed',`
+	__(mov `$'$2-fulltag_misc,%imm0)
+	Misc_Alloc_Internal($1)
+')',`
+define(`Misc_Alloc_Fixed',`
+	__(movq `$'$2-fulltag_misc,%imm1)
+	Misc_Alloc_Internal($1)
+')')					
+
+/* Fetch element $3 (a constant) of vector $2 into $1. */
+define(`vrefr',`
+	__(mov misc_data_offset+($3<<word_shift)($2),$1)
+')	
+
+/* Tail-jump to the current function object. */
+define(`jump_fn',`
+	__(jmp *%fn)
+')
+			
+/* Tail-jump through the function cell of the symbol in %fname. */
+define(`jump_fname',`
+	__(mov symbol.fcell(%fname),%fn)
+	jump_fn()
+')	
+
+/* Set the argument-count register to the fixnum for $1.  The 64-bit */
+/* branch prefers a short xor, optionally followed by an add, for small */
+/* counts, and a plain 32-bit move for counts above 15. */
+ifdef(`X8632',`
+define(`set_nargs',`
+	__(xorl %nargs,%nargs)
+	__(addl `$'$1<<fixnumshift,%nargs)
+')',`
+define(`set_nargs',`
+        ifelse(eval($1>15),1,`
+        __(movl `$'$1<<fixnumshift,%nargs)
+        ',`
+        __(xorl %nargs,%nargs)
+        ifelse(eval($1),0,`',`
+        __(addl `$'$1<<fixnumshift,%nargs)
+        ')')')
+')
+
+/* $1 = ndigits.  Assumes 4-byte digits           */
+/* Rounds header-plus-digit bytes up to a whole number of dnodes. */
+/* NOTE(review): this same definition is restated later in this file. */
+define(`aligned_bignum_size',`((~(dnode_size-1)&(node_size+(dnode_size-1)+(4*$1))))')
+	
+
+/* CONS cell accessors and mutators: $1 = cell, $2 = value register. */
+define(`_car',`
+	__(mov cons.car($1),$2)
+')	
+
+define(`_rplaca',`
+	__(mov $2,cons.car($1))
+')	
+		
+define(`_cdr',`
+	__(mov cons.cdr($1),$2)
+')
+
+define(`_rplacd',`
+	__(mov $2,cons.cdr($1))
+')	
+		
+	
+	
+/* Emit a tagged return address: align to 8 bytes and lay down filler */
+/* so the label $1 lands at the offset the runtime recognizes as a */
+/* tagged return address.  The 64-bit variant can instead record the */
+/* displacement from $2 (presumably the enclosing function start -- */
+/* see the runtime consumers to confirm). */
+ifdef(`X8632',`
+define(`tra',`
+        .p2align 3
+	.long 0
+	.byte 0
+$1:	
+')',`
+define(`tra',`
+        .p2align 3
+	ifelse($2,`',`
+	.long 0
+	',`
+	.long $1-$2
+	')
+$1:	
+')')
+
+/* FUNCALL dispatch on the object in %temp0: a function is jumped to */
+/* directly, a symbol is called through its function cell, anything */
+/* else signals a not-callable error.  The 64-bit branch exploits tag */
+/* ordering: conditional moves select the callee without branching on */
+/* the common paths. */
+ifdef(`X8632',`
+define(`do_funcall',`
+        new_macro_labels()
+        extract_fulltag(%temp0,%imm0)
+        __(cmpb $fulltag_misc,%imm0_b)
+        __(jne macro_label(bad))
+        __(cmpb $subtag_function,misc_subtag_offset(%temp0))
+        __(jne macro_label(maybe_symbol))
+        __(mov %temp0,%fn)
+        __(jmp *%fn)
+macro_label(maybe_symbol):
+        __(cmpb $subtag_symbol,misc_subtag_offset(%temp0))
+        __(jne macro_label(bad))
+        /* %fname == %temp0 */
+        __(mov symbol.fcell(%fname),%fn)
+        __(jmp *%fn)
+macro_label(bad):
+        __(uuo_error_not_callable)
+')',`
+define(`do_funcall',`
+	new_macro_labels()
+	__(movb %temp0_b,%imm0_b)
+	__(andb $fulltagmask,%imm0_b)
+	__(cmpb $fulltag_symbol,%imm0_b)
+	/* %fname == %temp0   */
+	__(cmovgq %temp0,%fn)
+	/* Wrapped in the standard statement wrapper for consistency */
+	/* with every sibling line. */
+	__(jl macro_label(bad))
+	__(cmoveq symbol.fcell(%fname),%fn)
+	__(jmp *%fn)
+macro_label(bad):		
+	__(uuo_error_not_callable)
+')')
+
+/* Load the header word of uvector $1 into $2. */
+define(`getvheader',`
+        __(mov misc_header_offset($1),$2)
+')
+
+/* "Size" is unboxed element-count.  $1 (header) and $2 (dest) should  */
+/*    both be immediate registers   */
+define(`header_size',`
+        __(mov $1,$2)
+        __(shr $num_subtag_bits,$2)
+')
+
+/* $2 (length) is fixnum element-count.   */
+/* Masking off the low 8 header bits removes the subtag. */
+define(`header_length',`
+        __(mov $~255,$2)
+        __(and $1,$2)
+        __(shr $num_subtag_bits-fixnumshift,$2)
+')
+
+/* $1 = vector, $2 = header, $3 = dest   */
+define(`vector_size',`                                 
+        __(getvheader($1,$2))
+        __(header_size($2,$3))
+')
+
+/* $1 = vector, $2 = dest   */
+/* Fixnum element-count straight from memory, header reg not needed. */
+define(`vector_length',`                                 
+        __(mov $~255,$2)
+        __(and misc_header_offset($1),$2)
+        __(shr $num_subtag_bits-fixnumshift,$2)
+')
+                
+/* GAS/ATT comparison arg order drives me nuts   */
+/* These helpers take operands in the intuitive order: compare $1 to $2. */
+define(`rcmpq',`
+	__(cmpq $2,$1)
+')
+
+define(`rcmpl',`
+	__(cmpl $2,$1)
+')	
+
+define(`rcmpw',`
+	__(cmpw $2,$1)
+')	
+
+define(`rcmpb',`
+	__(cmpb $2,$1)
+')		
+
+
+/* Materialize T or NIL in $3 from condition $1, without a branch: */
+/* load the T immediate into $2, derive NIL from it by displacement */
+/* (NIL sits a fixed distance below T), then conditionally move T in. */
+define(`condition_to_boolean',`
+        __(movl `$'t_value,$2_l)
+        __(lea (-t_offset)($2),$3)
+        __(cmov$1l $2_l,$3_l)
+')
+
+/* Compare a register against NIL.  The 64-bit form compares only the */
+/* low byte against the nil fulltag -- presumably NIL is the only */
+/* heap object carrying that fulltag, so a byte test suffices. */
+ifdef(`X8632',`
+define(`compare_reg_to_nil',`
+	__(cmp $nil_value,$1)
+')',`
+define(`compare_reg_to_nil',`
+	__(cmpb $fulltag_nil,$1_b)
+')')
+
+/* Extract the low tag bits of $1 into $2. */
+ifdef(`X8632',`
+define(`extract_lisptag',`
+	__(movl $1,$2)
+	__(and `$'tagmask,$2)
+')',`
+define(`extract_lisptag',`
+	__(movzbl $1_b,$2_l)
+	__(andb `$'tagmask,$2_b)
+')')
+
+								
+/* Extract the wider fulltag bits of $1 into $2. */
+define(`extract_fulltag',`
+	__(movzbl $1_b,$2_l)
+	__(andb `$'fulltagmask,$2_b)
+')
+
+/* Load the subtag byte of uvector $1 into byte register $2. */
+define(`extract_subtag',`
+	__(movb misc_subtag_offset($1),$2)
+')
+
+/* Full type code of $1 into $2: the tag, unless the tag says uvector, */
+/* in which case the subtag byte is fetched instead. */
+ifdef(`X8632',`
+define(`extract_typecode',`
+	new_macro_labels()
+	__(mov $1,$2)
+	__(andl $tagmask,$2)
+	__(cmpb $tag_misc,$2_b)
+	__(jne macro_label(done))
+	__(movb misc_subtag_offset($1),$2_b)
+macro_label(done):
+')',`
+define(`extract_typecode',`
+	new_macro_labels()
+	__(movzbl $1_b,$2_l)
+	__(andb $tagmask,$2_b)
+	__(cmpb $tag_misc,$2_b)
+	__(jne macro_label(done))
+	__(movb misc_subtag_offset($1),$2_b)
+macro_label(done):
+')')
+
+/* dnode_align(src,delta,dest)  */
+/* Round $1 plus $2 up to the next dnode boundary; masking only the */
+/* low byte is enough for this small power-of-two alignment. */
+define(`dnode_align',`
+        __(lea ($2+(dnode_size-1))($1),$3)
+	__(andb $~(dnode_size-1),$3_b)
+')
+
+/* Push the register-passed arguments onto the stack, rightmost last, */
+/* based on the incoming argument count. */
+ifdef(`X8632',`
+define(`push_argregs',`
+	new_macro_labels()
+	/* xxx hack alert: when the compiler calls a keyword subprim */
+	/* (SPsimple_keywords, SPkeyword_args, SP_keyword_bind) */
+	/* it puts some flags in the upper half of %temp1, which */
+	/* is %nargs.  We use the cmpw here to avoid seeing those flags. */
+	__(cmpw `$'1*node_size,%nargs_w)
+	__(jb macro_label(done))
+	__(je macro_label(z))
+	__(push %arg_y)
+macro_label(z):
+	__(push %arg_z)
+macro_label(done):
+')',`
+define(`push_argregs',`
+	new_macro_labels()
+	__(testl %nargs,%nargs)
+	__(jz macro_label(done))
+	__(cmpl `$'2*node_size,%nargs)
+	__(je macro_label(yz))
+	__(jb macro_label(z))
+	__(push %arg_x)
+macro_label(yz):
+	__(push %arg_y)
+macro_label(z):
+	__(push %arg_z)
+macro_label(done):
+')')	
+
+
+/* $1 = ndigits.  Assumes 4-byte digits           */
+/* NOTE(review): duplicate of the earlier identical definition; */
+/* harmless, since both expand to the same text. */
+define(`aligned_bignum_size',`((~(dnode_size-1)&(node_size+(dnode_size-1)+(4*$1))))')
+
+/* Pop one temp-stack frame: follow the back pointer stored in the */
+/* first word of the current frame and make it the new top. */
+define(`discard_temp_frame',`
+	__(mov rcontext(tcr.save_tsp),$1)
+	__(mov ($1),$1)
+	__(mov $1,rcontext(tcr.save_tsp))
+	__(mov $1,rcontext(tcr.next_tsp))
+')
+
+/* Atomically test-and-clear the high bit of the pending-interrupt */
+/* word; when it was set, service the interrupt, otherwise jump to $1. */
+ifdef(`X8632',`	
+define(`check_pending_enabled_interrupt',`
+	__(btrl `$'31,rcontext(tcr.interrupt_pending))
+	__(jnc $1)
+	interrupt_now()
+')',`
+define(`check_pending_enabled_interrupt',`
+	__(btrq `$'63,rcontext(tcr.interrupt_pending))
+	__(jnc $1)
+	interrupt_now()
+')')
+	
+/* $1 = scratch register, used to access tcr.tlb_pointer.  An interrupt  */
+/*   should be taken if interrupts are enabled and the most significant  */
+/*   bit of tcr.interrupt_pending is set.  If we take the interrupt, we  */
+/*   test and clear the pending bit.  */
+
+define(`check_pending_interrupt',`
+	new_macro_labels()
+	__(mov rcontext(tcr.tlb_pointer),$1)
+	/* A negative interrupt-level binding means disabled: skip. */
+	__(cmp `$'0,INTERRUPT_LEVEL_BINDING_INDEX($1))
+	__(js macro_label(done))
+	check_pending_enabled_interrupt(macro_label(done))
+macro_label(done):
+')
+
+/* This should only be called from a foreign context; it should be */
+/* assumed to bash all non-volatile C registers.  And of course it is */
+/* ugly, awful, non-portable, and slow.  %rdi should point to the */
+/* linear address that %gs should be made to address (tcr or pthread data) */
+        			
+ifdef(`DARWIN_GS_HACK',`
+/* Point the segment base at the linear address in %rdi (or in $1, */
+/* when supplied) via the Darwin thread-local-base system call; the */
+/* magic number below is that call. */
+define(`set_gs_base',`
+        ifelse($1,`',`
+        ',`
+        __(movq $1,%rdi)
+        ')
+        __(movl `$'0x3000003,%eax)
+        __(syscall)
+')
+
+/* %gs addresses the tcr.  Make it address pthread data before running */
+/* foreign code */        
+        
+define(`set_foreign_gs_base',`
+        set_gs_base(`rcontext(tcr.osid)')
+')
+
+/* %gs addresses the tcr.  Get the linear address of the tcr and */
+/* copy it to $1 */
+
+define(`save_tcr_linear',`
+        __(movq rcontext(tcr.linear),$1)
+') 
+	
+')
+
+/*  On AMD hardware (at least), a one-byte RET instruction should be */
+/*  prefixed with a REP prefix if it (a) is the target of a  */
+/*  branch or (b) immediately follows a conditional branch not taken. */
+define(`repret',`
+        __(.byte 0xf3)
+        __(ret)
+')
+
+ifdef(`X8632',`
+/* Map a 32-bit register name to its hardware number, for use in the */
+/* per-thread node-register bit mask below. */
+define(`regnum',`ifelse($1, `%eax', `0',
+       $1, `%ecx', `1',
+       $1, `%edx', `2',
+       $1, `%ebx', `3',
+       $1, `%esp', `4',
+       $1, `%ebp', `5',
+       $1, `%esi', `6',
+       $1, `%edi', `7',
+	"unknown register")dnl
+')
+
+/* Declare register $1 as holding a lisp node: zero it first, so it */
+/* holds a valid object (zero is a fixnum) before its mask bit is set. */
+define(`mark_as_node', `
+	__(xorl $1,$1)
+        __(orb `$'(1<<regnum($1)), rcontext(tcr.node_regs_mask))
+')
+
+/* Declare register $1 as holding raw (non-node) data. */
+define(`mark_as_imm',`
+        __(andb `$'~(1<<regnum($1)), rcontext(tcr.node_regs_mask))
+')
+')
+
+/* Debug check: halt when the foreign stack pointer is not 8-aligned. */
+define(`check_cstack_alignment',`
+        new_macro_labels()
+        __(testb `$'7,rcontext(tcr.foreign_sp))
+        __(je macro_label(done))
+        __(hlt)
+macro_label(done):
+')
+
+        __ifdef(`WINDOWS')
+/* Windows commits stack guard pages one at a time, so a downward */
+/* move of more than a page ($1 = byte count) must touch each page in */
+/* order.  $2 is a scratch register; the harmless or-with-zero at each */
+/* page below the foreign stack pointer forces the page in.  Handles */
+/* moves of up to 16 pages. */
+define(`windows_cstack_probe',`
+        new_macro_labels()
+        __(cmp `$'0x1000,$1)
+        __(jb macro_label(done))
+        __(mov rcontext(tcr.foreign_sp),$2)
+        __(orl `$'0,-0x1000($2))
+        __(cmp `$'0x2000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x2000($2))
+        __(cmp `$'0x3000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x3000($2))
+        __(cmp `$'0x4000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x4000($2))
+        __(cmp `$'0x5000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x5000($2))
+        __(cmp `$'0x6000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x6000($2))
+        __(cmp `$'0x7000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x7000($2))
+        __(cmp `$'0x8000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x8000($2))
+        __(cmp `$'0x9000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0x9000($2))
+        __(cmp `$'0xa000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0xa000($2))
+        __(cmp `$'0xb000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0xb000($2))
+        __(cmp `$'0xc000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0xc000($2))
+        __(cmp `$'0xd000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0xd000($2))
+        __(cmp `$'0xe000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0xe000($2))
+        __(cmp `$'0xf000,$1)
+        __(jb macro_label(done))
+        __(orl `$'0,-0xf000($2))
+macro_label(done):      
+')
+
+
+        __endif                
+                        
Index: /branches/arm/lisp-kernel/x86-spentry32.s
===================================================================
--- /branches/arm/lisp-kernel/x86-spentry32.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-spentry32.s	(revision 13357)
@@ -0,0 +1,4812 @@
+	include(lisp.s)
+	_beginfile
+
+	.align 2
+/* Open a subprimitive: if a previous function is still open (its name */
+/* macro is defined) close it first, then align to an 8-byte boundary */
+/* and export the entry point as _SP concatenated with the given name. */
+define(`_spentry',`ifdef(`__func_name',`_endfn',`')
+        .p2align 3
+        _exportfn(_SP$1)
+')
+
+/* Close the subprimitive opened by the matching entry macro above. */
+define(`_endsubp',`
+        _endfn(_SP$1)
+')
+
+/* Tail-call the lisp builtin at vector index $1 with $2 arguments: */
+/* load the builtin vector from the nilreg-relative cell, set nargs, */
+/* fetch the function into %fname and jump through it. */
+define(`jump_builtin',`
+	ref_nrs_value(builtin_functions,%fname)
+	set_nargs($2)
+	vrefr(%fname,%fname,$1)
+	jump_fname()
+')
+
+/* Entered when an attempt is made to call an object that is not */
+/* callable; raises the not-callable trap.  The entry point is also */
+/* made visible to C code under the same name. */
+_spentry(bad_funcall)
+Xspentry_start:                 
+	.globl C(bad_funcall)
+__(tra(C(bad_funcall)))
+	__(uuo_error_not_callable)
+_endsubp(bad_funcall)
+
+/* %arg_z has overflowed by one bit.  Make a bignum with 1 (32-bit) digit. */
+_spentry(fix_overflow)
+C(fix_one_bit_overflow):
+        __(movl $one_digit_bignum_header,%imm0)
+	__(movd %imm0,%mm0)
+        __(Misc_Alloc_Fixed(`',aligned_bignum_size(1)))
+        __(unbox_fixnum(%arg_z,%imm0))
+	__(xor $0xc0000000,%imm0)	/* the overflow inverted the top 2 bits; flip them back */
+        __(mov %temp0,%arg_z)	/* freshly allocated bignum */
+        __(movl %imm0,misc_data_offset(%arg_z))	/* store the single digit */
+        __(ret)
+_endsubp(fix_overflow)
+
+/* %arg_y = vector, %arg_z = unscaled-idx */
+/* Fully checked uvector reference: verify the tag of %arg_y, verify */
+/* that %arg_z is a fixnum and in bounds, then dispatch on the subtag. */
+_spentry(misc_ref)
+	__(mov %arg_y,%imm0)
+	__(andb $tagmask,%imm0_b)	/* %arg_y must be a uvector */
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)
+	__(testb $fixnummask,%arg_z_b)	/* index must be a fixnum */
+	__(jne 1f)
+	__(movl misc_header_offset(%arg_y),%imm0)
+	__(xorb %imm0_b,%imm0_b)	/* clear subtag byte, leaving element count */
+	__(shrl $num_subtag_bits-fixnumshift,%imm0)	/* count as a fixnum */
+	__(cmpl %imm0,%arg_z)
+	__(jae 2f)	/* bounds check */
+	__(movb misc_subtag_offset(%arg_y),%imm0_b)	/* subtag selects handler */
+	__(jmp C(misc_ref_common))
+
+0:	__(uuo_error_reg_not_tag(Rarg_y,tag_misc))
+1:	__(uuo_error_reg_not_fixnum(Rarg_z))
+2:	__(uuo_error_vector_bounds(Rarg_z,Rarg_y))
+_endsubp(misc_ref)
+
+/* %imm0_b = subtag, %arg_y = vector, %arg_z = index. */
+/* Bounds/type-checking done in caller. */
+/* Dispatch through a 256-entry table indexed by subtag; each entry */
+/* handles one element representation (node, u8/s8, u16/s16, u32/s32, */
+/* bit, character, float) or signals an error for non-vector subtags. */
+_startfn(C(misc_ref_common))
+	__(movzbl %imm0_b,%imm0)
+	__(leal local_label(misc_ref_jmp)(,%imm0,4),%imm0)	/* table entry address */
+	__(jmp *(%imm0))
+	.p2align 2
+local_label(misc_ref_jmp):
+	/* 00-0f */
+        .long local_label(misc_ref_invalid) /* 00 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 01 cons  */
+        .long local_label(misc_ref_invalid) /* 02 nodeheader  */
+        .long local_label(misc_ref_invalid) /* 03 imm  */
+        .long local_label(misc_ref_invalid) /* 04 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 05 tra  */
+        .long local_label(misc_ref_invalid) /* 06 misc  */
+        .long local_label(misc_ref_u32) /* 07 bignum  */
+        .long local_label(misc_ref_invalid) /* 08 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 09 cons  */
+        .long local_label(misc_ref_node) /* 0a ratio  */
+        .long local_label(misc_ref_invalid) /* 0b imm  */
+        .long local_label(misc_ref_invalid) /* 0c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 0d tra  */
+        .long local_label(misc_ref_invalid) /* 0e misc  */
+        .long local_label(misc_ref_u32) /* 0f single_float  */
+        /* 10-1f  */
+        .long local_label(misc_ref_invalid) /* 10 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 11 cons  */
+        .long local_label(misc_ref_invalid) /* 12 nodeheader  */
+        .long local_label(misc_ref_invalid) /* 13 imm  */
+        .long local_label(misc_ref_invalid) /* 14 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 15 tra  */
+        .long local_label(misc_ref_invalid) /* 16 misc  */
+        .long local_label(misc_ref_u32) /* 17 double_float  */
+        .long local_label(misc_ref_invalid) /* 18 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 19 cons  */
+        .long local_label(misc_ref_node) /* 1a complex  */
+        .long local_label(misc_ref_invalid) /* 1b imm  */
+        .long local_label(misc_ref_invalid) /* 1c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 1d tra  */
+        .long local_label(misc_ref_invalid) /* 1e misc  */
+        .long local_label(misc_ref_u32) /* 1f macptr  */
+        /* 20-2f  */
+        .long local_label(misc_ref_invalid) /* 20 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 21 cons  */
+        .long local_label(misc_ref_node) /* 22 catch_frame  */
+        .long local_label(misc_ref_invalid) /* 23 imm  */
+        .long local_label(misc_ref_invalid) /* 24 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 25 tra  */
+        .long local_label(misc_ref_invalid) /* 26 misc  */
+        .long local_label(misc_ref_u32) /* 27 dead_macptr  */
+        .long local_label(misc_ref_invalid) /* 28 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 29 cons  */
+        .long local_label(misc_ref_function) /* 2a function  */
+        .long local_label(misc_ref_invalid) /* 2b imm  */
+        .long local_label(misc_ref_invalid) /* 2c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 2d tra  */
+        .long local_label(misc_ref_invalid) /* 2e misc  */
+        .long local_label(misc_ref_invalid) /* 2f immheader  */
+        /* 30-3f  */
+        .long local_label(misc_ref_invalid) /* 30 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 31 cons  */
+        .long local_label(misc_ref_node) /* 32 basic_stream  */
+        .long local_label(misc_ref_invalid) /* 33 imm  */
+        .long local_label(misc_ref_invalid) /* 34 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 35 tra  */
+        .long local_label(misc_ref_invalid) /* 36 misc  */
+        .long local_label(misc_ref_invalid) /* 37 immheader  */
+        .long local_label(misc_ref_invalid) /* 38 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 39 cons  */
+        .long local_label(misc_ref_node) /* 3a symbol  */
+        .long local_label(misc_ref_invalid) /* 3b imm  */
+        .long local_label(misc_ref_invalid) /* 3c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 3d tra  */
+        .long local_label(misc_ref_invalid) /* 3e misc  */
+        .long local_label(misc_ref_u32) /* 3f xcode_vector  */
+        /* 40-4f  */
+        .long local_label(misc_ref_invalid) /* 40 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 41 cons  */
+        .long local_label(misc_ref_node) /* 42 lock  */
+        .long local_label(misc_ref_invalid) /* 43 imm  */
+        .long local_label(misc_ref_invalid) /* 44 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 45 tra  */
+        .long local_label(misc_ref_invalid) /* 46 misc  */
+        .long local_label(misc_ref_invalid) /* 47 immheader  */
+        .long local_label(misc_ref_invalid) /* 48 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 49 cons  */
+        .long local_label(misc_ref_node) /* 4a hash_vector  */
+        .long local_label(misc_ref_invalid) /* 4b imm  */
+        .long local_label(misc_ref_invalid) /* 4c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 4d tra  */
+        .long local_label(misc_ref_invalid) /* 4e misc  */
+        .long local_label(misc_ref_invalid) /* 4f immheader  */
+        /* 50-5f  */
+        .long local_label(misc_ref_invalid) /* 50 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 51 cons  */
+        .long local_label(misc_ref_node) /* 52 pool  */
+        .long local_label(misc_ref_invalid) /* 53 imm  */
+        .long local_label(misc_ref_invalid) /* 54 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 55 tra  */
+        .long local_label(misc_ref_invalid) /* 56 misc  */
+        .long local_label(misc_ref_invalid) /* 57 immheader  */
+        .long local_label(misc_ref_invalid) /* 58 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 59 cons  */
+        .long local_label(misc_ref_node) /* 5a weak  */
+        .long local_label(misc_ref_invalid) /* 5b imm  */
+        .long local_label(misc_ref_invalid) /* 5c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 5d tra  */
+        .long local_label(misc_ref_invalid) /* 5e misc  */
+        .long local_label(misc_ref_invalid) /* 5f immheader  */
+        /* 60-6f  */
+        .long local_label(misc_ref_invalid) /* 60 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 61 cons  */
+        .long local_label(misc_ref_node) /* 62 package  */
+        .long local_label(misc_ref_invalid) /* 63 imm  */
+        .long local_label(misc_ref_invalid) /* 64 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 65 tra  */
+        .long local_label(misc_ref_invalid) /* 66 misc  */
+        .long local_label(misc_ref_invalid) /* 67 immheader  */
+        .long local_label(misc_ref_invalid) /* 68 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 69 cons  */
+        .long local_label(misc_ref_node) /* 6a slot_vector  */
+        .long local_label(misc_ref_invalid) /* 6b imm  */
+        .long local_label(misc_ref_invalid) /* 6c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 6d tra  */
+        .long local_label(misc_ref_invalid) /* 6e misc  */
+        .long local_label(misc_ref_invalid) /* 6f immheader  */
+        /* 70-7f  */
+        .long local_label(misc_ref_invalid) /* 70 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 71 cons  */
+        .long local_label(misc_ref_node) /* 72 instance  */
+        .long local_label(misc_ref_invalid) /* 73 imm  */
+        .long local_label(misc_ref_invalid) /* 74 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 75 tra  */
+        .long local_label(misc_ref_invalid) /* 76 misc  */
+        .long local_label(misc_ref_invalid) /* 77 immheader  */
+        .long local_label(misc_ref_invalid) /* 78 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 79 cons  */
+        .long local_label(misc_ref_node) /* 7a struct  */
+        .long local_label(misc_ref_invalid) /* 7b imm  */
+        .long local_label(misc_ref_invalid) /* 7c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 7d tra  */
+        .long local_label(misc_ref_invalid) /* 7e misc  */
+        .long local_label(misc_ref_invalid) /* 7f immheader  */
+        /* 80-8f  */
+        .long local_label(misc_ref_invalid) /* 80 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 81 cons  */
+        .long local_label(misc_ref_node) /* 82 istruct  */
+        .long local_label(misc_ref_invalid) /* 83 imm  */
+        .long local_label(misc_ref_invalid) /* 84 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 85 tra  */
+        .long local_label(misc_ref_invalid) /* 86 misc  */
+        .long local_label(misc_ref_invalid) /* 87 immheader  */
+        .long local_label(misc_ref_invalid) /* 88 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 89 cons  */
+        .long local_label(misc_ref_node) /* 8a value_cell  */
+        .long local_label(misc_ref_invalid) /* 8b imm  */
+        .long local_label(misc_ref_invalid) /* 8c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 8d tra  */
+        .long local_label(misc_ref_invalid) /* 8e misc  */
+        .long local_label(misc_ref_invalid) /* 8f immheader  */
+        /* 90-9f  */
+        .long local_label(misc_ref_invalid) /* 90 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 91 cons  */
+        .long local_label(misc_ref_node) /* 92 xfunction  */
+        .long local_label(misc_ref_invalid) /* 93 imm  */
+        .long local_label(misc_ref_invalid) /* 94 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 95 tra  */
+        .long local_label(misc_ref_invalid) /* 96 misc  */
+        .long local_label(misc_ref_invalid) /* 97 immheader  */
+        .long local_label(misc_ref_invalid) /* 98 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* 99 cons  */
+        .long local_label(misc_ref_node) /* 9a arrayH  */
+        .long local_label(misc_ref_invalid) /* 9b imm  */
+        .long local_label(misc_ref_invalid) /* 9c odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* 9d tra  */
+        .long local_label(misc_ref_invalid) /* 9e misc  */
+        .long local_label(misc_ref_invalid) /* 9f immheader  */
+        /* a0-af  */
+        .long local_label(misc_ref_invalid) /* a0 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* a1 cons  */
+        .long local_label(misc_ref_node) /* a2 vectorH  */
+        .long local_label(misc_ref_invalid) /* a3 imm  */
+        .long local_label(misc_ref_invalid) /* a4 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* a5 tra  */
+        .long local_label(misc_ref_invalid) /* a6 misc  */
+        .long local_label(misc_ref_single_float_vector) /* a7 sf_vector  */
+        .long local_label(misc_ref_invalid) /* a8 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* a9 cons  */
+        .long local_label(misc_ref_node) /* aa simple_vector  */
+        .long local_label(misc_ref_invalid) /* ab imm  */
+        .long local_label(misc_ref_invalid) /* ac odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* ad tra  */
+        .long local_label(misc_ref_invalid) /* ae misc  */
+        .long local_label(misc_ref_u32) /* af u32  */
+        /* b0-bf  */
+        .long local_label(misc_ref_invalid) /* b0 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* b1 cons  */
+        .long local_label(misc_ref_invalid) /* b2 nodeheader  */
+        .long local_label(misc_ref_invalid) /* b3 imm  */
+        .long local_label(misc_ref_invalid) /* b4 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* b5 tra  */
+        .long local_label(misc_ref_invalid) /* b6 misc  */
+        .long local_label(misc_ref_s32) /* b7 s32  */
+        .long local_label(misc_ref_invalid) /* b8 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* b9 cons  */
+        .long local_label(misc_ref_invalid) /* ba nodeheader  */
+        .long local_label(misc_ref_invalid) /* bb imm  */
+        .long local_label(misc_ref_invalid) /* bc odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* bd tra  */
+        .long local_label(misc_ref_invalid) /* be misc  */
+        .long local_label(misc_ref_fixnum_vector) /* bf fixnum_vector  */
+        /* c0-cf  */
+        .long local_label(misc_ref_invalid) /* c0 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* c1 cons  */
+        .long local_label(misc_ref_invalid) /* c2 nodeheader  */
+        .long local_label(misc_ref_invalid) /* c3 imm  */
+        .long local_label(misc_ref_invalid) /* c4 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* c5 tra  */
+        .long local_label(misc_ref_invalid) /* c6 misc  */
+        .long local_label(misc_ref_string) /* c7 simple_base_string  */
+        .long local_label(misc_ref_invalid) /* c8 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* c9 cons  */
+        .long local_label(misc_ref_invalid) /* ca nodeheader  */
+        .long local_label(misc_ref_invalid) /* cb imm  */
+        .long local_label(misc_ref_invalid) /* cc odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* cd tra  */
+        .long local_label(misc_ref_invalid) /* ce misc  */
+        .long local_label(misc_ref_u8) /* cf u8  */
+        /* d0-df  */
+        .long local_label(misc_ref_invalid) /* d0 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* d1 cons  */
+        .long local_label(misc_ref_invalid) /* d2 nodeheader  */
+        .long local_label(misc_ref_invalid) /* d3 imm  */
+        .long local_label(misc_ref_invalid) /* d4 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* d5 tra  */
+        .long local_label(misc_ref_invalid) /* d6 misc  */
+        .long local_label(misc_ref_s8)      /* d7 s8  */
+        .long local_label(misc_ref_invalid) /* d8 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* d9 cons  */
+        .long local_label(misc_ref_invalid) /* da nodeheader  */
+        .long local_label(misc_ref_invalid) /* db imm  */
+        .long local_label(misc_ref_invalid) /* dc odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* dd tra  */
+        .long local_label(misc_ref_invalid) /* de misc  */
+        .long local_label(misc_ref_invalid) /* df immheader  */
+        /* e0-ef  */
+        .long local_label(misc_ref_invalid) /* e0 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* e1 cons  */
+        .long local_label(misc_ref_invalid) /* e2 nodeheader  */
+        .long local_label(misc_ref_invalid) /* e3 imm  */
+        .long local_label(misc_ref_invalid) /* e4 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* e5 tra  */
+        .long local_label(misc_ref_invalid) /* e6 misc  */
+        .long local_label(misc_ref_u16) /* e7 u16  */
+        .long local_label(misc_ref_invalid) /* e8 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* e9 cons  */
+        .long local_label(misc_ref_invalid) /* ea nodeheader  */
+        .long local_label(misc_ref_invalid) /* eb imm  */
+        .long local_label(misc_ref_invalid) /* ec odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* ed tra  */
+        .long local_label(misc_ref_invalid) /* ee misc  */
+        .long local_label(misc_ref_s16) /* ef s16  */
+        /* f0-ff  */
+        .long local_label(misc_ref_invalid) /* f0 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* f1 cons  */
+        .long local_label(misc_ref_invalid) /* f2 nodeheader  */
+        .long local_label(misc_ref_invalid) /* f3 imm  */
+        .long local_label(misc_ref_invalid) /* f4 odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* f5 tra  */
+        .long local_label(misc_ref_invalid) /* f6 misc  */
+        .long local_label(misc_ref_double_float_vector) /* f7 df vector  */
+        .long local_label(misc_ref_invalid) /* f8 even_fixnum  */
+        .long local_label(misc_ref_invalid) /* f9 cons  */
+        .long local_label(misc_ref_invalid) /* fa nodeheader  */
+        .long local_label(misc_ref_invalid) /* fb imm  */
+        .long local_label(misc_ref_invalid) /* fc odd_fixnum  */
+        .long local_label(misc_ref_invalid) /* fd tra  */
+        .long local_label(misc_ref_invalid) /* fe misc  */
+        .long local_label(misc_ref_bit_vector) /* ff bit_vector  */
+
+/* Functions are funny.  The first N words are treated as */
+/* (UNSIGNED-BYTE 32), where N is the low 16 bits of the first word. */
+
+local_label(misc_ref_function):
+	__(movzwl misc_data_offset(%arg_y), %imm0)	/* N = low 16 bits of word 0 */
+	/* XXX bootstrapping */
+	__(btr $15,%imm0)
+	__(jnc 0f)
+	__(movl $0xffffff00,%temp0)
+	__(andl misc_header_offset(%arg_y),%temp0)
+	__(shr $num_subtag_bits-fixnumshift,%temp0)
+	__(shl $fixnumshift,%imm0)
+	__(subl %imm0,%temp0)
+	__(movl %temp0,%imm0)
+	__(shr $fixnumshift,%imm0)
+0:	
+	__(shl $fixnumshift,%imm0)
+	__(rcmpl(%arg_z,%imm0))
+	__(jb local_label(misc_ref_u32))	/* index is within the code words */
+local_label(misc_ref_node):
+	__(movl misc_data_offset(%arg_y,%arg_z),%arg_z)	/* boxed node: fetch directly */
+	__(ret)
+local_label(misc_ref_u32):
+	__(movl misc_data_offset(%arg_y,%arg_z),%imm0)	/* box as unsigned 32-bit */
+	__(jmp _SPmakeu32)
+local_label(misc_ref_s32):
+	__(movl misc_data_offset(%arg_y,%arg_z),%imm0)	/* box as signed 32-bit */
+	__(jmp _SPmakes32)
+local_label(misc_ref_single_float_vector):
+	__(movss misc_data_offset(%arg_y,%arg_z),%fp1)
+	__(movl $single_float_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,single_float.size))	/* box a fresh single-float */
+	__(movss %fp1,single_float.value(%arg_z))
+	__(ret)
+local_label(misc_ref_double_float_vector):
+	__(movsd misc_dfloat_offset(%arg_y,%arg_z,2),%fp1)	/* fixnum index scaled again for 8-byte elements */
+	__(movl $double_float_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,double_float.size))	/* box a fresh double-float */
+	__(movsd %fp1,double_float.value(%arg_z))
+	__(ret)
+local_label(misc_ref_fixnum_vector):
+	__(movl misc_data_offset(%arg_y,%arg_z),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_u8):
+	__(movl %arg_z,%imm0)
+	__(shr $2,%imm0)	/* unbox the index to get a byte offset */
+	__(movzbl misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_s8):
+	__(movl %arg_z,%imm0)
+	__(shr $2,%imm0)	/* unbox the index to get a byte offset */
+	__(movsbl misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_string):
+	__(movl %arg_z,%imm0)
+	__(movl misc_data_offset(%arg_y,%imm0),%imm0)	/* 32-bit character code */
+	__(shll $charcode_shift,%imm0)
+	__(leal subtag_character(%imm0),%arg_z)	/* tag as a character */
+	__(ret)
+local_label(misc_ref_u16):
+	__(movl %arg_z,%imm0)
+	__(shrl $1,%imm0)	/* fixnum index / 2 = 16-bit element offset */
+	__(movzwl misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_s16):
+	__(movl %arg_z,%imm0)
+	__(shrl $1,%imm0)	/* fixnum index / 2 = 16-bit element offset */
+	__(movswl misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_bit_vector):
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(btl %imm0,misc_data_offset(%arg_y))	/* carry flag = addressed bit */
+	__(setc %imm0_b)
+	__(movzbl %imm0_b,%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_invalid):
+	/* Not a vector subtag: build a frame and signal an error. */
+	__(pop %temp1)	/* return addr */
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+	__(push $XBADVEC)
+	__(push %temp1)
+	__(set_nargs(3))
+	__(jmp _SPksignalerr)
+_endfn(C(misc_ref_common))
+
+/* Like misc_ref, only the boxed subtag is in temp0. */
+_spentry(subtag_misc_ref)
+	__(mov %arg_y,%imm0)
+	__(and $tagmask,%imm0)	/* %arg_y must be a uvector */
+	__(cmp $tag_misc,%imm0)
+	__(jne 0f)
+	__(testb $fixnummask,%arg_z_b)	/* index must be a fixnum */
+	__(jne 1f)
+	__(movl misc_header_offset(%arg_y),%imm0)
+	__(xorb %imm0_b,%imm0_b)	/* clear subtag byte, leaving element count */
+	__(shrl $num_subtag_bits-fixnumshift,%imm0)	/* count as a fixnum */
+	__(cmp %imm0,%arg_z)
+	__(jae 2f)	/* bounds check */
+	__(unbox_fixnum(%temp0,%imm0))	/* caller-supplied subtag selects handler */
+	__(jmp C(misc_ref_common))
+0:	__(uuo_error_reg_not_tag(Rarg_y,tag_misc))
+1:	__(uuo_error_reg_not_fixnum(Rarg_z))
+2:	__(uuo_error_vector_bounds(Rarg_z,Rarg_y))
+_endsubp(subtag_misc_ref)
+
+/* Like misc_set, only the boxed subtag is in temp1. */
+_spentry(subtag_misc_set)
+	__(mov %temp0,%imm0)
+	__(andb $tagmask,%imm0_b)	/* %temp0 (vector) must be a uvector */
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)
+	__(mov %arg_y,%imm0)
+	__(testb $fixnummask,%imm0_b)	/* index in %arg_y must be a fixnum */
+	__(jne 1f)
+	__(movl misc_header_offset(%temp0),%imm0)
+	__(xorb %imm0_b,%imm0_b)	/* clear subtag byte, leaving element count */
+	__(shrl $num_subtag_bits-fixnumshift,%imm0)	/* count as a fixnum */
+	__(cmpl %imm0,%arg_y)
+	__(jae 2f)	/* bounds check */
+	__(unbox_fixnum(%temp1,%imm0))	/* caller-supplied subtag selects handler */
+	__(jmp C(misc_set_common))
+0:	__(uuo_error_reg_not_tag(Rtemp0,tag_misc))
+1:	__(uuo_error_reg_not_fixnum(Rarg_y))
+2:	__(uuo_error_vector_bounds(Rarg_y,Rtemp0))
+_endsubp(subtag_misc_set)
+
+/* %temp0 = vector, %arg_y = unscaled-idx, %arg_z = val */
+/* Fully checked uvector store: verify the tag of %temp0, verify that */
+/* %arg_y is a fixnum and in bounds, then dispatch on the subtag. */
+_spentry(misc_set)
+	__(mov %temp0,%imm0)
+	__(andb $tagmask,%imm0_b)	/* %temp0 must be a uvector */
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)
+	__(test $fixnummask,%arg_y)	/* index must be a fixnum */
+	__(jne 1f)
+	__(movl misc_header_offset(%temp0),%imm0)
+	__(xorb %imm0_b,%imm0_b)	/* clear subtag byte, leaving element count */
+	__(shrl $num_subtag_bits-fixnumshift,%imm0)	/* count as a fixnum */
+	__(cmpl %imm0,%arg_y)
+	__(jae 2f)	/* bounds check */
+	__(xorl %imm0,%imm0)
+	__(movb misc_subtag_offset(%temp0),%imm0_b)	/* subtag selects handler */
+	__(jmp C(misc_set_common))
+0:	__(uuo_error_reg_not_tag(Rtemp0,tag_misc))
+1:	__(uuo_error_reg_not_fixnum(Rarg_y))
+2:	__(uuo_error_vector_bounds(Rarg_y,Rtemp0))
+_endsubp(misc_set)
+
+/* imm0_b = subtag, %temp0 = vector, %arg_y = index, %arg_z = value */
+_startfn(C(misc_set_common))
+	__(movzbl %imm0_b,%imm0)
+	__(leal local_label(misc_set_jmp)(,%imm0,4),%imm0)
+	__(jmp *(%imm0))
+	.p2align 2
+local_label(misc_set_jmp):
+	/* 00-0f */
+        .long local_label(misc_set_invalid) /* 00 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 01 cons  */
+        .long local_label(misc_set_invalid) /* 02 nodeheader  */
+        .long local_label(misc_set_invalid) /* 03 imm  */
+        .long local_label(misc_set_invalid) /* 04 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 05 tra  */
+        .long local_label(misc_set_invalid) /* 06 misc  */
+        .long local_label(misc_set_u32) /* 07 bignum  */
+        .long local_label(misc_set_invalid) /* 08 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 09 cons  */
+        .long _SPgvset /* 0a ratio  */
+        .long local_label(misc_set_invalid) /* 0b imm  */
+        .long local_label(misc_set_invalid) /* 0c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 0d tra  */
+        .long local_label(misc_set_invalid) /* 0e misc  */
+        .long local_label(misc_set_u32) /* 0f single_float  */
+        /* 10-1f  */
+        .long local_label(misc_set_invalid) /* 10 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 11 cons  */
+        .long local_label(misc_set_invalid) /* 12 nodeheader  */
+        .long local_label(misc_set_invalid) /* 13 imm  */
+        .long local_label(misc_set_invalid) /* 14 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 15 tra  */
+        .long local_label(misc_set_invalid) /* 16 misc  */
+        .long local_label(misc_set_u32) /* 17 double_float  */
+        .long local_label(misc_set_invalid) /* 18 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 19 cons  */
+        .long _SPgvset /* 1a complex  */
+        .long local_label(misc_set_invalid) /* 1b imm  */
+        .long local_label(misc_set_invalid) /* 1c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 1d tra  */
+        .long local_label(misc_set_invalid) /* 1e misc  */
+        .long local_label(misc_set_u32) /* 1f macptr  */
+        /* 20-2f  */
+        .long local_label(misc_set_invalid) /* 20 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 21 cons  */
+        .long _SPgvset /* 22 catch_frame  */
+        .long local_label(misc_set_invalid) /* 23 imm  */
+        .long local_label(misc_set_invalid) /* 24 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 25 tra  */
+        .long local_label(misc_set_invalid) /* 26 misc  */
+        .long local_label(misc_set_u32) /* 27 dead_macptr  */
+        .long local_label(misc_set_invalid) /* 28 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 29 cons  */
+        .long local_label(misc_set_function) /* 2a function  */
+        .long local_label(misc_set_invalid) /* 2b imm  */
+        .long local_label(misc_set_invalid) /* 2c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 2d tra  */
+        .long local_label(misc_set_invalid) /* 2e misc  */
+        .long local_label(misc_set_invalid) /* 2f immheader  */
+        /* 30-3f  */
+        .long local_label(misc_set_invalid) /* 30 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 31 cons  */
+        .long _SPgvset /* 32 basic_stream  */
+        .long local_label(misc_set_invalid) /* 33 imm  */
+        .long local_label(misc_set_invalid) /* 34 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 35 tra  */
+        .long local_label(misc_set_invalid) /* 36 misc  */
+        .long local_label(misc_set_invalid) /* 37 immheader  */
+        .long local_label(misc_set_invalid) /* 38 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 39 cons  */
+        .long _SPgvset /* 3a symbol  */
+        .long local_label(misc_set_invalid) /* 3b imm  */
+        .long local_label(misc_set_invalid) /* 3c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 3d tra  */
+        .long local_label(misc_set_invalid) /* 3e misc  */
+        .long local_label(misc_set_u32) /* 3f xcode_vector  */
+        /* 40-4f  */
+        .long local_label(misc_set_invalid) /* 40 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 41 cons  */
+        .long _SPgvset /* 42 lock  */
+        .long local_label(misc_set_invalid) /* 43 imm  */
+        .long local_label(misc_set_invalid) /* 44 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 45 tra  */
+        .long local_label(misc_set_invalid) /* 46 misc  */
+        .long local_label(misc_set_invalid) /* 47 immheader  */
+        .long local_label(misc_set_invalid) /* 48 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 49 cons  */
+        .long _SPgvset /* 4a hash_vector  */
+        .long local_label(misc_set_invalid) /* 4b imm  */
+        .long local_label(misc_set_invalid) /* 4c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 4d tra  */
+        .long local_label(misc_set_invalid) /* 4e misc  */
+        .long local_label(misc_set_invalid) /* 4f immheader  */
+        /* 50-5f  */
+        .long local_label(misc_set_invalid) /* 50 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 51 cons  */
+        .long _SPgvset /* 52 pool  */
+        .long local_label(misc_set_invalid) /* 53 imm  */
+        .long local_label(misc_set_invalid) /* 54 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 55 tra  */
+        .long local_label(misc_set_invalid) /* 56 misc  */
+        .long local_label(misc_set_invalid) /* 57 immheader  */
+        .long local_label(misc_set_invalid) /* 58 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 59 cons  */
+        .long _SPgvset /* 5a weak  */
+        .long local_label(misc_set_invalid) /* 5b imm  */
+        .long local_label(misc_set_invalid) /* 5c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 5d tra  */
+        .long local_label(misc_set_invalid) /* 5e misc  */
+        .long local_label(misc_set_invalid) /* 5f immheader  */
+        /* 60-6f  */
+        .long local_label(misc_set_invalid) /* 60 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 61 cons  */
+        .long _SPgvset /* 62 package  */
+        .long local_label(misc_set_invalid) /* 63 imm  */
+        .long local_label(misc_set_invalid) /* 64 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 65 tra  */
+        .long local_label(misc_set_invalid) /* 66 misc  */
+        .long local_label(misc_set_invalid) /* 67 immheader  */
+        .long local_label(misc_set_invalid) /* 68 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 69 cons  */
+        .long _SPgvset /* 6a slot_vector  */
+        .long local_label(misc_set_invalid) /* 6b imm  */
+        .long local_label(misc_set_invalid) /* 6c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 6d tra  */
+        .long local_label(misc_set_invalid) /* 6e misc  */
+        .long local_label(misc_set_invalid) /* 6f immheader  */
+        /* 70-7f  */
+        .long local_label(misc_set_invalid) /* 70 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 71 cons  */
+        .long _SPgvset /* 72 instance  */
+        .long local_label(misc_set_invalid) /* 73 imm  */
+        .long local_label(misc_set_invalid) /* 74 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 75 tra  */
+        .long local_label(misc_set_invalid) /* 76 misc  */
+        .long local_label(misc_set_invalid) /* 77 immheader  */
+        .long local_label(misc_set_invalid) /* 78 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 79 cons  */
+        .long _SPgvset /* 7a struct  */
+        .long local_label(misc_set_invalid) /* 7b imm  */
+        .long local_label(misc_set_invalid) /* 7c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 7d tra  */
+        .long local_label(misc_set_invalid) /* 7e misc  */
+        .long local_label(misc_set_invalid) /* 7f immheader  */
+        /* 80-8f  */
+        .long local_label(misc_set_invalid) /* 80 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 81 cons  */
+        .long _SPgvset /* 82 istruct  */
+        .long local_label(misc_set_invalid) /* 83 imm  */
+        .long local_label(misc_set_invalid) /* 84 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 85 tra  */
+        .long local_label(misc_set_invalid) /* 86 misc  */
+        .long local_label(misc_set_invalid) /* 87 immheader  */
+        .long local_label(misc_set_invalid) /* 88 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 89 cons  */
+        .long _SPgvset /* 8a value_cell  */
+        .long local_label(misc_set_invalid) /* 8b imm  */
+        .long local_label(misc_set_invalid) /* 8c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 8d tra  */
+        .long local_label(misc_set_invalid) /* 8e misc  */
+        .long local_label(misc_set_invalid) /* 8f immheader  */
+        /* 90-9f  */
+        .long local_label(misc_set_invalid) /* 90 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 91 cons  */
+        .long _SPgvset /* 92 xfunction  */
+        .long local_label(misc_set_invalid) /* 93 imm  */
+        .long local_label(misc_set_invalid) /* 94 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 95 tra  */
+        .long local_label(misc_set_invalid) /* 96 misc  */
+        .long local_label(misc_set_invalid) /* 97 immheader  */
+        .long local_label(misc_set_invalid) /* 98 even_fixnum  */
+        .long local_label(misc_set_invalid) /* 99 cons  */
+        .long _SPgvset /* 9a arrayH  */
+        .long local_label(misc_set_invalid) /* 9b imm  */
+        .long local_label(misc_set_invalid) /* 9c odd_fixnum  */
+        .long local_label(misc_set_invalid) /* 9d tra  */
+        .long local_label(misc_set_invalid) /* 9e misc  */
+        .long local_label(misc_set_invalid) /* 9f immheader  */
+        /* a0-af  */
+        .long local_label(misc_set_invalid) /* a0 even_fixnum  */
+        .long local_label(misc_set_invalid) /* a1 cons  */
+        .long _SPgvset /* a2 vectorH  */
+        .long local_label(misc_set_invalid) /* a3 imm  */
+        .long local_label(misc_set_invalid) /* a4 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* a5 tra  */
+        .long local_label(misc_set_invalid) /* a6 misc  */
+        .long local_label(misc_set_single_float_vector) /* a7 sf_vector  */
+        .long local_label(misc_set_invalid) /* a8 even_fixnum  */
+        .long local_label(misc_set_invalid) /* a9 cons  */
+        .long _SPgvset /* aa simple_vector  */
+        .long local_label(misc_set_invalid) /* ab imm  */
+        .long local_label(misc_set_invalid) /* ac odd_fixnum  */
+        .long local_label(misc_set_invalid) /* ad tra  */
+        .long local_label(misc_set_invalid) /* ae misc  */
+        .long local_label(misc_set_u32) /* af u32  */
+        /* b0-bf  */
+        .long local_label(misc_set_invalid) /* b0 even_fixnum  */
+        .long local_label(misc_set_invalid) /* b1 cons  */
+        .long local_label(misc_set_invalid) /* b2 nodeheader  */
+        .long local_label(misc_set_invalid) /* b3 imm  */
+        .long local_label(misc_set_invalid) /* b4 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* b5 tra  */
+        .long local_label(misc_set_invalid) /* b6 misc  */
+        .long local_label(misc_set_s32) /* b7 s32  */
+        .long local_label(misc_set_invalid) /* b8 even_fixnum  */
+        .long local_label(misc_set_invalid) /* b9 cons  */
+        .long local_label(misc_set_invalid) /* ba nodeheader  */
+        .long local_label(misc_set_invalid) /* bb imm  */
+        .long local_label(misc_set_invalid) /* bc odd_fixnum  */
+        .long local_label(misc_set_invalid) /* bd tra  */
+        .long local_label(misc_set_invalid) /* be misc  */
+        .long local_label(misc_set_fixnum_vector) /* bf fixnum_vector  */
+        /* c0-cf  */
+        .long local_label(misc_set_invalid) /* c0 even_fixnum  */
+        .long local_label(misc_set_invalid) /* c1 cons  */
+        .long local_label(misc_set_invalid) /* c2 nodeheader  */
+        .long local_label(misc_set_invalid) /* c3 imm  */
+        .long local_label(misc_set_invalid) /* c4 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* c5 tra  */
+        .long local_label(misc_set_invalid) /* c6 misc  */
+        .long local_label(misc_set_string) /* c7 simple_base_string  */
+        .long local_label(misc_set_invalid) /* c8 even_fixnum  */
+        .long local_label(misc_set_invalid) /* c9 cons  */
+        .long local_label(misc_set_invalid) /* ca nodeheader  */
+        .long local_label(misc_set_invalid) /* cb imm  */
+        .long local_label(misc_set_invalid) /* cc odd_fixnum  */
+        .long local_label(misc_set_invalid) /* cd tra  */
+        .long local_label(misc_set_invalid) /* ce misc  */
+        .long local_label(misc_set_u8) /* cf u8  */
+        /* d0-df  */
+        .long local_label(misc_set_invalid) /* d0 even_fixnum  */
+        .long local_label(misc_set_invalid) /* d1 cons  */
+        .long local_label(misc_set_invalid) /* d2 nodeheader  */
+        .long local_label(misc_set_invalid) /* d3 imm  */
+        .long local_label(misc_set_invalid) /* d4 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* d5 tra  */
+        .long local_label(misc_set_invalid) /* d6 misc  */
+        .long local_label(misc_set_s8)      /* d7 s8  */
+        .long local_label(misc_set_invalid) /* d8 even_fixnum  */
+        .long local_label(misc_set_invalid) /* d9 cons  */
+        .long local_label(misc_set_invalid) /* da nodeheader  */
+        .long local_label(misc_set_invalid) /* db imm  */
+        .long local_label(misc_set_invalid) /* dc odd_fixnum  */
+        .long local_label(misc_set_invalid) /* dd tra  */
+        .long local_label(misc_set_invalid) /* de misc  */
+        .long local_label(misc_set_invalid) /* df immheader  */
+        /* e0-ef  */
+        .long local_label(misc_set_invalid) /* e0 even_fixnum  */
+        .long local_label(misc_set_invalid) /* e1 cons  */
+        .long local_label(misc_set_invalid) /* e2 nodeheader  */
+        .long local_label(misc_set_invalid) /* e3 imm  */
+        .long local_label(misc_set_invalid) /* e4 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* e5 tra  */
+        .long local_label(misc_set_invalid) /* e6 misc  */
+        .long local_label(misc_set_u16) /* e7 u16  */
+        .long local_label(misc_set_invalid) /* e8 even_fixnum  */
+        .long local_label(misc_set_invalid) /* e9 cons  */
+        .long local_label(misc_set_invalid) /* ea nodeheader  */
+        .long local_label(misc_set_invalid) /* eb imm  */
+        .long local_label(misc_set_invalid) /* ec odd_fixnum  */
+        .long local_label(misc_set_invalid) /* ed tra  */
+        .long local_label(misc_set_invalid) /* ee misc  */
+        .long local_label(misc_set_s16) /* ef s16  */
+        /* f0-ff  */
+        .long local_label(misc_set_invalid) /* f0 even_fixnum  */
+        .long local_label(misc_set_invalid) /* f1 cons  */
+        .long local_label(misc_set_invalid) /* f2 nodeheader  */
+        .long local_label(misc_set_invalid) /* f3 imm  */
+        .long local_label(misc_set_invalid) /* f4 odd_fixnum  */
+        .long local_label(misc_set_invalid) /* f5 tra  */
+        .long local_label(misc_set_invalid) /* f6 misc  */
+        .long local_label(misc_set_double_float_vector) /* f7 df vector  */
+        .long local_label(misc_set_invalid) /* f8 even_fixnum  */
+        .long local_label(misc_set_invalid) /* f9 cons  */
+        .long local_label(misc_set_invalid) /* fa nodeheader  */
+        .long local_label(misc_set_invalid) /* fb imm  */
+        .long local_label(misc_set_invalid) /* fc odd_fixnum  */
+        .long local_label(misc_set_invalid) /* fd tra  */
+        .long local_label(misc_set_invalid) /* fe misc  */
+        .long local_label(misc_set_bit_vector) /* ff bit_vector  */
+
+local_label(misc_set_function):
+	/* Functions are funny: the first N words are treated as */
+	/* (UNSIGNED-BYTE 32), where N is the low 16 bits of the first word. */
+	__(movzwl misc_data_offset(%temp0),%imm0)
+	/* XXX bootstrapping */
+	__(btr $15,%imm0)
+	__(jnc 0f)
+	__(movl $0xffffff00,%temp1)
+	__(andl misc_header_offset(%temp0),%temp1)
+	__(shr $num_subtag_bits-fixnumshift,%temp1)
+	__(shl $fixnumshift,%imm0)
+	__(subl %imm0,%temp1)
+	__(movl %temp1,%imm0)
+	__(shr $fixnumshift,%imm0)
+0:
+	__(shl $fixnumshift,%imm0)
+	__(rcmpl(%arg_y,%imm0))
+	__(jae _SPgvset)
+local_label(misc_set_u32):
+	/* Either a non-negative fixnum, a positive one-digit bignum, or */
+	/* a two-digit bignum whose sign-digit is 0 is OK. */
+	__(movl $~(target_most_positive_fixnum <<fixnumshift),%imm0)
+	__(test %arg_z,%imm0)
+	__(movl %arg_z,%imm0)
+	__(jne 1f)
+	__(sarl $fixnumshift,%imm0)
+	__(jmp 9f)
+1:	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movl misc_header_offset(%arg_z),%imm0)
+	__(cmpl $two_digit_bignum_header,%imm0)
+	__(je 3f)
+	__(cmpl $one_digit_bignum_header,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(movl misc_data_offset(%arg_z),%imm0)
+	__(testl %imm0,%imm0)
+	__(js local_label(misc_set_bad))
+	__(jmp 9f)
+3:	__(movl misc_data_offset(%arg_z),%imm0)
+	__(cmpl $0,misc_data_offset+4(%arg_z))
+	__(jne local_label(misc_set_bad))
+9:	__(movl %imm0,misc_data_offset(%temp0,%arg_y))
+	__(ret)
+local_label(misc_set_s32):
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(testb $fixnummask,%arg_z_b)
+	__(je 9f)
+1:	__(movb %arg_z_b,%imm0_b)
+	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movl misc_header_offset(%arg_z),%imm0)
+	__(cmpl $one_digit_bignum_header,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(movl misc_data_offset(%arg_z),%imm0)
+9:	__(movl %imm0,misc_data_offset(%temp0,%arg_y))
+	__(ret)
+local_label(misc_set_bad):
+	__(movl %arg_z,%arg_y)
+	__(movl %temp0,%arg_z)
+	__(pop %temp1)	/* return addr */
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+	__(push $XNOTELT)
+	__(push %temp1)
+	__(set_nargs(3))
+	__(jmp _SPksignalerr)
+local_label(misc_set_single_float_vector):
+	__(extract_lisptag(%arg_z,%imm0))
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_single_float,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movl single_float.value(%arg_z),%imm0)
+	__(movl %imm0,misc_data_offset(%temp0,%arg_y))
+	__(ret)
+local_label(misc_set_double_float_vector):
+	__(extract_lisptag(%arg_z,%imm0))
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_double_float,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movsd double_float.value(%arg_z),%fp0)
+	__(movsd %fp0,misc_dfloat_offset(%temp0,%arg_y,2))
+	__(ret)
+local_label(misc_set_fixnum_vector):
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(movl %imm0,misc_data_offset(%temp0,%arg_y))
+	__(ret)
+local_label(misc_set_u8):
+	__(testl $~(0xff<<fixnumshift),%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(movl %arg_z,%arg_y)
+	__(shll $8-fixnumshift,%arg_z)
+	__(movb %arg_z_bh,misc_data_offset(%temp0,%imm0))
+	__(movl %arg_y,%arg_z)
+	__(ret)
+local_label(misc_set_s8):
+	__(movl %arg_z,%imm0)
+	__(shll $32-(8+fixnumshift),%imm0)
+	__(sarl $32-(8+fixnumshift),%imm0)
+	__(cmpl %arg_z,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(movl %arg_z,%arg_z)
+	__(shll $8-fixnumshift,%arg_z)
+	__(movb %arg_z_bh,misc_data_offset(%temp0,%imm0))
+	__(movl %arg_y,%arg_z)
+	__(ret)
+local_label(misc_set_string):
+	__(cmpb $subtag_character,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(movl %arg_z,%imm0)
+	__(shrl $charcode_shift,%imm0)
+	__(movl %imm0,misc_data_offset(%temp0,%arg_y))
+	__(ret)
+local_label(misc_set_u16):
+	__(testl $~(0xffff<<fixnumshift),%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(movl %arg_y,%imm0)
+	__(shrl $1,%imm0)
+	__(mark_as_imm(%temp1))
+	__(unbox_fixnum(%arg_z,%temp1))
+	__(movw %temp1_w,misc_data_offset(%temp0,%imm0))
+	__(mark_as_node(%temp1))
+	__(ret)
+local_label(misc_set_s16):
+	__(movl %arg_z,%imm0)
+	__(shll $32-(16+fixnumshift),%imm0)
+	__(sarl $32-(16+fixnumshift),%imm0)
+	__(cmpl %arg_z,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(movl %arg_y,%imm0)
+	__(shrl $1,%imm0)
+	__(mark_as_imm(%temp1))
+	__(unbox_fixnum(%arg_z,%temp1))
+	__(movw %temp1_w,misc_data_offset(%temp0,%imm0))
+	__(mark_as_node(%temp1))
+	__(ret)
+local_label(misc_set_bit_vector):
+	__(testl $~fixnumone,%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(testb %arg_z_b,%arg_z_b)
+	__(je local_label(misc_set_clr_bit))
+local_label(misc_set_set_bit):
+	__(btsl %imm0,misc_data_offset(%temp0))
+	__(ret)
+local_label(misc_set_clr_bit):
+	__(btrl %imm0,misc_data_offset(%temp0))
+	__(ret)
+local_label(misc_set_invalid):
+	__(pop %temp1)	/* return addr */
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+	__(push $XSETBADVEC)
+	__(push %temp0)
+	__(push %temp1)
+	__(set_nargs(4))
+	__(jmp _SPksignalerr)
+_endfn(C(misc_set_common))
+
/* Return a single value through the "expects multiple values" return */
/* convention.  On entry the caller's return address is at (%esp) and */
/* the value is in %arg_z: load the return address, overwrite that */
/* stack slot with the value (so it sits on the stack as the one */
/* returned value), set nargs to 1, and jump back. */
_spentry(Fret1valn)
	.globl C(ret1valn)
__(tra(C(ret1valn)))
        __(mov (%esp),%ra0)	/* caller's return address */
        __(mov %arg_z,(%esp))	/* replace it with the single value */
	__(set_nargs(1))
	__(jmp *%ra0)
_endsubp(Fret1valn)
+
/* Return %nargs stacked values to the caller.  If the caller's saved */
/* return address is not ret1val_addr, the caller expects exactly one */
/* value: return NIL if there are none, else the deepest stacked value */
/* (the first value), and pop the frame normally.  Otherwise copy the */
/* values down over this frame before returning. */
_spentry(nvalret)
	.globl C(nvalret)
C(nvalret):
	__(ref_global(ret1val_addr,%temp0))
	__(cmpl lisp_frame.savera0(%ebp),%temp0)
	__(je 1f)
	/* Single-value case: NIL if %nargs is 0, else the first value. */
	__(test %nargs,%nargs)
	__(movl $nil_value,%arg_z)
	__(cmovnel -node_size(%esp,%nargs),%arg_z)
	__(leave)
	__(ret)

/* actually need to return values; always need to copy. */
1:	__(lea 2*node_size(%ebp),%imm0)	/* just above saved ra */
	__(pushl (%imm0))		/* re-push caller's return address */
	__(movl 0(%ebp),%ebp)		/* pop our frame */
	__(addl $node_size,%imm0)	/* destination (top-down) */
	__(lea node_size(%esp,%nargs),%temp0)	/* source (top-down) */
	__(xorl %arg_y,%arg_y)		/* bytes copied so far */
	__(jmp 3f)
2:	__(movl -node_size(%temp0),%arg_z)
	__(subl $node_size,%temp0)
	__(addl $node_size,%arg_y)
	__(movl %arg_z,-node_size(%imm0))
	__(subl $node_size,%imm0)
3:	__(cmpl %arg_y,%nargs)
	__(jne 2b)
	__(pop %ra0)
	__(movl %imm0,%esp)		/* %esp now points at the copied values */
	__(jmp *%ra0)
_endsubp(nvalret)
+
/* Tail-call through a function name (see the jump_fname macro for */
/* the register conventions). */
_spentry(jmpsym)
	__(jump_fname())
_endsubp(jmpsym)
+
/* Tail-call the known function in %temp0: install it as the current */
/* function register %fn and jump to it. */
_spentry(jmpnfn)
	__(mov %temp0,%fn)
	__(jmp *%fn)
_endsubp(jmpnfn)
+
/* FUNCALL: dispatch on the callable object (symbol or function); */
/* see the do_funcall macro for the details and register conventions. */
_spentry(funcall)
	__(do_funcall())
_endsubp(funcall)
+
/* Make a lisp integer (fixnum or one-digit bignum) from the value in %imm0 */
/* Result in %arg_z. */
_spentry(makes32)
	__(imull $fixnumone,%imm0,%arg_z)	/* result is fixnum-tagged */
	__(jno 0f)				/* but may have overflowed */
	/* Fixnum overflow: box the raw 32-bit value in a one-digit */
	/* bignum.  Keep the value in %mm1 across the allocation. */
	__(movd %imm0,%mm1)
	__(movl $one_digit_bignum_header,%imm0)
	__(movd %imm0,%mm0)
	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(1)))
	__(movd %mm1,misc_data_offset(%arg_z))
0:	__(repret)
_endsubp(makes32)
+
/* Make a lisp integer out of the unboxed 64-bit word in %mm0. */
/* This is a little clumsy, but the alternative requires callers to */
/* have already marked %edx as an imm reg (or else store it in memory */
/* somewhere), and I'm nervous about */
/* splitting up the mark-as-imm/mark-as-node between two separate */
/* pieces of code. */
/* Result in %arg_z. */
_spentry(makes64)
        __(movq %mm0,%mm2)	/* save the full 64-bit value */
        __(pshufw $0x4e,%mm0,%mm1)      /* swap hi/lo halves */
        __(psrad $31,%mm0)      /* propagate sign */
        __(pcmpeqd %mm0,%mm1)	/* all ones if equal */
        __(movd %mm1,%imm0)
        __(cmpb $-1,%imm0_b)    /* upper half just sign extension? */
        __(jne 1f)
	/* Value fits in 32 bits: fixnum or one-digit bignum. */
        __(movd %mm2,%imm0)
	__(jmp _SPmakes32)
	/* Needs all 64 bits: box it in a two-digit bignum. */
1:      __(movl $two_digit_bignum_header,%imm0)
        __(movd %imm0,%mm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(2)))
        __(movq %mm2,misc_data_offset(%arg_z))
        __(ret)
_endsubp(makes64)
+
/* Make a system call via SYSENTER.  The (fixnum) syscall number is in */
/* %arg_z.  Saves the lisp node registers and vsp/ebp in the TCR, marks */
/* the thread as being in foreign code, and runs the call on the */
/* foreign stack; restores lisp context afterwards.  The raw result is */
/* left in %eax; if the call failed (carry set on return), %eax is */
/* negated so the caller sees a negative error code. */
_spentry(syscall)
	/* Save lisp registers */
	__(push %ebp)
	__(movl %esp,%ebp)
	__(push %temp0)
        __(push %temp1)
        __(push %arg_y)
        __(push %arg_z)
        __(push %fn)
	__(movl %esp,rcontext(tcr.save_vsp))
	__(movl %ebp,rcontext(tcr.save_ebp))
	__(movl $TCR_STATE_FOREIGN,rcontext(tcr.valence))
	__(movl rcontext(tcr.foreign_sp),%esp)
	/* preserve state of direction flag */
	__(pushfl)
	__(popl rcontext(tcr.save_eflags))
	__(cld)
	__(emms)		/* empty MMX state before foreign code */
	__(pop %ebp)		/* backlink */
        __(lea 15(%esp),%edx)
        __(andl $-16,%edx)	/* 16-byte-align the foreign stack */
        __(movl %edx,%esp)
	__(unbox_fixnum(%arg_z,%eax))	/* syscall number */
	/* SYSEXIT resumes at %edx with %esp = %ecx. */
	__(movl $local_label(back_from_sysenter),%edx)
	__(push %edx)
	__(movl %esp,%ecx)
	__(sysenter)
local_label(back_from_sysenter):
	__(jnc 0f)
	__(neg %eax)		/* carry set: return negated error code */
0:	
	__(movl %ebp,%esp)
	__(movl %esp,rcontext(tcr.foreign_sp))
	/* Re-enter lisp: clear node registers, restore flags/stacks. */
	__(clr %arg_z)
	__(clr %arg_y)
	__(clr %temp1)
	__(clr %temp0)
	__(clr %fn)
	__(pxor %fpzero,%fpzero)
	__(pushl rcontext(tcr.save_eflags))
	__(popfl)
	__(movl rcontext(tcr.save_vsp),%esp)
	__(movl rcontext(tcr.save_ebp),%ebp)
	__(movl $TCR_STATE_LISP,rcontext(tcr.valence))
        __(pop %fn)
        __(pop %arg_z)
        __(pop %arg_y)
        __(pop %temp1)
	__(check_pending_interrupt(%temp0))
	__(pop %temp0)
	__(leave)
	__(ret)
_endsubp(syscall)
+
/* Make system call that returns a doubleword result in %edx:%eax and */
/* copy the result into %mm0. */
/* Same save/restore protocol as _SPsyscall above, but invokes the */
/* kernel with int $0x80.  On failure (carry set), %eax is negated and */
/* %edx set to -1 so the 64-bit result is a negative error code. */
_spentry(syscall2)
	/* Save lisp registers */
	__(push %ebp)
	__(movl %esp,%ebp)
	__(push %temp0)
        __(push %temp1)
        __(push %arg_y)
        __(push %arg_z)
        __(push %fn)
	__(movl %esp,rcontext(tcr.save_vsp))
	__(movl %ebp,rcontext(tcr.save_ebp))
	__(movl $TCR_STATE_FOREIGN,rcontext(tcr.valence))
	__(movl rcontext(tcr.foreign_sp),%esp)
	/* preserve state of direction flag */
	__(pushfl)
	__(popl rcontext(tcr.save_eflags))
	__(cld)
	__(emms)		/* empty MMX state before foreign code */
	__(pop %ebp)		/* backlink */
        __(lea 15(%esp),%edx)
        __(andl $-16,%edx)	/* 16-byte-align the foreign stack */
        __(movl %edx,%esp)
	__(unbox_fixnum(%arg_z,%eax))	/* syscall number */
	__(pushl $local_label(back_from_syscall))
	__(int $0x80)
local_label(back_from_syscall):
	__(jnc 0f)
	__(neg %eax)
	__(movl $-1,%edx)	/* sign-extend the error code */
0:
	/* just use memory rather than screwing around with */
	/* movd %eax,%mm0, movd %edx,%mm1, psllq $32,%mm1, por %mm1,%mm0 */
	__(push %edx)
	__(push %eax)
	__(movq (%esp),%mm0)	/* 64-bit result now in %mm0 */
	__(movl %ebp,%esp)
	__(movl %esp,rcontext(tcr.foreign_sp))
	/* Re-enter lisp: clear node registers, restore flags/stacks. */
	__(clr %arg_z)
	__(clr %arg_y)
	__(clr %temp1)
	__(clr %temp0)
	__(clr %fn)
	__(pxor %fpzero,%fpzero)
	__(pushl rcontext(tcr.save_eflags))
	__(popf)
	__(movl rcontext(tcr.save_vsp),%esp)
	__(movl rcontext(tcr.save_ebp),%ebp)
	__(movl $TCR_STATE_LISP,rcontext(tcr.valence))
        __(pop %fn)
        __(pop %arg_z)
        __(pop %arg_y)
        __(pop %temp1)
	__(check_pending_interrupt(%temp0))
	__(pop %temp0)
	__(leave)
	__(ret)
_endsubp(syscall2)
+
+
/* Make a catch frame for a single-value CATCH (the 0 argument to */
/* nMake_Catch distinguishes it from the multiple-value case in */
/* _SPmkcatchmv; see the nMake_Catch macro for register conventions). */
_spentry(mkcatch1v)
	__(nMake_Catch(0))
	__(ret)
_endsubp(mkcatch1v)
+
/* Make an unwind-protect frame: a catch frame whose tag is $undefined. */
/* NOTE(review): the unwinders (_SPnthrow1value/_SPnthrowvalues) detect */
/* unwind-protect frames by comparing the tag against unbound_marker — */
/* presumably the same value; confirm against the constants file. */
/* Returns through %ra0. */
_spentry(mkunwind)
	__(movl $undefined,%arg_z)
	__(Make_Catch(fixnumone))
	__(jmp *%ra0)
_endsubp(mkunwind)
+
/* this takes a return address in %ra0; it's "new" in that it does the */
/*   double binding of *interrupt-level* out-of-line */
_spentry(nmkunwind)
	/* Bind the *interrupt-level* tlb slot to fixnum -1, pushing the */
	/* old value, the binding index and the old db_link on the vstack */
	/* (same record layout as _SPbind). */
	__(movl rcontext(tcr.tlb_pointer),%arg_z)
        __(movl INTERRUPT_LEVEL_BINDING_INDEX(%arg_z),%arg_y)
	__(push %arg_y)
	__(push $INTERRUPT_LEVEL_BINDING_INDEX)
	__(push rcontext(tcr.db_link))
	__(movl %esp,rcontext(tcr.db_link))
	__(movl $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%arg_z))
	__(movl $undefined,%arg_z)
	/* %arg_z = tag, %xfn (%temp1) = pc */
	__(Make_Catch(fixnumone))
	/* Re-bind *interrupt-level* to its previous value (saved in */
	/* %arg_y above) via _SPbind_interrupt_level. */
	__(movl %arg_y,%arg_z)
        __(jmp _SPbind_interrupt_level)
_endsubp(nmkunwind)
+
/* Make a catch frame for a multiple-value CATCH (fixnumone argument */
/* to nMake_Catch; compare _SPmkcatch1v above). */
_spentry(mkcatchmv)
	__(nMake_Catch(fixnumone))
	__(ret)
_endsubp(mkcatchmv)
+
/* THROW %nargs stacked values to the catch tag found on the stack just */
/* below them, at (%esp,%nargs).  Walks the chain of catch frames from */
/* tcr.catch_top counting frames to unwind; if the tag isn't found, */
/* signals an error and then retries.  Unwinding itself is delegated to */
/* _SPnthrow1value or _SPnthrowvalues depending on the frame's mvflag. */
_spentry(throw)
	__(movl rcontext(tcr.catch_top),%imm0)
	__(movl (%esp,%nargs),%arg_y)	/* arg_y = tag   */
	__(movd %nargs,%mm0)		/* save nargs across the search */
	__(xorl %temp1,%temp1)		/* frame count (fixnum) */
	__(jmp local_label(_throw_test))
local_label(_throw_loop):
	__(cmpl %arg_y,catch_frame.catch_tag(%imm0))
	__(je local_label(_throw_found))
	__(movl catch_frame.link(%imm0),%imm0)
	__(addl $fixnum_one,%temp1)
local_label(_throw_test):
	__(test %imm0,%imm0)
	__(jne local_label(_throw_loop))
	/* Tag not found in any catch frame: signal the error, then */
	/* retry the throw (the handler may have fixed things up). */
        __(push %ra0)
	__(uuo_error_reg_not_tag(Rarg_y,subtag_catch_frame))
        __(pop %ra0)
	__(jmp _SPthrow)
local_label(_throw_found):
	/* Non-zero mvflag means the catch receives multiple values. */
	__(testb $fulltagmask,catch_frame.mvflag(%imm0))
	__(movl %temp1,%imm0)		/* frame count for the unwinder */
	__(movd %mm0,%nargs)
	__(jne local_label(_throw_multiple))
	/* Single-value catch: keep the deepest stacked value (the */
	/* first value, NIL if none) and discard the rest. */
	__(movl $nil_value,%arg_z)
	__(test %nargs,%nargs)
	__(je local_label(_throw_one_value))
	__(movl -node_size(%esp,%nargs),%arg_z)
	__(add %nargs,%esp)
local_label(_throw_one_value):
	__(movl $local_label(_threw_one_value),%ra0)
	__(jmp _SPnthrow1value)
/* Control returns here once the intervening frames are unwound. */
__(tra(local_label(_threw_one_value)))
	__(movl rcontext(tcr.catch_top),%arg_y)
	/* Unbind specials down to the catch frame's db_link if needed. */
	__(movl catch_frame.db_link(%arg_y),%imm0)
	__(cmpl %imm0,rcontext(tcr.db_link))
	__(jz local_label(_threw_one_value_dont_unbind))
	__(push $local_label(_threw_one_value_dont_unbind))
	__(jmp _SPunbind_to)	/* preserves registers */
__(tra(local_label(_threw_one_value_dont_unbind)))
	/* Restore the dynamic context saved in the catch frame. */
	__(movl catch_frame.ebp(%arg_y),%ebp)
	__(movl catch_frame.foreign_sp(%arg_y),%imm0)
        __(movl %imm0,rcontext(tcr.foreign_sp))
	__(movl catch_frame.xframe(%arg_y),%imm0)
	__(movl %imm0,rcontext(tcr.xframe))
	__(movl catch_frame.esp(%arg_y),%esp)
	__(movl catch_frame.link(%arg_y),%imm0)
	__(movl %imm0,rcontext(tcr.catch_top))
	/* Discard the tsp frame that held the catch frame. */
	__(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%arg_y),%imm0)
	__(movl (%imm0),%imm0)
        __(movl %imm0,rcontext(tcr.save_tsp))
        __(movl %imm0,rcontext(tcr.next_tsp))
	__(movl catch_frame.pc(%arg_y),%ra0)
	__(jmp *%ra0)
local_label(_throw_multiple):
	__(movl $local_label(_threw_multiple),%ra0)
	__(jmp _SPnthrowvalues)
__(tra(local_label(_threw_multiple)))
	__(movl rcontext(tcr.catch_top),%arg_y)
	__(movl catch_frame.db_link(%arg_y),%imm0)
	__(cmpl %imm0,rcontext(tcr.db_link))
	__(je local_label(_threw_multiple_dont_unbind))
	__(push $local_label(_threw_multiple_dont_unbind))
	__(jmp _SPunbind_to)	/* preserves registers */
__(tra(local_label(_threw_multiple_dont_unbind)))
	/* Copy multiple values from the current %esp to the target %esp   */
	__(lea (%esp,%nargs),%imm0)
	__(movd %nargs,%mm0)	/* nargs is aka temp1 */
	__(movl catch_frame.esp(%arg_y),%temp1)
	__(jmp local_label(_threw_multiple_push_test))
local_label(_threw_multiple_push_loop):
	/* Move one value; both pointers walk downward. */
	__(subl $node_size,%imm0)
	__(subl $node_size,%temp1)
	__(movl (%imm0),%arg_z)
	__(movl %arg_z,(%temp1))
local_label(_threw_multiple_push_test):
	__(cmpl %imm0,%esp)
	__(jne local_label(_threw_multiple_push_loop))
	/* target %esp is now in %temp1   */
	__(movl catch_frame.ebp(%arg_y),%ebp)
	__(movl catch_frame.foreign_sp(%arg_y),%imm0)
        __(movl %imm0,rcontext(tcr.foreign_sp))        
	__(movl catch_frame.xframe(%arg_y),%imm0)
	__(movl %imm0,rcontext(tcr.xframe))
	__(movl %temp1,%esp)
	__(movl catch_frame.link(%arg_y),%temp1)
	__(movl %temp1,rcontext(tcr.catch_top))
	__(movd %mm0,%nargs)
	/* Discard the tsp frame that held the catch frame. */
	__(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%arg_y),%imm0)
	__(movl catch_frame.pc(%arg_y),%ra0)
	__(movl (%imm0),%imm0)
        __(movl %imm0,rcontext(tcr.save_tsp))
        __(movl %imm0,rcontext(tcr.next_tsp))
	__(jmp *%ra0)
_endsubp(throw)
+
	/* This takes N multiple values atop the vstack.   */
/* Unwind %imm0 (a fixnum) catch/unwind-protect frames while carrying */
/* %nargs stacked values.  %ra0 (spilled to tcr.save0) is where control */
/* goes when all frames are unwound.  For each unwind-protect frame */
/* (tag == unbound_marker), the values and throw state are parked in a */
/* temp-stack frame while the cleanup code runs. */
_spentry(nthrowvalues)
	__(movb $1,rcontext(tcr.unwinding))
	__(movl %ra0,rcontext(tcr.save0)) /* %ra0 (aka %temp0) to spill area */
local_label(_nthrowv_nextframe):
	__(subl $fixnumone,%imm0)
	__(js local_label(_nthrowv_done))
	__(movd %imm0,%mm1)		/* remaining frame count */
	__(movl rcontext(tcr.catch_top),%temp0)
	__(movl catch_frame.link(%temp0),%imm0)
	__(movl %imm0,rcontext(tcr.catch_top))	/* pop the frame */
	/* Unbind specials down to this frame's db_link, if any. */
	__(movl catch_frame.db_link(%temp0),%imm0)
	__(cmpl %imm0,rcontext(tcr.db_link))
	__(jz local_label(_nthrowv_dont_unbind))
	__(push %temp1)
	__(push %temp0)
	__(push $local_label(_nthrowv_back_from_unbind))
	__(jmp _SPunbind_to)
__(tra(local_label(_nthrowv_back_from_unbind)))
	__(pop %temp0)
	__(pop %temp1)
local_label(_nthrowv_dont_unbind):
	/* An unbound_marker tag marks an unwind-protect frame. */
	__(cmpb $unbound_marker,catch_frame.catch_tag(%temp0))
	__(je local_label(_nthrowv_do_unwind))
/* A catch frame.  If the last one, restore context from there.   */
	__(movd %mm1,%imm0)
	__(test %imm0,%imm0)	/* last catch frame ?   */
	__(jne local_label(_nthrowv_skip))
	__(movl catch_frame.xframe(%temp0),%arg_y)
	__(movl %arg_y,rcontext(tcr.xframe))
	/* Copy the stacked values down to the frame's saved %esp. */
	__(lea (%esp,%nargs),%arg_y)
	__(movl catch_frame.esp(%temp0),%arg_z)
	__(movd %nargs,%mm2)
	__(jmp local_label(_nthrowv_push_test))
local_label(_nthrowv_push_loop):
	__(subl $node_size,%arg_y)
	__(subl $node_size,%arg_z)
	__(movd (%arg_y),%mm0)
	__(movd %mm0,(%arg_z))
local_label(_nthrowv_push_test):
	__(subl $node_size,%nargs)
	__(jns local_label(_nthrowv_push_loop))
	__(movd %mm2,%nargs)
	__(movl catch_frame.xframe(%temp0),%arg_y)
	__(movl %arg_y,rcontext(tcr.xframe))
	__(movl %arg_z,%esp)
	__(movl catch_frame.ebp(%temp0),%ebp)
	__(movd catch_frame.foreign_sp(%temp0),%stack_temp)
        __(movd %stack_temp,rcontext(tcr.foreign_sp))        
local_label(_nthrowv_skip):	
	/* Discard the tsp frame that held the catch frame. */
	__(movl -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm0)
        __(movl %imm0,rcontext(tcr.save_tsp))
        __(movl %imm0,rcontext(tcr.next_tsp))
	__(movd %mm1,%imm0)
	__(jmp local_label(_nthrowv_nextframe))
local_label(_nthrowv_do_unwind):	
/* This is harder.  Call the cleanup code with the multiple values and   */
/* nargs, the throw count, and the caller's return address in a temp  */
/* stack frame.   */
	__(leal (%esp,%nargs),%arg_y)
	__(push catch_frame.pc(%temp0))
	__(movl catch_frame.ebp(%temp0),%ebp)
        __(movd catch_frame.xframe(%temp0),%stack_temp)
        __(movd %stack_temp,rcontext(tcr.xframe))
	__(movl catch_frame.esp(%temp0),%arg_z)
	__(movd catch_frame.foreign_sp(%temp0),%stack_temp)
        __(movd %stack_temp,rcontext(tcr.foreign_sp))        
	/* Discard the catch frame, so we can build a temp frame   */
	__(movl -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm0)
        __(movl %imm0,rcontext(tcr.save_tsp))
        __(movl %imm0,rcontext(tcr.next_tsp))
	__(movd %temp1,%mm2) /* save %nargs */
	/* tsp overhead, nargs, throw count, ra0   */
	__(dnode_align(%nargs,(tsp_frame.fixed_overhead+(3*node_size)),%imm0))
	__(movl %imm0,%temp1)
	__(TSP_Alloc_Var(%temp1,%imm0))
	__(movd %mm2,%temp1) /* aka %nargs */

	/* Lay out <nargs, ra0, count> then the values in the temp frame. */
	__(movl %nargs,(%imm0))
	__(movl rcontext(tcr.save0),%ra0)
	__(movl %ra0,node_size(%imm0))
	__(movd %mm1,node_size*2(%imm0))
	__(leal node_size*3(%imm0),%imm0)
	__(jmp local_label(_nthrowv_tpushtest))
local_label(_nthrowv_tpushloop):
	__(movl -node_size(%arg_y),%temp0)
	__(subl $node_size,%arg_y)
	__(movl %temp0,(%imm0))
	__(addl $node_size,%imm0)
local_label(_nthrowv_tpushtest):
	__(subl $node_size,%nargs)
	__(jns local_label(_nthrowv_tpushloop))
	__(pop %xfn)	/* aka %temp1/%nargs */
	__(movl %arg_z,%esp)
/* Ready to call cleanup code. set up tra, jmp to %xfn   */
	__(push $local_label(_nthrowv_called_cleanup))
	__(movb $0,rcontext(tcr.unwinding))	/* not unwinding during cleanup */
	__(jmp *%xfn)
__(tra(local_label(_nthrowv_called_cleanup)))

	/* Cleanup done: recover <nargs, ra0, count> and the values from */
	/* the temp frame, push the values back, and continue unwinding. */
	__(movb $1,rcontext(tcr.unwinding))
	__(movl rcontext(tcr.save_tsp),%imm0)
	__(movl tsp_frame.data_offset+(0*node_size)(%imm0),%nargs)
	__(movl tsp_frame.data_offset+(1*node_size)(%imm0),%ra0)
	__(movl %ra0,rcontext(tcr.save0))
	__(movd tsp_frame.data_offset+(2*node_size)(%imm0),%mm1)
	__(movd %nargs,%mm2)
	__(addl $tsp_frame.fixed_overhead+(node_size*3),%imm0)
	__(jmp local_label(_nthrowv_tpoptest))
local_label(_nthrowv_tpoploop):	
	__(push (%imm0))
	__(addl $node_size,%imm0)
local_label(_nthrowv_tpoptest):	
	__(subl $node_size,%nargs)
	__(jns local_label(_nthrowv_tpoploop))
	__(movd %mm2,%nargs)
	/* Pop the temp frame. */
	__(movl rcontext(tcr.save_tsp),%imm0)
	__(movl (%imm0),%imm0)
        __(movl %imm0,rcontext(tcr.save_tsp))
        __(movl %imm0,rcontext(tcr.next_tsp))
	__(movd %mm1,%imm0)
	__(jmp local_label(_nthrowv_nextframe))
local_label(_nthrowv_done):
	__(movb $0,rcontext(tcr.unwinding))
	__(check_pending_interrupt(%imm0))
local_label(_nthrowv_return):
	__(movl rcontext(tcr.save0),%ra0)
	__(movss %fpzero,rcontext(tcr.save0))	/* clear the spill slot */
	__(jmp *%ra0)	
_endsubp(nthrowvalues)
+
/* This is a (slight) optimization.  When running an unwind-protect,  */
/* save the single value and the throw count in the tstack frame.  */
/* Note that this takes a single value in arg_z.  */

/* Unwind %imm0 (a fixnum) catch/unwind-protect frames carrying one */
/* value in %arg_z; %ra0 is where control goes when done.  Like */
/* _SPnthrowvalues but with no value-copying: cleanups run with the */
/* value and throw state saved in a fixed 3-word temp-stack frame. */
_spentry(nthrow1value)
	__(movb $1,rcontext(tcr.unwinding))
local_label(_nthrow1v_nextframe):
	__(subl $fixnumone,%imm0)
	__(js local_label(_nthrow1v_done))
	__(movd %imm0,%mm0)		/* remaining frame count */
	__(movl rcontext(tcr.catch_top),%temp1)
	__(movl catch_frame.link(%temp1),%imm0)
	__(movl %imm0,rcontext(tcr.catch_top))	/* pop the frame */
	/* Unbind specials down to this frame's db_link, if any. */
	__(movl catch_frame.db_link(%temp1),%imm0)
	__(cmpl %imm0,rcontext(tcr.db_link))
	__(jz local_label(_nthrow1v_dont_unbind))
	__(push %temp1)
	__(push %temp0)
	__(push %arg_z)
	__(push `$'local_label(_nthrow1v_back_from_unbind))
	__(jmp _SPunbind_to)
__(tra(local_label(_nthrow1v_back_from_unbind)))
	__(pop %arg_z)
	__(pop %temp0)
	__(pop %temp1)
local_label(_nthrow1v_dont_unbind):
	/* An unbound_marker tag marks an unwind-protect frame. */
	__(cmpb $unbound_marker,catch_frame.catch_tag(%temp1))
	__(je local_label(_nthrow1v_do_unwind))
/* A catch frame.  If the last one, restore context from there. */
	__(movd %mm0,%imm0)
	__(test %imm0,%imm0)	/* last catch frame? */
	__(jne local_label(_nthrow1v_skip))
	__(movl catch_frame.xframe(%temp1),%arg_y)
	__(movl %arg_y,rcontext(tcr.xframe))
	__(movl catch_frame.esp(%temp1),%esp)
	__(movl catch_frame.ebp(%temp1),%ebp)
	__(movd catch_frame.foreign_sp(%temp1),%stack_temp)
	__(movd %stack_temp,rcontext(tcr.foreign_sp))
local_label(_nthrow1v_skip):
	/* Discard the tsp frame that held the catch frame. */
	__(movl -(tsp_frame.fixed_overhead+fulltag_misc)(%temp1),%imm0)
	__(movl %imm0,rcontext(tcr.save_tsp))
	__(movl %imm0,rcontext(tcr.next_tsp))
	__(movd %mm0,%imm0)
	__(jmp local_label(_nthrow1v_nextframe))
local_label(_nthrow1v_do_unwind):
/* This is harder, but not as hard (not as much BLTing) as the */
/* multiple-value case. */
	__(movl catch_frame.xframe(%temp1),%arg_y)
	__(movl %arg_y,rcontext(tcr.xframe))
	__(movl catch_frame.ebp(%temp1),%ebp)
	__(movl catch_frame.esp(%temp1),%esp)
	__(movd catch_frame.foreign_sp(%temp1),%stack_temp)
	__(movd %stack_temp,rcontext(tcr.foreign_sp))
	/* Discard the catch frame so we can build a temp frame. */
	__(movl -(tsp_frame.fixed_overhead+fulltag_misc)(%temp1),%imm0)
	__(movl %imm0,rcontext(tcr.save_tsp))
	__(movl %imm0,rcontext(tcr.next_tsp))
	__(movl catch_frame.pc(%temp1),%xfn) /* xfn is temp1 */
	/* Park <ra0, throw count, value> in a fixed temp frame. */
	__(TSP_Alloc_Fixed((3*node_size),%imm0))
	__(addl $tsp_frame.fixed_overhead,%imm0)
	__(movl %ra0,(%imm0))
	__(movd %mm0,node_size*1(%imm0))
	__(movl %arg_z,node_size*2(%imm0))
/* Ready to call cleanup code.  Set up tra, jmp to %xfn. */
	__(push $local_label(_nthrow1v_called_cleanup))
	__(movb $0,rcontext(tcr.unwinding))	/* not unwinding during cleanup */
	__(jmp *%xfn)
__(tra(local_label(_nthrow1v_called_cleanup)))
	/* Cleanup done: recover state from the temp frame, pop it, */
	/* and continue unwinding. */
	__(movb $1,rcontext(tcr.unwinding))
	__(movl rcontext(tcr.save_tsp),%imm0)
	__(movl tsp_frame.data_offset+(0*node_size)(%imm0),%ra0)
	__(movd tsp_frame.data_offset+(1*node_size)(%imm0),%mm0)
	__(movl tsp_frame.data_offset+(2*node_size)(%imm0),%arg_z)
	__(movl (%imm0),%imm0)
	__(movl %imm0,rcontext(tcr.save_tsp))
	__(movl %imm0,rcontext(tcr.next_tsp))
	__(movd %mm0,%imm0)
	__(jmp local_label(_nthrow1v_nextframe))
local_label(_nthrow1v_done):
	__(movb $0,rcontext(tcr.unwinding))
	__(check_pending_interrupt(%imm0))
local_label(_nthrow1v_return):
	__(jmp *%ra0)
_endsubp(nthrow1value)
+
+/* This never affects the symbol's vcell   */
+/* Non-null symbol in arg_y, new value in arg_z           */
+
+/* Establish a thread-local special binding of symbol %arg_y to the new */
+/* value in %arg_z.  The old TLB value, the binding index, and the old */
+/* db_link are pushed as a 3-word binding record on the value stack. */
+_spentry(bind)
+	__(movl symbol.binding_index(%arg_y),%imm0)
+	__(cmpl rcontext(tcr.tlb_limit),%imm0)
+	__(jb 0f)
+	__(push %imm0)
+	__(tlb_too_small())	/* index beyond TLB: grow it, then retry */
+0:	__(test %imm0,%imm0)
+	__(jz 9f)		/* index 0 means "no binding index assigned" */
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(push (%temp1,%imm0))	/* old thread-local value */
+	__(push %imm0)		/* binding index */
+	__(push rcontext(tcr.db_link))	/* previous binding record */
+	__(movl %esp,rcontext(tcr.db_link))	/* link in the new record */
+	__(movl %arg_z,(%temp1,%imm0))	/* install new value in TLB */
+	__(jmp *%ra0)
+9:	
+	__(movl %arg_y,%arg_z)
+	__(movl $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind)
+
+/* arg_z = symbol: bind it to its current value  */
+
+/* Bind symbol %arg_z to its own current value.  If the thread has no */
+/* local binding yet (marker in TLB slot), seed the TLB from the global */
+/* vcell; either way a binding record is pushed and linked on db_link. */
+_spentry(bind_self)
+	__(movl symbol.binding_index(%arg_z),%imm0)
+	__(cmpl rcontext(tcr.tlb_limit),%imm0)
+	__(jb 0f)
+	__(push %imm0)
+	__(tlb_too_small())	/* grow TLB if index is out of range */
+0:	__(test %imm0,%imm0)
+	__(jz 9f)		/* no binding index: error */
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(cmpb $no_thread_local_binding_marker,(%temp1,%imm0))
+	__(jz 2f)		/* no thread-local value yet */
+	__(push (%temp1,%imm0))	/* already bound: current value stays */
+	__(push %imm0)
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(jmp *%ra0)
+2:	__(movl symbol.vcell(%arg_z),%arg_y)	/* use the global value */
+	__(push (%temp1,%imm0))
+	__(push %imm0)
+	__(push rcontext(tcr.db_link))
+	__(movl %arg_y,(%temp1,%imm0))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(jmp *%ra0)
+9:	__(movl $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind_self)
+
+/* Bind symbol %arg_z to NIL thread-locally; same binding-record */
+/* protocol as _SPbind. */
+_spentry(bind_nil)
+	__(movl symbol.binding_index(%arg_z),%imm0)
+	__(cmpl rcontext(tcr.tlb_limit),%imm0)
+	__(jb 0f)
+	__(push %imm0)
+	__(tlb_too_small())	/* grow TLB if needed */
+0:	__(test %imm0,%imm0)
+	__(jz 9f)		/* no binding index: error */
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(push (%temp1,%imm0))	/* old value */
+	__(push %imm0)		/* binding index */
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(movl $nil_value,(%temp1,%imm0))	/* new value is NIL */
+	__(jmp *%ra0)
+9:	__(movl $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind_nil)
+
+/* Like bind_self, but signal an unbound-variable error (uuo) if the */
+/* symbol's current value - thread-local or global - is the unbound */
+/* marker. */
+_spentry(bind_self_boundp_check)
+	__(movl symbol.binding_index(%arg_z),%imm0)
+	__(cmpl rcontext(tcr.tlb_limit),%imm0)
+	__(jb 0f)
+	__(push %imm0)
+	__(tlb_too_small())	/* grow TLB if needed */
+0:	__(test %imm0,%imm0)
+	__(jz 9f)		/* no binding index: error */
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(cmpb $no_thread_local_binding_marker,(%temp1,%imm0))
+	__(je 2f)		/* no thread-local binding yet */
+	__(cmpb $unbound_marker,(%temp1,%imm0))
+	__(je 8f)		/* thread-local value is unbound */
+	__(push (%temp1,%imm0))
+	__(push %imm0)
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(jmp *%ra0)
+2:	__(movl symbol.vcell(%arg_z),%arg_y)	/* fall back to global value */
+	__(cmpl $unbound_marker,%arg_y)
+	__(jz 8f)		/* globally unbound, too */
+	__(push (%temp1,%imm0))
+	__(push %imm0)
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(movl %arg_y,(%temp1,%imm0))
+	__(jmp *%ra0)
+8:	__(push %ra0)
+        __(uuo_error_reg_unbound(Rarg_z))
+	
+9:	__(movl $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind_self_boundp_check)
+
+/* Cons the %nargs pushed arguments into a heap list, last-pushed first, */
+/* returning the list in %arg_z and jumping to the saved return address. */
+_spentry(conslist)
+	__(movl %nargs,%imm0)
+	__(movl %temp0,%temp1)	/* have to use temp0 for consing */
+	__(movl $nil_value,%arg_z)	/* start with the empty list */
+	__(test %imm0,%imm0)
+	__(jmp 2f)
+1:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))	/* arg_z = (cons arg_y arg_z) */
+	__(subl $node_size,%imm0)
+2:	__(jnz 1b)
+	__(jmp *%temp1)
+_endsubp(conslist)
+
+/* do list*: last arg in arg_z, all others pushed, nargs set to #args pushed.  */
+/* Cons, one cons cell at at time.  Maybe optimize this later.  */
+
+/* LIST*: like conslist, but the initial tail is the last arg, already */
+/* in %arg_z (rather than NIL). */
+_spentry(conslist_star)
+	__(movl %nargs,%imm0)
+	__(test %imm0,%imm0)
+	__(movl %ra0,%temp1)	/* temp0/ra0 can't be live while consing */
+	__(jmp 2f)
+1:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))	/* arg_z = (cons arg_y arg_z) */
+	__(subl $node_size,%imm0)
+2:	__(jnz 1b)
+	__(jmp *%temp1)
+_endsubp(conslist_star)
+
+/* We always have to create a tsp frame (even if nargs is 0), so the compiler */
+/* doesn't get confused. */
+/* Cons the %nargs pushed args into a list of conses allocated in a */
+/* single temp-stack frame; push the resulting list.  Shared tail */
+/* (stkconslist_common) is also entered from stkconslist_star, where */
+/* %arg_z already holds the final cdr instead of NIL. */
+_spentry(stkconslist)
+	__(movl $nil_value,%arg_z)
+C(stkconslist_common):               
+	__(movl %ra0,rcontext(tcr.save0))	/* save return addr while consing */
+	__(movd %nargs,%mm0)
+	__(movl %nargs,%temp0)
+	__(addl %temp0,%temp0)	/* two words (car+cdr) per argument */
+	__(dnode_align(%temp0,tsp_frame.fixed_overhead,%temp0))
+	__(TSP_Alloc_Var(%temp0,%imm0))
+	__(addl $fulltag_cons,%imm0)	/* tag first cell as a cons */
+	__(test %nargs,%nargs)
+	__(jmp 2f)
+1:	__(pop %arg_y)
+	__(_rplaca(%imm0,%arg_y))
+	__(_rplacd(%imm0,%arg_z))
+	__(movl %imm0,%arg_z)	/* new cell becomes head of list */
+	__(add $cons.size,%imm0)	/* next cell in the frame */
+	__(subl $node_size,%nargs)
+2:	__(jne 1b)
+	__(movl rcontext(tcr.save0),%ra0)
+	__(movl $0,rcontext(tcr.save0))	/* don't leave stale root in tcr */
+	__(jmp *%ra0)
+_endsubp(stkconslist)
+
+/* do list*: last arg in arg_z, all others vpushed,   */
+/*	nargs set to #args vpushed.  */
+
+/* Stack-consed LIST*: %arg_z already holds the final tail; share the */
+/* rest of the logic with stkconslist. */
+_spentry(stkconslist_star)
+        __(jmp C(stkconslist_common))
+_endsubp(stkconslist_star)
+
+
+/* Make a stack-consed simple-vector out of the NARGS objects   */
+/*	on top of the vstack; return it in arg_z.  */
+
+/* Make a temp-stack-allocated SIMPLE-VECTOR from the %nargs pushed */
+/* values; return it (tagged) in %arg_z. */
+_spentry(mkstackv)
+	__(dnode_align(%nargs,tsp_frame.fixed_overhead+node_size,%imm0))
+	__(TSP_Alloc_Var(%imm0,%arg_y))
+	__(movl %nargs,%imm0)
+	__(shll $(num_subtag_bits-fixnumshift),%imm0)	/* element count field */
+	__(movb $subtag_simple_vector,%imm0_b)	/* low byte = subtag */
+	__(movl %imm0,(%arg_y))	/* store vector header */
+	__(leal fulltag_misc(%arg_y),%arg_z)	/* tagged result */
+	__(test %nargs,%nargs)
+	__(leal misc_data_offset(%arg_z,%nargs),%imm0)	/* one past last slot */
+	__(jmp 2f)
+1:	__(pop -node_size(%imm0))	/* fill from the end backwards */
+	__(subl $node_size,%nargs)
+	__(leal -node_size(%imm0),%imm0)
+2:	__(jne 1b)
+	__(jmp *%ra0)	
+_endsubp(mkstackv)
+
+        .globl C(egc_write_barrier_start)
+C(egc_write_barrier_start):
+/*  */
+/* The function pc_luser_xp() - which is used to ensure that suspended threads  */
+/* are suspended in a GC-safe way - has to treat these subprims (which implement  */
+/* the EGC write-barrier) specially.  Specifically, a store that might introduce  */
+/* an intergenerational reference (a young pointer stored in an old object) has  */
+/* to "memoize" that reference by setting a bit in the global "refbits" bitmap.  */
+/* This has to happen atomically, and has to happen atomically wrt GC.  */
+
+/* Note that updating a word in a bitmap is itself not atomic, unless we use  */
+/* interlocked loads and stores.  */
+
+/* For RPLACA and RPLACD, things are fairly simple: regardless of where we are  */
+/* in the function, we can do the store (even if it's already been done) and  */
+/* calculate whether or not we need to set the bit out-of-line.  (Actually  */
+/* setting the bit needs to be done atomically, unless we're sure that other  */
+/* threads are suspended.)  */
+/* We can unconditionally set the suspended thread's RIP to the return address.  */
+
+/* RPLACA with EGC write barrier: do the store, then if the stored */
+/* value is younger than the cons (and the cons is in oldspace), set */
+/* the corresponding bit in the global refbits bitmap atomically. */
+_spentry(rplaca)
+        .globl C(egc_rplaca)
+C(egc_rplaca):
+	__(rcmpl(%arg_z,%arg_y))
+	__(_rplaca(%arg_y,%arg_z))	/* store is unconditional */
+	__(ja 1f)		/* maybe intergenerational: memoize */
+0:	__(repret)
+1:	__(movl %arg_y,%imm0)
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)	/* dnode index of the cons */
+	__(cmpl lisp_global(oldspace_dnode_count),%imm0)
+	__(jae 0b)		/* not in oldspace: nothing to record */
+	__(ref_global(refbits,%temp0))
+	__(xorb $31,%imm0_b)	/* big-endian bit numbering within word */
+	__(lock)
+	__(btsl %imm0,(%temp0))	/* atomically set the refbit */
+	__(ret)
+_endsubp(rplaca)
+
+/* RPLACD with EGC write barrier; identical memoization logic to */
+/* rplaca above. */
+_spentry(rplacd)
+        .globl C(egc_rplacd)
+C(egc_rplacd):
+	__(rcmpl(%arg_z,%arg_y))
+	__(_rplacd(%arg_y,%arg_z))	/* store is unconditional */
+	__(ja 1f)		/* maybe intergenerational: memoize */
+0:	__(repret)
+1:	__(movl %arg_y,%imm0)
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)	/* dnode index of the cons */
+	__(cmpl lisp_global(oldspace_dnode_count),%imm0)
+	__(jae 0b)		/* not in oldspace: done */
+	__(ref_global(refbits,%temp0))
+	__(xorb $31,%imm0_b)	/* bit numbering fixup */
+	__(lock)
+	__(btsl %imm0,(%temp0))	/* atomically set the refbit */
+	__(ret)
+_endsubp(rplacd)
+
+/* Storing into a gvector can be handled the same way as storing into a CONS. */
+/* args (src, unscaled-idx, val) in temp0, arg_y, arg_z */
+/* Store %arg_z into gvector %temp0 at (unscaled) index %arg_y, with */
+/* the same EGC memoization as rplaca/rplacd, keyed on the address of */
+/* the stored-into slot. */
+_spentry(gvset)
+        .globl C(egc_gvset)
+C(egc_gvset):
+	__(movl %arg_z,misc_data_offset(%temp0,%arg_y))
+	__(rcmpl(%arg_z,%temp0))
+	__(ja 1f)		/* maybe intergenerational: memoize */
+0:	__(repret)
+1:	__(lea misc_data_offset(%temp0,%arg_y),%imm0)	/* slot address */
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)
+	__(cmpl lisp_global(oldspace_dnode_count),%imm0)
+	__(jae 0b)		/* slot not in oldspace: done */
+	__(ref_global(refbits,%temp1))
+	__(xorb $31,%imm0_b)	/* bit numbering fixup */
+	__(lock)
+	__(btsl %imm0,(%temp1))	/* atomically set the refbit */
+	__(ret)
+_endsubp(gvset)
+
+/* This is a special case of storing into a gvector: if we need to  */
+/* memoize the store, record the address of the hash-table vector  */
+/* in the refmap, as well.  */
+
+/* Like gvset, but when the store needs memoization, also memoize the */
+/* address of the hash-table vector itself in refbits. */
+_spentry(set_hash_key)
+        .globl C(egc_set_hash_key)
+C(egc_set_hash_key):
+	__(movl %arg_z,misc_data_offset(%temp0,%arg_y))
+	__(rcmpl(%arg_z,%temp0))
+	__(ja 1f)		/* maybe intergenerational: memoize */
+0:	__(repret)
+1:	__(lea misc_data_offset(%temp0,%arg_y),%imm0)	/* slot address */
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)
+	__(cmpl lisp_global(oldspace_dnode_count),%imm0)
+	__(jae 0b)		/* slot not in oldspace: done */
+	__(ref_global(refbits,%temp1))
+	__(xorb $31,%imm0_b)
+	__(lock)
+	__(btsl %imm0,(%temp1))	/* memoize the stored-into slot */
+	/* Now memoize the address of the hash vector */
+	__(movl %temp0,%imm0)
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)
+	__(xorb $31,%imm0_b)
+	__(lock)
+	__(btsl %imm0,(%temp1))
+	__(ret)
+_endsubp(set_hash_key)
+
+/* This is a little trickier: if this is interrupted, we need to know  */
+/* whether or not the STORE-CONDITIONAL (cmpxchgq) has won or not.    */
+/* If we're interrupted   before the PC has reached the "success_test" label, */
+/* repeat (luser the PC back to store_node_conditional_retry.)  If we're at that */
+/* label with the Z flag set, we won and (may) need to memoize.  */
+
+/* %temp0 = offset, %temp1 = object, %arg_y = old, %arg_z = new */
+/* STORE-CONDITIONAL on a gvector slot via LOCK CMPXCHG; return T on */
+/* success (after possibly memoizing the store) or NIL if the old value */
+/* didn't match.  The egc_* labels bracket regions that pc_luser_xp() */
+/* must recognize when adjusting a suspended thread's PC. */
+/* %temp0 = offset, %temp1 = object, %arg_y = old, %arg_z = new */
+_spentry(store_node_conditional)
+        .globl C(egc_store_node_conditional)
+C(egc_store_node_conditional):
+	__(subl $misc_data_offset*fixnumone,%temp0) /* undo pre-added offset */
+	__(sarl $fixnumshift,%temp0)	/* will be fixnum-tagged */
+        .globl C(egc_store_node_conditional_retry)
+C(egc_store_node_conditional_retry):      
+0:	__(cmpl %arg_y,misc_data_offset(%temp1,%temp0))
+	__(movl misc_data_offset(%temp1,%temp0),%imm0)	/* cmpxchg wants old in %eax */
+	__(jne 3f)		/* current value doesn't match: fail */
+	__(lock)
+	__(cmpxchgl %arg_z,misc_data_offset(%temp1,%temp0))
+	.globl C(egc_store_node_conditional_success_test)
+C(egc_store_node_conditional_success_test):
+	__(jne 0b)		/* lost the race: retry from the top */
+	__(leal misc_data_offset(%temp1,%temp0),%imm0)
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)
+	__(cmpl lisp_global(oldspace_dnode_count),%imm0)
+	__(jae 2f)		/* slot not in oldspace: skip memoization */
+	__(ref_global(refbits,%temp1))
+	__(xorb $31,%imm0_b)
+	__(lock)
+	__(btsl %imm0,(%temp1))
+        .globl C(egc_store_node_conditional_success_end)
+C(egc_store_node_conditional_success_end):
+2:	__(movl $t_value,%arg_z)
+	__(ret)
+3:	__(movl $nil_value,%arg_z)
+	__(ret)
+_endsubp(store_node_conditional)
+
+	/* %temp0 = offset, %temp1 = object, %arg_y = old, %arg_z = new */
+_spentry(set_hash_key_conditional)
+        .globl C(egc_set_hash_key_conditional)
+C(egc_set_hash_key_conditional):
+	__(subl $misc_data_offset*fixnumone,%temp0) /* undo pre-added offset */
+	__(sarl $fixnumshift,%temp0)	/* will be fixnum-tagged */
+        .globl C(egc_set_hash_key_conditional_retry)
+C(egc_set_hash_key_conditional_retry):          
+0:	__(cmpl %arg_y,misc_data_offset(%temp1,%temp0))
+	__(movl misc_data_offset(%temp1,%temp0),%imm0)
+	__(jne 3f)
+	__(lock)
+	__(cmpxchgl %arg_z,misc_data_offset(%temp1,%temp0))
+	.globl C(egc_set_hash_key_conditional_success_test)
+C(egc_set_hash_key_conditional_success_test):
+	__(jne 0b)
+	__(leal misc_data_offset(%temp1,%temp0),%imm0)
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)
+	__(cmpl lisp_global(oldspace_dnode_count),%imm0)
+	__(jae 2f)
+	__(ref_global(refbits,%temp0))
+	__(xorb $31,%imm0_b)
+	__(lock)
+	__(btsl %imm0,(%temp0))
+	/* Now memoize the address of the hash vector */
+	__(movl %temp1,%imm0)
+	__(subl lisp_global(ref_base),%imm0)
+	__(shrl $dnode_shift,%imm0)
+	__(xorb $31,%imm0_b)
+	__(lock)
+	__(btsl %imm0,(%temp0))
+        .globl C(egc_write_barrier_end)
+C(egc_write_barrier_end):
+2:	__(movl $t_value,%arg_z)
+	__(ret)
+3:	__(movl $nil_value,%arg_z)
+	__(ret)
+_endsubp(store_node_conditional)
+
+/* SETQ of a special symbol: if the symbol is marked constant in its */
+/* flags, signal an XCONST error; otherwise tail-call _SPspecset. */
+_spentry(setqsym)
+	__(bt $sym_vbit_const,symbol.flags(%arg_y))
+	__(jae _SPspecset)	/* not a constant: do the real set */
+	__(mov %arg_y,%arg_z)
+	__(mov $XCONST,%arg_y)
+	__(set_nargs(2))
+	__(jmp _SPksignalerr)
+_endsubp(setqsym)
+
+/* PROGV prologue: %arg_y = list of symbols, %arg_z = list of values. */
+/* Verify the value list is proper, count the symbols, build a */
+/* temp-stack frame of binding triples, install each thread-local */
+/* binding (unbound_marker when values run out), and link the whole */
+/* chain onto tcr.db_link at the end. */
+_spentry(progvsave)
+	__(push %arg_y)
+	
+	/* Error if arg_z isn't a proper list.  That's unlikely,  */
+	/* but it's better to check now than to crash later.  */
+	
+	__(compare_reg_to_nil(%arg_z))
+	__(movl %arg_z,%temp0)	/* fast   */
+	__(movl %arg_z,%temp1)	/* slow   */
+	__(je 9f)		/* Null list is proper   */
+0:
+	__(extract_lisptag(%temp0,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jne 8f)
+	__(compare_reg_to_nil(%temp0))
+	__(je 9f)
+	__(_cdr(%temp0,%arg_y))	/* (null (cdr fast)) ?   */
+	__(compare_reg_to_nil(%arg_y))
+	__(je 9f)
+	__(extract_lisptag(%arg_y,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jne 8f)
+	__(_cdr(%arg_y,%temp0))	/* fast moves two conses per iteration */
+	__(_cdr(%temp1,%temp1))	/* slow moves one: circularity check */
+	__(cmpl %temp1,%temp0)
+	__(jne 0b)
+
+8:	__(add $node_size,%esp)	/* discard pushed arg_y */
+	__(movl $XIMPROPERLIST,%arg_y)
+	__(set_nargs(2))
+	__(jmp _SPksignalerr)
+9:	/* Whew 	  */
+
+        /* Next, determine the length of arg_y.  We   */
+	/* know that it's a proper list.   */
+	__(pop %arg_y)
+	
+	__(movl $-fixnumone,%imm0)
+	__(movl %arg_y,%temp0)
+1:	__(compare_reg_to_nil(%temp0))
+	__(_cdr(%temp0,%temp0))
+	__(leal fixnumone(%imm0),%imm0)
+	__(jne 1b)
+	
+	/* imm0 is now (boxed) triplet count.  */
+	/* Determine word count, add 1 (to align), and make room.  */
+	/*  if count is 0, make an empty tsp frame and exit   */
+	__(testl %imm0,%imm0)
+	__(jne 2f)
+	__(TSP_Alloc_Fixed(2*node_size,%imm0))
+	__(ret)
+2:	__(movl %imm0,%temp1)
+	__(add %imm0,%imm0)
+	__(add %temp1,%imm0)	/* imm0 = 3 words per symbol */
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+node_size,%imm0))
+	__(TSP_Alloc_Var(%imm0,%temp0))
+	__(movl %temp1,(%temp0))
+	__(movd rcontext(tcr.db_link),%mm0)	/* current binding chain in mm0 */
+3:	__(movl $unbound_marker,%temp0)
+	__(compare_reg_to_nil(%arg_z))
+	__(cmovnel cons.car(%arg_z),%temp0)	/* next value, if any remain */
+	__(cmovnel cons.cdr(%arg_z),%arg_z)
+	__(_car(%arg_y,%temp1))	/* next symbol */
+	__(_cdr(%arg_y,%arg_y))
+	__(movl symbol.binding_index(%temp1),%temp1)
+	__(cmp rcontext(tcr.tlb_limit),%temp1)
+	__(jb 4f)
+	__(push %temp1)
+	__(tlb_too_small())	/* grow TLB if index out of range */
+4:	__(push %arg_z)
+	__(movl rcontext(tcr.tlb_pointer),%arg_z)
+	__(subl $binding.size,%imm0)	/* carve next binding record */
+	__(movl %temp1,binding.sym(%imm0))
+	__(push (%arg_z,%temp1))
+	__(pop binding.val(%imm0))	/* record old TLB value */
+	__(movl %temp0,(%arg_z,%temp1))	/* install new value */
+	__(pop %arg_z)
+	__(movd %mm0,binding.link(%imm0))
+	__(movd %imm0,%mm0)	/* chain grows toward lower addresses */
+	__(compare_reg_to_nil(%arg_y))
+	__(jne 3b)
+	__(movd %mm0,rcontext(tcr.db_link))	/* publish whole chain at once */
+	__(ret)
+_endsubp(progvsave)
+
+/* Allocate node objects on the temp stack, immediate objects on the foreign  */
+/* stack. (The caller has to know which stack to discard a frame from.)  */
+/* %arg_y = boxed element-count, %arg_z = boxed subtype  */
+
+/* Stack-allocate a misc object: gvectors go on the temp stack, */
+/* ivectors on the foreign stack (caller must know which frame to */
+/* discard).  Falls back to heap allocation - after pushing an empty */
+/* frame so the caller's discard still works - when the object exceeds */
+/* tstack_alloc_limit.  %arg_y = boxed element count (must fit in 24 */
+/* bits), %arg_z = boxed subtype. */
+_spentry(stack_misc_alloc)
+	__(testl $~(((1<<24)-1)<<fixnumshift),%arg_y)
+	__(jne local_label(stack_misc_alloc_not_u24))
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(mov %arg_y,%temp0)
+	__(shl $num_subtag_bits-fixnumshift,%temp0)
+	__(or %temp0,%imm0)	/* %imm0 now = header */
+	__(movd %imm0,%mm0)	/* cache header in %mm0 */
+	__(andb $fulltagmask,%imm0_b)
+	__(cmpb $fulltag_nodeheader,%imm0_b)
+	__(je local_label(stack_misc_alloc_node))
+	/* ivector: dispatch on subtag range to compute the byte count */
+	__(movd %mm0,%imm0)
+	__(cmpb $max_32_bit_ivector_subtag,%imm0_b)
+	__(jbe local_label(stack_misc_alloc_32))
+	__(cmpb $max_8_bit_ivector_subtag,%imm0_b)
+	__(jbe local_label(stack_misc_alloc_8))
+	__(cmpb $max_16_bit_ivector_subtag,%imm0_b)
+	__(jbe local_label(stack_misc_alloc_16))
+	__(cmpb $subtag_double_float_vector,%imm0_b)
+	__(jne local_label(stack_misc_alloc_1))
+	/* double-float vector case */
+	__(imul $2,%arg_y,%imm0)	/* 8 bytes/element (arg_y is boxed) */
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_1):
+	/* bit vector: round bit count up to bytes */
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(addl $7,%imm0)
+	__(shrl $3,%imm0)
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_8):
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_16):
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(shl $1,%imm0)
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_32):
+	__(mov %arg_y,%imm0)	/* boxed count = byte count for 32-bit */
+local_label(stack_misc_alloc_alloc_ivector):
+	/* byte count in %imm0 */
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+node_size,%imm0))
+	__(cmpl $tstack_alloc_limit,%imm0)
+	__(ja local_label(stack_misc_alloc_heap_alloc_ivector))
+        __ifdef(`WINDOWS')
+         __(windows_cstack_probe(%imm0,%temp1))
+        __endif
+	__(movd rcontext(tcr.foreign_sp),%stack_temp)
+	__(movd %stack_temp,%temp1)
+	__(subl %imm0,rcontext(tcr.foreign_sp))
+	__(movl rcontext(tcr.foreign_sp),%temp0)
+	/* zero the newly allocated foreign-stack region, dnode at a time */
+0:	__(movsd %fpzero,-dnode_size(%temp1))
+	__(subl $dnode_size,%temp1)
+	__(cmpl %temp1,%temp0)
+	__(jnz 0b)
+	__(movd %stack_temp,(%temp0))	/* frame back-link */
+	__(movl %ebp,csp_frame.save_ebp(%temp0))
+	__(movd %mm0,csp_frame.fixed_overhead(%temp0))	/* object header */
+	__(lea csp_frame.fixed_overhead+fulltag_misc(%temp0),%arg_z)
+	__(ret)
+local_label(stack_misc_alloc_heap_alloc_ivector):
+	/* too big: push an empty foreign frame, then heap-allocate */
+	__(movd rcontext(tcr.foreign_sp),%stack_temp)
+	__(subl $dnode_size,rcontext(tcr.foreign_sp))
+	__(movl rcontext(tcr.foreign_sp),%imm0)
+	__(movd %stack_temp,(%imm0))
+	__(jmp _SPmisc_alloc)
+local_label(stack_misc_alloc_node):
+	__(movl %arg_y,%imm0)
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+node_size,%imm0))
+	__(cmpl $tstack_alloc_limit,%imm0)
+	__(ja local_label(stack_misc_alloc_heap_alloc_gvector))
+	__(TSP_Alloc_Var(%imm0,%temp1))
+	__(movd %mm0,(%temp1))	/* store header */
+	__(leal fulltag_misc(%temp1),%arg_z)
+	__(ret)
+local_label(stack_misc_alloc_heap_alloc_gvector):
+	/* too big: push an empty temp frame, then heap-allocate */
+	__(TSP_Alloc_Fixed(0,%imm0))
+	__(jmp _SPmisc_alloc)
+
+local_label(stack_misc_alloc_not_u24):
+	__(uuo_error_reg_not_type(Rarg_y,error_object_not_unsigned_byte_24))
+_endsubp(stack_misc_alloc)
+
+/* subtype (boxed, of course) is pushed, followed by nargs bytes worth of */
+/* initial-contents.  Note that this can be used to cons any type of */
+/* initialized node-header'ed misc object (symbols, closures, ...) */
+/* as well as vector-like objects. */
+/* Heap-cons a gvector whose subtype (boxed) and %nargs bytes of */
+/* initial contents are on the value stack; pop them into the new */
+/* object and return it in %arg_z. */
+_spentry(gvector)
+	__(subl $node_size,%nargs)	/* off by one in x862-%gvector */
+	__(movl (%esp,%nargs),%imm0)	/* boxed subtype */
+	__(sarl $fixnumshift,%imm0)
+	__(movl %nargs,%arg_z)
+	__(shll $num_subtag_bits-word_shift,%arg_z)
+	__(orl %arg_z,%imm0)	/* header = count field | subtag */
+	__(movd %imm0,%mm0)
+	__(dnode_align(%nargs,node_size,%imm0))
+	__(push %ra0)	/* aka %temp0, can't be live while consing */
+	__(Misc_Alloc(%arg_z))
+	__(pop %ra0)
+	__(movl %nargs,%imm0)
+	__(jmp 2f)
+1:	__(movl %arg_y,misc_data_offset(%arg_z,%imm0))
+2:	__(subl $node_size,%imm0)
+	__(pop %arg_y)	/* Note the intentional fencepost: */
+			/* discard the subtype as well. */
+	__(jge 1b)
+	__(jmp *%ra0)
+_endsubp(gvector)
+
+/* Not used on this port; trap if ever reached. */
+_spentry(mvpass)
+	__(hlt)
+_endsubp(mvpass)
+
+/* Not used on this port; trap if ever reached. */
+_spentry(nthvalue)
+	__(hlt)
+_endsubp(nthvalue)
+
+/* Return multiple values.  If the caller's return address is the */
+/* magic ret1val_addr, it accepts all values: slide them down the */
+/* stack to just below the saved frame and return with %nargs intact. */
+/* Otherwise return just the primary value (NIL when zero values). */
+_spentry(values)
+	__(movl (%temp0),%arg_y)	/* return address */
+	__(ref_global(ret1val_addr,%imm0))
+	__(movl $nil_value,%arg_z)
+	__(cmpl %imm0,%arg_y)
+	__(je 0f)
+	/* single-value return: first value (or NIL), discard the rest */
+	__(test %nargs,%nargs)
+	__(cmovne -node_size(%esp,%nargs),%arg_z)
+	__(movl %temp0,%esp)
+	__(ret)
+0:	__(movl 4(%temp0),%arg_y)	/* real (multiple-value) return addr */
+        __(addl $2*node_size,%temp0)
+	__(lea (%esp,%nargs),%imm0)	/* one past the first value pushed */
+	__(movd %nargs,%mm0)	/* preserve %nargs across the copy */
+	__(jmp 2f)
+	/* copy values downward, last-pushed first */
+1:	__(subl $node_size,%imm0)
+	__(movl (%imm0),%temp1)
+	__(subl $node_size,%temp0)
+	__(movl %temp1,(%temp0))
+2:	__(cmp %imm0,%esp)
+	__(jne 1b)
+	__(movl %temp0,%esp)
+	__(movd %mm0,%nargs)
+	__(jmp *%arg_y)
+
+_endsubp(values)
+
+/* Not used on this port; trap if ever reached. */
+_spentry(default_optional_args)
+	__(hlt)
+_endsubp(default_optional_args)
+
+/* Not used on this port; trap if ever reached. */
+_spentry(opt_supplied_p)
+	__(hlt)
+_endsubp(opt_supplied_p)
+
+/* Not used on this port; trap if ever reached. */
+_spentry(lexpr_entry)
+	__(hlt)
+_endsubp(lexpr_entry)
+
+/* Cons ALL incoming args into a heap &rest list and push it. */
+_spentry(heap_rest_arg)
+	__(push_argregs())	/* get register args onto the stack first */
+	__(movl %temp0,%arg_y)	/* temp0 can't be live while consing */
+	__(movl %nargs,%imm0)
+	__(testl %imm0,%imm0)
+	__(movl $nil_value,%arg_z)
+	__(jmp 2f)
+	.p2align 4
+1:	__(pop %temp1)
+	__(Cons(%temp1,%arg_z,%arg_z))	/* arg_z = (cons popped arg_z) */
+	__(subl $node_size,%imm0)
+2:	__(jg 1b)
+	__(push %arg_z)		/* push the finished &rest list */
+	__(movl %arg_y,%temp0)
+	__(jmp *%ra0)
+
+_endsubp(heap_rest_arg)
+
+/* %imm0 contains the number of fixed args; make an &rest arg out of */
+/* the others. */
+/* %imm0 contains the number of fixed args; make an &rest arg out of */
+/* the others.  The fixed args stay on the stack below the list. */
+_spentry(req_heap_rest_arg)
+	__(push_argregs())
+	__(movd %nargs,%mm0)	/* preserve full %nargs for the caller */
+	__(subl %imm0,%nargs)	/* count of &rest args */
+	__(movl %nargs,%imm0)
+	__(movl %temp0,%temp1)	/* temp0 can't be live while consing */
+	__(movl $nil_value,%arg_z)
+	__(jmp 2f)
+	.p2align 4
+1:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))
+	__(subl $node_size,%imm0)
+2:	__(jg 1b)
+	__(push %arg_z)		/* push the finished &rest list */
+	__(movl %temp1,%temp0)
+	__(movd %mm0,%nargs)
+	__(jmp *%ra0)
+_endsubp(req_heap_rest_arg)
+
+/* %imm0 bytes of stuff has already been pushed	  */
+/* make an &rest arg out of any others   */
+/* %imm0 bytes of stuff has already been pushed	  */
+/* make an &rest arg out of any others   */
+_spentry(heap_cons_rest_arg)
+	__(movd %nargs,%mm0)	/* preserve full %nargs */
+	__(subl %imm0,%nargs)	/* count of &rest args */
+	__(movl %nargs,%imm0)
+	__(movl $nil_value,%arg_z)
+	__(movl %ra0,%arg_y)	/* temp0 can't be live while consing */
+	__(jmp 2f)		/* (did I mention that already?) */
+	.p2align 4
+1:	__(pop %temp1)
+	__(Cons(%temp1,%arg_z,%arg_z))
+	__(subl $node_size,%imm0)
+2:	__(jg 1b)
+	__(push %arg_z)		/* push the finished &rest list */
+	__(movd %mm0,%nargs)
+	__(jmp *%arg_y)
+_endsubp(heap_cons_rest_arg)
+
+/* Keyword binding with zero required/optional args: all incoming */
+/* args are keyword/value pairs. */
+_spentry(simple_keywords)
+	__(xor %imm0,%imm0)	/* zero non-keyword args */
+	__(push_argregs())
+	__(jmp _SPkeyword_bind)
+_endsubp(simple_keywords)
+
+/* Keyword binding; %imm0 (count of non-keyword args) set by caller. */
+_spentry(keyword_args)
+	__(push_argregs())
+	__(jmp _SPkeyword_bind)
+_endsubp(keyword_args)
+
+/* There are %nargs words of arguments on the stack; %imm0 contains the */
+/* number of non-keyword args pushed.  It's possible that we never actually */
+/* got any keyword args, which would make things much simpler. */
+
+/* On entry, the upper half of %temp1 (aka %nargs) contains some bits */
+/* indicating whether &allow-other-keys and/or &rest was present in the */
+/* lambda list. */
+
+/* Once we get here, we can use the arg registers. */
+
+/* N.B.: %ra0 is %temp0, and must not be clobbered. */
+
+/* Bit positions of the keyword flags (kept in tcr.unboxed0 below): */
+define(`keyword_flags_aok_bit',`16')
+define(`keyword_flags_unknown_keys_bit',`17')
+define(`keyword_flags_rest_bit',`18')
+define(`keyword_flags_seen_aok_bit',`19')
+
+/* Bind declared keywords: the supplied k/v pairs are moved to a temp- */
+/* stack frame, a (value, supplied-p) pair of NILs is pushed for each */
+/* declared keyword (keyword vector = 0th constant in %fn), and each */
+/* supplied pair is matched against the vector.  On exit the k/v pairs */
+/* are copied back to the value stack if &rest was present, or an */
+/* XBADKEYS error is signalled for unknown keys without */
+/* :allow-other-keys. */
+_spentry(keyword_bind)
+	__(movl %temp1,rcontext(tcr.unboxed0))	/* save keyword flags */
+	__(movzwl %nargs_w,%nargs)	/* low 16 bits = actual arg count */
+	__(movl %nargs,%arg_z)
+	__(subl %imm0,%arg_z)	/* arg_z = bytes of keyword args */
+	__(jbe local_label(no_keyword_values))
+	__(btl $word_shift,%arg_z)	/* odd number of keyword args? */
+	__(jnc local_label(even))
+	/* Odd: cons the keyword args into a list and signal XBADKEYS */
+	__(movl $nil_value,%arg_y)
+	__(movl %arg_z,%nargs)
+	__(test %nargs,%nargs)
+	__(movl %ra0,rcontext(tcr.save0))	/* save temp0 while consing */
+	__(jmp 1f)
+0:	__(pop %arg_z)
+	__(Cons(%arg_z,%arg_y,%arg_y))
+	__(subl $node_size,%nargs)
+1:	__(jnz 0b)
+	__(movl rcontext(tcr.save0),%ra0)
+	__(movapd %fpzero,rcontext(tcr.save0))	/* NOTE(review): 16-byte store; presumably save0/save1 both cleared - confirm */
+	__(movl %arg_y,%arg_z)
+	__(movl $XBADKEYS,%arg_y)
+	__(set_nargs(2))
+	__(jmp _SPksignalerr)
+
+	/* Now that we're sure that we have an even number of */
+	/* keywords and values (in %arg_z), move the pairs over */
+	/* to the temp stack. */
+local_label(even):
+	__(lea tsp_frame.fixed_overhead(%arg_z),%arg_y)
+	__(TSP_Alloc_Var(%arg_y,%imm0))
+2:	__(subl $node_size,%arg_y)
+	__(pop (%arg_y))
+	__(cmpl %arg_y,%imm0)
+	__(jne 2b)
+
+	/* Get the keyword vector into %arg_y, and its length into %imm0. */
+	/* Push %imm0 pairs of NILs (representing value, supplied-p) */
+	/* for each declared keyword. */
+	__(movzwl misc_data_offset(%fn),%imm0)
+	/* XXX bootstrapping */
+	__(btr $15,%imm0)
+	__(jnc 0f)
+	__(vector_length(%fn,%arg_y))
+	__(box_fixnum(%imm0,%imm0))
+	__(subl %imm0,%arg_y)
+	__(movl %arg_y,%imm0)
+	__(shrl $fixnumshift,%imm0)
+0:
+	__(movl misc_data_offset(%fn,%imm0,node_size),%arg_y)
+	__(vector_length(%arg_y,%imm0))
+	__(jmp 4f)
+3:	__(push $nil_value)	/* value slot */
+	__(push $nil_value)	/* supplied-p slot */
+4:	__(subl $fixnumone,%imm0)
+	__(jge 3b)
+
+	/* We can now push %ra0 (aka %temp0) and %nargs (aka %temp1) */
+	/* in order to get a couple more registers to work with. */
+	__(push %ra0)
+	__(push %nargs)
+
+	/* At this point we have: */
+	/* number of supplied keywords and values in %arg_z */
+	/* keyword vector in %arg_y */
+	__(vector_length(%arg_y,%imm0))
+	__(push %imm0)		/* count of declared keywords */
+	__(push %arg_z)		/* count of supplied keys and values */
+
+	/* For each declared keyword, iterate over the supplied k/v pairs */
+	/* to see if it's supplied and what the value is. */
+	/* checking to see if any */
+	/* key-value pairs were unexpectedly supplied. */
+
+	__(movl rcontext(tcr.save_tsp),%temp0)
+	__(addl $2*node_size,%temp0) /* skip frame overhead */
+	/* %temp0: top of tstack (skipping frame overhead) */
+	__(lea 4*node_size(%esp,%imm0,2),%temp1)
+	/* %temp1: word above 0th value/supplied-p pair on vstack */
+	/* %arg_y: keyword vector */
+	__(xorl %imm0,%imm0)
+	/* %imm0: index */
+	/* %arg_z: temporary */
+
+	/* Iterate over supplied k/v pairs on tstack.  See if key is */
+	/* in the keyword vector.  Copy value and set supplied-p on */
+	/* vstack if found. */
+
+local_label(tstack_loop):
+	__(movl (%temp0,%imm0,2),%arg_z)	/* keyword */
+	__(push %imm0)
+	__(xorl %imm0,%imm0)
+	__(cmpl $nrs.kallowotherkeys,%arg_z)
+	__(jne local_label(next_keyvect_entry))
+	/* :allow-other-keys supplied: only the FIRST occurrence counts */
+	__(btsl $keyword_flags_seen_aok_bit,rcontext(tcr.unboxed0))
+	__(jc local_label(next_keyvect_entry))
+	__(push %imm0)
+	__(movl 4(%esp),%imm0)
+	__(cmpl $nil_value,node_size(%temp0,%imm0,2))	/* its value non-NIL? */
+	__(pop %imm0)
+	__(je local_label(next_keyvect_entry))
+	__(btsl $keyword_flags_aok_bit,rcontext(tcr.unboxed0))
+	__(jmp local_label(next_keyvect_entry))
+	/* loop through keyword vector */
+6:	__(cmpl misc_data_offset(%arg_y,%imm0),%arg_z)
+	__(jne 7f)
+	/* Got a match; have we already seen this keyword? */
+	__(negl %imm0)
+	__(cmpl $nil_value,-node_size*2(%temp1,%imm0,2))
+	__(jne 9f)	/* seen it, ignore this value */
+	__(movl (%esp),%arg_z)
+	__(lea (%temp0,%arg_z,2),%arg_z)
+	__(movl node_size(%arg_z),%arg_z) /* value for this key */
+	__(movl %arg_z,-node_size(%temp1,%imm0,2))	/* store value */
+	__(movl $t_value,-node_size*2(%temp1,%imm0,2))	/* set supplied-p */
+	__(jmp 9f)
+7:	__(addl $node_size,%imm0)
+local_label(next_keyvect_entry):
+	__(cmpl %imm0,8(%esp))	/* end of keyword vector? */
+	__(jne 6b)
+	/* Didn't match anything in the keyword vector.  Is the keyword */
+	/* :allow-other-keys? */
+	__(cmpl $nrs.kallowotherkeys,%arg_z)
+	__(je 9f)	/* :allow-other-keys is never "unknown" */
+8:	__(btsl $keyword_flags_unknown_keys_bit,rcontext(tcr.unboxed0))
+9:	__(pop %imm0)
+	__(addl $fixnumone,%imm0)
+	__(movl %imm0,%arg_z)
+	__(shll $1,%arg_z)	/* pairs of tstack words */
+	__(cmpl %arg_z,0(%esp))
+	__(jne local_label(tstack_loop))
+
+	__(pop %imm0)	/* count of supplied keys and values */
+	__(addl $node_size,%esp)	/* discard declared-keyword count */
+	__(pop %nargs)
+	__(pop %ra0)
+
+	/* If the function takes an &rest arg, or if we got an unrecognized */
+	/* keyword and don't allow that, copy the incoming k/v pairs from */
+	/* the temp stack back to the value stack. */
+	__(btl $keyword_flags_rest_bit,rcontext(tcr.unboxed0))
+	__(jc 1f)
+	__(btl $keyword_flags_unknown_keys_bit,rcontext(tcr.unboxed0))
+	__(jnc 0f)
+	__(btl $keyword_flags_aok_bit,rcontext(tcr.unboxed0))
+	__(jnc 1f)
+	/* pop the tstack frame */
+0:	__(discard_temp_frame(%imm0))
+	__(jmp *%ra0)
+
+	/* Copy the k/v pairs from the tstack back to the value stack, */
+	/* either because the function takes an &rest arg or because */
+	/* we need to signal an "unknown keywords" error. */
+1:	__(movl rcontext(tcr.save_tsp),%arg_z)
+	__(mov (%arg_z),%arg_y)	/* frame's back-link marks the end */
+	__(jmp 3f)
+2:	__(push (%arg_z))
+	__(push node_size(%arg_z))
+3:	__(addl $dnode_size,%arg_z)
+	__(cmpl %arg_z,%arg_y)
+	__(jne 2b)
+	__(discard_temp_frame(%arg_z))
+	__(btl $keyword_flags_unknown_keys_bit,rcontext(tcr.unboxed0))
+	__(jnc 9f)
+	__(btl $keyword_flags_aok_bit,rcontext(tcr.unboxed0))
+	__(jc 9f)
+	/* Signal an "unknown keywords" error */
+	__(movl %imm0,%nargs)
+	__(movl $nil_value,%arg_z)
+	__(test %nargs,%nargs)
+	__(movl %ra0,rcontext(tcr.save0))	/* save temp0 while consing */
+	__(jmp 5f)
+4:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))
+	__(subl $node_size,%nargs)
+5:	__(jnz 4b)
+	__(movl $XBADKEYS,%arg_y)
+	__(set_nargs(2))
+	__(movl rcontext(tcr.save0),%ra0)
+	__(movl $0,rcontext(tcr.save0))
+	__(jmp _SPksignalerr)
+9:	__(jmp *%ra0)
+
+/* No keyword value were provided.  Access the keyword vector (which is the */
+/* 0th constant in %fn), determine its length N, and push N pairs of NILs. */
+/* N could be 0... */
+
+local_label(no_keyword_values):
+	__(movzwl misc_data_offset(%fn),%imm0)
+	/* XXX bootstrapping */
+	__(btr $15,%imm0)
+	__(jnc 9f)
+	__(vector_length(%fn,%arg_y))
+	__(box_fixnum(%imm0,%imm0))
+	__(subl %imm0,%arg_y)
+	__(movl %arg_y,%imm0)
+	__(shrl $fixnumshift,%imm0)
+9:
+	__(movl misc_data_offset(%fn,%imm0,node_size),%arg_y)
+	__(vector_length(%arg_y,%arg_z))
+	__(movl $nil_value,%imm0)
+	__(jmp 1f)
+0:	__(push %imm0)	/* value slot */
+	__(push %imm0)	/* supplied-p slot */
+1:	__(subl $fixnumone,%arg_z)
+	__(jge 0b)
+	__(jmp *%ra0)
+_endsubp(keyword_bind)
+
+/* Normally, we'd just set %fname (aka %temp0) and do */
+/* jump_fname().  Sometimes, though, %temp0 is being used */
+/* as %ra0, and I'm not sure that it's going to be safe to */
+/* clobber that.  (Note that nil-relative symbols aren't going */
+/* get moved around by the GC, so we can get away with putting */
+/* '%err-disp in %imm0.) */
+/* Signal a kernel error: jump to the function in %err-disp's fcell. */
+/* Uses %imm0 for the (nil-relative, hence immovable) symbol so that */
+/* %temp0/%ra0 is never clobbered. */
+_spentry(ksignalerr)
+	__(mov $nrs.errdisp,%imm0)
+	__(mov symbol.fcell(%imm0),%fn)
+	__(jump_fn)
+_endsubp(ksignalerr)
+
+/* Stack-cons a &rest list from ALL incoming args (0 fixed args). */
+_spentry(stack_rest_arg)
+	__(xorl %imm0,%imm0)
+	__(push_argregs())
+	__(jmp _SPstack_cons_rest_arg)
+_endsubp(stack_rest_arg)
+
+/* Stack-cons a &rest list; %imm0 = count of fixed args set by caller. */
+_spentry(req_stack_rest_arg)
+	__(push_argregs())
+	__(jmp _SPstack_cons_rest_arg)
+_endsubp(req_stack_rest_arg)
+
+/* Cons the &rest args (everything past the %imm0 fixed bytes) into a */
+/* list of conses allocated in one temp-stack frame and push it.  An */
+/* empty TSP frame is always made (even for 0 args, or before punting */
+/* to heap-consing) so the caller can discard it unconditionally. */
+/* BUG FIX: the "too big" tail was missing its "3:" local label, so */
+/* the forward branch "ja 3f" had no target. */
+_spentry(stack_cons_rest_arg)
+	__(movd %nargs,%mm2)	/* preserve full %nargs */
+	__(movl %temp0,rcontext(tcr.save0))	/* save ra0 while consing */
+	__(subl %imm0,%temp1)	/* byte count of &rest args */
+	__(movl $nil_value,%arg_z)
+	__(jle 2f)	/* empty list; make an empty TSP frame */
+	__(addl %temp1,%temp1)	/* two words per cons */
+	__(cmpl $(tstack_alloc_limit-dnode_size),%temp1)
+	__(ja 3f)	/* make empty frame, then heap-cons */
+	__(dnode_align(%temp1,tsp_frame.fixed_overhead,%imm0))
+	__(TSP_Alloc_Var(%imm0,%temp0))
+	__(addl $fulltag_cons,%temp0)
+1:	__(pop %arg_y)
+	__(_rplacd(%temp0,%arg_z))
+	__(_rplaca(%temp0,%arg_y))
+	__(movl %temp0,%arg_z)	/* new cell becomes head */
+	__(addl $cons.size,%temp0)
+	__(subl $dnode_size,%temp1)
+	__(jne 1b)
+	__(push %arg_z)
+	__(movd %mm2,%nargs)
+	__(movl rcontext(tcr.save0),%temp0)
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(jmp *%temp0)
+/* Length 0, make empty frame */
+2:
+	__(TSP_Alloc_Fixed(0,%temp0))
+	__(push %arg_z)
+	__(movd %mm2,%nargs)
+	__(movl rcontext(tcr.save0),%temp0)
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(jmp *%temp0)
+/* Too big to stack-cons, but make an empty frame before heap-consing */
+3:	__(TSP_Alloc_Fixed(0,%temp0))
+	__(movd %mm2,%nargs)
+	__(movl rcontext(tcr.save0),%temp0)
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(jmp _SPheap_cons_rest_arg)
+_endsubp(stack_cons_rest_arg)
+
+/* Not used on this port; trap if ever reached. */
+_spentry(getxlong)
+	__(hlt)
+_endsubp(getxlong)
+
+/* Have to be a little careful here: the caller may or may not have pushed  */
+/* an empty frame, and we may or may not have needed one.  We can't easily  */
+/* tell whether or not a frame will be needed (if the caller didn't reserve  */
+/* a frame, whether or not we need one depends on the length of the list  */
+/* in arg_z.  So, if the caller didn't push a frame, we do so; once */
+/* everything's been spread, we discard the reserved frame (regardless of */
+/* who pushed it) if all args fit in registers.   */
+
+/* xxx preserve temp1 somehow? cf. comment in x862-invoke-fn */
+/* Spread the list in %arg_z as additional call arguments: push each */
+/* element, adding to %nargs.  The incoming %nargs is parked in %mm0; */
+/* the original list is kept in tcr.save0 so it can be recovered (and */
+/* the cell cleared) on either error path. */
+_spentry(spreadargz)
+	__(test %nargs,%nargs)
+	__(jne 0f)
+	/* No args pushed yet: reserve a frame in case some end up on */
+	/* the stack. */
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+0:	__(movl %arg_z,rcontext(tcr.save0))	/* save in case of error */
+	__(movd %nargs,%mm0)	/* now we can use %temp1 */
+	__(xorl %nargs,%nargs)
+	__(cmpl $nil_value,%arg_z)
+	__(je 2f)
+	/* Walk the list, pushing each car; a non-cons tail or more than */
+	/* call-arguments-limit elements is an error. */
+1:	__(extract_fulltag(%arg_z,%imm0))
+	__(cmpb $fulltag_cons,%imm0_b)
+	__(jne 9f)
+	__(_car(%arg_z,%arg_y))
+	__(_cdr(%arg_z,%arg_z))
+	__(add $node_size,%nargs)
+	__(cmpl $call_arguments_limit<<fixnumshift,%nargs)
+	__(jge 8f)
+	__(push %arg_y)
+	__(cmpl $nil_value,%arg_z)
+	__(jne 1b)
+2:	__(movd %mm0,%imm0)
+	__(addl %imm0,%nargs)	/* total = already-pushed + spread */
+	__(jne 4f)
+	/* Zero args total: discard the reserved frame and return. */
+3:	__(addl $2*node_size,%esp)
+	__(movl $0,rcontext(tcr.save0))
+	__(jmp *%ra0)
+	/* Pop up to nargregs args back into registers; if all fit, the */
+	/* reserved frame is discarded at 3b. */
+4:	__(pop %arg_z)
+	__(cmp $1*node_size,%nargs)
+	__(je 3b)
+	__(pop %arg_y)
+	__(cmp $2*node_size,%nargs)
+	__(je 3b)
+	__(movl $0,rcontext(tcr.save0))
+	__(jmp *%ra0)
+/* Discard everything that's been pushed already, complain */
+8:	__(lea (%esp,%nargs),%esp)
+	__(movl rcontext(tcr.save0),%arg_z) /* recover original */
+	__(movl $0,rcontext(tcr.save0))
+	__(movl $XTMINPS,%arg_y)
+	__(set_nargs(2))
+	__(push %ra0)
+	__(jmp _SPksignalerr)
+	/* Improper list: signal XNOSPREAD with the original list. */
+9:	__(lea (%esp,%nargs),%esp)
+	__(movl rcontext(tcr.save0),%arg_z) /* recover original */
+	__(movl $0,rcontext(tcr.save0))
+	__(movl $XNOSPREAD,%arg_y)
+	__(set_nargs(2))
+	__(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(spreadargz)
+
+
+/* Caller built its own frame when it was entered.  If all outgoing args  */
+/* are in registers, we can discard that frame; otherwise, we copy outgoing  */
+/* relative to it and restore %rbp/%ra0   */
+/* Tail-funcall the object in %temp0.  If more than nargregs args, */
+/* slide the stacked args down over the caller's frame, restore the */
+/* caller's %ebp/return address, then funcall; otherwise just pop the */
+/* frame (leave) and funcall. */
+_spentry(tfuncallgen)
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe 9f)
+	__(lea -nargregs*node_size(%esp,%nargs),%imm0)	/* src: top stacked arg */
+	__(movl %temp0,rcontext(tcr.save0))
+	__(movd %nargs,%mm0)
+	__(xorl %temp1,%temp1)	/* dst offset from %ebp, counts down */
+	/* We can use %ra0 as a temporary here, since the real return address */
+	/* is on the stack   */
+0:	__(movl -node_size(%imm0),%ra0)
+	__(movl %ra0,-node_size(%ebp,%temp1))
+	__(subl $node_size,%imm0)
+	__(subl $node_size,%temp1)
+	__(cmpl %imm0,%esp)
+	__(jne 0b)
+	__(lea (%ebp,%temp1),%esp)	/* new tos, just below slid args */
+	__(movl 4(%ebp),%ra0)	/* caller's return address */
+	__(movl (%ebp),%ebp)	/* caller's saved %ebp */
+        __(pushl %ra0)
+	__(movd %mm0,%nargs)
+	__(movl rcontext(tcr.save0),%temp0)
+	__(movss %fpzero,rcontext(tcr.save0))	/* save0 must not hold a stale node */
+	__(do_funcall())
+        /* All args in regs; exactly the same as the tfuncallvsp case   */
+9:
+	__(leave)
+	__(do_funcall())
+
+_endsubp(tfuncallgen)
+
+/* Some args were pushed; move them down in the frame   */
+/* Some args were pushed; move them down in the frame   */
+/* (Caller guarantees %nargs > nargregs, so no register-only case.) */
+_spentry(tfuncallslide)
+	__(lea -nargregs*node_size(%esp,%nargs),%imm0)	/* src: top stacked arg */
+	__(movd %nargs,%mm0)
+	__(xorl %temp1,%temp1)	/* dst offset from %ebp, counts down */
+	__(movl %temp0,rcontext(tcr.save0))	/* callee; %temp0 reused below */
+0:	__(movl -node_size(%imm0),%temp0)
+	__(movl %temp0,-node_size(%ebp,%temp1))
+	__(subl $node_size,%imm0)
+	__(subl $node_size,%temp1)
+	__(cmpl %imm0,%esp)
+	__(jne 0b)
+	__(lea (%ebp,%temp1),%esp)
+	__(push 4(%ebp))	/* return address */
+	__(movl (%ebp),%ebp)
+	__(movd %mm0,%nargs)
+	__(movl rcontext(tcr.save0),%temp0)
+	__(movss %fpzero,rcontext(tcr.save0))	/* clear: save0 must not hold a stale node */
+	__(do_funcall())
+_endsubp(tfuncallslide)
+
+/* No args were pushed; recover saved context & do funcall 	  */
+/* No args were pushed; recover saved context & do funcall 	  */
+_spentry(tfuncallvsp)
+	__(leave)
+	__(do_funcall())
+_endsubp(tfuncallvsp)
+
+/* Tail-call the symbol in %fname.  Same frame-discarding/sliding */
+/* logic as tfuncallgen, but finishes with jump_fname instead of */
+/* funcall. */
+_spentry(tcallsymgen)
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe 9f)
+	__(lea -nargregs*node_size(%esp,%nargs),%imm0)	/* src: top stacked arg */
+	__(movd %nargs,%mm0)
+	__(movl %temp0,rcontext(tcr.save0))
+	__(xorl %temp1,%temp1)	/* aka nargs */
+0:	__(movl -node_size(%imm0),%temp0)
+	__(movl %temp0,-node_size(%ebp,%temp1))
+	__(subl $node_size,%imm0)
+	__(subl $node_size,%temp1)
+	__(cmpl %imm0,%esp)
+	__(jne 0b)
+	__(lea (%ebp,%temp1),%esp)
+	__(movl 4(%ebp),%temp0)	/* caller's return address */
+	__(movl (%ebp),%ebp)
+	__(push %temp0)
+	__(movl rcontext(tcr.save0),%temp0)
+	__(movss %fpzero,rcontext(tcr.save0))	/* clear: save0 must not hold a stale node */
+	__(movd %mm0,%nargs)
+	__(jump_fname())
+/* All args in regs; exactly the same as the tcallsymvsp case. */
+9:
+	__(leave)
+	__(jump_fname())
+_endsubp(tcallsymgen)
+
+/* Tail-call the symbol in %fname with some args pushed.  Unlike the */
+/* other slide cases this one preserves %arg_y/%arg_z/%nargs on the */
+/* stack around the copy loop instead of using %mm0/tcr.save0. */
+_spentry(tcallsymslide)
+	__(movl %ebp,%imm0)
+	__(subl %nargs,%imm0)
+	__(addl $nargregs*node_size,%imm0)	/* new tos */
+	__(push %imm0)
+	__(push %arg_y)
+	__(push %arg_z)
+	__(push %nargs)
+	__(lea (4-nargregs)*node_size(%esp,%nargs),%arg_y) /* src ptr */
+	__(movl %ebp,%imm0) /* dst ptr */
+	__(subl $fixnumone*nargregs,%nargs)	/* count of stacked args */
+	__(jmp 1f)
+0:	__(subl $node_size,%arg_y)
+	__(movl (%arg_y),%arg_z)
+	__(subl $node_size,%imm0)
+	__(movl %arg_z,(%imm0))
+1:	__(subl $fixnumone,%nargs)
+	__(jge 0b)
+	__(pop %nargs)
+	__(pop %arg_z)
+	__(pop %arg_y)
+	__(pop %esp)	/* the "new tos" computed above */
+	__(push node_size(%ebp))	/* caller's return address */
+	__(movl 0(%ebp),%ebp)
+	__(jump_fname)
+_endsubp(tcallsymslide)
+
+/* No args pushed: pop the frame and tail-call the symbol in %fname. */
+_spentry(tcallsymvsp)
+	__(leave)
+	__(jump_fname())
+_endsubp(tcallsymvsp)
+
+/* Tail-call the (known) function in %temp0.  Same sliding logic as */
+/* tfuncallgen, but the callee goes into %fn and is jumped through */
+/* directly. */
+_spentry(tcallnfngen)
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe 9f)
+	__(lea -nargregs*node_size(%esp,%nargs),%imm0)	/* src: top stacked arg */
+	__(movd %nargs,%mm0)	/* stash nargs aka temp1 */
+	__(xorl %temp1,%temp1)
+	__(movl %temp0,rcontext(tcr.save0))
+	/* It's OK to use %ra0 (%temp0) as an temp here, since the */
+	/* real return address is on the stack. */
+0:	__(movl -node_size(%imm0),%ra0)
+	__(movl %ra0,-node_size(%ebp,%temp1))
+	__(subl $node_size,%imm0)
+	__(subl $node_size,%temp1)
+	__(cmpl %imm0,%esp)
+	__(jne 0b)
+	__(movl rcontext(tcr.save0),%fn)
+	__(movss %fpzero,rcontext(tcr.save0))	/* clear: save0 must not hold a stale node */
+	__(lea (%ebp,%temp1),%esp)
+	__(movl lisp_frame.savera0(%ebp),%ra0)
+	__(movl lisp_frame.backlink(%ebp),%ebp)
+	__(push %ra0)
+	__(movd %mm0,%nargs)
+	__(jmp *%fn)
+9:	/* All args in regs; exactly the same as the tcallnfnvsp case */
+	__(movl %temp0,%fn)
+	__(leave)
+	__(jmp *%fn)
+_endsubp(tcallnfngen)
+
+/* Like tcallnfngen, but the caller knows some args were pushed: */
+/* slide them down over the caller's frame, pop the frame, and jump */
+/* to the function stashed in %temp0. */
+_spentry(tcallnfnslide)
+	__(lea -nargregs*node_size(%esp,%nargs),%imm0)	/* src: top stacked arg */
+	__(movd %nargs,%mm0)	/* save nargs aka temp1 */
+	__(xorl %temp1,%temp1)
+	__(movl %temp0,rcontext(tcr.save0))
+	/* We can use %ra0 as a temporary here, since the real return address */
+	/* is on the stack   */
+0:	__(movl -node_size(%imm0),%ra0)
+	__(movl %ra0,-node_size(%ebp,%temp1))
+	__(subl $node_size,%imm0)
+	__(subl $node_size,%temp1)
+	__(cmpl %imm0,%esp)
+	__(jne 0b)
+	__(movl rcontext(tcr.save0),%fn)
+	__(lea (%ebp,%temp1),%esp)
+	__(movl lisp_frame.savera0(%ebp),%ra0)
+	__(movl lisp_frame.backlink(%ebp),%ebp)
+        __(push %ra0)
+	/* BUGFIX: clear the single 32-bit tcr.save0 cell with movss, as */
+	/* every sibling subprim does.  The previous movapd performed a */
+	/* 16-byte aligned store, clobbering tcr.save1-tcr.save3 and */
+	/* faulting unless tcr.save0 happened to be 16-byte aligned. */
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(movd %mm0,%nargs)
+	__(jmp *%fn)
+_endsubp(tcallnfnslide)
+
+/* No args pushed: pop the frame and jump to the function in %temp0. */
+_spentry(tcallnfnvsp)
+	__(mov %temp0,%fn)
+	__(leave)
+	__(jmp *%fn)
+_endsubp(tcallnfnvsp)
+
+/* Make a "raw" area on the foreign stack, stack-cons a macptr to point */
+/* to it, and return the macptr.  Size (in bytes, boxed) is in arg_z */
+/* on entry; macptr in arg_z on exit. */
+_spentry(makestackblock)
+        __(check_cstack_alignment())
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+macptr.size,%imm0))
+	__(cmpl $tstack_alloc_limit,%imm0)
+	__(jae 1f)	/* too big for the foreign stack: heap-allocate */
+        __ifdef(`WINDOWS')
+         __(windows_cstack_probe(%imm0,%arg_z))
+        __endif
+	/* Push a csp frame of %imm0 bytes on the foreign stack; the */
+	/* frame records the previous foreign_sp and the current %ebp. */
+	__(movd rcontext(tcr.foreign_sp),%mm0)
+	__(subl %imm0,rcontext(tcr.foreign_sp))
+	__(movl rcontext(tcr.foreign_sp),%arg_z)
+	__(movd %mm0,(%arg_z))
+	__(movl %ebp,csp_frame.save_ebp(%arg_z))
+	/* Build the macptr in the frame; its address field points just */
+	/* past the macptr itself, at the raw data area. */
+	__(lea macptr.size+tsp_frame.fixed_overhead(%arg_z),%imm0)
+	__(movl $macptr_header,tsp_frame.fixed_overhead(%arg_z))
+	__(addl $fulltag_misc+tsp_frame.fixed_overhead,%arg_z)
+	__(movl %imm0,macptr.address(%arg_z))
+	__(movss %fpzero,macptr.domain(%arg_z))
+	__(movss %fpzero,macptr.type(%arg_z))
+	__(ret)
+	/* Request too large: push an empty csp frame and tail-call the */
+	/* lisp function %make-new-gcable-ptr to heap-allocate instead. */
+1:	__(movd rcontext(tcr.foreign_sp),%mm0)
+	__(subl $dnode_size,rcontext(tcr.foreign_sp))
+	__(movl rcontext(tcr.foreign_sp),%imm0)
+	__(movd %mm0,(%imm0))
+	__(movl %ebp,csp_frame.save_ebp(%imm0))
+	__(set_nargs(1))
+	__(movl $nrs.new_gcable_ptr,%fname)
+	__(jump_fname())
+_endsubp(makestackblock)
+
+/* Exactly like makestackblock, but zero-fill the raw data area */
+/* before returning. */
+_spentry(makestackblock0)
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+macptr.size,%imm0))
+	__(cmpl $tstack_alloc_limit,%imm0)
+	__(jae 9f)	/* too big: heap-allocate a gcable pointer */
+        __ifdef(`WINDOWS')
+         __(windows_cstack_probe(%imm0,%temp0))
+        __endif
+	/* %temp0 = old foreign_sp; it both links the new csp frame and */
+	/* serves as the end pointer for the zero-fill loop below. */
+        __(movl rcontext(tcr.foreign_sp),%temp0)
+        __(subl %imm0,rcontext(tcr.foreign_sp))
+        __(movl rcontext(tcr.foreign_sp),%arg_z)
+	__(movl %temp0,(%arg_z))
+	__(movl %ebp,csp_frame.save_ebp(%arg_z))
+	__(lea macptr.size+tsp_frame.fixed_overhead(%arg_z),%imm0)
+	__(movl $macptr_header,tsp_frame.fixed_overhead(%arg_z))
+	__(addl $fulltag_misc+tsp_frame.fixed_overhead,%arg_z)
+	__(movl %imm0,macptr.address(%arg_z))
+	__(movss %fpzero,macptr.domain(%arg_z))
+	__(movss %fpzero,macptr.type(%arg_z))
+	__(jmp 2f)
+	/* Zero the data area a dnode at a time, from %imm0 up to the */
+	/* old foreign_sp in %temp0. */
+1:	__(movsd %fpzero,(%imm0))
+	__(addl $dnode_size,%imm0)
+2:	__(cmpl %imm0,%temp0)
+	__(jne 1b)
+	__(repret)
+9:	__(movd rcontext(tcr.foreign_sp),%mm0)
+        __(subl $dnode_size,rcontext(tcr.foreign_sp))
+        __(movl rcontext(tcr.foreign_sp),%imm0)
+	__(movd %mm0,(%imm0))
+	__(movl %ebp,csp_frame.save_ebp(%imm0))
+	__(set_nargs(1))
+	__(movl $nrs.new_gcable_ptr,%fname)
+	__(jump_fname())
+_endsubp(makestackblock0)
+
+/* arg_y = element count (must be a non-negative fixnum), arg_z = */
+/* initial element.  Return a list of arg_y copies of arg_z, consed */
+/* on the temp stack when it fits, else in the heap. */
+_spentry(makestacklist)
+	__(test %arg_y,%arg_y)
+        __(js 9f)	/* negative: error */
+	__(movl %arg_y,%imm0)
+        __(testb $fixnummask,%imm0_b)
+        __(jne 9f)	/* not a fixnum: error */
+	__(addl %imm0,%imm0)	/* two nodes (one cons) per element */
+	__(rcmpl(%imm0,$tstack_alloc_limit))
+	__(movl $nil_value,%temp1)
+	__(jae 2f)	/* too big to stack-cons */
+	__(addl $tsp_frame.fixed_overhead,%imm0)
+	__(TSP_Alloc_Var(%imm0,%temp0))
+	__(addl $fulltag_cons,%temp0)
+	__(jmp 1f)
+	/* Fill conses in the TSP frame, linking each onto %temp1. */
+0:	__(_rplaca(%temp0,%arg_z))
+	__(_rplacd(%temp0,%temp1))
+	__(movl %temp0,%temp1)
+	__(addl $cons.size,%temp0)
+1:	__(subl $fixnumone,%arg_y)
+	__(jge 0b)
+	__(movl %temp1,%arg_z)
+	__(ret)
+	/* Heap case: push an empty TSP frame for consistency, then */
+	/* cons in the heap. */
+2:	__(TSP_Alloc_Fixed(0,%imm0))
+	__(jmp 4f)
+3:	__(Cons(%arg_z,%temp1,%temp1))
+4:	__(subl $fixnumone,%arg_y)
+	__(jge 3b)
+	__(movl %temp1,%arg_z)
+	__(ret)
+9:      __(uuo_error_reg_not_type(Rarg_y,error_object_not_unsigned_byte))
+_endsubp(makestacklist)
+
+/* subtype (boxed) vpushed before initial values. (Had better be a */
+/* node header subtag.)  Nargs set to count of things vpushed. */
+_spentry(stkgvector)
+	__(movl -fixnumone(%esp,%nargs),%imm0)	/* boxed subtag */
+	__(shrl $fixnumshift,%imm0)
+	__(leal -fixnumone(%nargs),%arg_z)	/* element count (fixnum) */
+	__(movl %arg_z,%arg_y)
+	__(shll $num_subtag_bits-fixnumshift,%arg_z)
+	__(orl %arg_z,%imm0)	/* imm0 = header, %arg_y = unaligned size */
+	__(movd %imm0,%mm0)	/* stash header across TSP allocation */
+	__(dnode_align(%arg_y,(tsp_frame.fixed_overhead+node_size),%imm0))
+	__(TSP_Alloc_Var(%imm0,%arg_z))
+	__(movd %mm0,(%arg_z))	/* store header */
+	__(addl $fulltag_misc,%arg_z)
+	__(lea -node_size(%nargs),%imm0)	/* index of last element */
+	__(jmp 2f)
+	/* Pop pushed initial values into the vector, last first. */
+1:	__(pop misc_data_offset(%arg_z,%imm0))
+2:	__(subl $node_size,%imm0)
+	__(jge 1b)
+	__(addl $node_size,%esp)	/* discard the boxed subtag */
+	__(jmp *%ra0)
+_endsubp(stkgvector)
+
+/* Allocate a fulltag-misc object. */
+/* arg_y = boxed element count, arg_z = subtag (boxed, of course) */
+_spentry(misc_alloc)
+	/* Element count must fit in 24 bits (the header's length field). */
+	__(testl $~(((1<<24)-1)<<fixnumshift),%arg_y)
+	__(jne local_label(misc_alloc_not_u24))
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(mov %arg_y,%temp0)
+	__(shl $num_subtag_bits-fixnumshift,%temp0)
+	__(or %temp0,%imm0)	/* %imm0 now = header */
+	__(movd %imm0,%mm0)	/* Misc_Alloc wants header in %mm0 */
+	/* Dispatch on the subtag to compute the data size in bytes: */
+	/* node vectors and 32-bit ivectors take 4 bytes/element; 8- and */
+	/* 16-bit ivectors 1 and 2; bit vectors 1 bit; double-float */
+	/* vectors 8 bytes. */
+	__(andb $fulltagmask,%imm0_b)
+	__(cmpb $fulltag_nodeheader,%imm0_b)
+	__(je local_label(misc_alloc_32))
+	__(movd %mm0,%imm0)
+	__(cmpb $max_32_bit_ivector_subtag,%imm0_b)
+	__(jbe local_label(misc_alloc_32))
+	__(cmpb $max_8_bit_ivector_subtag,%imm0_b)
+	__(jbe local_label(misc_alloc_8))
+	__(cmpb $max_16_bit_ivector_subtag,%imm0_b)
+	__(jbe local_label(misc_alloc_16))
+	__(cmpb $subtag_double_float_vector,%imm0_b)
+	__(jne local_label(misc_alloc_1))
+	/* double-float vector case */
+	__(imul $2,%arg_y,%imm0)
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_1):
+	/* bit vector: round bits up to bytes */
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(addl $7,%imm0)
+	__(shrl $3,%imm0)
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_8):
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_16):
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(shl $1,%imm0)
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_32):
+	/* A fixnum count is already the byte count for 4-byte elements. */
+	__(movl %arg_y,%imm0)
+local_label(misc_alloc_alloc_vector):
+	__(dnode_align(%imm0,node_size,%imm0))
+	__(Misc_Alloc(%arg_z))
+	__(ret)
+local_label(misc_alloc_not_u24):
+	__(uuo_error_reg_not_type(Rarg_y,error_object_not_unsigned_byte_24))
+_endsubp(misc_alloc)
+
+/* N.B. arg count word in %imm0, not %nargs */
+/* no %whole_reg;  it's in rcontext(tcr.save0) */
+/* %arg_reg is %temp1, key vector in %arg_y */ 
+/* Destructure the list in %arg_reg against the lambda-list described */
+/* by the arg count word in %imm0 (byte 0: required count, byte 1: */
+/* optional count, bit flags for &optional supplied-p, &rest, &key; */
+/* byte 2: keyword-variable count).  Required/optional/rest values are */
+/* pushed on the vstack; keyword handling matches pairs against the */
+/* keyword vector in %arg_y.  %ra0 is preserved in tcr.save1; the */
+/* whole list lives in tcr.save0 for error reporting. */
+/* NB: several loops below rely on flags set by a subb/cmpb surviving */
+/* intervening push/_cdr moves -- keep instruction order intact. */
+_startfn(C(destbind1))
+	__(movl %ra0,rcontext(tcr.save1))
+	/* Save entry %esp in case of error   */
+	__(movd %esp,%mm0)
+	/* Save arg count word */
+	__(movd %imm0,%mm1)
+	/* Extract required arg count.   */
+        __(testb %imm0_b,%imm0_b)
+	__(je local_label(opt))		/* skip if no required args   */
+	__(movzbl %imm0_b,%imm0)
+local_label(req_loop):
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(toofew))
+	/* Check that the list head is a cons before taking its car. */
+	__(movb $fulltagmask,%imm0_bh)
+	__(andb %arg_reg_b,%imm0_bh)
+	__(cmpb $fulltag_cons,%imm0_bh)
+	__(jne local_label(badlist))
+	__(subb $1,%imm0_b)
+	__(pushl cons.car(%arg_reg))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jne local_label(req_loop))
+	__(movd %mm1,%imm0)
+local_label(opt):
+	/* %imm0_b = count of &optional vars (from byte 1). */
+        __(movb %imm0_bh,%imm0_b)
+	__(testb %imm0_b,%imm0_b)
+	__(je local_label(rest_keys))
+	__(btl $initopt_bit,%imm0)
+	__(jc local_label(opt_supp))
+	/* 'simple' &optionals:	 no supplied-p, default to nil.   */
+local_label(simple_opt_loop):
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(default_simple_opt))
+	__(movb $fulltagmask,%imm0_bh)
+	__(andb %arg_reg_b,%imm0_bh)
+	__(cmpb $fulltag_cons,%imm0_bh)
+	__(jne local_label(badlist))
+	__(subb $1,%imm0_b)
+	__(pushl cons.car(%arg_reg))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jne local_label(simple_opt_loop))
+	__(jmp local_label(rest_keys))
+local_label(default_simple_opt):
+	/* Ran out of list: default the remaining optionals to nil. */
+	__(subb $1,%imm0_b)
+	__(pushl $nil_value)
+	__(jne local_label(default_simple_opt))
+	__(jmp local_label(rest_keys))
+local_label(opt_supp):
+	/* &optionals with supplied-p: push value then supplied-p flag. */
+	__(movb $fulltagmask,%imm0_bh)
+	__(andb %arg_reg_b,%imm0_bh)
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(default_hard_opt))
+	__(cmpb $fulltag_cons,%imm0_bh)
+	__(jne local_label(badlist))
+	__(subb $1,%imm0_b)
+	__(pushl cons.car(%arg_reg))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(push $t_value)
+	__(jne local_label(opt_supp))
+	__(jmp local_label(rest_keys))
+local_label(default_hard_opt):
+	__(subb $1,%imm0_b)
+	__(push $nil_value)
+	__(push $nil_value)
+	__(jne local_label(default_hard_opt))
+local_label(rest_keys):
+	__(btl $restp_bit,%imm0)
+	__(jc local_label(have_rest))
+	__(btl $keyp_bit,%imm0)
+	__(jc local_label(have_keys))
+	/* No &rest/&key: any leftover list tail means too many args. */
+	__(compare_reg_to_nil(%arg_reg))
+	__(jne local_label(toomany))
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(jmp *%ra0)
+local_label(have_rest):
+	__(pushl %arg_reg)
+	__(btl $keyp_bit,%imm0)
+	__(jc local_label(have_keys))
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(jmp *%ra0)
+	/* Ensure that arg_reg contains a proper,even-length list.  */
+	/* Insist that its length is <= 512 (as a cheap circularity check.)   */
+local_label(have_keys):
+	__(movb $255,%imm0_b)
+	__(push %arg_reg)
+	__(push %arg_z)
+	__(xorl %arg_z,%arg_z)
+local_label(count_keys_loop):
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(counted_keys))
+	__(subb $1,%imm0_b)
+	__(jb local_label(toomany))
+	__(movb $fulltagmask,%arg_z_bh)
+	__(andb %arg_reg_b,%arg_z_bh)
+ 	__(cmpb $fulltag_cons,%arg_z_bh)
+	__(jne local_label(badlist))
+	__(_cdr(%arg_reg,%arg_reg))
+	/* An odd-length key/value list is malformed. */
+        __(compare_reg_to_nil(%arg_reg))
+        __(je local_label(badlist))
+	__(movb $fulltagmask,%arg_z_bh)
+	__(andb %arg_reg_b,%arg_z_bh)
+	__(cmpb $fulltag_cons,%arg_z_bh)
+	__(jne local_label(badlist))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jmp local_label(count_keys_loop))
+local_label(counted_keys):
+	/* We've got a proper, even-length list of key/value pairs in  */
+	/* arg_reg. For each keyword var in the lambda-list, push a pair  */
+	/* of NILs on the vstack.   */
+	__(pop %arg_z)
+	__(pop %arg_reg)
+	__(movd %mm1,%imm0)
+	__(shrl $16,%imm0)
+	__(movzbl %imm0_b,%imm0)	/* number of keyword variables */
+	__(movl %esp,rcontext(tcr.unboxed0))	/* 0th value/supplied-p pair */
+	__(jmp local_label(push_pair_test))
+local_label(push_pair_loop):
+	__(push $nil_value)
+	__(push $nil_value)
+local_label(push_pair_test):
+	__(subb $1,%imm0_b)
+	__(jge local_label(push_pair_loop))
+	/* Save live registers, then walk the key/value pairs. */
+	__(push %temp0)	/* keyword */
+	__(push %arg_z) /* value */
+	__(vector_length(%arg_y,%imm0))
+	__(push %arg_reg)
+	__(push %imm0)	/* keyword vector length */
+	__(movd %mm1,%imm0)
+	__(movl $0,rcontext(tcr.unboxed1)) /* count of unknown keywords seen */
+local_label(match_keys_loop):
+	__(movl 4(%esp),%arg_reg)
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(matched_keys))
+	__(_car(%arg_reg,%temp0))	/* keyword */
+	__(_cdr(%arg_reg,%arg_reg))
+	__(_car(%arg_reg,%arg_z))	/* value */
+	__(_cdr(%arg_reg,%arg_reg))
+	__(movl %arg_reg,4(%esp))
+	__(xorl %temp1,%temp1)	/* index into keyword vector */
+	__(jmp local_label(match_test))
+local_label(match_loop):
+	__(cmpl misc_data_offset(%arg_y,%temp1),%temp0)
+	__(je local_label(matched))
+	__(addl $node_size,%temp1)
+local_label(match_test):
+	__(cmpl %temp1,(%esp))	/* compare index, keyword vector length */
+	__(jne local_label(match_loop))
+	/* No match.  Note unknown keyword, check for :allow-other-keys   */
+	__(addl $1,rcontext(tcr.unboxed1))
+	__(cmpl $nrs.kallowotherkeys,%temp0)
+	__(jne local_label(match_keys_loop))
+	__(subl $1,rcontext(tcr.unboxed1))
+	__(btsl $seen_aok_bit,%imm0)
+	__(jc local_label(match_keys_loop))
+	/* First time we've seen :allow-other-keys.  Maybe set aok_bit.   */
+	__(compare_reg_to_nil(%arg_z))
+	__(je local_label(match_keys_loop))
+	__(btsl $aok_bit,%imm0)
+	__(jmp local_label(match_keys_loop))
+	/* Got a match.  Worry about :allow-other-keys here, too.   */
+local_label(matched):
+	/* Store value/supplied-p into the %temp1'th pair below */
+	/* tcr.unboxed0, but only the first time the keyword appears. */
+	__(negl %temp1)
+	__(shll $1,%temp1)
+	__(addl rcontext(tcr.unboxed0),%temp1)
+	__(cmpl $nil_value,-node_size*2(%temp1))
+	__(jne local_label(match_keys_loop))
+	__(movl %arg_z,-node_size(%temp1))
+	__(movl $t_value,-node_size*2(%temp1))
+	__(cmpl $nrs.kallowotherkeys,%temp0)
+	__(jne local_label(match_keys_loop))
+	__(btsl $seen_aok_bit,%imm0)
+	__(jnc local_label(match_keys_loop))
+	__(compare_reg_to_nil(%arg_z))
+	__(je local_label(match_keys_loop))
+	__(btsl $aok_bit,%imm0)
+	__(jmp local_label(match_keys_loop))
+local_label(matched_keys):
+	__(cmpl $0,rcontext(tcr.unboxed1))	/* any unknown keys seen? */
+	__(je local_label(keys_ok))
+	__(btl $aok_bit,%imm0)
+	__(jnc local_label(badkeys))
+local_label(keys_ok):
+	/* Drop the saved vector length/list/value, restore %temp0. */
+	__(addl $(3*node_size),%esp)
+	__(pop %ra0)
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(jmp *%ra0)
+	/* Some unrecognized keywords.  Complain generically about   */
+	/* invalid keywords.   */
+local_label(badkeys):
+	__(movl $XBADKEYS,%arg_y)
+	__(jmp local_label(destructure_error))
+local_label(toomany):
+	__(movl $XCALLTOOMANY,%arg_y)
+	__(jmp local_label(destructure_error))
+local_label(toofew):
+	__(movl $XCALLTOOFEW,%arg_y)
+	__(jmp local_label(destructure_error))
+local_label(badlist):
+	__(movl $XCALLNOMATCH,%arg_y)
+local_label(destructure_error):
+	__(movd %mm0,%esp)		/* undo everything done to the stack */
+	__(movl rcontext(tcr.save0),%arg_z)	/* %whole_reg */
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(set_nargs(2))
+	__(push %ra0)
+	__(jmp _SPksignalerr)
+_endfn(C(destbind1))
+
+/* Destructure a macro call form: save the whole form as %whole_reg */
+/* (tcr.save0), skip the macro name (car), and fall into destbind1 */
+/* with the argument list.  A non-cons form is an immediate error. */
+_spentry(macro_bind)
+	__(movl %arg_reg,rcontext(tcr.save0))	/* %whole_reg */
+	__(extract_fulltag(%arg_reg,%imm0))
+	__(cmpb $fulltag_cons,%imm0_b)
+	__(jne 1f)
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jmp C(destbind1))
+1:	__(movl $XCALLNOMATCH,%arg_y)
+	__(movl rcontext(tcr.save0),%arg_z)
+	__(movss %fpzero,rcontext(tcr.save0))
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+
+_endsubp(macro_bind)
+
+/* destructuring-bind: the list to destructure is in %arg_reg; save */
+/* it as %whole_reg and let destbind1 do the work. */
+_spentry(destructuring_bind)
+	__(movl %arg_reg,rcontext(tcr.save0))	/* %whole_reg */
+	__(jmp C(destbind1))
+_endsubp(destructuring_bind)
+
+/* Inner (nested) destructuring-bind: the whole form comes in %arg_z */
+/* rather than %arg_reg. */
+_spentry(destructuring_bind_inner)
+	__(movl %arg_z,rcontext(tcr.save0))	/* %whole_reg */
+	__(jmp C(destbind1))
+_endsubp(destructuring_bind_inner)
+
+/* Unimplemented on this target; trap if ever reached. */
+_spentry(vpopargregs)
+	__(hlt)
+_endsubp(vpopargregs)
+
+/* If arg_z is an integer, return in imm0 something whose sign  */
+/* is the same as arg_z's.  If not an integer, error.   */
+_spentry(integer_sign)
+	__(mov %arg_z,%imm0)
+	__(testb $tagmask,%arg_z_b)
+	__(je 8f)	/* fixnum: its own value has the right sign */
+	__(extract_typecode(%arg_z,%imm0))
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne 9f)
+	/* Bignum: the sign lives in the most significant digit. */
+	__(getvheader(%arg_z,%imm0))
+	__(shr $num_subtag_bits,%imm0)	/* digit count */
+	__(movl misc_data_offset-4(%arg_z,%imm0,4),%imm0)
+8:	__(repret)
+9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_integer))
+_endsubp(integer_sign)
+
+/* "slide" nargs worth of values up the stack.  imm0 contains */
+/* the difference between the current stack pointer and the target. */
+_spentry(mvslide)
+	__(movd %nargs,%mm0)	/* preserve %nargs across the copy */
+	__(lea (%esp,%nargs),%arg_y)	/* src: one past last value */
+	__(lea (%arg_y,%imm0),%imm0)	/* dst: src + displacement */
+	__(test %nargs,%nargs)
+	__(je 2f)
+	/* Copy values top-down so overlapping regions are safe. */
+1:
+	__(subl $node_size,%arg_y)
+	__(movl (%arg_y),%arg_z)
+	__(subl $node_size,%imm0)
+	__(movl %arg_z,(%imm0))
+	__(subl $node_size,%nargs)
+	__(jne 1b)
+2:	__(movl %imm0,%esp)
+	__(movd %mm0,%nargs)
+	__(jmp *%ra0)
+_endsubp(mvslide)
+
+/* Pop the %nargs values off the vstack into a new TSP frame whose */
+/* data is: value count, previous save_tsp, then the values. */
+_spentry(save_values)
+	__(movd rcontext(tcr.save_tsp),%mm1)
+/* common exit: nargs = values in this set, mm1 = ptr to tsp before call to save_values   */
+local_label(save_values_to_tsp):
+	__(movl %ra0,rcontext(tcr.save0))
+	__(movl rcontext(tcr.save_tsp),%temp0)
+	__(dnode_align(%nargs,tsp_frame.fixed_overhead+(2*node_size),%imm0)) /* count, link   */
+	__(TSP_Alloc_Var(%imm0,%arg_z))
+	__(movl rcontext(tcr.save_tsp),%imm0)
+	__(movd %mm1,(%imm0))
+	__(movl %nargs,(%arg_z))	/* value count */
+	__(movl %temp0,node_size(%arg_z))	/* link to previous frame */
+	__(leal 2*node_size(%arg_z,%nargs),%arg_y)	/* one past frame data */
+	__(leal (%esp,%nargs),%imm0)	/* one past values on vstack */
+	__(cmpl %imm0,%esp)
+	__(jmp 2f)
+	/* Copy values from the vstack into the frame, last first. */
+1:	__(subl $node_size,%imm0)
+	__(movl (%imm0),%arg_z)
+	__(subl $node_size,%arg_y)
+	__(cmpl %imm0,%esp)
+	__(movl %arg_z,(%arg_y))
+2:	__(jne 1b)
+	__(addl %nargs,%esp)	/* discard values from the vstack */
+	__(movl rcontext(tcr.save0),%ra0)
+	__(movl $0,rcontext(tcr.save0))
+	__(jmp *%ra0)
+_endsubp(save_values)
+
+/* Add the multiple values that are on top of the vstack to the set  */
+/* saved in the top tsp frame, popping them off of the vstack in the  */
+/* process.  It is an error (a bad one) if the TSP contains something  */
+/* other than a previously saved set of multiple-values.  */
+/* Since adding to the TSP may cause a new TSP segment to be allocated,  */
+/* each add_values call adds another linked element to the list of  */
+/* values. This makes recover_values harder.   */
+_spentry(add_values)
+	/* do we need to preserve imm0? */
+	__(test %nargs,%nargs)
+	/* %mm1 gets the backlink of the existing top frame, so the new */
+	/* segment links to the same predecessor. */
+	__(movl rcontext(tcr.save_tsp),%imm0)
+	__(movl (%imm0),%imm0)
+	__(movd %imm0,%mm1)	/* for the benefit of save_values_to_tsp */
+	__(jne local_label(save_values_to_tsp))
+	__(jmp *%ra0)	/* no new values: nothing to do */
+_endsubp(add_values)
+
+/* push the values in the value set atop the sp, incrementing nargs.  */
+/* Discard the tsp frame; leave values atop the sp.   */
+_spentry(recover_values)
+	__(movl %ra0,rcontext(tcr.save0)) /* temp0 */
+	__(movd %nargs,%mm0)		  /* temp1 */
+	/* First, walk the segments reversing the pointer to previous  */
+	/* segment pointers Can tell the end because that previous  */
+	/* segment pointer is the prev tsp pointer   */
+	__(movl rcontext(tcr.save_tsp),%temp1)
+	__(movl %temp1,%temp0)	/* current segment   */
+	__(movl %temp1,%arg_y)	/* last segment   */
+	__(movl tsp_frame.backlink(%temp1),%arg_z)	/* previous tsp   */
+local_label(walkloop):
+	__(movl tsp_frame.fixed_overhead+node_size(%temp0),%imm0)
+	__(cmpl %imm0,%arg_z)	/* last segment ?   */
+	__(movl %arg_y,tsp_frame.fixed_overhead+node_size(%temp0))
+	__(movl %temp0,%arg_y)	/* last segment <- current segment   */
+	__(movl %imm0,%temp0)	/* current segment <- next segment   */
+	__(jne local_label(walkloop))
+
+	__(movl %temp1,%arg_z)
+	__(movd %mm0,%nargs)
+	/* the final segment pointer is now in %arg_y  */
+	/* walk backwards, pushing values on the stack and incrementing %nargs   */
+local_label(pushloop):
+	__(movl tsp_frame.data_offset(%arg_y),%imm0)	/* nargs in segment   */
+	__(test %imm0,%imm0)
+	__(leal tsp_frame.data_offset+(2*node_size)(%arg_y,%imm0),%temp0)
+	__(leal (%nargs,%imm0),%nargs)
+	__(jmp 2f)
+	/* Push this segment's values, last first. */
+1:	__(push -node_size(%temp0))
+	__(subl $node_size,%temp0)
+	__(subl $fixnum_one,%imm0)
+2:	__(jne 1b)
+	__(cmpl %arg_y,%arg_z)
+	__(movl tsp_frame.data_offset+node_size(%arg_y),%arg_y)
+	__(jne local_label(pushloop))
+	/* Discard all of the value frames at once. */
+	__(movl (%arg_z),%arg_z)
+        __(movl %arg_z,rcontext(tcr.save_tsp))
+        __(movl %arg_z,rcontext(tcr.next_tsp))
+	__(movl rcontext(tcr.save0),%ra0)
+	__(movl $0,rcontext(tcr.save0))
+	__(jmp *%ra0)
+_endsubp(recover_values)
+
+/* Exactly like recover_values, but it's necessary to reserve an outgoing  */
+/* frame if any values (which will be used as outgoing arguments) will  */
+/* wind up on the stack.  We can assume that %nargs contains 0 (and  */
+/* that no other arguments have been pushed) on entry.   */
+
+_spentry(recover_values_for_mvcall)
+	__(movl %ra0,rcontext(tcr.save0)) /* temp0 */
+	/* First, walk the segments reversing the pointer to previous  */
+	/* segment pointers Can tell the end because that previous  */
+	/* segment pointer is the prev tsp pointer   */
+	/* Total value count is accumulated at (%esp) during the walk. */
+	__(xorl %nargs,%nargs)
+	__(push %nargs)
+	__(movl rcontext(tcr.save_tsp),%temp1)
+	__(movl %temp1,%temp0)	/* current segment   */
+	__(movl %temp1,%arg_y)	/* last segment   */
+	__(movl tsp_frame.backlink(%temp1),%arg_z)	/* previous tsp   */
+local_label(walkloop_mvcall):
+	__(movl tsp_frame.data_offset(%temp0),%imm0)
+	__(addl %imm0,(%esp))	/* accumulate total value count */
+	__(movl tsp_frame.fixed_overhead+node_size(%temp0),%imm0)
+	__(cmpl %imm0,%arg_z)	/* last segment ?   */
+	__(movl %arg_y,tsp_frame.fixed_overhead+node_size(%temp0))
+	__(movl %temp0,%arg_y)	/* last segment <- current segment   */
+	__(movl %imm0,%temp0)	/* current segment <- next segment   */
+	__(jne local_label(walkloop_mvcall))
+
+	__(movl %temp1,%arg_z)
+	__(pop %nargs)
+
+	/* If some values will be passed on the stack, reserve a frame. */
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe local_label(pushloop_mvcall))
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+
+	/* the final segment pointer is now in %arg_y  */
+	/* walk backwards, pushing values on the stack and incrementing %nargs*/
+local_label(pushloop_mvcall):
+	__(movl tsp_frame.data_offset(%arg_y),%imm0)	/* nargs in segment */
+	__(test %imm0,%imm0)
+	__(leal tsp_frame.data_offset+(2*node_size)(%arg_y,%imm0),%temp0)
+	__(jmp 2f)
+	/* Push this segment's values, last first. */
+1:	__(push -node_size(%temp0))
+	__(subl $node_size,%temp0)
+	__(subl $fixnum_one,%imm0)
+2:	__(jne 1b)
+	__(cmpl %arg_y,%arg_z)
+	__(movl tsp_frame.data_offset+node_size(%arg_y),%arg_y)
+	__(jne local_label(pushloop_mvcall))
+	/* Discard all of the value frames at once. */
+	__(movl (%arg_z),%arg_z)
+        __(movl %arg_z,rcontext(tcr.save_tsp))
+        __(movl %arg_z,rcontext(tcr.next_tsp))
+	__(movl rcontext(tcr.save0),%ra0)
+	__(movl $0,rcontext(tcr.save0))
+	__(jmp *%ra0)
+_endsubp(recover_values_for_mvcall)
+
+/* Unimplemented on this target; trap if ever reached. */
+_spentry(reset)
+	__(hlt)
+_endsubp(reset)
+
+/* temp0 = element-count, arg_y = subtag, arg_z = initval */
+/* temp0 = element-count, arg_y = subtag, arg_z = initval */
+/* Allocate via _SPmisc_alloc, then tail-call %init-misc to fill the */
+/* new object with the initial value. */
+_spentry(misc_alloc_init)
+	__(push %ebp)
+	__(movl %esp,%ebp)
+	__(push %arg_z)	/* save initval across the allocation */
+	__(movl %arg_y,%arg_z)	/* misc_alloc takes count/subtag in arg_y/arg_z */
+	__(movl %temp0,%arg_y)
+	__(push $local_label(misc_alloc_init_back))
+	__(jmp _SPmisc_alloc)
+__(tra(local_label(misc_alloc_init_back)))
+	__(pop %arg_y)	/* recover initval; new object is in %arg_z */
+	__(leave)
+	__(movl $nrs.init_misc,%fname)
+	__(set_nargs(2))
+	__(jump_fname())
+_endsubp(misc_alloc_init)
+
+/* %temp1 = element-count, %arg_y = subtag, %arg_z = initial-value */        
+/* %temp1 = element-count, %arg_y = subtag, %arg_z = initial-value */
+/* Stack-allocated analog of misc_alloc_init: allocate via */
+/* _SPstack_misc_alloc, then tail-call %init-misc. */
+_spentry(stack_misc_alloc_init)
+	__(push %ebp)
+        __(movl %esp,%ebp)
+        __(push %arg_z)	/* save initial-value across the allocation */
+        __(movl %arg_y,%arg_z)
+        __(movl %temp1,%arg_y)
+        __(pushl $local_label(stack_misc_alloc_init_back))
+        __(jmp _SPstack_misc_alloc)
+__(tra(local_label(stack_misc_alloc_init_back)))
+        __(popl %arg_y)	/* recover initial-value; new object in %arg_z */
+	__(leave)
+	__(movl $nrs.init_misc,%fname)
+	__(set_nargs(2))
+	__(jump_fname())
+_endsubp(stack_misc_alloc_init)
+
+/* Pop the current frame and return; also visible to C as "popj". */
+	.globl C(popj)
+_spentry(popj)
+C(popj):
+	__(leave)
+        __(ret)
+_endsubp(popj)
+
+/* arg_z should be of type (signed-byte 64) */
+/* return unboxed value in mm0 */
+_spentry(gets64)
+	__(testb $fixnummask,%arg_z_b)
+	__(jne 1f)
+	/* Fixnum case: unbox and sign-extend into the high half. */
+        __(unbox_fixnum(%arg_z,%imm0))
+        __(movd %imm0,%mm0)
+        __(jns 8f)
+        /* get sign into upper half of %mm0 */
+        __(pcmpeqd %mm1,%mm1)   /* all ones */
+        __(psllq $32,%mm1)
+        __(por %mm1,%mm0)
+        __(ret)
+	/* Otherwise must be a bignum of one or two digits. */
+1:      __(movb %arg_z_b,%imm0_b)
+        __(andb $tagmask,%imm0_b)
+        __(cmpb $tag_misc,%imm0_b)
+        __(jne 9f)
+        __(movl misc_header_offset(%arg_z),%imm0)
+        __(cmpb $subtag_bignum,%imm0_b)
+        __(jne 9f)
+        __(cmpl $two_digit_bignum_header,%imm0)
+        __(ja 9f)
+	/* One digit: movd (zero-extending); two digits: movq. */
+	/* NOTE(review): movd zero-extends the upper half, so a negative */
+	/* one-digit bignum would not be sign-extended here -- confirm */
+	/* whether that case can reach this path. */
+        __(movd misc_data_offset(%arg_z),%mm0)
+	__(jne 8f)
+	__(movq misc_data_offset(%arg_z),%mm0)
+8:      __(repret)
+9:      __(uuo_error_reg_not_type(Rarg_z,error_object_not_s64))
+_endsubp(gets64)
+
+/* arg_z should be of type (unsigned-byte 64) */
+/* return unboxed value in mm0 */
+_spentry(getu64)
+	/* Non-negative fixnum? */
+	__(movl $~(target_most_positive_fixnum << fixnumshift),%imm0)
+	__(testl %arg_z,%imm0)
+	__(movl %arg_z,%imm0)
+	__(jne 1f)
+	__(sarl $fixnumshift,%imm0)
+	__(movd %imm0,%mm0)
+	__(ret)
+	/* Otherwise must be a bignum of 1-3 digits whose value is */
+	/* non-negative and fits in 64 bits. */
+1:	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 9f)
+	__(movl misc_header_offset(%arg_z),%imm0)
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne 9f)
+	__(cmpl $three_digit_bignum_header,%imm0)
+	__(ja 9f)
+	__(je 3f)
+	__(cmpl $two_digit_bignum_header,%imm0)
+	__(je 2f)
+	/* must be a one digit bignum */
+	__(movl misc_data_offset(%arg_z),%imm0)
+	__(test %imm0,%imm0)
+	__(js 9f)	/* negative: not a u64 */
+	__(movd %imm0,%mm0)
+	__(ret)
+2: 	__(movl misc_data_offset+4(%arg_z),%imm0)
+	__(testl %imm0,%imm0)
+	__(js 9f)	/* negative: not a u64 */
+	__(movq misc_data_offset(%arg_z),%mm0)
+	__(ret)
+	/* Three digits: the top digit must be zero (value < 2^64). */
+3:	__(movl misc_data_offset(%arg_z),%imm0)
+	__(cmpl $0,misc_data_offset+8(%arg_z))
+	__(jne 9f)
+	__(movq misc_data_offset(%arg_z),%mm0)
+	__(repret)
+9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_u64))
+_endsubp(getu64)
+
+/* Make unsigned integer from value in mm0 */
+/* Make unsigned integer from value in mm0 */
+/* Result is a fixnum or a 1-3 digit bignum, depending on magnitude. */
+_spentry(makeu64)
+	__(movq %mm0,%mm1)
+	__(psrlq $32,%mm0)
+	__(movd %mm0,%imm0)	/* %imm0 = high 32 bits */
+	__(test %imm0,%imm0)
+	__(js 3f)	/* bit 63 set: needs 3 digits */
+	__(jnz 2f)	/* high half nonzero: needs 2 digits */
+	__(movd %mm1,%imm0)
+	__(cmpl $target_most_positive_fixnum,%imm0)
+	__(ja 1f)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+1:	/* maybe make a 1 digit bignum */
+	__(test %imm0,%imm0)
+	__(js 2f)	/* bit 31 set: a 1-digit bignum would read as negative */
+	__(movl $one_digit_bignum_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(1)))
+	__(movd %mm1,misc_data_offset(%arg_z))
+	__(ret)
+	/* make a 2 digit bignum */
+2:	__(movl $two_digit_bignum_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(2)))
+	__(movq %mm1,misc_data_offset(%arg_z))
+	__(ret)
+	/* make a 3 digit bignum */
+3:	__(movl $three_digit_bignum_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(3)))
+	__(movq %mm1,misc_data_offset(%arg_z))
+	__(ret)
+_endsubp(makeu64)
+
+/* on entry: arg_z = symbol.  On exit, arg_z = value (possibly */
+/* unbound_marker), arg_y = symbol */
+_spentry(specref)
+	/* Look for a thread-local binding in this thread's TLB; if the */
+	/* symbol has no valid index or no local binding, fall back to */
+	/* the symbol's global value cell. */
+	__(movl symbol.binding_index(%arg_z),%imm0)
+	__(cmp rcontext(tcr.tlb_limit),%imm0)
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(movl %arg_z,%arg_y)
+	__(jae 7f)	/* index out of range: no thread-local slot */
+	__(movl (%temp1,%imm0),%arg_z)
+	__(cmpb $no_thread_local_binding_marker,%arg_z_b)
+	__(jne 8f)
+7:	__(movl symbol.vcell(%arg_y),%arg_z)
+8:	__(repret)
+_endsubp(specref)
+
+/* arg_y = special symbol, arg_z = new value. */
+_spentry(specset)
+	/* Store into the thread-local binding if one exists; otherwise */
+	/* tail-call _SPgvset to set the symbol's global value cell */
+	/* (with the memoization/write-barrier handling that implies). */
+	__(movl symbol.binding_index(%arg_y),%imm0)
+	__(cmp rcontext(tcr.tlb_limit),%imm0)
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(jae 1f)
+	__(movl (%temp1,%imm0),%temp0)
+	__(cmpb $no_thread_local_binding_marker,%temp0_b)
+	__(je 1f)
+	__(movl %arg_z,(%temp1,%imm0))
+	__(ret)
+1:	__(movl %arg_y,%temp0)
+	__(movl $1<<fixnumshift,%arg_y)	/* vcell offset, as a fixnum */
+	__(jmp _SPgvset)
+_endsubp(specset)
+
+/* Like specref, but signals an error if the symbol's value is the */
+/* unbound marker instead of returning it. */
+_spentry(specrefcheck)
+	__(mov %arg_z,%arg_y)
+	__(movl symbol.binding_index(%arg_z),%imm0)
+	__(cmp rcontext(tcr.tlb_limit),%imm0)
+	__(jae 7f)	/* no thread-local slot for this symbol */
+	__(movl rcontext(tcr.tlb_pointer),%temp1)
+	__(movl (%temp1,%imm0),%arg_z)
+	__(cmpb $no_thread_local_binding_marker,%arg_z_b)
+	__(cmovel symbol.vcell(%arg_y),%arg_z)	/* no local binding: use vcell */
+	__(cmpb $unbound_marker,%arg_z_b)
+	__(je 9f)
+8:	__(repret)
+7:	__(movl symbol.vcell(%arg_y),%arg_z)
+	__(cmpb $unbound_marker,symbol.vcell(%arg_y))
+	__(je 9f)
+	__(repret)
+9:	__(uuo_error_reg_unbound(Rarg_y))
+_endsubp(specrefcheck)
+
+/* Not implemented on this platform; hlt faults if it is ever reached. */
+_spentry(restoreintlevel)
+	__(hlt)
+_endsubp(restoreintlevel)
+
+/* Make a lisp integer from the unsigned value in imm0 */
+_spentry(makeu32)
+	__(cmpl $target_most_positive_fixnum,%imm0)
+	__(ja 0f)	/* need to make a bignum */
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+	/* One digit suffices unless bit 31 is set, in which case a */
+	/* single digit would read as negative and we need two. */
+0:	__(movd %imm0,%mm1)
+	__(test %imm0,%imm0)
+	__(js 1f)
+	__(movl $one_digit_bignum_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(1)))
+	__(movd %mm1,misc_data_offset(%arg_z))
+	__(ret)
+1:	__(movl $two_digit_bignum_header,%imm0)
+	__(movd %imm0,%mm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(2)))
+	__(movd %mm1,misc_data_offset(%arg_z))
+	__(ret)
+_endsubp(makeu32)
+
+/* arg_z is of type (signed-byte 32) */
+/* return unboxed value in %imm0 */
+_spentry(gets32)
+	/* Either a fixnum or a one-digit bignum; anything else errors. */
+	__(testb $fixnummask,%arg_z_b)
+	__(jne 1f)
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(ret)
+1:	__(movb %arg_z_b,%imm0_b)
+	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 9f)
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne 9f)
+	__(movl misc_header_offset(%arg_z),%imm0)
+	__(cmpl $one_digit_bignum_header,%imm0)
+	__(jne 9f)	/* wider bignums can't fit in 32 signed bits */
+	__(movl misc_data_offset(%arg_z),%imm0)
+	__(ret)
+9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_signed_byte_32))
+_endsubp(gets32)
+
+/* arg_z is of type (unsigned-byte 32) */
+/* return unboxed value in %imm0 */
+_spentry(getu32)
+	/* Fast case: a non-negative fixnum (mask covers tag and sign). */
+	__(movl $~(target_most_positive_fixnum << fixnumshift),%imm0)
+	__(testl %arg_z,%imm0)
+	__(movl %arg_z,%imm0)
+	__(jne 1f)
+	__(sarl $fixnumshift,%imm0)
+	__(ret)
+	/* Otherwise a 1- or 2-digit bignum whose value fits in 32 */
+	/* unsigned bits. */
+1:	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 9f)
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne 9f)
+	__(movl misc_header_offset(%arg_z),%imm0)
+	__(cmpl $two_digit_bignum_header,%imm0)
+	__(je 2f)
+	__(cmpl $one_digit_bignum_header,%imm0)
+	__(jne 9f)
+	__(movl misc_data_offset(%arg_z),%imm0)
+	__(ret)
+	/* 2-digit case: high digit must be 0 for the value to be a u32. */
+2:	__(movl misc_data_offset(%arg_z),%imm0)
+	__(cmpl $0,misc_data_offset+4(%arg_z))
+	__(jne 9f)
+	__(ret)
+9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_unsigned_byte_32))
+_endsubp(getu32)
+
+/* Not implemented on this platform; hlt faults if it is ever reached. */
+_spentry(mvpasssym)
+	__(hlt)
+_endsubp(mvpasssym)
+
+/* don't smash arg_z */
+/* Pop one special binding: restore the saved value into the thread's */
+/* TLB slot and unlink the top frame from tcr.db_link. */
+_spentry(unbind)
+	__(push %arg_z)
+	__(movl rcontext(tcr.db_link),%imm0)
+	__(movl rcontext(tcr.tlb_pointer),%arg_z)
+	__(movl binding.sym(%imm0),%temp0)	/* actually the TLB index */
+	__(movl binding.val(%imm0),%arg_y)
+	__(movl binding.link(%imm0),%imm0)
+	__(movl %arg_y,(%arg_z,%temp0))
+	__(movl %imm0,rcontext(tcr.db_link))
+	__(pop %arg_z)
+	__(ret)
+_endsubp(unbind)
+
+/* Pop %imm0 special bindings (count assumed non-zero), restoring each */
+/* saved value into the thread's TLB. */
+_spentry(unbind_n)
+	__(push %temp1)		/* preserve temp1/nargs */
+	__(push %arg_z)
+	__(xorl %arg_z,%arg_z)
+	__(movl rcontext(tcr.db_link),%temp1)
+	__(movl rcontext(tcr.tlb_pointer),%arg_z)
+1:		
+	__(movl binding.sym(%temp1),%temp0)	/* actually the TLB index */
+	__(movl binding.val(%temp1),%arg_y)
+	__(movl binding.link(%temp1),%temp1)
+	__(movl %arg_y,(%arg_z,%temp0))
+	__(decl %imm0)
+	__(jne 1b)
+	__(movl %temp1,rcontext(tcr.db_link))
+	__(pop %arg_z)
+	__(pop %temp1)
+	__(ret)	
+_endsubp(unbind_n)
+
+/* Pop special bindings until tcr.db_link reaches the frame in %imm0, */
+/* restoring each saved value into the thread's TLB. */
+_spentry(unbind_to)
+	__(push %arg_y)
+	__(push %arg_z)
+	__(push %temp0)
+	__(push %temp1)
+	
+	__(movl rcontext(tcr.db_link),%temp0)
+	__(movl rcontext(tcr.tlb_pointer),%arg_z)
+1:
+	__(movl binding.sym(%temp0),%temp1)	/* actually the TLB index */
+	__(movl binding.val(%temp0),%arg_y)
+	__(movl binding.link(%temp0),%temp0)
+	__(movl %arg_y,(%arg_z,%temp1))
+	__(cmpl %temp0,%imm0)
+	__(jne 1b)
+	__(movl %temp0,rcontext(tcr.db_link))
+
+	__(pop %temp1)
+	__(pop %temp0)
+	__(pop %arg_z)
+	__(pop %arg_y)
+	__(ret)
+_endsubp(unbind_to)
+
+/* Bind CCL::*INTERRUPT-LEVEL* to 0 by pushing a binding frame on the */
+/* value stack and linking it into tcr.db_link.  If the old level was */
+/* negative, an interrupt may have been deferred while disabled, so */
+/* check for one before returning through %ra0. */
+_spentry(bind_interrupt_level_0)
+	__(movl rcontext(tcr.tlb_pointer),%arg_y)
+	__(cmpl $0,INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(push INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(push $INTERRUPT_LEVEL_BINDING_INDEX)
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(movl $0,INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(js 1f)	/* sign flag from the cmpl above */
+0:	__(jmp *%ra0)
+	/* Interrupt level was negative; interrupt may be pending */
+1:	__(check_pending_enabled_interrupt(2f))
+2:	__(jmp *%ra0)
+_endsubp(bind_interrupt_level_0)
+
+/* Bind CCL::*INTERRUPT-LEVEL* to the fixnum -1.  (This has the effect  */
+/* of disabling interrupts.)   */
+_spentry(bind_interrupt_level_m1)
+	__(movl rcontext(tcr.tlb_pointer),%arg_y)
+	/* Push a binding frame (old value, TLB index, old db_link). */
+	__(push INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(push $INTERRUPT_LEVEL_BINDING_INDEX)
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(movl $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(jmp *%ra0)
+_endsubp(bind_interrupt_level_m1)
+
+/* Bind CCL::*INTERRUPT-LEVEL* to the value in arg_z.  If that value's 0, */
+/* do what _SPbind_interrupt_level_0 does. */
+_spentry(bind_interrupt_level)
+	__(test %arg_z,%arg_z)
+	__(jz _SPbind_interrupt_level_0)
+	__(movl rcontext(tcr.tlb_pointer),%arg_y)
+	/* Push a binding frame (old value, TLB index, old db_link). */
+	__(push INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(push $INTERRUPT_LEVEL_BINDING_INDEX)
+	__(push rcontext(tcr.db_link))
+	__(movl %esp,rcontext(tcr.db_link))
+	__(movl %arg_z,INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(jmp *%ra0)
+_endsubp(bind_interrupt_level)
+
+/* Unbind CCL::*INTERRUPT-LEVEL*.  If the value changes from negative to */
+/* non-negative, check for pending interrupts. */
+_spentry(unbind_interrupt_level)
+	/* If a suspend request was missed while interrupts were disabled, */
+	/* handle it at 5f before unbinding. */
+	__(btl $TCR_FLAG_BIT_PENDING_SUSPEND,rcontext(tcr.flags))
+	__(movl rcontext(tcr.tlb_pointer),%arg_y)
+	__(movl INTERRUPT_LEVEL_BINDING_INDEX(%arg_y),%imm0)
+	__(jc 5f)
+	/* Pop the binding frame, restoring the saved level. */
+0:	__(test %imm0,%imm0)
+	__(movl rcontext(tcr.db_link),%imm0)
+	__(movl binding.val(%imm0),%temp0)
+	__(movl binding.link(%imm0),%imm0)
+	__(movl %temp0,INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+	__(movl %imm0,rcontext(tcr.db_link))
+	__(js 3f)	/* old level was negative */
+2:	__(repret)
+3:	__(test %temp0,%temp0)
+	__(js 2b)	/* restored level still negative: nothing to do */
+	__(check_pending_enabled_interrupt(4f))
+4:	__(repret)
+5:       /* Missed a suspend request; force suspend now if we're restoring
+          interrupt level to -1 or greater */
+        __(cmpl $-2<<fixnumshift,%imm0)
+        __(jne 0b)
+	__(movl rcontext(tcr.db_link),%temp0)
+	__(movl binding.val(%temp0),%temp0)
+        __(cmpl %imm0,%temp0)
+        __(je 0b)
+        __(movl $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%arg_y))
+        __(suspend_now())
+        __(jmp 0b)
+_endsubp(unbind_interrupt_level)
+
+/* Restore the special bindings made by PROGV: fetch the binding count */
+/* saved in the temp-stack frame and tail-call _SPunbind_n (unless the */
+/* count is zero). */
+_spentry(progvrestore)
+	__(movl rcontext(tcr.save_tsp),%imm0)
+	__(movl tsp_frame.backlink(%imm0),%imm0) /* ignore .SPnthrowXXX values frame   */
+	__(movl tsp_frame.data_offset(%imm0),%imm0)
+	__(shrl $fixnumshift,%imm0)	/* unbox the count; sets Z if zero */
+	__(jne _SPunbind_n)
+	__(repret)
+_endsubp(progvrestore)
+
+/* %arg_z <- %arg_y + %arg_z.  Do the fixnum case - including overflow -  */
+/* inline.  Call out otherwise.   */
+_spentry(builtin_plus)
+	/* OR the tags together: both fixnums iff the result's tag bits */
+	/* are clear. */
+	__(movl %arg_y,%imm0)
+	__(orl %arg_z,%imm0)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(addl %arg_y,%arg_z)
+	__(jo C(fix_one_bit_overflow))	/* overflow by exactly one bit */
+	__(repret)
+1:	__(jump_builtin(_builtin_plus,2))
+_endsubp(builtin_plus)
+
+/* %arg_z <- %arg_y - %arg_z.  Do the fixnum case - including overflow -  */
+/*  inline.  Call out otherwise.   */
+_spentry(builtin_minus)
+	__(movl %arg_y,%imm0)
+	__(orl %arg_z,%imm0)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	/* Swap so the difference lands in %arg_z, the result register. */
+	__(xchgl %arg_y,%arg_z)
+	__(subl %arg_y,%arg_z)
+	__(jo C(fix_one_bit_overflow))
+	__(repret)
+1:	__(jump_builtin(_builtin_minus,2))
+_endsubp(builtin_minus)
+
+/* %arg_z <- arg_y * arg_z. */
+/* Do the fixnum case---including overflow---inline.  Call out otherwise. */
+_spentry(builtin_times)
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 2f)
+	__(unbox_fixnum(%arg_z,%imm0))
+	/* 32-bit fixnum result in %imm0.  Overflow set if it doesn't fit. */
+	__(imul %arg_y,%imm0)
+	__(jo 1f)
+	__(movl %imm0,%arg_z)
+	__(ret)
+	/* Product overflowed 32 bits: redo as a widening 32x32->64 */
+	/* multiply and box the 64-bit result via _SPmakes64. */
+1:	__(unbox_fixnum(%arg_z,%eax))
+	__(mark_as_imm(%edx))	/* %edx will hold raw (untagged) bits */
+	__(unbox_fixnum(%arg_y,%edx))
+	__(imul %edx)
+        __(movd %eax,%mm0)
+        __(movd %edx,%mm1)
+        __(mark_as_node(%edx))
+        __(psllq $32,%mm1)
+        __(por %mm1,%mm0)	/* assemble 64-bit product in %mm0 */
+        __(jmp _SPmakes64)
+2:	__(jump_builtin(_builtin_times,2))
+_endsubp(builtin_times)
+
+/* Division has no useful inline fixnum case; always call out. */
+_spentry(builtin_div)
+	__(jump_builtin(_builtin_div,2))
+
+/* %arg_z <- (= %arg_y %arg_z).	  */
+_spentry(builtin_eq)
+	/* Fixnum-only fast path; anything else calls out. */
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(rcmpl(%arg_z,%arg_y))
+	__(condition_to_boolean(e,%imm0,%arg_z))
+	__(ret)
+1:	__(jump_builtin(_builtin_eq,2))
+_endsubp(builtin_eq)
+
+/* %arg_z <- (/= %arg_y %arg_z).	  */
+_spentry(builtin_ne)
+	/* Fixnum-only fast path; anything else calls out. */
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(rcmpl(%arg_z,%arg_y))
+	__(condition_to_boolean(ne,%imm0,%arg_z))
+	__(ret)
+1:	__(jump_builtin(_builtin_ne,2))
+_endsubp(builtin_ne)
+
+/* %arg_z <- (> %arg_y %arg_z).	  */
+_spentry(builtin_gt)
+	/* Fixnum-only fast path; anything else calls out. */
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(rcmpl(%arg_y,%arg_z))
+	__(condition_to_boolean(g,%imm0,%arg_z))
+	__(ret)
+1:	__(jump_builtin(_builtin_gt,2))
+_endsubp(builtin_gt)
+
+/* %arg_z <- (>= %arg_y %arg_z).	  */
+_spentry(builtin_ge)
+	/* Fixnum-only fast path; anything else calls out. */
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(rcmpl(%arg_y,%arg_z))
+	__(condition_to_boolean(ge,%imm0,%arg_z))
+	__(ret)
+1:	__(jump_builtin(_builtin_ge,2))
+_endsubp(builtin_ge)
+
+/* %arg_z <- (< %arg_y %arg_z).	  */
+_spentry(builtin_lt)
+	/* Fixnum-only fast path; anything else calls out. */
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(rcmpl(%arg_y,%arg_z))
+	__(condition_to_boolean(l,%imm0,%arg_z))
+	__(ret)
+1:	__(jump_builtin(_builtin_lt,2))
+_endsubp(builtin_lt)
+
+/* %arg_z <- (<= %arg_y %arg_z).   */
+_spentry(builtin_le)
+	/* Fixnum-only fast path; anything else calls out. */
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(rcmpl(%arg_y,%arg_z))
+	__(condition_to_boolean(le,%imm0,%arg_z))
+	__(ret)
+1:	__(jump_builtin(_builtin_le,2))
+_endsubp(builtin_le)
+
+/* %arg_z <- (EQL %arg_y %arg_z).  EQ objects are trivially EQL; */
+/* otherwise only two tag_misc objects with the same subtag can be */
+/* EQL, and that case is decided by the lisp-level builtin. */
+_spentry(builtin_eql)
+	__(cmpl %arg_y,%arg_z)
+	__(je 1f)
+	/* Not EQ.  Could only possibly be EQL if both are tag-misc  */
+	/* and both have the same subtag. */
+	__(movl %arg_y,%imm0)
+	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 2f)
+	__(movb %arg_z_b,%imm0_bh)	/* use %ah-style high byte for arg_z's tag */
+	__(andb $tagmask,%imm0_bh)
+	__(cmpb %imm0_bh,%imm0_b)
+	__(jne 2f)
+	__(extract_subtag(%arg_y,%imm0_b))
+	__(extract_subtag(%arg_z,%imm0_bh))
+	__(cmpb %imm0_b,%imm0_bh)
+	__(jne 2f)
+	__(jump_builtin(_builtin_eql,2))
+1:	__(movl $t_value,%arg_z)
+	__(ret)
+2:	__(movl $nil_value,%arg_z)
+	__(ret)
+_endsubp(builtin_eql)
+
+/* %arg_z <- (LENGTH %arg_z) for lists, simple vectors, and vector */
+/* headers.  Lists are walked with a two-pointer (tortoise/hare) scan */
+/* so dotted and circular lists fall through to the lisp builtin. */
+_spentry(builtin_length)
+	__(extract_fulltag(%arg_z,%imm0))
+	__(cmpl $tag_list,%imm0)
+	__(jz 2f)
+	__(andl $tagmask,%imm0)
+	__(cmpl $tag_misc,%imm0)
+	__(jnz 8f)
+	__(extract_subtag(%arg_z,%imm0_b))
+	__(rcmpb(%imm0_b,$min_vector_subtag))
+	__(jb 8f)	/* not a vector at all */
+	__(je 1f)
+	/* (simple-array * (*)) */
+	__(movl %arg_z,%arg_y)
+	__(vector_length(%arg_y,%arg_z))
+	__(ret)
+1:	/* vector header */
+	__(movl vectorH.logsize(%arg_z),%arg_z)
+	__(ret)
+2:	/* list.  Maybe null, maybe dotted or circular. */
+	__(movl $-fixnumone,%arg_y)	/* boxed count, pre-decremented */
+	__(movl %arg_z,%temp0)	/* fast pointer */
+	__(movl %arg_z,%temp1)  /* slow pointer */
+3:	__(movb %temp0_b,%al)
+	__(andb $fulltagmask,%al)
+	__(addl $fixnumone,%arg_y)
+	__(compare_reg_to_nil(%temp0))
+	__(je 9f)	/* proper end of list: return the count */
+	__(cmpb $fulltag_cons,%al)
+	__(jne 8f)	/* dotted list: punt to the builtin */
+	__(movb %temp1_b,%ah)
+	__(andb $fulltagmask,%ah)
+	__(_cdr(%temp0,%temp0))
+	/* Advance the slow pointer only every other iteration. */
+	__(testl $fixnumone,%arg_y)
+	__(je 3b)
+	__(cmpb $fulltag_cons,%ah)
+	__(jne 8f)
+	__(_cdr(%temp1,%temp1))
+	__(cmpl %temp0,%temp1)
+	__(jne 3b)	/* pointers met: circular list, punt */
+8:
+	__(jump_builtin(_builtin_length,1))
+9:
+	__(movl %arg_y,%arg_z)
+	__(ret)
+_endsubp(builtin_length)
+
+/* %arg_z <- T if %arg_z is a cons, NIL if it's a vector; call out for */
+/* anything else (including NIL and non-sequences). */
+_spentry(builtin_seqtype)
+	__(extract_fulltag(%arg_z,%imm0))
+	__(cmpb $fulltag_cons,%imm0_b)
+	__(jz 1f)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 2f)
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(rcmpb(%imm0_b,$min_vector_subtag))
+	__(jb 2f)	/* misc object but not a vector */
+	__(movl $nil_value,%arg_z)
+	__(ret)
+1:	__(movl $t_value,%arg_z)
+	__(ret)
+2:
+	__(jump_builtin(_builtin_seqtype,1))
+_endsubp(builtin_seqtype)
+
+/* %arg_z <- (ASSQ %arg_y %arg_z): walk the alist, EQ-comparing each */
+/* pair's car against %arg_y; NIL elements in the alist are skipped. */
+/* Errors on improper list structure. */
+_spentry(builtin_assq)
+	__(cmpl $nil_value,%arg_z)
+	__(je 5f)
+1:	__(movl %arg_z,%imm0)
+	__(andb $fulltagmask,%imm0_b)
+	__(cmpb $fulltag_cons,%imm0_b)
+	__(jne 2f)	/* alist itself is dotted */
+	__(_car(%arg_z,%temp0))
+	__(_cdr(%arg_z,%arg_z))
+	__(cmpl $nil_value,%temp0)
+	__(je 4f)	/* NIL element: skip it */
+	__(movl %temp0,%imm0)
+	__(andb $fulltagmask,%imm0_b)
+	__(cmpb $fulltag_cons,%imm0_b)
+	__(jne 3f)	/* element is neither NIL nor a cons */
+	__(_car(%temp0,%temp1))
+	__(cmpl %temp1,%arg_y)
+	__(jne 4f)
+	__(movl %temp0,%arg_z)	/* found: return the pair */
+	__(ret)
+4:	__(cmpl $nil_value,%arg_z)
+5:	__(jnz 1b)
+	__(repret)
+2:	__(uuo_error_reg_not_list(Rarg_z))
+3:	__(uuo_error_reg_not_list(Rtemp0))
+_endsubp(builtin_assq)
+
+/* %arg_z <- (MEMQ %arg_y %arg_z): walk the list EQ-comparing each */
+/* element; return the tail whose car matched, or NIL.  Errors on */
+/* improper list structure. */
+_spentry(builtin_memq)
+	__(cmpl $nil_value,%arg_z)
+	__(jmp 3f)	/* enter loop with Z flag from the compare */
+1:	__(movb $fulltagmask,%imm0_b)
+	__(andb %arg_z_b,%imm0_b)
+	__(cmpb $fulltag_cons,%imm0_b)
+	__(jne 2f)
+	__(_car(%arg_z,%temp1))
+	__(_cdr(%arg_z,%temp0))
+	__(cmpl %temp1,%arg_y)
+	__(jz 4f)	/* found: current tail is already in %arg_z */
+	__(cmpl $nil_value,%temp0)
+	__(movl %temp0,%arg_z)
+3:	__(jnz 1b)
+4:	__(repret)
+2:	__(uuo_error_reg_not_list(Rarg_z))
+_endsubp(builtin_memq)
+
+logbitp_max_bit = 30
+
+/* %arg_z <- (LOGBITP %arg_y %arg_z) for fixnum arguments. */
+_spentry(builtin_logbitp)
+	/* Call out unless: both args fixnums, arg_y in [0, logbitp_max_bit) */
+	__(movl %arg_z,%imm0)
+	__(orl %arg_y,%imm0)
+	__(testb $fixnummask,%imm0_b)
+	__(jnz 1f)
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(js 1f)	/* bit number negative */
+	__(addb $fixnumshift,%imm0_b)	/* bit position within the boxed word */
+	__(cmpl $logbitp_max_bit<<fixnumshift,%arg_y)
+	__(jb 2f)
+	/* Past the top bit: test the (replicated) sign bit instead. */
+	__(movl $logbitp_max_bit-1+fixnumshift,%imm0)
+2:	__(bt %imm0,%arg_z)
+	__(condition_to_boolean(b,%imm0,%arg_z))	/* CF holds the tested bit */
+	__(ret)
+1:	__(jump_builtin(_builtin_logbitp,2))
+_endsubp(builtin_logbitp)
+
+/* %arg_z <- (LOGIOR %arg_y %arg_z).  Fixnum OR preserves the fixnum */
+/* tag, so no unboxing is needed; call out for non-fixnums. */
+_spentry(builtin_logior)
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(orl %arg_y,%arg_z)
+	__(ret)
+1:
+	__(jump_builtin(_builtin_logior,2))
+_endsubp(builtin_logior)
+
+/* %arg_z <- (LOGAND %arg_y %arg_z).  Fixnum AND preserves the fixnum */
+/* tag, so no unboxing is needed; call out for non-fixnums. */
+_spentry(builtin_logand)
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(andl %arg_y,%arg_z)
+	__(ret)
+1:
+	__(jump_builtin(_builtin_logand,2))
+_endsubp(builtin_logand)
+
+/* %arg_z <- (- %arg_z).  Inline for fixnums (negating most-negative */
+/* fixnum overflows by one bit); call out otherwise. */
+_spentry(builtin_negate)
+	__(testb $fixnummask,%arg_z_b)
+	__(jne 1f)
+	__(negl %arg_z)
+	__(jo C(fix_one_bit_overflow))
+	__(repret)
+1:
+	__(jump_builtin(_builtin_negate,1))
+_endsubp(builtin_negate)
+
+/* %arg_z <- (LOGXOR %arg_y %arg_z).  XOR of two fixnums clears the */
+/* tag bits against each other, leaving a valid fixnum; call out for */
+/* non-fixnums. */
+_spentry(builtin_logxor)
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 1f)
+	__(xorl %arg_y,%arg_z)
+	__(ret)
+1:
+	__(jump_builtin(_builtin_logxor,2))
+_endsubp(builtin_logxor)
+
+/* temp0 = vector, arg_y = index, arg_z = newval */
+/* Dispatch to the fast typed-vector setter when temp0 is a simple */
+/* 1-d array; otherwise build a call frame and fall through into */
+/* _SPbuiltin_aset1 below. */
+_spentry(aset1)
+	__(extract_typecode(%temp0,%imm0))
+	__(box_fixnum(%imm0,%temp1))
+	__(cmpb $min_vector_subtag,%imm0_b)
+	__(ja _SPsubtag_misc_set)
+	/* push frame... */
+	__(pop %temp1)	/* return address */
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+	__(push %temp0)	/* the array becomes the first pushed argument */
+	__(push %temp1)	/* restore return address */
+	/* and fall through... */
+_endsubp(aset1)
+
+/* Generic 1-d aset: always handled by the lisp-level builtin. */
+_spentry(builtin_aset1)
+	__(jump_builtin(_builtin_aset1,3))
+_endsubp(builtin_aset1)
+
+/* %arg_z <- (ASH %arg_y %arg_z) for fixnum arguments; call out for */
+/* anything that can't be done with 32/64-bit shifts. */
+_spentry(builtin_ash)
+	__(movl %arg_y,%imm0)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)
+	__(jne 9f)
+	__(unbox_fixnum(%arg_z,%imm0))
+	/* Z flag set if zero ASH shift count */
+	__(jnz 1f)
+	__(movl %arg_y,%arg_z) /* shift by 0 */
+	__(ret)
+1:	__(jns 3f)	/* positive count: left shift */
+	/* Right shift.  Shifting by 31 or more just leaves the sign. */
+	__(rcmpl(%imm0,$-31))
+	__(jg 2f)
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(sar $31,%imm0)	/* result is all sign bits: 0 or -1 */
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+2:	/* Right-shift by small fixnum */
+	__(negb %imm0_b)
+	__(movzbl %imm0_b,%ecx)
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(sar %cl,%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+3:	/* Left shift by fixnum.  We can't shift by more than 31 bits, */
+	/* though shifting by 32 is actually easy. */
+	__(rcmpl(%imm0,$32))
+	__(jg 9f)
+	__(jne 4f)
+	/* left-shift by 32 bits exactly */
+	__(unbox_fixnum(%arg_y,%imm0))
+        __(movd %imm0,%mm0)
+        __(psllq $32,%mm0)
+        __(jmp _SPmakes64)
+4:	/* left-shift by 1..31 bits. Safe to move shift count to %cl */
+	__(movd %imm0,%mm1)     /* shift count */
+        __(unbox_fixnum(%arg_y,%imm0))
+        __(movd %imm0,%mm0)
+        __(sarl $31,%imm0)      /* propagate sign */
+        __(movd %imm0,%mm2)
+        __(pshufw $0x4e,%mm2,%mm2) /* swap hi/lo halves */
+        __(por %mm2,%mm0)	/* sign-extend operand to 64 bits */
+        __(psllq %mm1,%mm0)
+        __(jmp _SPmakes64)
+9:
+	__(jump_builtin(_builtin_ash,2))
+_endsubp(builtin_ash)
+
+/* arg_y = vector, arg_z = index.  Dispatch to the fast typed-vector */
+/* reader for simple 1-d arrays; call out for everything else. */
+_spentry(builtin_aref1)
+	__(extract_typecode(%arg_y,%imm0))
+	__(box_fixnum_no_flags(%imm0,%temp0))	/* keep flags from extract */
+	__(cmpb $min_vector_subtag,%imm0_b)
+	__(ja _SPsubtag_misc_ref)
+	__(jump_builtin(_builtin_aref1,2))
+_endsubp(builtin_aref1)
+
+/* Maybe check the x87 tag word to see if st(0) is valid and pop it */
+/* if so.  This might allow us to avoid having to have a priori */
+/* knowledge of whether a foreign function returns a floating-point result. */
+/* backlink to saved %esp, below */
+/* arg n-1 */
+/* arg n-2 */
+/* ... */
+/* arg 0 */
+/* space for alignment */
+/* previous %esp */
+
+/* Call a foreign function.  arg_z is either a boxed fixnum address or */
+/* a macptr; the thread transitions to foreign state around the call */
+/* and back to lisp state afterwards, saving/restoring lisp registers */
+/* and FP state. */
+_spentry(ffcall)
+LocalLabelPrefix`'ffcall:
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(testb $fixnummask,%arg_z_b)
+	__(je 0f)
+	__(movl macptr.address(%arg_z),%imm0)	/* not a fixnum: a macptr */
+0:
+	/* Save lisp registers. */
+	__(push %ebp)
+	__(mov %esp,%ebp)
+        __(push %temp0) 	 	 
+        __(push %temp1) 	 	 
+        __(push %arg_y) 	 	 
+        __(push %arg_z) 	 	 
+        __(push %fn)         
+        __ifdef(`WIN32_ES_HACK')
+         __(movl rcontext(tcr.linear),%ebx)
+        __endif
+	/* Publish the lisp stack state and switch to the foreign stack. */
+	__(movl %esp,rcontext(tcr.save_vsp))
+	__(movl %ebp,rcontext(tcr.save_ebp))
+	__(movl $TCR_STATE_FOREIGN,rcontext(tcr.valence))
+	__(movl rcontext(tcr.foreign_sp),%esp)
+	/* preserve state of direction flag */
+	__(pushfl)
+	__(popl rcontext(tcr.save_eflags))
+	__(cld)        
+	__(stmxcsr rcontext(tcr.lisp_mxcsr))
+	__(emms)
+	__(ldmxcsr rcontext(tcr.foreign_mxcsr))
+	__(movl (%esp),%ebp)
+LocalLabelPrefix`'ffcall_setup:
+	/* 16-byte-align the C stack, per common ABI convention. */
+        __(lea 15(%esp),%ecx)
+        __(andl $-16,%ecx)
+        __(movl %ecx,%esp)
+/*	__(addl $node_size,%esp) */
+        __ifdef(`WIN32_ES_HACK')
+         __(push %ds)
+         __(pop %es)
+        __endif
+LocalLabelPrefix`'ffcall_call:
+	__(call *%eax)
+	__ifdef(`WIN32_ES_HACK')
+         __(movw tcr.ldt_selector(%ebx),%rcontext_reg)
+        __endif
+LocalLabelPrefix`'ffcall_call_end:
+	__(movl %ebp,%esp)
+	__(movl %esp,rcontext(tcr.foreign_sp))
+        /* The high word of a 64-bit result would be in %edx right now.
+           There doesn't seem to be any other good place to put this,
+           though %edx is often undefined at this point. */
+        __(mov %edx,rcontext(tcr.unboxed1))
+	/* Clear node registers so the GC sees no stale lisp pointers. */
+	__(clr %arg_z)
+	__(clr %arg_y)
+	__(clr %temp1)
+	__(clr %temp0)
+	__(clr %fn)
+	__(pxor %fpzero,%fpzero)
+	__(cmpb $0,C(bogus_fp_exceptions))
+	__(je 0f)
+	__(movl %arg_z,rcontext(tcr.ffi_exception))
+	__(jmp 1f)
+0:
+	__ifdef(`SSE2_MATH_LIB')
+	__(stmxcsr rcontext(tcr.ffi_exception))
+	__else
+	__(fnstsw rcontext(tcr.ffi_exception))
+	__(fnclex)
+	__endif
+	/* Return to lisp state: restore flags, stacks, and registers. */
+1:	__(pushl rcontext(tcr.save_eflags))
+	__(popfl)
+	__(movl rcontext(tcr.save_vsp),%esp)
+	__(movl rcontext(tcr.save_ebp),%ebp)
+	__(movl $TCR_STATE_LISP,rcontext(tcr.valence))
+        __(pop %fn) 	 	 
+        __(pop %arg_z) 	 	 
+        __(pop %arg_y) 	 	 
+        __(pop %temp1) 
+       	__(ldmxcsr rcontext(tcr.lisp_mxcsr))
+	__(check_pending_interrupt(%temp0))
+        __(pop %temp0)
+	__(leave)
+	__(ret)
+	/* need to deal with NSExceptions and Objc-2.0 exceptions */
+_endsubp(ffcall)
+
+/* Not implemented on this platform; hlt faults if it is ever reached. */
+_spentry(ffcall_return_registers)
+	__(hlt)
+_endsubp(ffcall_return_registers)
+
+/* We need to reserve a frame here if (a) nothing else was already pushed */
+/* and (b) we push something (e.g., more than 2 args in the lexpr) */
+/* Spread the args of a lexpr (pointed to by %arg_z, count in its first */
+/* word) onto the stack/arg registers and jump through %ra0. */
+_spentry(spread_lexprz)
+	new_local_labels()
+	__(movl (%arg_z),%imm0)	/* lexpr count */
+        __(leal node_size(%arg_z,%imm0),%arg_y)	/* past last lexpr arg */
+	__(movd %arg_y,%mm1)
+	__(test %nargs,%nargs) /* anything pushed by caller ? */
+        __(jne 0f)              /* yes, caller has already created frame. */
+        __(cmpl $(nargregs*node_size),%imm0) /* will we push anything ? */
+        __(jbe 0f)
+        __(push $reserved_frame_marker)
+        __(push $reserved_frame_marker)
+0:      __(addl %imm0,%nargs)
+        __(cmpl $(1*node_size),%imm0)
+        __(ja 2f)
+	__(je 1f)
+        /* lexpr count was 0; vpop the args that */
+        /* were pushed by the caller */
+        __(test %nargs,%nargs)
+        __(je local_label(all_args_popped))
+        __(pop %arg_z)
+local_label(maybe_pop_y):
+        __(cmpl $(1*node_size),%nargs)
+        __(je local_label(all_args_popped))
+        __(pop %arg_y)
+local_label(all_args_popped):   
+        /* If all args fit in registers but some were pushed */
+        /* by the caller, discard the reserved frame that the caller */
+        /* pushed.         */
+        __(cmpl %imm0,%nargs)
+        __(je local_label(go))
+        __(cmpl $(nargregs*node_size),%nargs)
+        __(ja local_label(go))
+        __(addl $(2*node_size),%esp)
+local_label(go):
+        __(jmp *%ra0)
+
+	/* lexpr count is two or more: vpush args from the lexpr until */
+	/* we have only two left, then assign them to arg_y and arg_z */
+2:	__(cmpl $(2*node_size),%imm0)
+	__(je local_label(push_loop_end))
+local_label(push_loop):
+	__(lea -1*node_size(%imm0),%imm0)
+	__(push -node_size(%arg_y))
+	__(lea -1*node_size(%arg_y),%arg_y)
+	__(cmpl $(2*node_size),%imm0)
+	__(jne 2b)
+local_label(push_loop_end):
+        __(movl -node_size*2(%arg_y),%arg_z)
+	__(movl -node_size*1(%arg_y),%arg_y)
+        __(jmp *%ra0)
+	/* lexpr count is one: set arg_z from the lexpr, */
+	/* maybe vpop arg_y  */
+1:      __(movl -node_size*1(%arg_y),%arg_z)
+        __(jmp local_label(maybe_pop_y))
+_endsubp(spread_lexprz)
+
+/* Entered (via a trampoline) when foreign code calls a lisp callback. */
+/* Transitions the thread from foreign to lisp state, funcalls the */
+/* lisp callback dispatcher (nrs.callbacks, index in arg_y, pointer to */
+/* the C frame in arg_z), then transitions back and returns the result */
+/* per the C/stdcall conventions described below. */
+_spentry(callback)
+	__(push %ebp)
+	__(movl %esp,%ebp)
+	/* C scalar args are already on the stack. */
+	/* arg word 0 at 8(%ebp), word 1 at 12(%ebp), etc. */
+
+	/* %eax is passed to us via the callback trampoline.
+	   bits 0-22: callback index
+	   bit 23: flag, set if we need to discard hidden arg on return
+		   (ignored when upper 8 bits are non-zero)
+	   bits 24-31: arg words to discard on return (_stdcall for win32) */
+	
+        /* Reserve some space for results, relative to the
+           current %ebp.  We may need quite a bit of it. */
+        __(subl $20,%esp)
+        __(movl $0,-16(%ebp)) /* No FP result */
+	__(btl $23,%eax)      /* set CF if we need to discard hidden arg */
+	__(pushfl)	      /* and save for later */
+        __(movl %eax,%ecx)    /* extract args-discard count */
+        __(shrl $24,%ecx)
+        __(andl $0x007fffff,%eax) /* callback index */
+        __(movl %ecx,-20(%ebp))
+        /* If the C stack is 16-byte aligned by convention,
+           it should still be, and this'll be a NOP. */
+        __(andl $~15,%esp)
+	/* C NVRs */
+	__(push %edi)
+	__(push %esi)
+	__(push %ebx)
+	__(push %ebp)
+	__(box_fixnum(%eax,%esi))	/* put callback index in arg_y */
+        __(cmpb $0,C(rcontext_readonly))
+        __(jne 0f)
+	/* Establish the thread's rcontext segment register via get_tcr. */
+	__(ref_global(get_tcr,%eax))
+	__(subl $12,%esp)		/* alignment */
+	__(push $1)			/* stack now 16-byte aligned */
+	__(call *%eax)
+	__(addl $16,%esp)		/* discard arg, alignment words */
+	/* linear TCR addr now in %eax */
+	__(movw tcr.ldt_selector(%eax), %rcontext_reg)
+0:      
+
+        /* ebp is 16-byte aligned, and we've pushed 4 words.  Make
+          sure that when we push old foreign_sp, %esp will be 16-byte
+          aligned again */
+        __(subl $8,%esp)
+        __(pushl rcontext(tcr.save_ebp))  /* mark cstack frame's "owner" */
+ 	__(push rcontext(tcr.foreign_sp))
+	__(movl %esp,rcontext(tcr.foreign_sp))
+	/* Clear node registers before entering lisp state. */
+	__(clr %arg_z)
+	/* arg_y contains callback index */
+	__(clr %temp1)
+	__(clr %temp0)
+	__(clr %fn)
+	__(pxor %fpzero,%fpzero)
+	__(movl rcontext(tcr.save_vsp),%esp)
+	__(movl %ebp,%arg_z)	/* pass C frame pointer to lisp */
+	__(movl rcontext(tcr.save_ebp),%ebp)
+	__(movl $TCR_STATE_LISP,rcontext(tcr.valence))
+	__(stmxcsr rcontext(tcr.foreign_mxcsr))
+	__(andb $~mxcsr_all_exceptions,rcontext(tcr.foreign_mxcsr))
+	__(ldmxcsr rcontext(tcr.lisp_mxcsr))
+	__(movl $nrs.callbacks,%fname)
+        __(check_cstack_alignment())
+	__(push $local_label(back_from_callback))
+	__(set_nargs(2))
+	__(jump_fname())
+__(tra(local_label(back_from_callback)))
+	/* Back from lisp: return to foreign state and stack. */
+	__(movl %esp,rcontext(tcr.save_vsp))
+	__(movl %ebp,rcontext(tcr.save_ebp))
+	__(movl $TCR_STATE_FOREIGN,rcontext(tcr.valence))
+	__(movl rcontext(tcr.foreign_sp),%esp)
+	__(stmxcsr rcontext(tcr.lisp_mxcsr))
+	__(emms)
+	__(pop rcontext(tcr.foreign_sp))
+        __(addl $12,%esp)       /* discard alignment padding */
+        __(ldmxcsr rcontext(tcr.foreign_mxcsr))
+        __ifdef(`WIN32_ES_HACK')
+         __(push %ds)
+         __(pop %es)
+        __endif
+	__(pop %ebp)
+	__(pop %ebx)
+	__(pop %esi)
+	__(pop %edi)
+        __(movl -12(%ebp),%ecx) /* magic value for ObjC bridge */
+        __(cmpb $1,-16(%ebp))	/* FP-result code stored by lisp */
+        __(jae 1f)
+	/* Integer result: up to 64 bits in %eax/%edx. */
+	__(movl -8(%ebp),%eax)
+        __(movl -4(%ebp),%edx)
+        __ifdef(`WIN_32')
+	 __(cmpl $0,-20(%ebp))
+         __(jne local_label(winapi_return))
+	__endif
+        /* since we aligned the stack after pushing flags, we're not
+           really sure where %esp is relative to where flags were saved.
+           We do know where the saved flags are relative to %ebp, so use
+           that to establish %esp before the popfl.
+        */
+        __(lea -24(%ebp),%esp)
+	__(popfl)	/* flags from bt way back when */
+	__(jc local_label(discard_first_arg))
+	__(leave)
+	__(ret)
+1:      __(jne 2f)
+        /* single float return in x87 */
+        __(flds -8(%ebp))
+        __ifdef(`WIN_32')
+	 __(cmpl $0,-20(%ebp))
+         __(jne local_label(winapi_return))
+        __endif
+        __(leave)
+	__(ret)
+2:      /* double-float return in x87 */
+        __(fldl -8(%ebp))
+        __ifdef(`WIN_32')
+	 __(cmpl $0,-20(%ebp))
+         __(jne local_label(winapi_return))
+        __endif
+        __(leave)
+	__(ret)
+        __ifdef(`WIN_32')
+local_label(winapi_return):
+	  __(movl -20(%ebp),%ecx)
+	  __(leave)
+         /* %ecx is non-zero and contains count of arg words to pop */
+          __(popl -4(%esp,%ecx,4))
+          __(leal -4(%esp,%ecx,4),%esp)
+          __(ret)
+        __endif
+local_label(discard_first_arg):
+	__(leave)
+	__(ret $4)
+_endsubp(callback)
+
+/* temp0 = array, arg_y = i, arg_z = j. Typecheck everything.
+   We don't know whether the array is alleged to be simple or
+   not, and don't know anythng about the element type.  */
+
+_spentry(aref2)
+	/* Validate: both indices fixnums, temp0 a rank-2 array header, */
+	/* both indices in bounds. */
+        __(testl $fixnummask,%arg_y)
+        __(jne 0f)
+	__(testb $fixnummask,%arg_z_b)
+        __(jne 1f)
+	__(extract_typecode(%temp0,%imm0))
+        __(cmpb $subtag_arrayH,%imm0_b)
+        __(jne 2f)
+        __(cmpl $2<<fixnumshift,arrayH.rank(%temp0))
+        __(jne 2f)
+	__(cmpl arrayH.dim0(%temp0),%arg_y)
+        __(jae 3f)
+	__(movl arrayH.dim0+node_size(%temp0),%imm0)
+        __(cmpl %imm0,%arg_z)
+        __(jae 4f)
+	/* Row-major index: j + dim1 * i (dim1 unboxed to keep the */
+	/* fixnum scaling right). */
+	__(sarl $fixnumshift,%imm0)
+        __(imull %arg_y,%imm0)
+        __(addl %imm0,%arg_z)
+        __(movl %temp0,%arg_y)
+	__(xorl %temp1,%temp1)
+	/* Chase displacement chain down to the underlying data vector. */
+6:      __(addl arrayH.displacement(%arg_y),%arg_z)
+        __(movl arrayH.data_vector(%arg_y),%arg_y)
+        __(extract_subtag(%arg_y,%imm0_b))
+        __(cmpb $subtag_vectorH,%imm0_b)
+        __(ja C(misc_ref_common))
+        __(jmp 6b)
+0:	__(uuo_error_reg_not_fixnum(Rarg_y))
+1:	__(uuo_error_reg_not_fixnum(Rarg_z))
+2:      __(uuo_error_reg_not_type(Rtemp0,error_object_not_array_2d))
+3:	__(uuo_error_array_bounds(Rarg_y,Rtemp0))
+4:	__(uuo_error_array_bounds(Rarg_z,Rtemp0))
+
+_endsubp(aref2)
+
+/* Like aref2, but temp1 = array, temp0 = i, arg_y = j, arg_z = k */
+_spentry(aref3)
+	/* Validate: all indices fixnums, temp1 a rank-3 array header, */
+	/* all indices in bounds. */
+	__(testb $fixnummask,%temp0_b)
+	__(jne 0f)
+	__(testl $fixnummask,%arg_y)
+	__(jne 1f)
+	__(testb $fixnummask,%arg_z_b)
+	__(jne 2f)
+	__(extract_typecode(%temp1,%imm0))
+	__(cmpb $subtag_arrayH,%imm0_b)
+	__(jne 3f)
+	__(cmpl $3<<fixnumshift,arrayH.rank(%temp1))
+	__(jne 3f)
+	__(cmpl arrayH.dim0(%temp1),%temp0)
+	__(jae 4f)
+	__(movl arrayH.dim0+node_size(%temp1),%imm0)
+	__(cmpl %imm0,%arg_y)
+	__(jae 5f)
+	__(cmpl arrayH.dim0+(node_size*2)(%temp1),%arg_z)
+	__(jae 6f)
+	/* index computation: k + dim2 * (j + dim1 * i) */
+	/* (plus minor fussing for fixnum scaling) */
+	__(sarl $fixnumshift,%imm0)
+	__(imull %imm0,%temp0)
+	__(addl %arg_y,%temp0)
+	__(movl arrayH.dim0+(node_size*2)(%temp1),%imm0)
+	__(sarl $fixnumshift,%imm0)
+	__(imull %imm0,%temp0)
+	__(addl %temp0,%arg_z)
+	__(movl %temp1,%arg_y)
+	/* Chase displacement chain down to the underlying data vector. */
+8:	__(addl arrayH.displacement(%arg_y),%arg_z)
+	__(movl arrayH.data_vector(%arg_y),%arg_y)
+	__(extract_subtag(%arg_y,%imm0_b))
+	__(cmpb $subtag_vectorH,%imm0_b)
+	__(ja C(misc_ref_common))
+	__(jmp 8b)
+0:	__(uuo_error_reg_not_fixnum(Rtemp0))
+1:	__(uuo_error_reg_not_fixnum(Rarg_y))
+2:	__(uuo_error_reg_not_fixnum(Rarg_z))
+3:	__(uuo_error_reg_not_type(Rtemp1,error_object_not_array_3d))
+4:	__(uuo_error_array_bounds(Rtemp0,Rtemp1))
+5:	__(uuo_error_array_bounds(Rarg_y,Rtemp1))
+6:	__(uuo_error_array_bounds(Rarg_z,Rtemp1))
+_endsubp(aref3)
+
+/* As with aref2, but temp1 = array, temp0 = i, arg_y = j, arg_z = new_value */
+_spentry(aset2)
+	/* Validate: both indices fixnums, temp1 a rank-2 array header, */
+	/* both indices in bounds. */
+        __(testb $fixnummask,%temp0_b)
+        __(jne 0f)
+	__(testl $fixnummask,%arg_y)
+        __(jne 1f)
+	__(extract_typecode(%temp1,%imm0))
+        __(cmpb $subtag_arrayH,%imm0_b)
+        __(jne 2f)
+        __(cmpl $2<<fixnumshift,arrayH.rank(%temp1))
+        __(jne 2f)
+	__(cmpl arrayH.dim0(%temp1),%temp0)
+        __(jae 3f)
+	__(movl arrayH.dim0+node_size(%temp1),%imm0)
+        __(cmpl %imm0,%arg_y)
+        __(jae 4f)
+	/* Row-major index: j + dim1 * i (dim1 unboxed to keep the */
+	/* fixnum scaling right). */
+	__(sarl $fixnumshift,%imm0)
+        __(imull %temp0,%imm0)
+        __(addl %imm0,%arg_y)
+        __(movl %temp1,%temp0)
+	__(xorl %temp1,%temp1)
+	/* Chase displacement chain down to the underlying data vector. */
+6:      __(addl arrayH.displacement(%temp0),%arg_y)
+        __(movl arrayH.data_vector(%temp0),%temp0)
+        __(extract_subtag(%temp0,%imm0_b))
+        __(cmpb $subtag_vectorH,%imm0_b)
+        __(ja C(misc_set_common))
+        __(jmp 6b)
+0:	__(uuo_error_reg_not_fixnum(Rtemp0))
+1:	__(uuo_error_reg_not_fixnum(Rarg_y))
+2:      __(uuo_error_reg_not_type(Rtemp1,error_object_not_array_2d))
+3:	__(uuo_error_array_bounds(Rtemp0,Rtemp1))
+4:	__(uuo_error_array_bounds(Rarg_y,Rtemp1))
+_endsubp(aset2)
+
+/* temp1 = array, (%esp) = i, temp0 = j, arg_y = k, arg_z = newval */
+/* Three-dimensional ASET: the first index i arrives on the stack */
+/* (not enough free registers); it is popped on the success path. */
+_spentry(aset3)
+	__(testb $fixnummask,(%esp))
+	__(jne 0f)
+	__(testb $fixnummask,%temp0_b)
+	__(jne 1f)
+	__(testl $fixnummask,%arg_y)
+	__(jne 2f)
+	__(extract_typecode(%temp1,%imm0))
+	__(cmpb $subtag_arrayH,%imm0_b)
+	__(jne 3f)
+	__(cmpl $3<<fixnumshift,arrayH.rank(%temp1))
+	__(jne 3f)
+	__(movl arrayH.dim0(%temp1),%imm0)
+	__(cmpl %imm0,(%esp))	/* i on stack */
+	__(jae 4f)
+	__(movl arrayH.dim0+node_size(%temp1),%imm0)
+	__(cmpl %imm0,%temp0)
+	__(jae 5f)
+	__(cmpl arrayH.dim0+(node_size*2)(%temp1),%arg_y)
+	__(jae 6f)
+	/* index computation: k + dim2 * (j + dim1 * i) */
+	/* (plus minor fussing for fixnum scaling) */
+	__(sarl $fixnumshift,%imm0)
+	__(imull (%esp),%imm0)	/* i on stack */
+	__(addl %imm0,%temp0)
+	__(addl $node_size,%esp)	/* discard i: done with it */
+	__(movl arrayH.dim0+(node_size*2)(%temp1),%imm0)
+	__(sarl $fixnumshift,%imm0)
+	__(imull %imm0,%temp0)
+	__(addl %temp0,%arg_y)
+	__(movl %temp1,%temp0)
+	/* Follow displaced-array links down to the data vector, then */
+	/* tail-call the common setter. */
+8:	__(addl arrayH.displacement(%temp0),%arg_y)
+	__(movl arrayH.data_vector(%temp0),%temp0)
+	__(extract_subtag(%temp0,%imm0_b))
+	__(cmpb $subtag_vectorH,%imm0_b)
+	__(ja C(misc_set_common))
+	__(jmp 8b)
+	/* NOTE(review): in the error paths at 1/2/3/5/6 below, the saved i */
+	/* is still on the stack — presumably these uuo errors unwind rather */
+	/* than resume here; verify against the trap handler. */
+0:	__(pop %temp0)	/* supplied i */
+	__(uuo_error_reg_not_fixnum(Rtemp0))
+1:	__(uuo_error_reg_not_fixnum(Rtemp0))
+2:	__(uuo_error_reg_not_fixnum(Rarg_y))
+3:	__(uuo_error_reg_not_type(Rtemp1,error_object_not_array_3d))
+4:	__(pop %imm0)	/* supplied i is on stack */
+	__(uuo_error_array_bounds(Rimm0,Rtemp1))
+5:	__(uuo_error_array_bounds(Rtemp0,Rtemp1))
+6:	__(uuo_error_array_bounds(Rarg_y,Rtemp1))
+_endsubp(aset3)
+
+/* Prepend all but the first seven (6 words of code & other immediate data,  */
+/* plus inner fn) and last (lfbits) elements of %fn to the "arglist". */
+/* Closure trampoline: %fn is a closure vector.  Elements 7..N-2 are the */
+/* inherited (closed-over) args; element 6 is the inner function.  Splice */
+/* the inherited args in front of the supplied args, then jump to the */
+/* inner function with %nargs increased by the inherited count. */
+_spentry(call_closure)
+	new_local_labels()
+	__(vector_length(%fn,%imm0))
+	__(subl $8<<fixnumshift,%imm0)	/* imm0 = inherited arg count */
+	__(lea (%nargs,%imm0),%temp0)
+	__(cmpl $nargregs<<fixnumshift,%temp0)
+	__(jna local_label(regs_only))	/* either: 1 arg, 1 inherited, or */
+					/* no args, 2 inherited */
+	__(pop rcontext(tcr.save0))		/* save return address */
+	__(cmpl $nargregs<<fixnumshift,%nargs)
+	__(jna local_label(no_insert))
+
+/* Some arguments have already been pushed.  Push %imm0's worth */
+/* of NILs, copy those arguments that have already been vpushed from */
+/* the old TOS to the new, then insert all of the inherited args */
+/* and go to the function. */
+
+	__(mov %imm0,%temp0)
+local_label(push_nil_loop):
+	__(push $nil_value)
+	__(sub $fixnumone,%temp0)
+	__(jne local_label(push_nil_loop))
+
+/* Need to use arg regs as temporaries.  Stash them in the spill area. */
+	__(movl %arg_y,rcontext(tcr.save1))
+	__(movl %arg_z,rcontext(tcr.save2))
+
+	__(leal (%esp,%imm0),%temp0)	/* start of already-pushed args */
+	__(leal -nargregs<<fixnumshift(%nargs),%arg_y) /* args pushed */
+	__(movd %imm0,%mm0)	/* save inherited arg count */
+	__(xorl %imm0,%imm0)
+	/* Slide the already-pushed args down over the new NIL slots. */
+local_label(copy_already_loop):
+	__(movl (%temp0,%imm0),%arg_z)
+	__(movl %arg_z,(%esp,%imm0))
+	__(addl $fixnumone,%imm0)
+	__(cmpl %imm0,%arg_y)
+	__(jne local_label(copy_already_loop))
+
+	__(lea -node_size(%temp0,%imm0),%arg_y)	/* start of args on stack */
+	__(movl $7<<fixnumshift,%temp0)	/* skip code, new fn */
+	__(movd %mm0,%imm0)
+	/* Copy each inherited arg out of the closure vector into the */
+	/* freed-up stack slots, counting it into %nargs. */
+local_label(insert_loop):
+	__(movl misc_data_offset(%fn,%temp0),%arg_z)
+	__(addl $node_size,%temp0)
+	__(addl $fixnumone,%nargs)
+	__(movl %arg_z,(%arg_y))
+	__(subl $node_size,%arg_y)
+	__(subl $fixnumone,%imm0)
+	__(jne local_label(insert_loop))
+
+	/* Recover arg regs, saved earlier */
+	__(movl rcontext(tcr.save1),%arg_y)
+	__(movl rcontext(tcr.save2),%arg_z)
+	__(jmp local_label(go))
+	
+/* Here if no args were pushed by the caller. */
+/* cases: */
+/* no args, more than two inherited args */
+/* a single arg in arg_z, more than one inherited arg */
+/* two args in arg_y and arg_z, some number of inherited args */
+
+/* Therefore, we're always going to have to push something (the sum of */
+/* %nargs and %imm0 will always be greater than $nargregs), and */
+/* we will have to reserve space for a stack frame. */
+/* The 0 args, 2 inherited case and the 1 arg, 1 inherited case get */
+/* handled at local_label(regs_only). */
+	
+local_label(no_insert):
+	/* Reserve space for a stack frame */
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+	__(lea 7<<fixnumshift(%imm0),%temp0)	/* last inherited arg */
+	__(rcmpl(%nargs,$fixnumone))
+	__(je local_label(set_arg_y))
+	__(jb local_label(set_y_z))
+	/* %nargs = $nargregs (i.e., 2), vpush remaining inherited vars. */
+
+local_label(vpush_remaining):
+	__(movl $7<<fixnumshift,%temp0)
+local_label(vpush_remaining_loop):
+	__(push misc_data_offset(%fn,%temp0))
+	__(add $node_size,%temp0)
+	__(add $fixnumone,%nargs)
+	__(sub $node_size,%imm0)
+	__(jnz local_label(vpush_remaining_loop))
+	__(jmp local_label(go))
+	
+local_label(set_arg_y):
+	/* one arg in arg_z.  set arg_y and vpush remaining inherited args */
+	__(subl $node_size,%temp0)
+	__(movl misc_data_offset(%fn,%temp0),%arg_y)
+	__(addl $fixnumone,%nargs)
+	__(subl $fixnumone,%imm0)
+	__(jmp local_label(vpush_remaining))
+local_label(set_y_z):
+	/* no args supplied: take the last two inherited args as arg_y/arg_z */
+	__(subl $node_size,%temp0)
+	__(movl misc_data_offset(%fn,%temp0),%arg_z)
+	__(addl $fixnumone,%nargs)
+	__(subl $fixnumone,%imm0)
+	__(jmp local_label(set_arg_y))
+
+local_label(go):
+	__(movl misc_data_offset+(6*node_size)(%fn),%fn)	/* inner fn */
+	__(push rcontext(tcr.save0))	/* restore return addr */
+	__(movapd %fpzero,rcontext(tcr.save0))	/* clear out spill area */
+	__(jmp *%fn)
+/* Fast path: everything fits in the two arg registers; no stack motion. */
+local_label(regs_only):
+	__(lea 7<<fixnumshift(%imm0),%temp0)
+	__(test %nargs,%nargs)
+	__(jne local_label(one_arg))
+	/* no args passed, two inherited args */
+	__(movl misc_data_offset-node_size(%fn,%temp0),%arg_z)
+	__(cmpl $node_size,%imm0)
+	__(je local_label(rgo))
+	__(movl misc_data_offset-(node_size*2)(%fn,%temp0),%arg_y)
+local_label(rgo):
+	__(addl %imm0,%nargs)
+	__(jmp *misc_data_offset+(6*node_size)(%fn))
+local_label(one_arg):
+	/* one arg was passed, so there's one inherited arg */
+	__(movl misc_data_offset-node_size(%fn,%temp0),%arg_y)
+	__(jmp local_label(rgo))
+_endsubp(call_closure)
+
+/* The following entry points apparently exist for other (PPC-era) ports */
+/* and are unused here; each is a hlt so any stray call traps at once. */
+_spentry(poweropen_callbackX)
+	__(hlt)
+_endsubp(poweropen_callbackX)
+
+_spentry(poweropen_ffcallX)
+	__(hlt)
+_endsubp(poweropen_ffcallX)
+
+_spentry(eabi_ff_call)
+	__(hlt)
+_endsubp(eabi_ff_call)
+
+_spentry(eabi_callback)
+	__(hlt)
+_endsubp(eabi_callback)
+
+
+/* Unused, and often not used on PPC either  */
+_spentry(callbuiltin)
+	__(hlt)
+_endsubp(callbuiltin)
+
+_spentry(callbuiltin0)
+	__(hlt)
+_endsubp(callbuiltin0)
+
+_spentry(callbuiltin1)
+	__(hlt)
+_endsubp(callbuiltin1)
+
+_spentry(callbuiltin2)
+	__(hlt)
+_endsubp(callbuiltin2)
+
+_spentry(callbuiltin3)
+	__(hlt)
+_endsubp(callbuiltin3)
+
+_spentry(restorefullcontext)
+	__(hlt)
+_endsubp(restorefullcontext)
+
+_spentry(savecontextvsp)
+	__(hlt)
+_endsubp(savecontextvsp)
+
+_spentry(savecontext0)
+	__(hlt)
+_endsubp(savecontext0)
+
+_spentry(restorecontext)
+	__(hlt)
+_endsubp(restorecontext)
+
+_spentry(stkconsyz)
+	__(hlt)
+_endsubp(stkconsyz)
+
+_spentry(stkvcell0)
+	__(hlt)
+_endsubp(stkvcell0)
+
+_spentry(stkvcellvsp)
+	__(hlt)
+_endsubp(stkvcellvsp)
+
+_spentry(breakpoint)
+        __(hlt)
+_endsubp(breakpoint)
+
+_spentry(unused_6)
+        __(hlt)
+Xspentry_end:
+_endsubp(unused_6)
+        /* Export the address range covered by the subprims, so the */
+        /* kernel can recognize PCs that lie inside them. */
+        .data
+        .globl C(spentry_start)
+        .globl C(spentry_end)
+C(spentry_start):       .long Xspentry_start
+C(spentry_end):         .long Xspentry_end
+        
Index: /branches/arm/lisp-kernel/x86-spentry64.s
===================================================================
--- /branches/arm/lisp-kernel/x86-spentry64.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-spentry64.s	(revision 13357)
@@ -0,0 +1,5184 @@
+/*   Copyright (C) 2005-2009 Clozure Associates and contributors  */
+/*   This file is part of Clozure CL.    */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+
+		
+	include(lisp.s)
+	_beginfile
+	
+        .align 2
+/* m4 macros used throughout this file.  _spentry/_endsubp bracket each */
+/* subprim definition (auto-closing any open function via __func_name). */
+define(`_spentry',`ifdef(`__func_name',`_endfn',`')
+	.p2align 3
+	_exportfn(_SP$1)
+	.line  __line__
+')
+
+             
+define(`_endsubp',`
+	_endfn(_SP$1)
+#  __line__ 
+')
+
+/* Fetch builtin function $1 from the nrs builtin_functions vector, */
+/* set %nargs to $2, and tail-call it via %fname. */
+define(`jump_builtin',`
+	ref_nrs_value(builtin_functions,%fname)
+	set_nargs($2)
+	vrefr(%fname,%fname,$1)
+	jump_fname()
+')
+
+        
+
+/* Jumped to when something non-callable is funcalled: trap with an error. */
+/* Xspentry_start also marks the beginning of the subprim address range. */
+_spentry(bad_funcall)
+Xspentry_start:         
+	.globl C(bad_funcall)	
+__(tra(C(bad_funcall)))
+	__(uuo_error_not_callable)
+_endsubp(bad_funcall)
+	
+/* %arg_z has overflowed by one bit.  Make a bignum with 2 (32-bit) digits.  */
+	
+_spentry(fix_overflow)
+C(fix_one_bit_overflow):	
+	__(movq $two_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(`',aligned_bignum_size(2)))	/* result in %temp0 */
+	__(unbox_fixnum(%arg_z,%imm0))
+	/* Flipping the top three bits undoes the one-bit overflow of the */
+	/* boxed (61-bit) value, recovering the intended 64-bit integer. */
+	__(movq $0xe000000000000000,%imm1)
+	__(mov %temp0,%arg_z)
+	__(xorq %imm1,%imm0)
+	__(movq %imm0,misc_data_offset(%arg_z))
+	__(ret)	
+_endsubp(fix_overflow)
+
+
+/* Make a lisp integer (fixnum or two-digit bignum) from the signed  */
+/* 64-bit value in %imm0.   */
+
+_spentry(makes64)
+	/* If shifting left then right by fixnumshift round-trips, the */
+	/* value fits in a fixnum; otherwise cons a 2-digit bignum. */
+	__(movq %imm0,%imm1)
+	__(shlq $fixnumshift,%imm1)
+	__(movq %imm1,%arg_z)
+	__(sarq $fixnumshift,%imm1)
+	__(cmpq %imm1,%imm0)
+	__(jz 0f)
+	__(movd %imm0,%mm0)	/* preserve value across allocation */
+	__(movq $two_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(2)))
+	__(movq %mm0,misc_data_offset(%arg_z))
+0:	__(repret)
+_endsubp(makes64)	
+
+        				
+
+/* %imm1:%imm0 constitute a signed integer, almost certainly a bignum.  */
+/* Make a lisp integer out of those 128 bits ..   */
+	
+_startfn(C(makes128))
+	
+        /*  We're likely to have to make a bignum out of the integer in %imm1 and  */
+        /*  %imm0. We'll need to use %imm0 and %imm1 to cons the bignum, and  */
+        /*  will need to do some arithmetic (determining significant bigits)  */
+        /*  on %imm0 and %imm1 in order to know how large that bignum needs to be.  */
+        /*  Cache %imm0 and %imm1 in %mm0 and %mm1.   */
+   
+	__(movd %imm0,%mm0)
+	__(movd %imm1,%mm1)
+	
+        /* If %imm1 is just a sign extension of %imm0, make a 64-bit signed integer.   */
+	
+	__(sarq $63,%imm0) 
+	__(cmpq %imm0,%imm1)
+	__(movd %mm0,%imm0)
+	__(je _SPmakes64)
+	
+        /* Otherwise, if the high 32 bits of %imm1 are a sign-extension of the  */
+        /* low 32 bits of %imm1, make a 3-digit bignum.  If the upper 32 bits  */
+        /* of %imm1 are significant, make a 4 digit bignum   */
+	
+	__(movq %imm1,%imm0)
+	__(shlq $32,%imm0)
+	__(sarq $32,%imm0)
+	__(cmpq %imm0,%imm1)
+	__(jz 3f)
+	__(mov $four_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(4)))
+	__(movq %mm0,misc_data_offset(%arg_z))	/* low 64 bits */
+	__(movq %mm1,misc_data_offset+8(%arg_z))	/* high 64 bits */
+	__(ret)
+3:	__(mov $three_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(3)))
+	__(movq %mm0,misc_data_offset(%arg_z))
+	__(movd %mm1,misc_data_offset+8(%arg_z))	/* third 32-bit digit */
+	__(ret)
+_endfn
+
+        
+/* %imm1:%imm0 constitute an unsigned integer, almost certainly a bignum.  */
+/* Make a lisp integer out of those 128 bits ..  */
+	
+_startfn(C(makeu128))
+	
+        /* We're likely to have to make a bignum out of the integer in %imm1 and  */
+        /* %imm0. We'll need to use %imm0 and %imm1 to cons the bignum, and  */
+        /* will need to do some arithmetic (determining significant bigits)  */
+        /* on %imm0 and %imm1 in order to know how large that bignum needs to be.  */
+        /* Cache %imm0 and %imm1 in %mm0 and %mm1.   */
+
+        /* If the high word is 0, make an unsigned-byte 64 ... 	  */
+	
+	__(testq %imm1,%imm1)
+	__(jz _SPmakeu64)
+	
+	__(movd %imm0,%mm0)
+	__(movd %imm1,%mm1)
+
+	__(js 5f)		/* Sign bit set in %imm1. Need 5 digits   */
+	__(bsrq %imm1,%imm0)
+	__(rcmpb(%imm0_b,$31))
+	__(jae 4f)		/* Some high bits in %imm1.  Need 4 digits   */
+	__(testl %imm1_l,%imm1_l)
+	__(movd %mm0,%imm0)
+	__(jz _SPmakeu64)
+	
+	/* Need 3 digits   */
+	
+	__(movq $three_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(3)))
+	__(movq %mm0,misc_data_offset(%arg_z))
+	__(movd %mm1,misc_data_offset+8(%arg_z))
+	__(ret)
+4:	__(movq $four_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(4)))
+	__(jmp 6f)
+5:	__(movq $five_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(5)))
+6:	__(movq %mm0,misc_data_offset(%arg_z))	/* low 64 bits */
+	/* Bug fix: store the HIGH 64 bits from %mm1 here.  The previous  */
+	/* code stored %mm0 again, duplicating the low quadword into      */
+	/* digits 2-3 and producing a wrong bignum value; compare the     */
+	/* signed analogue makes128, which stores %mm1 at offset+8.       */
+	__(movq %mm1,misc_data_offset+8(%arg_z))
+	__(ret)
+_endfn
+
+/* %arg_y = uvector, %arg_z = index (fixnum).  Type- and bounds-check, */
+/* then dispatch through misc_ref_common with the subtag in %imm1_b. */
+_spentry(misc_ref)
+	__(movb $tagmask,%imm0_b)
+	__(andb %arg_y_b,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)
+	__(testb $fixnummask,%arg_z_b)
+	__(jne 1f)
+	/* Clear the subtag byte of the header, leaving the element count */
+	/* (shifted into fixnum form) for the bounds check. */
+	__(movq misc_header_offset(%arg_y),%imm0)
+        __(xorb %imm0_b,%imm0_b)
+	__(shrq $num_subtag_bits-fixnumshift,%imm0)
+	__(cmpq %imm0,%arg_z)
+	__(jae 2f)
+	__(movb misc_subtag_offset(%arg_y),%imm1_b)
+        __(jmp C(misc_ref_common))
+        
+0:      __(uuo_error_reg_not_tag(Rarg_y,tag_misc))
+1:      __(uuo_error_reg_not_fixnum(Rarg_z))
+2:      __(uuo_error_vector_bounds(Rarg_z,Rarg_y))        
+_endsubp(misc_ref)
+	
+/* %imm1.b = subtag, %arg_y = uvector, %arg_z = index.  */
+/* Bounds/type-checking done in caller  */
+	
+/* Indexed fetch from any uvector: dispatch on the 8-bit subtag through */
+/* a 256-entry jump table to a per-element-type accessor.  Each accessor */
+/* returns the boxed element in %arg_z. */
+_startfn(C(misc_ref_common))
+	__(movzbl %imm1_b,%imm1_l)
+        __(lea local_label(misc_ref_jmp)(%rip),%imm2)
+	__(jmp *(%imm2,%imm1,8))
+	.p2align 3
+local_label(misc_ref_jmp):	
+	/* 00-0f   */
+	.quad local_label(misc_ref_invalid) /* 00 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 01 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 02 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 03 cons   */
+	.quad local_label(misc_ref_invalid) /* 04 tra_0   */
+	.quad local_label(misc_ref_invalid) /* 05 nodeheader_0   */
+	.quad local_label(misc_ref_invalid) /* 06 nodeheader_1   */
+	.quad local_label(misc_ref_invalid) /* 07 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 08 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 09 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 0a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 0b nil   */
+	.quad local_label(misc_ref_invalid) /* 0c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 0d misc   */
+	.quad local_label(misc_ref_invalid) /* 0e symbol   */
+	.quad local_label(misc_ref_invalid) /* 0f function   */
+	/* 10-1f   */
+	.quad local_label(misc_ref_invalid) /* 10 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 11 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 12 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 13 cons   */
+	.quad local_label(misc_ref_invalid) /* 14 tra_0   */
+	.quad local_label(misc_ref_node) /* 15 symbol_vector   */
+	.quad local_label(misc_ref_node) /* 16 ratio   */
+	.quad local_label(misc_ref_invalid) /* 17 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 18 odd_fixnum   */
+	.quad local_label(misc_ref_u32)	/* 19 bignum   */
+	.quad local_label(misc_ref_u64) /* 1a macptr   */
+	.quad local_label(misc_ref_invalid) /* 1b nil   */
+	.quad local_label(misc_ref_invalid) /* 1c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 1d misc   */
+	.quad local_label(misc_ref_invalid) /* 1e symbol   */
+	.quad local_label(misc_ref_invalid) /* 1f function   */
+	/* 20-2f   */
+	.quad local_label(misc_ref_invalid) /* 20 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 21 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 22 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 23 cons   */
+	.quad local_label(misc_ref_invalid) /* 24 tra_0   */
+	.quad local_label(misc_ref_node) /* 25 catch_frame   */
+	.quad local_label(misc_ref_node) /* 26 complex   */
+	.quad local_label(misc_ref_invalid) /* 27 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 28 odd_fixnum   */
+	.quad local_label(misc_ref_u32)	/* 29 double_float   */
+	.quad local_label(misc_ref_u64)  /* 2a dead_macptr   */
+	.quad local_label(misc_ref_invalid) /* 2b nil   */
+	.quad local_label(misc_ref_invalid) /* 2c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 2d misc   */
+	.quad local_label(misc_ref_invalid) /* 2e symbol   */
+	.quad local_label(misc_ref_invalid) /* 2f function   */
+	/* 30-3f   */
+	.quad local_label(misc_ref_invalid) /* 30 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 31 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 32 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 33 cons   */
+	.quad local_label(misc_ref_invalid) /* 34 tra_0   */
+	.quad local_label(misc_ref_node) /* 35 hash_vector   */
+	.quad local_label(misc_ref_node) /* 36 struct   */
+	.quad local_label(misc_ref_invalid) /* 37 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 38 odd_fixnum   */
+	.quad local_label(misc_ref_u32)	/* 39 xcode_vector   */
+	.quad local_label(misc_ref_invalid) /* 3a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 3b nil   */
+	.quad local_label(misc_ref_invalid) /* 3c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 3d misc   */
+	.quad local_label(misc_ref_invalid) /* 3e symbol   */
+	.quad local_label(misc_ref_invalid) /* 3f function   */
+	/* 40-4f   */
+	.quad local_label(misc_ref_invalid) /* 40 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 41 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 42 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 43 cons   */
+	.quad local_label(misc_ref_invalid) /* 44 tra_0   */
+	.quad local_label(misc_ref_node) /* 45 pool   */
+	.quad local_label(misc_ref_node) /* 46 istruct   */
+	.quad local_label(misc_ref_invalid) /* 47 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 48 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 49 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 4a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 4b nil   */
+	.quad local_label(misc_ref_invalid) /* 4c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 4d misc   */
+	.quad local_label(misc_ref_invalid) /* 4e symbol   */
+	.quad local_label(misc_ref_invalid) /* 4f function   */
+	/* 50-5f   */
+	.quad local_label(misc_ref_invalid) /* 50 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 51 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 52 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 53 cons   */
+	.quad local_label(misc_ref_invalid) /* 54 tra_0   */
+	.quad local_label(misc_ref_node) /* 55 weak   */
+	.quad local_label(misc_ref_node) /* 56 value_cell   */
+	.quad local_label(misc_ref_invalid) /* 57 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 58 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 59 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 5a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 5b nil   */
+	.quad local_label(misc_ref_invalid) /* 5c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 5d misc   */
+	.quad local_label(misc_ref_invalid) /* 5e symbol   */
+	.quad local_label(misc_ref_invalid) /* 5f function   */
+	/* 60-6f   */
+	.quad local_label(misc_ref_invalid) /* 60 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 61 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 62 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 63 cons   */
+	.quad local_label(misc_ref_invalid) /* 64 tra_0   */
+	.quad local_label(misc_ref_node) /* 65 package   */
+	.quad local_label(misc_ref_node) /* 66 xfunction   */
+	.quad local_label(misc_ref_invalid) /* 67 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 68 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 69 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 6a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 6b nil   */
+	.quad local_label(misc_ref_invalid) /* 6c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 6d misc   */
+	.quad local_label(misc_ref_invalid) /* 6e symbol   */
+	.quad local_label(misc_ref_invalid) /* 6f function   */
+	/* 70-7f   */
+	.quad local_label(misc_ref_invalid) /* 70 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 71 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 72 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 73 cons   */
+	.quad local_label(misc_ref_invalid) /* 74 tra_0   */
+	.quad local_label(misc_ref_node) /* 75 slot_vector   */
+	.quad local_label(misc_ref_node) /* 76 lock   */
+	.quad local_label(misc_ref_invalid) /* 77 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 78 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 79 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 7a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 7b nil   */
+	.quad local_label(misc_ref_invalid) /* 7c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 7d misc   */
+	.quad local_label(misc_ref_invalid) /* 7e symbol   */
+	.quad local_label(misc_ref_invalid) /* 7f function   */
+	/* 80-8f   */
+	.quad local_label(misc_ref_invalid) /* 80 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 81 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 82 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 83 cons   */
+	.quad local_label(misc_ref_invalid) /* 84 tra_0   */
+	.quad local_label(misc_ref_node) /* 85 lisp_thread   */
+	.quad local_label(misc_ref_node) /* 86 instance   */
+	.quad local_label(misc_ref_invalid) /* 87 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 88 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 89 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 8a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 8b nil   */
+	.quad local_label(misc_ref_invalid) /* 8c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 8d misc   */
+	.quad local_label(misc_ref_invalid) /* 8e symbol   */
+	.quad local_label(misc_ref_invalid) /* 8f function   */
+	/* 90-9f   */
+	.quad local_label(misc_ref_invalid) /* 90 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 91 imm_1   */
+	.quad local_label(misc_ref_invalid) /* 92 imm_2   */
+	.quad local_label(misc_ref_invalid) /* 93 cons   */
+	.quad local_label(misc_ref_invalid) /* 94 tra_0   */
+	.quad local_label(misc_ref_function) /* 95 function_vector   */
+	.quad local_label(misc_ref_invalid) /* 96 nodeheader_1   */
+	.quad local_label(misc_ref_invalid) /* 97 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* 98 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* 99 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* 9a immheader_2   */
+	.quad local_label(misc_ref_invalid) /* 9b nil   */
+	.quad local_label(misc_ref_invalid) /* 9c tra_1   */
+	.quad local_label(misc_ref_invalid) /* 9d misc   */
+	.quad local_label(misc_ref_invalid) /* 9e symbol   */
+	.quad local_label(misc_ref_invalid) /* 9f function   */
+	/* a0-af   */
+	.quad local_label(misc_ref_invalid) /* a0 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* a1 imm_1   */
+	.quad local_label(misc_ref_invalid) /* a2 imm_2   */
+	.quad local_label(misc_ref_invalid) /* a3 cons   */
+	.quad local_label(misc_ref_invalid) /* a4 tra_0   */
+	.quad local_label(misc_ref_node) /* a5 arrayH   */
+	.quad local_label(misc_ref_node) /* a6 vectorH   */
+	.quad local_label(misc_ref_s16)	/* a7 s16   */
+	.quad local_label(misc_ref_invalid) /* a8 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* a9 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* aa immheader_2   */
+	.quad local_label(misc_ref_invalid) /* ab nil   */
+	.quad local_label(misc_ref_invalid) /* ac tra_1   */
+	.quad local_label(misc_ref_invalid) /* ad misc   */
+	.quad local_label(misc_ref_invalid) /* ae symbol   */
+	.quad local_label(misc_ref_invalid) /* af function   */
+	/* b0-bf   */
+	.quad local_label(misc_ref_invalid) /* b0 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* b1 imm_1   */
+	.quad local_label(misc_ref_invalid) /* b2 imm_2   */
+	.quad local_label(misc_ref_invalid) /* b3 cons   */
+	.quad local_label(misc_ref_invalid) /* b4 tra_0   */
+	.quad local_label(misc_ref_invalid) /* b5 nodeheader_0   */
+	.quad local_label(misc_ref_node) /* b6 simple_vector   */
+	.quad local_label(misc_ref_u16) /* b7 immheader_0   */
+	.quad local_label(misc_ref_invalid) /* b8 odd_fixnum   */
+	.quad local_label(misc_ref_invalid) /* b9 immheader_1   */
+	.quad local_label(misc_ref_invalid) /* ba immheader_2   */
+	.quad local_label(misc_ref_invalid) /* bb nil   */
+	.quad local_label(misc_ref_invalid) /* bc tra_1   */
+	.quad local_label(misc_ref_invalid) /* bd misc   */
+	.quad local_label(misc_ref_invalid) /* be symbol   */
+	.quad local_label(misc_ref_invalid) /* bf function   */
+	/* c0-cf   */
+	.quad local_label(misc_ref_invalid) /* c0 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* c1 imm_1   */
+	.quad local_label(misc_ref_invalid) /* c2 imm_2   */
+	.quad local_label(misc_ref_invalid) /* c3 cons   */
+	.quad local_label(misc_ref_invalid) /* c4 tra_0   */
+	.quad local_label(misc_ref_invalid) /* c5 nodeheader_0   */
+	.quad local_label(misc_ref_invalid) /* c6 nodeheader_1   */
+	.quad local_label(misc_ref_string) /* c7 simple_base_string   */
+	.quad local_label(misc_ref_invalid) /* c8 odd_fixnum   */
+	.quad local_label(misc_ref_new_string) /* c9 new_string_1   */
+	.quad local_label(misc_ref_fixnum_vector) /* ca fixnum_vector   */
+	.quad local_label(misc_ref_invalid) /* cb nil   */
+	.quad local_label(misc_ref_invalid) /* cc tra_1   */
+	.quad local_label(misc_ref_invalid) /* cd misc   */
+	.quad local_label(misc_ref_invalid) /* ce symbol   */
+	.quad local_label(misc_ref_invalid) /* cf function   */
+	/* d0-df   */
+	.quad local_label(misc_ref_invalid) /* d0 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* d1 imm_1   */
+	.quad local_label(misc_ref_invalid) /* d2 imm_2   */
+	.quad local_label(misc_ref_invalid) /* d3 cons   */
+	.quad local_label(misc_ref_invalid) /* d4 tra_0   */
+	.quad local_label(misc_ref_invalid) /* d5 nodeheader_0   */
+	.quad local_label(misc_ref_invalid) /* d6 nodeheader_1   */
+	.quad local_label(misc_ref_s8)	/* d7 s8   */
+	.quad local_label(misc_ref_invalid) /* d8 odd_fixnum   */
+	.quad local_label(misc_ref_s32)	/* d9 s32   */
+	.quad local_label(misc_ref_s64)	/* da s64   */
+	.quad local_label(misc_ref_invalid) /* db nil   */
+	.quad local_label(misc_ref_invalid) /* dc tra_1   */
+	.quad local_label(misc_ref_invalid) /* dd misc   */
+	.quad local_label(misc_ref_invalid) /* de symbol   */
+	.quad local_label(misc_ref_invalid) /* df function   */
+	/* e0-ef   */
+	.quad local_label(misc_ref_invalid) /* e0 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* e1 imm_1   */
+	.quad local_label(misc_ref_invalid) /* e2 imm_2   */
+	.quad local_label(misc_ref_invalid) /* e3 cons   */
+	.quad local_label(misc_ref_invalid) /* e4 tra_0   */
+	.quad local_label(misc_ref_invalid) /* e5 nodeheader_0   */
+	.quad local_label(misc_ref_invalid) /* e6 nodeheader_1   */
+	.quad local_label(misc_ref_u8)	/* e7 u8   */
+	.quad local_label(misc_ref_invalid) /* e8 odd_fixnum   */
+	.quad local_label(misc_ref_u32)	/* e9 u32   */
+	.quad local_label(misc_ref_u64) /* ea u64   */
+	.quad local_label(misc_ref_invalid) /* eb nil   */
+	.quad local_label(misc_ref_invalid) /* ec tra_1   */
+	.quad local_label(misc_ref_invalid) /* ed misc   */
+	.quad local_label(misc_ref_invalid) /* ee symbol   */
+	.quad local_label(misc_ref_invalid) /* ef function   */
+	/* f0-ff   */
+	.quad local_label(misc_ref_invalid) /* f0 even_fixnum   */
+	.quad local_label(misc_ref_invalid) /* f1 imm_1   */
+	.quad local_label(misc_ref_invalid) /* f2 imm_2   */
+	.quad local_label(misc_ref_invalid) /* f3 cons   */
+	.quad local_label(misc_ref_invalid) /* f4 tra_0   */
+	.quad local_label(misc_ref_invalid) /* f5 nodeheader_0   */
+	.quad local_label(misc_ref_invalid) /* f6 nodeheader_1   */
+	.quad local_label(misc_ref_bit_vector) /* f7 bitvector   */
+	.quad local_label(misc_ref_invalid) /* f8 odd_fixnum   */
+	.quad local_label(misc_ref_single_float_vector) /* f9 single_float   */
+	.quad local_label(misc_ref_double_float_vector) /* fa double_float   */
+	.quad local_label(misc_ref_invalid) /* fb nil   */
+	.quad local_label(misc_ref_invalid) /* fc tra_1   */
+	.quad local_label(misc_ref_invalid) /* fd misc   */
+	.quad local_label(misc_ref_invalid) /* fe symbol   */
+	.quad local_label(misc_ref_invalid) /* ff function   */
+	
+	
+	/* Node vector.  Functions are funny: the first  N words  */
+	/* are treated as (UNSIGNED-BYTE 64), where N is the low  */
+	/* 32 bits of the first word.  */
+	
+local_label(misc_ref_function):		
+	__(movl misc_data_offset(%arg_y),%imm0_l)
+	__(shl $fixnumshift,%imm0)
+	__(rcmpq(%arg_z,%imm0))
+	__(jb local_label(misc_ref_u64))
+	/* Lisp-object element: just load it; no boxing needed. */
+local_label(misc_ref_node):
+	__(movq misc_data_offset(%arg_y,%arg_z),%arg_z)
+	__(ret)
+local_label(misc_ref_u64):
+	__(movq misc_data_offset(%arg_y,%arg_z),%imm0)
+	__(jmp _SPmakeu64)
+local_label(misc_ref_double_float_vector):
+	__(movsd misc_data_offset(%arg_y,%arg_z),%fp1)
+	__(movq $double_float_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,double_float.size))
+	__(movsd %fp1,double_float.value(%arg_z))
+	__(ret)
+local_label(misc_ref_fixnum_vector):	
+	__(movq misc_data_offset(%arg_y,%arg_z),%imm0)
+        __(box_fixnum(%imm0,%arg_z))
+        __(ret)
+local_label(misc_ref_s64):	
+	__(movq misc_data_offset(%arg_y,%arg_z),%imm0)
+	__(jmp _SPmakes64)
+	/* 32-bit elements: fixnum index >> 1 == element index * 4. */
+local_label(misc_ref_u32):
+	__(movq %arg_z,%imm0)
+	__(shr $1,%imm0)
+	__(movl misc_data_offset(%arg_y,%imm0),%imm0_l)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_s32):
+	__(movq %arg_z,%imm0)
+	__(shr $1,%imm0)
+	__(movslq misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+	/* Single-floats are immediates on x8664: value in the high 32 bits, */
+	/* subtag_single_float in the low byte. */
+local_label(misc_ref_single_float_vector):
+	__(movq %arg_z,%imm0)
+	__(shr $1,%imm0)
+	__(movsd misc_data_offset(%arg_y,%imm0),%fp1)
+	__(movd %fp1,%imm0_l)
+	__(shl $32,%imm0)
+	__(lea subtag_single_float(%imm0),%arg_z)
+	__(ret)
+	/* 8-bit elements: fixnum index >> 3 == element index. */
+local_label(misc_ref_u8):
+	__(movq %arg_z,%imm0)
+	__(shr $3,%imm0)
+	__(movzbl misc_data_offset(%arg_y,%imm0),%imm0_l)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_s8):	
+	__(movq %arg_z,%imm0)
+	__(shr $3,%imm0)
+	__(movsbq misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_string):
+	__(movq %arg_z,%imm0)
+	__(shr $3,%imm0)
+	__(movzbl misc_data_offset(%arg_y,%imm0),%imm0_l)
+	__(shlq $charcode_shift,%imm0)
+	__(leaq subtag_character(%imm0),%arg_z)
+	__(ret)
+local_label(misc_ref_new_string):
+	__(movq %arg_z,%imm0)
+	__(shr $1,%imm0)
+	__(movl misc_data_offset(%arg_y,%imm0),%imm0_l)
+	__(shlq $charcode_shift,%imm0)
+	__(leaq subtag_character(%imm0),%arg_z)
+	__(ret)        
+	/* 16-bit elements: fixnum index >> 2 == element index * 2. */
+local_label(misc_ref_u16):	
+	__(movq %arg_z,%imm0)
+	__(shrq $2,%imm0)
+	__(movzwl misc_data_offset(%arg_y,%imm0),%imm0_l)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+local_label(misc_ref_s16):	
+	__(movq %arg_z,%imm0)
+	__(shrq $2,%imm0)
+	__(movswq misc_data_offset(%arg_y,%imm0),%imm0)
+	__(box_fixnum(%imm0,%arg_z))
+	__(ret)
+	/* Bit vector: test the bit, box the 0/1 result as a fixnum. */
+local_label(misc_ref_bit_vector):
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(btq %imm0,misc_data_offset(%arg_y))
+	__(setc %imm0_b)
+	__(movzbl %imm0_b,%imm0_l)
+	__(imull $fixnumone,%imm0_l,%arg_z_l)
+	__(ret)
+local_label(misc_ref_invalid):
+	__(movq $XBADVEC,%arg_x)
+	__(set_nargs(3))
+	__(jmp _SPksignalerr)
+_endfn(C(misc_ref_common))
+
+/* like misc_ref, only the boxed subtag is in arg_x.   */
+					
+_spentry(subtag_misc_ref)
+	/* %arg_y = misc-tagged object, %arg_z = fixnum index,  */
+	/* %arg_x = boxed subtag.  Typecheck object and index,  */
+	/* bounds-check against the element count in the header, then  */
+	/* tail-call misc_ref_common with the unboxed subtag in %imm1.  */
+	__(movb $tagmask,%imm0_b)
+	__(andb %arg_y_b,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)	/* not a misc-tagged object  */
+	__(testb $fixnummask,%arg_z_b)
+	__(jne 1f)	/* index is not a fixnum  */
+        __(movq misc_header_offset(%arg_y),%imm0)
+        __(xorb %imm0_b,%imm0_b)	/* clear subtag byte of header  */
+	__(shrq $num_subtag_bits-fixnumshift,%imm0)	/* element count, as fixnum  */
+	__(cmpq %imm0,%arg_z)
+	__(jae 2f)	/* index out of bounds  */
+	__(unbox_fixnum(%arg_x,%imm1))
+	__(jmp C(misc_ref_common))
+0:      __(uuo_error_reg_not_tag(Rarg_y,tag_misc))
+1:      __(uuo_error_reg_not_fixnum(Rarg_z))
+2:      __(uuo_error_vector_bounds(Rarg_z,Rarg_y))
+                        
+_endsubp(subtag_misc_ref)
+
+_spentry(subtag_misc_set)
+	/* Like misc_set, but the boxed subtag is supplied in %temp0.  */
+	/* %arg_x = misc-tagged object, %arg_y = fixnum index,  */
+	/* %arg_z = new value.  Typecheck, bounds-check, then tail-call  */
+	/* misc_set_common with the unboxed subtag in %imm1.  */
+	__(movb $tagmask,%imm0_b)
+	__(andb %arg_x_b,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)	/* not a misc-tagged object  */
+	__(testb $fixnummask,%arg_y_b)
+	__(jne 1f)	/* index is not a fixnum  */
+	__(movq misc_header_offset(%arg_x),%imm0)
+        __(xorb %imm0_b,%imm0_b)	/* clear subtag byte of header  */
+	__(shrq $num_subtag_bits-fixnumshift,%imm0)	/* element count, as fixnum  */
+	__(cmpq %imm0,%arg_y)
+	__(jae 2f)	/* index out of bounds  */
+	__(unbox_fixnum(%temp0,%imm1))
+	__(jmp C(misc_set_common))
+0:      __(uuo_error_reg_not_tag(Rarg_x,tag_misc))
+1:      __(uuo_error_reg_not_fixnum(Rarg_y))
+2:      __(uuo_error_vector_bounds(Rarg_y,Rarg_x))                        
+_endsubp(subtag_misc_set)
+
+_spentry(misc_set)
+	/* %arg_x = misc-tagged object, %arg_y = fixnum index,  */
+	/* %arg_z = new value.  Typecheck object and index, bounds-check,  */
+	/* load the subtag from the object itself, then tail-call  */
+	/* misc_set_common with the unboxed subtag in %imm1_b.  */
+	__(movb $tagmask,%imm0_b)
+	__(andb %arg_x_b,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 0f)	/* not a misc-tagged object  */
+	__(testb $fixnummask,%arg_y_b)
+	__(jne 1f)	/* index is not a fixnum  */
+	__(movq misc_header_offset(%arg_x),%imm0)
+        __(xorb %imm0_b,%imm0_b)	/* clear subtag byte of header  */
+	__(shrq $num_subtag_bits-fixnumshift,%imm0)	/* element count, as fixnum  */
+	__(cmpq %imm0,%arg_y)
+	__(jae 2f)	/* index out of bounds  */
+	__(movb misc_subtag_offset(%arg_x),%imm1_b)
+	__(jmp C(misc_set_common))
+	
+0:      __(uuo_error_reg_not_tag(Rarg_x,tag_misc))
+1:      __(uuo_error_reg_not_fixnum(Rarg_y))
+2:      __(uuo_error_vector_bounds(Rarg_y,Rarg_x))                        
+_endsubp(misc_set)
+		
+_startfn(C(misc_set_common))
+	/* On entry: %arg_x = misc-tagged object, %arg_y = fixnum index,  */
+	/* %arg_z = new value, %imm1_b = unboxed subtag.  Dispatch through  */
+	/* a 256-entry jump table indexed by subtag: node-vector subtags  */
+	/* jump to _SPgvset, immediate-vector subtags to a typechecking  */
+	/* store routine below, anything else to misc_set_invalid.  */
+	__(movzbl %imm1_b,%imm1_l)
+        __(lea local_label(misc_set_jmp)(%rip),%imm2)
+	__(jmp *(%imm2,%imm1,8))
+	.p2align 3
+local_label(misc_set_jmp):		
+	/* 00-0f   */
+	.quad local_label(misc_set_invalid) /* 00 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 01 imm_1   */
+	.quad local_label(misc_set_invalid) /* 02 imm_2   */
+	.quad local_label(misc_set_invalid) /* 03 cons   */
+	.quad local_label(misc_set_invalid) /* 04 tra_0   */
+	.quad local_label(misc_set_invalid) /* 05 nodeheader_0   */
+	.quad local_label(misc_set_invalid) /* 06 nodeheader_1   */
+	.quad local_label(misc_set_invalid) /* 07 immheader_0   */
+	.quad local_label(misc_set_invalid) /* 08 odd_fixnum   */
+	.quad local_label(misc_set_invalid) /* 09 immheader_1   */
+	.quad local_label(misc_set_invalid) /* 0a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 0b nil   */
+	.quad local_label(misc_set_invalid) /* 0c tra_1   */
+	.quad local_label(misc_set_invalid) /* 0d misc   */
+	.quad local_label(misc_set_invalid) /* 0e symbol   */
+	.quad local_label(misc_set_invalid) /* 0f function   */
+	/* 10-1f   */
+	.quad local_label(misc_set_invalid)	/* 10 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 11 imm_1   */
+	.quad local_label(misc_set_invalid) /* 12 imm_2   */
+	.quad local_label(misc_set_invalid) /* 13 cons   */
+	.quad local_label(misc_set_invalid)	/* 14 tra_0   */
+	.quad _SPgvset /* 15 symbol_vector   */
+	.quad _SPgvset /* 16 ratio   */
+	.quad local_label(misc_set_invalid) /* 17 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 18 odd_fixnum   */
+	.quad local_label(misc_set_u32)	/* 19 bignum   */
+	.quad local_label(misc_set_u64) /* 1a macptr   */
+	.quad local_label(misc_set_invalid) /* 1b nil   */
+	.quad local_label(misc_set_invalid)	/* 1c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 1d misc   */
+	.quad local_label(misc_set_invalid)	/* 1e symbol   */
+	.quad local_label(misc_set_invalid)	/* 1f function   */
+	/* 20-2f   */
+	.quad local_label(misc_set_invalid)	/* 20 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 21 imm_1   */
+	.quad local_label(misc_set_invalid) /* 22 imm_2   */
+	.quad local_label(misc_set_invalid) /* 23 cons   */
+	.quad local_label(misc_set_invalid)	/* 24 tra_0   */
+	.quad _SPgvset /* 25 catch_frame   */
+	.quad _SPgvset /* 26 complex   */
+	.quad local_label(misc_set_invalid) /* 27 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 28 odd_fixnum   */
+	.quad local_label(misc_set_u32)	/* 29 double_float   */
+	.quad local_label(misc_set_u64)  /* 2a dead_macptr   */
+	.quad local_label(misc_set_invalid) /* 2b nil   */
+	.quad local_label(misc_set_invalid)	/* 2c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 2d misc   */
+	.quad local_label(misc_set_invalid)	/* 2e symbol   */
+	.quad local_label(misc_set_invalid)	/* 2f function   */
+	/* 30-3f   */
+	.quad local_label(misc_set_invalid)	/* 30 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 31 imm_1   */
+	.quad local_label(misc_set_invalid) /* 32 imm_2   */
+	.quad local_label(misc_set_invalid) /* 33 cons   */
+	.quad local_label(misc_set_invalid)	/* 34 tra_0   */
+	.quad _SPgvset /* 35 hash_vector   */
+	.quad _SPgvset /* 36 struct   */
+	.quad local_label(misc_set_invalid) /* 37 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 38 odd_fixnum   */
+	.quad local_label(misc_set_u32)	/* 39 xcode_vector   */
+	.quad local_label(misc_set_invalid)  /* 3a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 3b nil   */
+	.quad local_label(misc_set_invalid)	/* 3c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 3d misc   */
+	.quad local_label(misc_set_invalid)	/* 3e symbol   */
+	.quad local_label(misc_set_invalid)	/* 3f function   */
+	/* 40-4f   */
+	.quad local_label(misc_set_invalid)	/* 40 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 41 imm_1   */
+	.quad local_label(misc_set_invalid) /* 42 imm_2   */
+	.quad local_label(misc_set_invalid) /* 43 cons   */
+	.quad local_label(misc_set_invalid)	/* 44 tra_0   */
+	.quad _SPgvset /* 45 pool   */
+	.quad _SPgvset /* 46 istruct   */
+	.quad local_label(misc_set_invalid) /* 47 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 48 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* 49 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* 4a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 4b nil   */
+	.quad local_label(misc_set_invalid)	/* 4c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 4d misc   */
+	.quad local_label(misc_set_invalid)	/* 4e symbol   */
+	.quad local_label(misc_set_invalid)	/* 4f function   */
+	/* 50-5f   */
+	.quad local_label(misc_set_invalid)	/* 50 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 51 imm_1   */
+	.quad local_label(misc_set_invalid) /* 52 imm_2   */
+	.quad local_label(misc_set_invalid) /* 53 cons   */
+	.quad local_label(misc_set_invalid)	/* 54 tra_0   */
+	.quad _SPgvset /* 55 weak   */
+	.quad _SPgvset /* 56 value_cell   */
+	.quad local_label(misc_set_invalid) /* 57 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 58 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* 59 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* 5a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 5b nil   */
+	.quad local_label(misc_set_invalid)	/* 5c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 5d misc   */
+	.quad local_label(misc_set_invalid)	/* 5e symbol   */
+	.quad local_label(misc_set_invalid)	/* 5f function   */
+	/* 60-6f   */
+	.quad local_label(misc_set_invalid)	/* 60 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 61 imm_1   */
+	.quad local_label(misc_set_invalid) /* 62 imm_2   */
+	.quad local_label(misc_set_invalid) /* 63 cons   */
+	.quad local_label(misc_set_invalid)	/* 64 tra_0   */
+	.quad _SPgvset /* 65 package   */
+	.quad _SPgvset /* 66 xfunction   */
+	.quad local_label(misc_set_invalid) /* 67 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 68 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* 69 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* 6a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 6b nil   */
+	.quad local_label(misc_set_invalid)	/* 6c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 6d misc   */
+	.quad local_label(misc_set_invalid)	/* 6e symbol   */
+	.quad local_label(misc_set_invalid)	/* 6f function   */
+	/* 70-7f   */
+	.quad local_label(misc_set_invalid)	/* 70 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 71 imm_1   */
+	.quad local_label(misc_set_invalid) /* 72 imm_2   */
+	.quad local_label(misc_set_invalid) /* 73 cons   */
+	.quad local_label(misc_set_invalid)	/* 74 tra_0   */
+	.quad _SPgvset /* 75 slot_vector   */
+	.quad _SPgvset /* 76 lock   */
+	.quad local_label(misc_set_invalid) /* 77 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 78 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* 79 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* 7a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 7b nil   */
+	.quad local_label(misc_set_invalid)	/* 7c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 7d misc   */
+	.quad local_label(misc_set_invalid)	/* 7e symbol   */
+	.quad local_label(misc_set_invalid)	/* 7f function   */
+	/* 80-8f   */
+	.quad local_label(misc_set_invalid)	/* 80 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 81 imm_1   */
+	.quad local_label(misc_set_invalid) /* 82 imm_2   */
+	.quad local_label(misc_set_invalid) /* 83 cons   */
+	.quad local_label(misc_set_invalid)	/* 84 tra_0   */
+	.quad _SPgvset /* 85 lisp_thread   */
+	.quad _SPgvset /* 86 instance   */
+	.quad local_label(misc_set_invalid) /* 87 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 88 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* 89 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* 8a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 8b nil   */
+	.quad local_label(misc_set_invalid)	/* 8c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 8d misc   */
+	.quad local_label(misc_set_invalid)	/* 8e symbol   */
+	.quad local_label(misc_set_invalid)	/* 8f function   */
+	/* 90-9f   */
+	.quad local_label(misc_set_invalid)	/* 90 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* 91 imm_1   */
+	.quad local_label(misc_set_invalid) /* 92 imm_2   */
+	.quad local_label(misc_set_invalid) /* 93 cons   */
+	.quad local_label(misc_set_invalid)	/* 94 tra_0   */
+	.quad local_label(misc_set_function) /* 95 function_vector   */
+	.quad local_label(misc_set_invalid) /* 96 nodeheader_1   */
+	.quad local_label(misc_set_invalid) /* 97 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* 98 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* 99 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* 9a immheader_2   */
+	.quad local_label(misc_set_invalid) /* 9b nil   */
+	.quad local_label(misc_set_invalid)	/* 9c tra_1   */
+	.quad local_label(misc_set_invalid)	/* 9d misc   */
+	.quad local_label(misc_set_invalid)	/* 9e symbol   */
+	.quad local_label(misc_set_invalid)	/* 9f function   */
+	/* a0-af   */
+	.quad local_label(misc_set_invalid)	/* a0 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* a1 imm_1   */
+	.quad local_label(misc_set_invalid) /* a2 imm_2   */
+	.quad local_label(misc_set_invalid) /* a3 cons   */
+	.quad local_label(misc_set_invalid)	/* a4 tra_0   */
+	.quad _SPgvset /* a5 arrayH   */
+	.quad _SPgvset /* a6 vectorH   */
+	.quad local_label(misc_set_s16)	/* a7 s16   */
+	.quad local_label(misc_set_invalid)	/* a8 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* a9 immheader_1   */
+	.quad local_label(misc_set_invalid)  /* aa immheader_2   */
+	.quad local_label(misc_set_invalid) /* ab nil   */
+	.quad local_label(misc_set_invalid)	/* ac tra_1   */
+	.quad local_label(misc_set_invalid)	/* ad misc   */
+	.quad local_label(misc_set_invalid)	/* ae symbol   */
+	.quad local_label(misc_set_invalid)	/* af function   */
+	/* b0-bf   */
+	.quad local_label(misc_set_invalid)	/* b0 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* b1 imm_1   */
+	.quad local_label(misc_set_invalid) /* b2 imm_2   */
+	.quad local_label(misc_set_invalid) /* b3 cons   */
+	.quad local_label(misc_set_invalid)	/* b4 tra_0   */
+	.quad local_label(misc_set_invalid) /* b5 nodeheader_0   */
+	.quad _SPgvset /* b6 simple_vector   */
+	.quad local_label(misc_set_u16) /* b7 immheader_0   */
+	.quad local_label(misc_set_invalid)	/* b8 odd_fixnum   */
+	.quad local_label(misc_set_invalid)	/* b9 immheader_1   */
+	.quad local_label(misc_set_invalid) /* ba immheader_2   */
+	.quad local_label(misc_set_invalid) /* bb nil   */
+	.quad local_label(misc_set_invalid)	/* bc tra_1   */
+	.quad local_label(misc_set_invalid)	/* bd misc   */
+	.quad local_label(misc_set_invalid)	/* be symbol   */
+	.quad local_label(misc_set_invalid)	/* bf function   */
+	/* c0-cf   */
+	.quad local_label(misc_set_invalid)	/* c0 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* c1 imm_1   */
+	.quad local_label(misc_set_invalid) /* c2 imm_2   */
+	.quad local_label(misc_set_invalid) /* c3 cons   */
+	.quad local_label(misc_set_invalid)	/* c4 tra_0   */
+	.quad local_label(misc_set_invalid) /* c5 nodeheader_0   */
+	.quad local_label(misc_set_invalid) /* c6 nodeheader_1   */
+	.quad local_label(misc_set_string) /* c7 simple_base_string   */
+	.quad local_label(misc_set_invalid)	/* c8 odd_fixnum   */
+	.quad local_label(misc_set_new_string)	/* c9 new_string   */
+	.quad local_label(misc_set_fixnum_vector)  /* ca fixnum_vector   */
+	.quad local_label(misc_set_invalid) /* cb nil   */
+	.quad local_label(misc_set_invalid)	/* cc tra_1   */
+	.quad local_label(misc_set_invalid)	/* cd misc   */
+	.quad local_label(misc_set_invalid)	/* ce symbol   */
+	.quad local_label(misc_set_invalid)	/* cf function   */
+	/* d0-df   */
+	.quad local_label(misc_set_invalid)	/* d0 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* d1 imm_1   */
+	.quad local_label(misc_set_invalid) /* d2 imm_2   */
+	.quad local_label(misc_set_invalid) /* d3 cons   */
+	.quad local_label(misc_set_invalid)	/* d4 tra_0   */
+	.quad local_label(misc_set_invalid) /* d5 nodeheader_0   */
+	.quad local_label(misc_set_invalid) /* d6 nodeheader_1   */
+	.quad local_label(misc_set_s8)	/* d7 s8   */
+	.quad local_label(misc_set_invalid)	/* d8 odd_fixnum   */
+	.quad local_label(misc_set_s32)	/* d9 s32   */
+	.quad local_label(misc_set_s64)	/* da s64   */
+	.quad local_label(misc_set_invalid) /* db nil   */
+	.quad local_label(misc_set_invalid)	/* dc tra_1   */
+	.quad local_label(misc_set_invalid)	/* dd misc   */
+	.quad local_label(misc_set_invalid)	/* de symbol   */
+	.quad local_label(misc_set_invalid)	/* df function   */
+	/* e0-ef   */
+	.quad local_label(misc_set_invalid)	/* e0 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* e1 imm_1   */
+	.quad local_label(misc_set_invalid) /* e2 imm_2   */
+	.quad local_label(misc_set_invalid) /* e3 cons   */
+	.quad local_label(misc_set_invalid)	/* e4 tra_0   */
+	.quad local_label(misc_set_invalid) /* e5 nodeheader_0   */
+	.quad local_label(misc_set_invalid) /* e6 nodeheader_1   */
+	.quad local_label(misc_set_u8)	/* e7 u8   */
+	.quad local_label(misc_set_invalid)	/* e8 odd_fixnum   */
+	.quad local_label(misc_set_u32)	/* e9 u32   */
+	.quad local_label(misc_set_u64) /* ea u64   */
+	.quad local_label(misc_set_invalid) /* eb nil   */
+	.quad local_label(misc_set_invalid)	/* ec tra_1   */
+	.quad local_label(misc_set_invalid)	/* ed misc   */
+	.quad local_label(misc_set_invalid)	/* ee symbol   */
+	.quad local_label(misc_set_invalid)	/* ef function   */
+	/* f0-ff   */
+	.quad local_label(misc_set_invalid)	/* f0 even_fixnum   */
+	.quad local_label(misc_set_invalid) /* f1 imm_1   */
+	.quad local_label(misc_set_invalid) /* f2 imm_2   */
+	.quad local_label(misc_set_invalid) /* f3 cons   */
+	.quad local_label(misc_set_invalid)	/* f4 tra_0   */
+	.quad local_label(misc_set_invalid) /* f5 nodeheader_0   */
+	.quad local_label(misc_set_invalid) /* f6 nodeheader_1   */
+	.quad local_label(misc_set_bit_vector) /* f7 bitvector   */
+	.quad local_label(misc_set_invalid)	/* f8 odd_fixnum   */
+	.quad local_label(misc_set_single_float_vector) /* f9 single_float   */
+	.quad local_label(misc_set_double_float_vector) /* fa double_float   */
+	.quad local_label(misc_set_invalid) /* fb nil   */
+	.quad local_label(misc_set_invalid)	/* fc tra_1   */
+	.quad local_label(misc_set_invalid)	/* fd misc   */
+	.quad local_label(misc_set_invalid)	/* fe symbol   */
+	.quad local_label(misc_set_invalid)	/* ff function   */
+
+local_label(misc_set_function):			
+	/* Functions are funny: the first  N words  */
+	/* are treated as (UNSIGNED-BYTE 64), where N is the low  */
+	/* 32 bits of the first word.   */
+	__(movl misc_data_offset(%arg_x),%imm0_l)
+	__(shl $fixnumshift,%imm0)
+	__(rcmpq(%arg_y,%imm0))
+	__(jae _SPgvset)
+local_label(misc_set_u64):
+	/* Value must be a non-negative integer that fits in 64 bits:  */
+	/* a non-negative fixnum, or a 2-digit bignum with a clear sign  */
+	/* bit, or a 3-digit bignum whose top digit is zero.  */
+	__(movq $~(target_most_positive_fixnum << fixnumshift),%imm0)
+	__(testq %arg_z,%imm0)
+	__(movq %arg_z,%imm0)
+	__(jne 1f)
+	__(sarq $fixnumshift,%imm0)
+	__(jmp 9f)
+1:	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movq misc_header_offset(%arg_z),%imm0)
+	__(cmpq $three_digit_bignum_header,%imm0)
+	__(je 3f)
+	__(cmpq $two_digit_bignum_header,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(movq misc_data_offset(%arg_z),%imm0)
+	__(testq %imm0,%imm0)
+	__(js local_label(misc_set_bad))	/* negative: doesn't fit in u64  */
+	__(jmp 9f)
+3:	__(movq misc_data_offset(%arg_z),%imm0)
+	__(cmpl $0,misc_data_offset+8(%arg_z))	/* top digit must be 0  */
+	__(jne local_label(misc_set_bad))
+9:	__(movq %imm0,misc_data_offset(%arg_x,%arg_y))
+	__(ret)
+local_label(misc_set_fixnum_vector):
+	/* Value must be a fixnum; store it unboxed.  */
+	__(movq %arg_z,%imm0)
+	__(sarq $fixnumshift,%imm0)
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(movq %imm0,misc_data_offset(%arg_x,%arg_y))
+	__(ret)	
+local_label(misc_set_s64):
+	/* Value must be a fixnum or a 2-digit bignum.  */
+	__(movq %arg_z,%imm0)
+	__(sarq $fixnumshift,%imm0)
+	__(testb $fixnummask,%arg_z_b)
+	__(je 9f)
+1:	__(movb %arg_z_b,%imm0_b)
+	__(andb $tagmask,%imm0_b)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movq misc_header_offset(%arg_z),%imm0)
+	__(cmpq $two_digit_bignum_header,%imm0)
+	__(movq misc_data_offset(%arg_z),%imm0)
+	__(jne local_label(misc_set_bad))
+9:	__(movq %imm0,misc_data_offset(%arg_x,%arg_y))
+	__(ret)	
+local_label(misc_set_bad):
+	/* Value not of the element type: signal XNOTELT with  */
+	/* (value vector index) rearranged into the arg registers.  */
+	__(movq %arg_z,%arg_y)
+	__(movq %arg_x,%arg_z)
+	__(movq $XNOTELT,%arg_x)
+	__(set_nargs(3))
+	__(jmp _SPksignalerr)
+local_label(misc_set_double_float_vector):	
+	/* Value must be a double-float; store its unboxed bits.  */
+	__(extract_lisptag(%arg_z,%imm0))
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(cmpb $subtag_double_float,%imm0_b)
+	__(jne local_label(misc_set_bad))
+	__(movq double_float.value(%arg_z),%imm0)
+	__(movq %imm0,misc_dfloat_offset(%arg_x,%arg_y))
+	__(ret)
+local_label(misc_set_s32):	
+	/* Value must be a fixnum that fits in 32 signed bits  */
+	/* (checked by sign-extending from bit 31 and comparing).  */
+	__(movq %arg_z,%imm0)
+	__(movq %arg_y,%imm1)
+	__(shlq $64-(32+fixnumshift),%imm0)
+	__(shrq $1,%imm1)
+	__(sarq $64-(32+fixnumshift),%imm0)
+	__(cmpq %imm0,%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(shr $fixnumshift,%imm0)
+	__(movl %imm0_l,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_single_float_vector):
+	/* Value must be an immediate single-float; store its high  */
+	/* 32 bits.  */
+	__(cmpb $tag_single_float,%arg_z_b)
+	__(movq %arg_z,%imm0)
+	__(movq %arg_y,%imm1)
+	__(jne local_label(misc_set_bad))
+	__(shrq $1,%imm1)
+	__(shr $32,%imm0)
+	__(movl %imm0_l,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_u32):
+	/* Value must be a fixnum in [0, 2^32).  */
+	__(movq %arg_y,%imm1)	
+	__(movq $~(0xffffffff<<fixnumshift),%imm0)
+	__(shrq $1,%imm1)
+	__(testq %imm0,%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(movl %imm0_l,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_bit_vector):	
+	/* Value must be the fixnum 0 or 1; set or clear the bit.  */
+	__(testq $~fixnumone,%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(unbox_fixnum(%arg_y,%imm0))
+	__(testb %arg_z_b,%arg_z_b)
+	__(je local_label(misc_set_clr_bit))
+local_label(misc_set_set_bit):	
+	__(btsq %imm0,misc_data_offset(%arg_x))
+	__(ret)
+local_label(misc_set_clr_bit):	
+	__(btrq %imm0,misc_data_offset(%arg_x))
+	__(ret)
+local_label(misc_set_u8):	
+	/* Value must be a fixnum in [0, 256).  */
+	__(testq $~(0xff<<fixnumshift),%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(movq %arg_y,%imm1)
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(shrq $3,%imm1)
+	__(movb %imm0_b,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_s8):
+	/* Value must be a fixnum that fits in 8 signed bits.  */
+	__(movq %arg_z,%imm0)
+	__(shlq $64-(8+fixnumshift),%imm0)	
+	__(sarq $64-(8+fixnumshift),%imm0)
+	__(cmpq %arg_z,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(movq %arg_y,%imm1)
+	__(shrq $fixnumshift,%imm0)
+	__(shrq $3,%imm1)
+	__(movb %imm0_b,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_string):
+	/* Value must be a character; store the low 8 bits of its code.  */
+	__(cmpb $subtag_character,%arg_z_b)
+	__(movq %arg_z,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(movq %arg_y,%imm1)
+	__(shrq $charcode_shift,%imm0)
+	__(shrq $3,%imm1)
+	__(movb %imm0_b,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_new_string):
+	/* Value must be a character; store its 32-bit code.  */
+	__(cmpb $subtag_character,%arg_z_b)
+	__(movq %arg_z,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(movq %arg_y,%imm1)
+	__(shrq $charcode_shift,%imm0)
+	__(shrq $1,%imm1)
+	__(movl %imm0_l,misc_data_offset(%arg_x,%imm1))
+	__(ret)        
+local_label(misc_set_s16):	
+	/* Value must be a fixnum that fits in 16 signed bits.  */
+	__(movq %arg_z,%imm0)
+	__(movq %arg_y,%imm1)
+	__(shlq $64-(16+fixnumshift),%imm0)	
+	__(shrq $2,%imm1)
+	__(sarq $64-(16+fixnumshift),%imm0)
+	__(cmpq %arg_z,%imm0)
+	__(jne local_label(misc_set_bad))
+	__(testb $fixnummask,%arg_z_b)
+	__(jne local_label(misc_set_bad))
+	__(shrq $fixnumshift,%imm0)
+	__(movw %imm0_w,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_u16):
+	/* Value must be a fixnum in [0, 2^16).  */
+	__(movq %arg_y,%imm1)
+	__(testq $~(0xffff<<fixnumshift),%arg_z)
+	__(jne local_label(misc_set_bad))
+	__(shrq $2,%imm1)
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(movw %imm0_w,misc_data_offset(%arg_x,%imm1))
+	__(ret)
+local_label(misc_set_invalid):
+	/* Object is not a settable vector type: signal XSETBADVEC.  */
+	__(push $XSETBADVEC)
+	__(set_nargs(4))
+	__(jmp _SPksignalerr)
+_endfn(C(misc_set_common))
+	
+/* ret1valn returns "1 multiple value" when a called function does not   */
+/* return multiple values.  Its presence on the stack (as a return address)   */
+/* identifies the stack frame to code which returns multiple values.   */
+
<br>
+_spentry(Fret1valn)
+	.globl C(ret1valn)
+__(tra(C(ret1valn)))
+	/* Convert a single-value return into the multiple-value  */
+	/* protocol: replace the pushed return address with %arg_z,  */
+	/* report 1 value, and return to the saved address.  */
+        __(movq (%rsp),%ra0)
+        __(movq %arg_z,(%rsp))
+	__(set_nargs(1))
+	__(jmpq *%ra0)
+_endsubp(Fret1valn)
+	
+
+_spentry(nvalret)
+	.globl C(nvalret)			
+C(nvalret):	
+	/* Return %nargs stacked values.  If the frame's saved return  */
+	/* address is ret1val_addr, the caller expects multiple values  */
+	/* and we copy them down adjacent to the real return address;  */
+	/* otherwise return a single value (NIL if %nargs is 0).  */
+	__(ref_global(ret1val_addr,%temp1))
+	__(cmpq lisp_frame.savera0(%rbp),%temp1)
+	__(je 1f)
+	__(testl %nargs,%nargs)
+	__(movl $nil_value,%arg_z_l)
+	__(cmovneq -node_size(%rsp,%nargs_q),%arg_z)
+	__(leaveq)
+        __(ret)
+
+	
+/* actually need to return values ; always need to copy   */
+1:	__(leaq 2*node_size(%rbp),%imm1)
+	__(movq (%imm1),%ra0)	/* real return address beyond ret1val_addr  */
+	__(addq $node_size,%imm1)
+	__(movq 0(%rbp),%rbp)	/* pop the frame  */
+	__(leaq (%rsp,%nargs_q),%temp0)
+	__(xorl %imm0_l,%imm0_l)	/* bytes copied so far  */
+	__(jmp 3f)
+2:	__(movq -node_size(%temp0),%temp1)
+	__(subq $node_size,%temp0)
+	__(addl $node_size,%imm0_l)
+	__(movq %temp1,-node_size(%imm1))
+	__(subq $node_size,%imm1)
+3:	__(cmpl %imm0_l,%nargs)  ;
+	__(jne 2b)
+	__(movq %imm1,%rsp)	/* values now atop the adjusted stack  */
+	__(jmp *%ra0)	
+_endsubp(nvalret)
+	
+_spentry(jmpsym)
+	/* Tail-call the function named by the fname in %fname.  */
+	__(jump_fname())
+_endsubp(jmpsym)
+
+_spentry(jmpnfn)
+	/* Tail-call the (known-to-be) function object in %temp0.  */
+	__(movq %temp0,%fn)
+	__(jmp *%fn)
+_endsubp(jmpnfn)
+
+_spentry(funcall)
+	/* Funcall %temp0, which may be a symbol or a function.  */
+	__(do_funcall())
+_endsubp(funcall)
+
+_spentry(mkcatch1v)
+	/* Create a single-value catch frame (mvflag = 0).  */
+	__(nMake_Catch(0))
+	__(ret)
+_endsubp(mkcatch1v)
+
+_spentry(mkunwind)
+	/* Create an unwind-protect frame: a catch frame with an  */
+	/* unbound-marker tag (via $undefined in %arg_z), then return  */
+	/* through %ra0.  */
+	__(movq $undefined,%arg_z)
+	__(Make_Catch(fixnumone))
+	__(jmp *%ra0)
+_endsubp(mkunwind)
+        
+/* this takes a return address in %ra0; it's "new" in that it does the
+   double binding of *interrupt-level* out-of-line */
+_spentry(nmkunwind)
+	/* Push a special binding of *interrupt-level* (saving the old  */
+	/* value on the db_link chain and setting it to -1), build the  */
+	/* unwind-protect catch frame, then rebind the interrupt level  */
+	/* to the saved value via _SPbind_interrupt_level.  */
+	__(movq rcontext(tcr.tlb_pointer),%arg_x)
+        __(movq INTERRUPT_LEVEL_BINDING_INDEX(%arg_x),%arg_y)
+	__(push %arg_y)	/* old value  */
+	__(push $INTERRUPT_LEVEL_BINDING_INDEX)
+	__(push rcontext(tcr.db_link))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(movq $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%arg_x))
+	__(movq $undefined,%arg_z)
+	__(Make_Catch(fixnumone))
+        __(movq %arg_y,%arg_z)
+        __(jmp _SPbind_interrupt_level)
+_endsubp(nmkunwind)
+
+_spentry(mkcatchmv)
+	/* Create a multiple-value catch frame (mvflag = fixnum 1).  */
+	__(nMake_Catch(fixnumone))
+	__(ret)
+_endsubp(mkcatchmv)
+        
+_spentry(throw)
+	/* Throw %nargs stacked values to the catch tag found at  */
+	/* (%rsp,%nargs_q).  Walk the catch-frame chain counting  */
+	/* intervening frames (fixnum count in %imm0); signal an error  */
+	/* if no frame's tag matches.  */
+	__(movq rcontext(tcr.catch_top),%imm1)
+	__(xorl %imm0_l,%imm0_l)
+	__(movq (%rsp,%nargs_q),%temp0)	/* temp0 = tag   */
+	__(jmp local_label(_throw_test))
+local_label(_throw_loop):
+	__(cmpq %temp0,catch_frame.catch_tag(%imm1))
+	__(je local_label(_throw_found))
+	__(movq catch_frame.link(%imm1),%imm1)
+	__(addq $fixnum_one,%imm0)
+local_label(_throw_test):
+	__(testq %imm1,%imm1)
+	__(jne local_label(_throw_loop))
+	/* No catch frame with this tag: signal, and retry if continued.  */
+        __(push %ra0)
+	__(uuo_error_reg_not_tag(Rtemp0,subtag_catch_frame))
+        __(pop %ra0)
+	__(jmp _SPthrow)
+local_label(_throw_found):	
+	/* Dispatch on the frame's mvflag: single value vs. multiple.  */
+	__(testb $fulltagmask,catch_frame.mvflag(%imm1))
+	__(jne local_label(_throw_multiple))
+	__(testl %nargs,%nargs)
+	__(movl $nil_value,%arg_z_l)
+	__(je local_label(_throw_one_value))
+	__(movq -node_size(%rsp,%nargs_q),%arg_z)
+	__(add %nargs_q,%rsp)	/* discard the stacked values  */
+local_label(_throw_one_value):
+	__(lea local_label(_threw_one_value)(%rip),%ra0)
+	__(jmp _SPnthrow1value)
+__(tra(local_label(_threw_one_value)))
+	/* Unbind specials down to the catch frame's db_link, then  */
+	/* restore the context saved in the frame and return to its pc.  */
+	__(movq rcontext(tcr.catch_top),%temp0)
+	__(movq catch_frame.db_link(%temp0),%imm0)
+	__(movq rcontext(tcr.db_link),%imm1)
+	__(cmpq %imm0,%imm1)
+	__(jz local_label(_threw_one_value_dont_unbind))
+	__(lea local_label(_threw_one_value_dont_unbind)(%rip),%ra0)
+        __(push %ra0)
+	__(jmp _SPunbind_to)
+__(tra(local_label(_threw_one_value_dont_unbind)))
+	__(movq catch_frame.rbp(%temp0),%rbp)
+	__(movq catch_frame.foreign_sp(%temp0),%imm0)
+	__(movq catch_frame.xframe(%temp0),%imm1)
+        __(movq %imm0,rcontext(tcr.foreign_sp))
+	__(movq %imm1,rcontext(tcr.xframe))
+	__(movq catch_frame.rsp(%temp0),%rsp)
+	__(movq catch_frame.link(%temp0),%imm1)
+	__(movq catch_frame._save0(%temp0),%save0)
+	__(movq catch_frame._save1(%temp0),%save1)
+	__(movq catch_frame._save2(%temp0),%save2)
+	__ifndef(`TCR_IN_GPR')
+	__(movq catch_frame._save3(%temp0),%save3)
+	__endif
+	__(movq %imm1,rcontext(tcr.catch_top))
+	__(movq catch_frame.pc(%temp0),%ra0)
+	/* Pop the tsp frame that holds the catch frame.  */
+	__(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
+	__(movq (%imm1),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))
+	__(jmp *%ra0)
+local_label(_throw_multiple):
+	__(lea local_label(_threw_multiple)(%rip),%ra0)
+	__(jmp _SPnthrowvalues)
+__(tra(local_label(_threw_multiple)))
+	__(movq rcontext(tcr.catch_top),%temp0)
+	__(movq catch_frame.db_link(%temp0),%imm0)
+	__(movq rcontext(tcr.db_link),%imm1)
+	__(cmpq %imm0,%imm1)
+	__(je local_label(_threw_multiple_dont_unbind))
+	__(leaq local_label(_threw_multiple_dont_unbind)(%rip),%ra0)
+        __(push %ra0)
+	__(jmp _SPunbind_to)
+__(tra(local_label(_threw_multiple_dont_unbind)))
+	/* Copy multiple values from the current %rsp to the target %rsp   */
+	__(lea (%rsp,%nargs_q),%imm0)
+	__(movq catch_frame.rsp(%temp0),%imm1)
+	__(jmp local_label(_threw_multiple_push_test))
+local_label(_threw_multiple_push_loop):
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(movq (%imm0),%arg_z)
+	__(movq %arg_z,(%imm1))
+local_label(_threw_multiple_push_test):		
+	__(cmpq %imm0,%rsp)
+	__(jne local_label(_threw_multiple_push_loop))
+	/* target %rsp is now in %imm1   */
+	__(movq catch_frame.rbp(%temp0),%rbp)
+	__(movq catch_frame.foreign_sp(%temp0),%imm0)
+        __(movq %imm0,rcontext(tcr.foreign_sp))        
+	__(movq catch_frame.xframe(%temp0),%imm0)
+	__(movq %imm0,rcontext(tcr.xframe))
+	__(movq %imm1,%rsp)
+	__(movq catch_frame.link(%temp0),%imm1)		
+	__(movq catch_frame._save0(%temp0),%save0)
+	__(movq catch_frame._save1(%temp0),%save1)
+	__(movq catch_frame._save2(%temp0),%save2)
+	__ifndef(`TCR_IN_GPR')
+	__(movq catch_frame._save3(%temp0),%save3)
+	__endif
+	__(movq %imm1,rcontext(tcr.catch_top))
+	__(movq catch_frame.pc(%temp0),%ra0)
+	/* Pop the tsp frame that holds the catch frame.  */
+	__(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
+	__(movq (%imm1),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))
+	__(jmp *%ra0)
+_endsubp(throw)
+
+/* This takes N multiple values atop the vstack.   */
+_spentry(nthrowvalues)
+/* Throw %imm0 (boxed count) catch frames, carrying %nargs multiple values */
+/* which sit on top of the value stack.  For each frame: unbind specials */
+/* down to the frame's db_link, then either restore context from a catch */
+/* frame or run an unwind-protect cleanup with the values parked in a */
+/* temp-stack frame.  tcr.unwinding is set around the whole operation so */
+/* interrupt handling knows a non-local exit is in progress. */
+	__(movb $1,rcontext(tcr.unwinding))
+local_label(_nthrowv_nextframe):
+	__(subq $fixnumone,%imm0)
+	__(js local_label(_nthrowv_done))
+	/* Stash remaining frame count in %mm1; %imm0 is needed for db_link. */
+	__(movd %imm0,%mm1)
+	__(movq rcontext(tcr.catch_top),%temp0)
+	__(movq catch_frame.link(%temp0),%imm1)
+	__(movq catch_frame.db_link(%temp0),%imm0)
+	__(movq %imm1,rcontext(tcr.catch_top))
+	__(cmpq %imm0,rcontext(tcr.db_link))
+	__(jz local_label(_nthrowv_dont_unbind))
+	/* Special bindings to undo: call _SPunbind_to, return here. */
+	__(push %ra0)
+	__(leaq local_label(_nthrowv_back_from_unbind)(%rip),%ra0)
+        __(push %ra0)
+	__(jmp _SPunbind_to)
+__(tra(local_label(_nthrowv_back_from_unbind)))
+
+	__(pop %ra0)
+local_label(_nthrowv_dont_unbind):
+	__(cmpb $unbound_marker,catch_frame.catch_tag(%temp0))
+	__(je local_label(_nthrowv_do_unwind))
+/* A catch frame.  If the last one, restore context from there.   */
+	__(movd %mm1,%imm0)
+	__(testq %imm0,%imm0)	/* last catch frame ?   */
+	__(jne local_label(_nthrowv_skip))
+	__(movq catch_frame.xframe(%temp0),%save0)
+	__(movq %save0,rcontext(tcr.xframe))
+	/* BLT the returned values from their current location down to */
+	/* the catch frame's saved %rsp (copying from high to low). */
+	__(leaq (%rsp,%nargs_q),%save1)
+	__(movq catch_frame.rsp(%temp0),%save2)
+	__(movq %nargs_q,%save0)
+	__(jmp local_label(_nthrowv_push_test))
+local_label(_nthrowv_push_loop):
+	__(subq $node_size,%save1)
+	__(subq $node_size,%save2)
+	__(movq (%save1),%temp1)
+	__(movq %temp1,(%save2))
+local_label(_nthrowv_push_test):
+	__(subq $node_size,%save0)
+	__(jns local_label(_nthrowv_push_loop))
+	__(movq catch_frame.xframe(%temp0),%save0)
+	__(movq %save0,rcontext(tcr.xframe))
+	__(movq %save2,%rsp)
+	__(movq catch_frame.rbp(%temp0),%rbp)
+	/* Restore callee-save registers from the catch frame. */
+	__ifndef(`TCR_IN_GPR')
+	__(movq catch_frame._save3(%temp0),%save3)
+	__endif
+	__(movq catch_frame._save2(%temp0),%save2)
+	__(movq catch_frame._save1(%temp0),%save1)
+	__(movq catch_frame._save0(%temp0),%save0)
+	__(movq catch_frame.foreign_sp(%temp0),%stack_temp)
+        __(movq %stack_temp,rcontext(tcr.foreign_sp))        
+local_label(_nthrowv_skip):	
+	/* Pop the catch frame's tsp frame (link word lives just below it). */
+	__(movq -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))        
+        __(movq %imm1,rcontext(tcr.next_tsp))
+	__(movd %mm1,%imm0)
+	__(jmp local_label(_nthrowv_nextframe))
+local_label(_nthrowv_do_unwind):	
+/* This is harder.  Call the cleanup code with the multiple values and   */
+/* nargs, the throw count, and the caller's return address in a temp  */
+/* stack frame.   */
+	__(leaq (%rsp,%nargs_q),%save1)
+	__(push catch_frame._save0(%temp0))
+	__(push catch_frame._save1(%temp0))
+	__(push catch_frame._save2(%temp0))
+	__ifndef(`TCR_IN_GPR')
+	__(push catch_frame._save3(%temp0))
+	__endif
+	__(push catch_frame.pc(%temp0))
+	__(movq catch_frame.rbp(%temp0),%rbp)
+        __(movq catch_frame.xframe(%temp0),%stack_temp)
+	__(movq catch_frame.rsp(%temp0),%arg_x)
+        __(movq %stack_temp,rcontext(tcr.xframe))
+	__(movq catch_frame.foreign_sp(%temp0),%stack_temp)
+        __(movq %stack_temp,rcontext(tcr.foreign_sp))        
+	/* Discard the catch frame, so we can build a temp frame   */
+	__(movq -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))
+	/* tsp overhead, nargs, throw count, ra0   */
+	__(dnode_align(%nargs_q,(tsp_frame.fixed_overhead+(3*node_size)),%imm0))
+	__(TSP_Alloc_Var(%imm0,%imm1))
+
+	/* Lay out the temp frame: nargs, caller ra, remaining throw count, */
+	/* then the values themselves (copied low-to-high below). */
+	__(movq %nargs_q,(%imm1))
+	__(movq %ra0,node_size(%imm1))
+	__(movq %mm1,node_size*2(%imm1))
+	__(leaq node_size*3(%imm1),%imm1)
+	__(jmp local_label(_nthrowv_tpushtest))
+local_label(_nthrowv_tpushloop):
+	__(movq -node_size(%save1),%temp0)
+	__(subq $node_size,%save1)
+	__(movq %temp0,(%imm1))
+	__(addq $node_size,%imm1)
+local_label(_nthrowv_tpushtest):
+	__(subl $node_size,%nargs)
+	__(jns local_label(_nthrowv_tpushloop))
+	__(pop %xfn)
+	__ifndef(`TCR_IN_GPR')
+	__(pop %save3)
+	__endif
+	__(pop %save2)
+	__(pop %save1)
+	__(pop %save0)
+	__(movq %arg_x,%rsp)
+/* Ready to call cleanup code. set up tra, jmp to %xfn   */
+	__(leaq local_label(_nthrowv_called_cleanup)(%rip),%ra0)
+        __(push %ra0)
+	/* Cleanup code runs with tcr.unwinding clear. */
+	__(movb $0,rcontext(tcr.unwinding))
+	__(jmp *%xfn)
+__(tra(local_label(_nthrowv_called_cleanup)))
+
+	__(movb $1,rcontext(tcr.unwinding))
+	/* Recover nargs, caller ra, and throw count from the temp frame, */
+	/* then push the saved values back onto the value stack. */
+	__(movq rcontext(tcr.save_tsp),%imm1)
+	__(movq tsp_frame.data_offset+(0*node_size)(%imm1),%nargs_q)
+	__(movq tsp_frame.data_offset+(1*node_size)(%imm1),%ra0)
+	__(movq tsp_frame.data_offset+(2*node_size)(%imm1),%mm1)
+	__(movq %nargs_q,%imm0)
+	__(addq $tsp_frame.fixed_overhead+(node_size*3),%imm1)
+	__(jmp local_label(_nthrowv_tpoptest))
+local_label(_nthrowv_tpoploop):	
+	__(push (%imm1))
+	__(addq $node_size,%imm1)
+local_label(_nthrowv_tpoptest):	
+	__(subq $node_size,%imm0)
+	__(jns local_label(_nthrowv_tpoploop))
+	__(movq rcontext(tcr.save_tsp),%imm1)
+	__(movq (%imm1),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))
+	__(movd %mm1,%imm0)
+	__(jmp local_label(_nthrowv_nextframe))
+local_label(_nthrowv_done):
+	__(movb $0,rcontext(tcr.unwinding))
+	/* Deliver any interrupt that arrived while unwinding was disabled. */
+	__(check_pending_interrupt(%imm0))
+local_label(_nthrowv_return):	
+	__(jmp *%ra0)	
+_endsubp(nthrowvalues)
+
+/* This is a (slight) optimization.  When running an unwind-protect,  */
+/* save the single value and the throw count in the tstack frame.  */
+/* Note that this takes a single value in arg_z.  */
+	
+_spentry(nthrow1value)
+/* Single-value variant of nthrowvalues: the one value travels in %arg_z, */
+/* so no BLT of a value block is needed.  %imm0 is the boxed frame count. */
+	__(movb $1,rcontext(tcr.unwinding))
+local_label(_nthrow1v_nextframe):
+	__(subq $fixnumone,%imm0)
+	__(js local_label(_nthrow1v_done))
+	/* Remaining frame count parked in %mm1 across the per-frame work. */
+	__(movd %imm0,%mm1)
+	__(movq rcontext(tcr.catch_top),%temp0)
+	__(movq catch_frame.link(%temp0),%imm1)
+	__(movq catch_frame.db_link(%temp0),%imm0)
+	__(movq %imm1,rcontext(tcr.catch_top))
+	__(cmpq %imm0,rcontext(tcr.db_link))
+	__(jz local_label(_nthrow1v_dont_unbind))
+	__(push %ra0)
+	__(leaq local_label(_nthrow1v_back_from_unbind)(%rip),%ra0)
+        __(push %ra0)
+	__(jmp _SPunbind_to)
+__(tra(local_label(_nthrow1v_back_from_unbind)))
+
+	__(pop %ra0)
+local_label(_nthrow1v_dont_unbind):
+	__(cmpb $unbound_marker,catch_frame.catch_tag(%temp0))
+	__(je local_label(_nthrow1v_do_unwind))
+/* A catch frame.  If the last one, restore context from there.   */
+	__(movd %mm1,%imm0)
+	__(testq %imm0,%imm0)	/* last catch frame ?   */
+	__(jne local_label(_nthrow1v_skip))
+	__(movq catch_frame.xframe(%temp0),%save0)
+	__(movq %save0,rcontext(tcr.xframe))
+	/* NOTE(review): the next three instructions look vestigial: the */
+	/* leaq result in %save1 is overwritten below, and the xframe */
+	/* restore duplicates the two instructions just above (compare the */
+	/* multiple-value case in nthrowvalues, which uses %save1 for the */
+	/* value BLT).  Harmless, but worth confirming upstream. */
+	__(leaq (%rsp,%nargs_q),%save1)
+	__(movq catch_frame.xframe(%temp0),%save0)
+	__(movq %save0,rcontext(tcr.xframe))
+	__(movq catch_frame.rsp(%temp0),%rsp)
+	__(movq catch_frame.rbp(%temp0),%rbp)
+	__ifndef(`TCR_IN_GPR')
+	__(movq catch_frame._save3(%temp0),%save3)
+	__endif
+	__(movq catch_frame._save2(%temp0),%save2)
+	__(movq catch_frame._save1(%temp0),%save1)
+	__(movq catch_frame._save0(%temp0),%save0)
+	__(movq catch_frame.foreign_sp(%temp0),%stack_temp)
+        __(movq %stack_temp,rcontext(tcr.foreign_sp))        
+local_label(_nthrow1v_skip):	
+	/* Pop the catch frame's tsp frame. */
+	__(movq -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))        
+	__(movd %mm1,%imm0)
+	__(jmp local_label(_nthrow1v_nextframe))
+local_label(_nthrow1v_do_unwind):
+	
+/* This is harder, but not as hard (not as much BLTing) as the  */
+/* multiple-value case.  */
+	
+	__(movq catch_frame.xframe(%temp0),%save0)
+	__(movq %save0,rcontext(tcr.xframe))
+	__(movq catch_frame._save0(%temp0),%save0)
+	__(movq catch_frame._save1(%temp0),%save1)
+	__(movq catch_frame._save2(%temp0),%save2)
+	__ifndef(`TCR_IN_GPR')
+	__(movq catch_frame._save3(%temp0),%save3)
+	__endif
+	__(movq catch_frame.pc(%temp0),%xfn)
+	__(movq catch_frame.rbp(%temp0),%rbp)
+	__(movq catch_frame.rsp(%temp0),%rsp)
+	__(movq catch_frame.foreign_sp(%temp0),%stack_temp)
+        __(movq %stack_temp,rcontext(tcr.foreign_sp))        
+	/* Discard the catch frame, so we can build a temp frame   */
+	__(movq -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))        
+	/* Save caller ra, throw count, and the single value across the */
+	/* cleanup call in a fixed 3-node temp frame. */
+	__(TSP_Alloc_Fixed((3*node_size),%imm1))
+	__(addq $tsp_frame.fixed_overhead,%imm1)
+	__(movq %ra0,(%imm1))
+	__(movq %mm1,node_size*1(%imm1))
+	__(movq %arg_z,node_size*2(%imm1))
+/* Ready to call cleanup code. set up tra, jmp to %xfn   */
+	__(leaq local_label(_nthrow1v_called_cleanup)(%rip),%ra0)
+	__(movb $0,rcontext(tcr.unwinding))
+        __(push %ra0)
+	__(jmp *%xfn)
+__(tra(local_label(_nthrow1v_called_cleanup)))
+
+	__(movb $1,rcontext(tcr.unwinding))
+	/* Recover ra, count, and value, then pop the temp frame. */
+	__(movq rcontext(tcr.save_tsp),%imm1)
+	__(movq tsp_frame.data_offset+(0*node_size)(%imm1),%ra0)
+	__(movq tsp_frame.data_offset+(1*node_size)(%imm1),%mm1)
+	__(movq tsp_frame.data_offset+(2*node_size)(%imm1),%arg_z)
+
+	__(movq (%imm1),%imm1)
+        __(movq %imm1,rcontext(tcr.save_tsp))
+        __(movq %imm1,rcontext(tcr.next_tsp))        
+	__(movd %mm1,%imm0)
+	__(jmp local_label(_nthrow1v_nextframe))
+local_label(_nthrow1v_done):
+	__(movb $0,rcontext(tcr.unwinding))
+	__(check_pending_interrupt(%imm0))
+local_label(_nthrow1v_return):	
+	__(jmp *%ra0)	
+_endsubp(nthrow1value)
+
+/* This never affects the symbol's vcell   */
+/* Non-null symbol in arg_y, new value in arg_z           */
+	
+_spentry(bind)
+/* Establish a thread-local special binding of %arg_y (symbol) to %arg_z */
+/* (new value), pushing the old TLB value, the binding index, and the old */
+/* db_link so _SPunbind* can undo it.  Signals if the symbol has no */
+/* binding index (index 0), growing the TLB first if the index is beyond */
+/* tcr.tlb_limit. */
+	__(movq symbol.binding_index(%arg_y),%temp0)
+	__(cmpq rcontext(tcr.tlb_limit),%temp0)
+	__(jb 0f)
+	__(push %temp0)
+	__(tlb_too_small())
+0:	__(testq %temp0,%temp0)
+	__(jz 9f)
+	__(movq rcontext(tcr.tlb_pointer),%temp1)
+	/* Binding record: old value, index, old db_link; then link it in. */
+	__(push (%temp1,%temp0))
+	__(push %temp0)
+	__(push rcontext(tcr.db_link))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(movq %arg_z,(%temp1,%temp0))
+	__(jmp *%ra0)
+9:	
+	/* No binding index: signal XSYMNOBIND with the symbol as operand. */
+	__(movq %arg_y,%arg_z)
+	__(movq $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)	
+_endsubp(bind)
+
+/* arg_z = symbol: bind it to its current value  */
+	
+_spentry(bind_self)
+/* Bind the symbol in %arg_z to its own current value.  If the symbol */
+/* already has a thread-local binding, rebind to that value; otherwise */
+/* seed the TLB slot from the symbol's global vcell.  Signals if the */
+/* symbol has no binding index; grows the TLB if the index is too large. */
+	__(movq symbol.binding_index(%arg_z),%temp0)
+	__(cmpq rcontext(tcr.tlb_limit),%temp0)
+	__(jb 0f)
+	__(push %temp0)
+	__(tlb_too_small())
+0:	__(testq %temp0,%temp0)
+	__(jz 9f)
+	__(movq rcontext(tcr.tlb_pointer),%temp1)
+	/* (%temp1,%temp0): operand order normalized to match the other */
+	/* bind subprims; base+index with scale 1 is the same address. */
+	__(cmpb $no_thread_local_binding_marker,(%temp1,%temp0))
+	__(jz 2f)
+	/* Already thread-locally bound: push old value/index/db_link. */
+	__(push (%temp1,%temp0))
+	__(push %temp0)
+	__(push rcontext(tcr.db_link))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(jmp *%ra0)
+2:	/* First thread-local binding: copy value from the global vcell. */
+	__(movq symbol.vcell(%arg_z),%arg_y)
+	__(push (%temp1,%temp0))
+	__(push %temp0)
+	__(push rcontext(tcr.db_link))
+	__(movq %arg_y,(%temp1,%temp0))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(jmp *%ra0)
+9:	/* No binding index: signal XSYMNOBIND. */
+	__(movq $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind_self)
+
+_spentry(bind_nil)
+/* Bind the symbol in %arg_z to NIL, pushing old value/index/db_link so */
+/* the binding can be undone.  Signals if the symbol has no binding */
+/* index; grows the TLB if the index exceeds tcr.tlb_limit. */
+	__(movq symbol.binding_index(%arg_z),%temp0)
+	__(cmpq rcontext(tcr.tlb_limit),%temp0)
+	__(jb 0f)
+	__(push %temp0)
+	__(tlb_too_small())
+0:	__(testq %temp0,%temp0)
+	__(jz 9f)
+	__(movq rcontext(tcr.tlb_pointer),%temp1)
+	__(push (%temp1,%temp0))
+	__(push %temp0)
+	__(push rcontext(tcr.db_link))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(movq $nil_value,(%temp1,%temp0))
+	__(jmp *%ra0)
+9:	__(movq $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind_nil)
+
+_spentry(bind_self_boundp_check)
+/* Like bind_self, but signals an unbound-variable error (uuo) if the */
+/* symbol's current value - thread-local or global - is the unbound */
+/* marker.  Signals XSYMNOBIND if the symbol has no binding index. */
+	__(movq symbol.binding_index(%arg_z),%temp0)
+	__(cmpq rcontext(tcr.tlb_limit),%temp0)
+	__(jb 0f)
+	__(push %temp0)
+	__(tlb_too_small())
+0:	__(testq %temp0,%temp0)
+	__(jz 9f)
+	__(movq rcontext(tcr.tlb_pointer),%temp1)
+	__(cmpb $no_thread_local_binding_marker,(%temp1,%temp0))
+	__(je 2f)
+	/* Thread-locally bound, but possibly to the unbound marker. */
+	__(cmpb $unbound_marker,(%temp1,%temp0))
+	__(je 8f)
+	__(push (%temp1,%temp0))
+	__(push %temp0)
+	__(push rcontext(tcr.db_link))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(jmp *%ra0)
+2:	/* No thread-local binding yet: check the global vcell. */
+	__(movq symbol.vcell(%arg_z),%arg_y)
+	__(cmpb $unbound_marker,%arg_y_b)
+	__(jz 8f)
+	__(push (%temp1,%temp0))
+	__(push %temp0)
+	__(push rcontext(tcr.db_link))
+	__(movq %rsp,rcontext(tcr.db_link))
+	__(movq %arg_y,(%temp1,%temp0))
+	__(jmp *%ra0)
+8:	/* Symbol is unbound: trap. */
+	__(push %ra0)
+        __(uuo_error_reg_unbound(Rarg_z))
+	
+9:	__(movq $XSYMNOBIND,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(bind_self_boundp_check)
+
+_spentry(conslist)
+/* Heap-cons a list of the %nargs pushed arguments, last-pushed first, */
+/* returning it in %arg_z.  The testl sets ZF for the jnz at 2f (Cons */
+/* and subl maintain it inside the loop via subl). */
+	__(movl $nil_value,%arg_z_l)
+	__(testl %nargs,%nargs)
+	__(jmp 2f)
+1:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))
+	__(subl $node_size,%nargs)
+2:	__(jnz 1b)
+	__(jmp *%ra0)		
+_endsubp(conslist)
+
+/* do list*: last arg in arg_z, all others pushed, nargs set to #args pushed.  */
+/* Cons, one cons cell at a time.  Maybe optimize this later.  */
+	
+_spentry(conslist_star)
+/* LIST*: like conslist, but the initial tail is the last argument, */
+/* already in %arg_z, rather than NIL. */
+	__(testl %nargs,%nargs)
+	__(jmp 2f)
+1:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))
+	__(subl $node_size,%nargs)
+2:	__(jnz 1b)
+	__(jmp *%ra0)		
+_endsubp(conslist_star)
+
+/* We always have to create a tsp frame (even if nargs is 0), so the compiler   */
+/* doesn't get confused.   */
+_spentry(stkconslist)
+/* Cons a list of the %nargs pushed arguments on the temp stack, */
+/* returning it in %arg_z.  A tsp frame is always created (even for 0 */
+/* args) so the compiler's stack discipline stays consistent.  Conses */
+/* are allocated contiguously within the frame. */
+	__(movq %nargs_q,%imm1)
+	__(addq %imm1,%imm1)
+	__(movl $nil_value,%arg_z_l)
+	__(dnode_align(%imm1,tsp_frame.fixed_overhead,%imm1))
+	__(TSP_Alloc_Var(%imm1,%imm0))
+	__(addq $fulltag_cons,%imm0)
+	__(testl %nargs,%nargs)
+	__(jmp 2f)
+1:	__(pop %temp0)
+	__(_rplaca(%imm0,%temp0))
+	__(_rplacd(%imm0,%arg_z))
+	__(movq %imm0,%arg_z)
+	/* addq (explicit suffix) for consistency with stkconslist_star; */
+	/* same encoding, size was already implied by the 64-bit register. */
+	__(addq $cons.size,%imm0)
+	__(subl $node_size,%nargs)
+2:	__(jne 1b)
+	__(jmp *%ra0)
+_endsubp(stkconslist)
+
+/* do list*: last arg in arg_z, all others vpushed,   */
+/*	nargs set to #args vpushed.  */
+	
+_spentry(stkconslist_star)
+/* LIST* on the temp stack: like stkconslist, but the initial tail is */
+/* the last argument, already in %arg_z. */
+	__(movq %nargs_q,%imm1)
+	__(addq %imm1,%imm1)
+	__(dnode_align(%imm1,tsp_frame.fixed_overhead,%imm1))
+	__(TSP_Alloc_Var(%imm1,%imm0))
+	__(addq $fulltag_cons,%imm0)
+	__(testl %nargs,%nargs)
+	__(jmp 2f)
+1:	__(pop %temp0)
+	__(_rplaca(%imm0,%temp0))
+	__(_rplacd(%imm0,%arg_z))
+	__(movq %imm0,%arg_z)
+	__(addq $cons.size,%imm0)
+	__(subl $node_size,%nargs)
+2:	__(jne 1b)
+	__(jmp *%ra0)
+_endsubp(stkconslist_star)
+
+/* Make a stack-consed simple-vector out of the NARGS objects   */
+/*	on top of the vstack; return it in arg_z.  */
+	
+_spentry(mkstackv)
+/* Make a temp-stack-allocated SIMPLE-VECTOR from the %nargs pushed */
+/* values; result in %arg_z.  Builds the vector header from the element */
+/* count and subtag, then pops elements into place from the high end. */
+	__(dnode_align(%nargs_q,tsp_frame.fixed_overhead+node_size,%imm1))
+	__(TSP_Alloc_Var(%imm1,%temp0))
+	__(movl %nargs,%imm0_l)
+	__(shlq $(num_subtag_bits-fixnumshift),%imm0)
+	__(movb $subtag_simple_vector,%imm0_b)
+	__(movq %imm0,(%temp0))
+	__(leaq fulltag_misc(%temp0),%arg_z)
+	/* testl sets ZF for the jne at 2f; leaq leaves flags untouched. */
+	__(testl %nargs,%nargs)
+	__(leaq misc_data_offset(%arg_z,%nargs_q),%imm1)
+	__(jmp 2f)
+1:	__(pop -node_size(%imm1))
+	__(subl $node_size,%nargs)
+	__(leaq -node_size(%imm1),%imm1)
+2:	__(jne 1b)
+	__(jmp *%ra0)	
+_endsubp(mkstackv)
+
+	
+        .globl C(egc_write_barrier_start)
+C(egc_write_barrier_start):
+/*  */
+/* The function pc_luser_xp() - which is used to ensure that suspended threads  */
+/* are suspended in a GC-safe way - has to treat these subprims (which implement  */
+/* the EGC write-barrier) specially.  Specifically, a store that might introduce  */
+/* an intergenerational reference (a young pointer stored in an old object) has  */
+/* to "memoize" that reference by setting a bit in the global "refbits" bitmap.  */
+/* This has to happen atomically, and has to happen atomically wrt GC.  */
+
+/* Note that updating a word in a bitmap is itself not atomic, unless we use  */
+/* interlocked loads and stores.  */
+
+
+
+/* For RPLACA and RPLACD, things are fairly simple: regardless of where we are  */
+/* in the function, we can do the store (even if it's already been done) and  */
+/* calculate whether or not we need to set the bit out-of-line.  (Actually  */
+/* setting the bit needs to be done atomically, unless we're sure that other  */
+/* threads are suspended.)  */
+/* We can unconditionally set the suspended thread's RIP to the return address.  */
+
+	
+_spentry(rplaca)
+        .globl C(egc_rplaca)
+C(egc_rplaca):
+/* RPLACA with EGC write barrier: store %arg_z into (car %arg_y); if the */
+/* cons is in oldspace and the stored value is younger (lower address), */
+/* atomically set the corresponding bit in the global refbits bitmap. */
+/* The egc_rplaca label is known to pc_luser_xp(). */
+        __(rcmpq(%arg_z,%arg_y))
+	__(_rplaca(%arg_y,%arg_z))
+        __(ja 1f)
+0:      __(repret)
+1:      __(movq %arg_y,%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(cmpq lisp_global(oldspace_dnode_count),%imm0)
+        __(jae 0b)
+        __(ref_global(refbits,%temp0))
+	/* xorb $63 converts the dnode index to bts's big-endian bit order. */
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp0))
+        __(ret)
+_endsubp(rplaca)
+
+_spentry(rplacd)
+        .globl C(egc_rplacd)
+C(egc_rplacd):          
+/* RPLACD with EGC write barrier; identical to rplaca except for the */
+/* cdr store.  The egc_rplacd label is known to pc_luser_xp(). */
+        __(rcmpq(%arg_z,%arg_y))
+	__(_rplacd(%arg_y,%arg_z))
+        __(ja 1f)
+0:      __(repret)
+1:      __(movq %arg_y,%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(cmpq lisp_global(oldspace_dnode_count),%imm0)
+        __(jae 0b)
+        __(ref_global(refbits,%temp0))
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp0))
+        __(ret)
+_endsubp(rplacd)
+
+/* Storing into a gvector can be handled the same way as storing into a CONS.  */
+
+
+_spentry(gvset)
+        .globl C(egc_gvset)
+C(egc_gvset):
+/* Store %arg_z into gvector %arg_x at boxed index %arg_y, memoizing the */
+/* stored-into dnode in refbits if it may create an old->young reference. */
+/* The egc_gvset label is known to pc_luser_xp(). */
+        __(rcmpq(%arg_z,%arg_x))
+	__(movq %arg_z,misc_data_offset(%arg_x,%arg_y))
+        __(ja 1f)
+0:      __(repret)
+1:      __(lea misc_data_offset(%arg_x,%arg_y),%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(cmpq lisp_global(oldspace_dnode_count),%imm0)
+        __(jae 0b)
+        __(ref_global(refbits,%temp0))
+        __(xorb $63,%imm0_b)
+        __(lock) 
+        __(btsq %imm0,(%temp0))
+        __(ret)                
+_endsubp(gvset)
+
+/* This is a special case of storing into a gvector: if we need to  */
+/* memoize the store, record the address of the hash-table vector  */
+/* in the refmap, as well.  */
+        
+
+_spentry(set_hash_key)
+        .globl C(egc_set_hash_key)
+C(egc_set_hash_key):  
+/* Like gvset, but when the store is memoized, also memoize the address */
+/* of the hash-table vector itself so GC can revisit the table. */
+        __(rcmpq(%arg_z,%arg_x))
+	__(movq %arg_z,misc_data_offset(%arg_x,%arg_y))
+        __(ja 1f)
+0:      __(repret)
+1:      __(lea misc_data_offset(%arg_x,%arg_y),%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(cmpq lisp_global(oldspace_dnode_count),%imm0)
+        __(jae 0b)
+        __(ref_global(refbits,%temp0))
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp0))
+        /* Now memoize the address of the hash vector   */
+        __(movq %arg_x,%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp0))
+        __(ret)                
+_endsubp(set_hash_key)
+
+/* This is a little trickier: if this is interrupted, we need to know  */
+/* whether or not the STORE-CONDITIONAL (cmpxchgq) has won or not.    */
+/* If we're interrupted   before the PC has reached the "success_test" label,   */
+/* repeat (luser the PC back to store_node_conditional_retry.)  If we're at that  */
+/* label with the Z flag set, we won and (may) need to memoize.  */
+
+_spentry(store_node_conditional)
+        .globl C(egc_store_node_conditional)
+C(egc_store_node_conditional):
+/* Compare-and-swap: if (%arg_x + unboxed %temp0) still holds %arg_y, */
+/* store %arg_z there (lock cmpxchgq), memoize the dnode if needed, and */
+/* return T; otherwise return NIL.  The retry/success_test/success_end */
+/* labels are inspected by pc_luser_xp() to decide, on interruption, */
+/* whether the CAS has won yet. */
+	__(unbox_fixnum(%temp0,%imm1))
+        .globl C(egc_store_node_conditional_retry)
+C(egc_store_node_conditional_retry):      
+0:	__(movq (%arg_x,%imm1),%temp1)
+	__(cmpq %arg_y,%temp1)
+	__(movq %temp1,%imm0)
+	__(jne 3f)
+	__(lock)
+        __(cmpxchgq %arg_z,(%arg_x,%imm1))
+        .globl C(egc_store_node_conditional_success_test)
+C(egc_store_node_conditional_success_test):
+	__(jne 0b)
+        __(lea (%arg_x,%imm1),%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(cmpq lisp_global(oldspace_dnode_count),%imm0)
+        __(ref_global(refbits,%temp1))
+        __(jae 2f)
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp1))
+	.globl C(egc_store_node_conditional_success_end)
+C(egc_store_node_conditional_success_end):
+2:      __(movl $t_value,%arg_z_l)
+	__(ret)
+3:	__(movl $nil_value,%arg_z_l)
+	__(ret)
+_endsubp(store_node_conditional)
+				
+	_spentry(set_hash_key_conditional)
+        .globl C(egc_set_hash_key_conditional)
+C(egc_set_hash_key_conditional):
+        .globl C(egc_set_hash_key_conditional_retry)
+C(egc_set_hash_key_conditional_retry):          
+/* store_node_conditional for hash tables: on CAS success, memoize both */
+/* the stored-into dnode and the hash vector's address.  NOTE(review): */
+/* here the retry label precedes the unbox_fixnum, whereas in */
+/* store_node_conditional it follows it; harmless since unbox_fixnum is */
+/* idempotent, but worth confirming the asymmetry is intentional. */
+	__(unbox_fixnum(%temp0,%imm1))
+0:	__(movq (%arg_x,%imm1),%temp1)
+	__(cmpq %arg_y,%temp1)
+	__(movq %temp1,%imm0)
+	__(jne 3f)
+	__(lock)
+        __(cmpxchgq %arg_z,(%arg_x,%imm1))
+        .globl C(egc_set_hash_key_conditional_success_test)
+C(egc_set_hash_key_conditional_success_test):
+	__(jne 0b)
+        __(lea (%arg_x,%imm1),%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(cmpq lisp_global(oldspace_dnode_count),%imm0)
+        __(ref_global(refbits,%temp1))
+        __(jae 2f)
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp1))
+        /* Now memoize the address of the hash vector   */
+        __(movq %arg_x,%imm0)
+        __(subq lisp_global(ref_base),%imm0)
+        __(shrq $dnode_shift,%imm0)
+        __(xorb $63,%imm0_b)
+        __(lock)
+        __(btsq %imm0,(%temp1))
+        .globl C(egc_write_barrier_end)
+C(egc_write_barrier_end):
+2:      __(movl $t_value,%arg_z_l)
+	__(ret)
+3:	__(movl $nil_value,%arg_z_l)
+	__(ret)
+_endsubp(set_hash_key_conditional)
+
+	
+
+
+_spentry(setqsym)
+/* SETQ on symbol %arg_y with value %arg_z: if the symbol isn't marked */
+/* constant, tail-call the ordinary special-set path; otherwise signal */
+/* XCONST with the symbol as operand. */
+	__(btq $sym_vbit_const,symbol.flags(%arg_y))
+	__(jae _SPspecset)
+	__(movq %arg_y,%arg_z)
+	__(movq $XCONST,%arg_y)
+	__(set_nargs(2))
+	__(jmp _SPksignalerr)
+_endsubp(setqsym)
+
+_spentry(progvsave)
+/* PROGV prologue: %arg_y is the list of symbols, %arg_z the list of */
+/* values.  Validates that the value list is proper (tortoise/hare), */
+/* counts the symbols, allocates a tsp frame of binding records, and */
+/* binds each symbol thread-locally - to the corresponding value, or to */
+/* the unbound marker once values run out. */
+	/* Error if arg_z isn't a proper list.  That's unlikely,  */
+	/* but it's better to check now than to crash later.  */
+	
+	__(compare_reg_to_nil(%arg_z))
+	__(movq %arg_z,%arg_x)	/* fast   */
+	__(movq %arg_z,%temp1)	/* slow   */
+	__(je 9f)		/* Null list is proper   */
+0:
+	__(extract_lisptag(%arg_x,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jne 8f)
+	__(compare_reg_to_nil(%arg_x))
+	__(je 9f)
+	__(_cdr(%arg_x,%temp0))	/* (null (cdr fast)) ?   */
+	__(compare_reg_to_nil(%temp0))
+	__(je 9f)
+	__(extract_lisptag(%temp0,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jne 8f)
+	/* fast advances two cells per iteration, slow one: circularity */
+	/* is caught when they meet. */
+	__(_cdr(%temp0,%arg_x))
+	__(_cdr(%temp1,%temp1))
+	__(cmpq %temp1,%arg_x)
+	__(jne 0b)
+
+8:	__(movq $XIMPROPERLIST,%arg_y)
+	__(set_nargs(2))
+	__(jmp _SPksignalerr)
+9:	/* Whew 	  */
+
+        /* Next, determine the length of arg_y.  We   */
+	/* know that it's a proper list.   */
+	__(movq $-fixnumone,%imm0)
+	__(movq %arg_y,%arg_x)
+1:	__(compare_reg_to_nil(%arg_x))
+	__(_cdr(%arg_x,%arg_x))
+	__(leaq fixnumone(%imm0),%imm0)
+	__(jne 1b)
+	
+	/* imm0 is now (boxed) triplet count.  */
+	/* Determine word count, add 1 (to align), and make room.  */
+	/*  if count is 0, make an empty tsp frame and exit   */
+	__(testq %imm0,%imm0)
+	__(jne 2f)
+	__(TSP_Alloc_Fixed(2*node_size,%imm0))
+	__(ret)
+	/* %imm1 = 3 * count: each binding record is three nodes */
+	/* (sym, val, link). */
+2:	__(movq %imm0,%imm1)
+	__(add %imm1,%imm1)
+	__(add %imm0,%imm1)
+	__(dnode_align(%imm1,tsp_frame.fixed_overhead+node_size,%imm1))
+	__(TSP_Alloc_Var(%imm1,%temp0))
+	__(movq %imm0,(%temp0))
+	__(movq rcontext(tcr.db_link),%temp1)
+	/* Per-symbol loop: value is next list element, or the unbound */
+	/* marker if the value list is exhausted (cmov keeps flow linear). */
+3:	__(movl $unbound_marker,%temp0_l)
+	__(compare_reg_to_nil(%arg_z))
+	__(cmovneq cons.car(%arg_z),%temp0)
+	__(cmovneq cons.cdr(%arg_z),%arg_z)
+	__(_car(%arg_y,%arg_x))
+	__(_cdr(%arg_y,%arg_y))
+	__(movq symbol.binding_index(%arg_x),%arg_x)
+	__(cmp rcontext(tcr.tlb_limit),%arg_x)
+	__(jb 4f)
+	__(push %arg_x)
+	__(tlb_too_small())
+4:	__(movq rcontext(tcr.tlb_pointer),%imm0)
+	__(subq $binding.size,%imm1)
+	__(compare_reg_to_nil(%arg_y))
+	__(movq %arg_x,binding.sym(%imm1))
+	/* Save the old TLB value into the binding record, install the new. */
+	__(push (%imm0,%arg_x))
+	__(pop binding.val(%imm1))
+	__(movq %temp0,(%imm0,%arg_x))
+	__(movq %temp1,binding.link(%imm1))
+	__(movq %imm1,%temp1)
+	__(jne 3b)
+	__(movq %temp1,rcontext(tcr.db_link))
+	__(ret)
+_endsubp(progvsave)
+
+/* Allocate node objects on the temp stack, immediate objects on the foreign  */
+/* stack. (The caller has to know which stack to discard a frame from.)  */
+/* %arg_y = boxed element-count, %arg_z = boxed subtype  */
+	
+_spentry(stack_misc_alloc)
+/* Stack-cons a misc object: %arg_y = boxed element count, %arg_z = */
+/* boxed subtype.  Node (gvector) objects go on the temp stack, */
+/* immediate (ivector) objects on the foreign stack; oversized requests */
+/* fall back to heap allocation via _SPmisc_alloc.  Errors unless the */
+/* element count fits in 56 bits. */
+	__(movq $~(((1<<56)-1)<<fixnumshift),%temp0)
+	__(testq %temp0,%arg_y)
+	__(jne local_label(stack_misc_alloc_not_u56))
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(movq %arg_y,%temp0)
+	__(shl $num_subtag_bits-fixnumshift,%temp0)
+	__(orq %temp0,%imm0)		/* %imm0 now = header   */
+	/* Dispatch on the header's fulltag to compute the byte size per */
+	/* element class. */
+	__(movb $fulltagmask,%imm1_b)
+	__(andb %imm0_b,%imm1_b)
+	__(cmpb $fulltag_nodeheader_0,%imm1_b)
+	__(je local_label(stack_misc_alloc_node))
+	__(cmpb $fulltag_nodeheader_1,%imm1_b)
+	__(je local_label(stack_misc_alloc_node))
+	__(cmpb $ivector_class_64_bit,%imm1_b)
+	__(jz local_label(stack_misc_alloc_64))
+	__(cmpb $ivector_class_32_bit,%imm1_b)
+	__(jz local_label(stack_misc_alloc_32))
+	__(unbox_fixnum(%arg_y,%imm1))
+	/* ivector_class_other_bit: 16, 8, or 1 ...   */
+	__(cmpb $subtag_bit_vector,%imm0_b)
+	__(jne local_label(stack_misc_alloc_8))
+	__(addq $7,%imm1)
+	__(shrq $3,%imm1)
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_8):	
+	__(cmpb $subtag_simple_base_string,%imm0_b)
+	__(jb local_label(stack_misc_alloc_16))
+	__(unbox_fixnum(%arg_y,%imm1))
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_16):	
+	__(unbox_fixnum(%arg_y,%imm1))
+	__(shlq %imm1)
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_32):
+	/* 32-bit ivector   */
+	__(unbox_fixnum(%arg_y,%imm1))
+	__(shlq $2,%imm1)
+	__(jmp local_label(stack_misc_alloc_alloc_ivector))
+local_label(stack_misc_alloc_64):
+	/* 64-bit ivector: boxed fixnum is already the byte count. */
+	__(movq %arg_y,%imm1)
+local_label(stack_misc_alloc_alloc_ivector):	
+	__(dnode_align(%imm1,tsp_frame.fixed_overhead+node_size,%imm1))
+	__(cmpq $tstack_alloc_limit,%imm1)
+	__(ja local_label(stack_misc_alloc_heap_alloc_ivector))
+        __ifdef(`WINDOWS')
+         __(windows_cstack_probe(%imm1,%temp0))
+        __endif
+	/* Carve the frame from the foreign stack and zero it by dnodes */
+	/* (movapd stores 16 bytes of %fpzero per iteration). */
+        __(movq rcontext(tcr.foreign_sp),%stack_temp) 
+	__(movd %stack_temp,%temp1)
+        __(subq %imm1,rcontext(tcr.foreign_sp))
+        __(movq rcontext(tcr.foreign_sp),%temp0)
+0:	__(movapd %fpzero,-dnode_size(%temp1))
+	__(subq $dnode_size,%temp1)
+	__(cmpq %temp1,%temp0)
+	__(jnz 0b)	
+	__(movq %stack_temp,(%temp0))
+        __(movq %rbp,csp_frame.save_rbp(%temp0))
+	__(movq %imm0,csp_frame.fixed_overhead(%temp0))
+	__(leaq csp_frame.fixed_overhead+fulltag_misc(%temp0),%arg_z)
+	__(ret)
+local_label(stack_misc_alloc_heap_alloc_ivector):
+	/* Too big for the stack: push an empty foreign frame (so the */
+	/* caller's discard still works) and heap-allocate instead. */
+        __(movq rcontext(tcr.foreign_sp),%imm1)
+        __(subq $dnode_size,rcontext(tcr.foreign_sp))
+        __(movq rcontext(tcr.foreign_sp),%imm0)
+	__(movq %imm1,(%imm0))
+	__(jmp _SPmisc_alloc)	
+local_label(stack_misc_alloc_node):
+	__(movq %arg_y,%imm1)
+	__(dnode_align(%imm1,tsp_frame.fixed_overhead+node_size,%imm1))
+	__(cmpq $tstack_alloc_limit,%imm1)
+	__(ja local_label(stack_misc_alloc_heap_alloc_gvector))
+	__(TSP_Alloc_Var(%imm1,%temp0))
+	__(movq %imm0,(%temp0))
+	__(leaq fulltag_misc(%temp0),%arg_z)
+	__(ret)
+local_label(stack_misc_alloc_heap_alloc_gvector):	
+	/* Too big: empty tsp frame, then heap-allocate. */
+	__(TSP_Alloc_Fixed(0,%imm0))
+	__(jmp _SPmisc_alloc)	
+		
+local_label(stack_misc_alloc_not_u56):				
+	__(uuo_error_reg_not_type(Rarg_y,error_object_not_unsigned_byte_56))	
+_endsubp(stack_misc_alloc)
+
+/* subtype (boxed, of course) is pushed, followed by nargs bytes worth of   */
+/* initial-contents.  Note that this can be used to cons any type of initialized   */
+/* node-header'ed misc object (symbols, closures, ...) as well as vector-like   */
+/* objects.   */
+_spentry(gvector)
+/* Heap-cons a gvector-like object.  The boxed subtype is the deepest */
+/* pushed word; the %nargs bytes above it are the initial contents. */
+/* Builds the header, heap-allocates, then pops elements into place from */
+/* the top; the final (fencepost) pop discards the subtype word. */
+        __(subl $node_size,%nargs)
+	__(movq (%rsp,%nargs_q),%imm0)	/* boxed subtype   */
+	__(sarq $fixnumshift,%imm0)
+	__(movq %nargs_q,%imm1)
+	__(shlq $num_subtag_bits-word_shift,%imm1)
+	__(orq %imm1,%imm0)
+	__(dnode_align(%nargs_q,node_size,%imm1))
+	__(Misc_Alloc(%arg_z))
+	__(movq %nargs_q,%imm1)
+	__(jmp 2f)
+1:	__(movq %temp0,misc_data_offset(%arg_z,%imm1))
+2:	__(subq $node_size,%imm1)
+	__(pop %temp0)	/* Note the intentional fencepost:  */
+			/* discard the subtype as well.  */
+	__(jge 1b)
+	__(jmp *%ra0)
+_endsubp(gvector)
+
+_spentry(mvpass)
+	/* Not implemented on this port: trap if ever reached. */
+	__(hlt)
+_endsubp(mvpass)
+
+
+
+_spentry(nthvalue)
+	/* Not implemented on this port: trap if ever reached. */
+	__(hlt)
+_endsubp(nthvalue)
+
+_spentry(values)
+/* Return multiple values through the frame at %temp0.  If the saved */
+/* return address is the distinguished ret1val_addr, the caller wants */
+/* all values: BLT them down to the frame and jump past the marker. */
+/* Otherwise return a single value (last value, or NIL if none). */
+        __(movq (%temp0),%ra0)
+	__(ref_global(ret1val_addr,%imm1))
+	__(cmpq %imm1,%ra0)
+	__(movl $nil_value,%arg_z_l)
+	__(je 0f)
+	/* Single-value case: %arg_z = topmost value if any, else NIL. */
+	__(testl %nargs,%nargs)
+	__(cmovneq -node_size(%rsp,%nargs_q),%arg_z)
+	__(movq %temp0,%rsp)
+	__(ret)
+0:	__(movq 8(%temp0),%ra0)
+        __(addq $2*node_size,%temp0)
+	/* Copy the %nargs values downward to just below the frame. */
+	__(lea (%rsp,%nargs_q),%imm0)
+	__(jmp 2f)
+1:	__(subq $node_size,%imm0)
+	__(movq (%imm0),%temp1)
+	__(subq $node_size,%temp0)
+	__(movq %temp1,(%temp0))
+2:	__(cmpq %imm0,%rsp)
+	__(jne 1b)
+	__(movq %temp0,%rsp)
+	__(jmp *%ra0)	
+_endsubp(values)
+
+_spentry(default_optional_args)
+	/* Not implemented on this port: trap if ever reached. */
+	__(hlt)
+_endsubp(default_optional_args)
+
+_spentry(opt_supplied_p)
+	/* Not implemented on this port: trap if ever reached. */
+	__(hlt)
+_endsubp(opt_supplied_p)
+
+_spentry(lexpr_entry)
+	/* Not implemented on this port: trap if ever reached. */
+	__(hlt)
+_endsubp(lexpr_entry)
+	
+_spentry(heap_rest_arg)
+/* Cons ALL %nargs arguments into a heap &rest list and push it. */
+/* %next_method_context is preserved around the consing. */
+	__(push_argregs())
+        __(movq %next_method_context,%arg_y)
+	__(movl %nargs,%imm1_l)
+	__(testl %imm1_l,%imm1_l)
+	__(movl $nil_value,%arg_z_l)
+	__(jmp 2f)
+	.p2align 4
+1:	__(pop %temp1)
+	__(Cons(%temp1,%arg_z,%arg_z))
+	__(subl $node_size,%imm1_l)
+2:	__(jg 1b)
+	__(push %arg_z)
+        __(movq %arg_y,%next_method_context)
+	__(jmp *%ra0)		
+_endsubp(heap_rest_arg)
+
+/* %imm0 contains the number of fixed args ; make an &rest arg out of the others   */
+_spentry(req_heap_rest_arg)
+/* Like heap_rest_arg, but %imm0 counts fixed (required) args: only the */
+/* %nargs - %imm0 bytes of extra args become the &rest list. */
+	__(push_argregs())
+        __(movq %next_method_context,%arg_y)
+	__(movl %nargs,%imm1_l)
+	__(subl %imm0_l,%imm1_l)
+	__(movl $nil_value,%arg_z_l)
+	__(jmp 2f)
+	.p2align 4
+1:	__(pop %temp1)
+	__(Cons(%temp1,%arg_z,%arg_z))
+	__(subl $node_size,%imm1_l)
+2:	__(jg 1b)
+	__(push %arg_z)
+        __(movq %arg_y,%next_method_context)
+	__(jmp *%ra0)		
+_endsubp(req_heap_rest_arg)
+
+/* %imm0 bytes of stuff has already been pushed	  */
+/* make an &rest arg out of any others   */
+_spentry(heap_cons_rest_arg)
+/* Like req_heap_rest_arg, except the argument registers have already */
+/* been pushed by the caller (%imm0 bytes are already on the stack). */
+	__(movl %nargs,%imm1_l)
+	__(subl %imm0_l,%imm1_l)
+        __(movq %next_method_context,%arg_y)
+	__(movl $nil_value,%arg_z_l)
+	__(jmp 2f)
+	.p2align 4
+1:	__(pop %temp1)
+	__(Cons(%temp1,%arg_z,%arg_z))
+	__(subl $node_size,%imm1_l)
+2:	__(jg 1b)
+	__(push %arg_z)
+        __(movq %arg_y,%next_method_context)
+	__(jmp *%ra0)		
+_endsubp(heap_cons_rest_arg)
+
+_spentry(simple_keywords)
+/* Keyword entry with no required args: zero the fixed-arg count, push */
+/* the argument registers, and fall into the general keyword binder. */
+	__(xorl %imm0_l,%imm0_l)
+	__(push_argregs())
+	__(jmp _SPkeyword_bind)
+_endsubp(simple_keywords)
+
+_spentry(keyword_args)
+/* Keyword entry with required args: %imm0 (set by caller) counts the */
+/* non-keyword args; push the argument registers and bind keywords. */
+	__(push_argregs())
+	__(jmp _SPkeyword_bind)
+_endsubp(keyword_args)
+
+/* There are %nargs words of arguments on the stack; %imm0 contains the number  */
+/* of non-keyword args pushed.  It's possible that we never actually got  */
+/* any keyword args, which would make things much simpler.   */
+
+/* On entry, temp1 contains a fixnum with bits indicating whether   */
+/* &allow-other-keys and/or &rest was present in the lambda list.  */
+/* Once we get here, we can use the arg registers.  */
+
+define(`keyword_flags_aok_bit',`fixnumshift')
+define(`keyword_flags_unknown_keys_bit',`fixnumshift+1')
+define(`keyword_flags_rest_bit',`fixnumshift+2')
+define(`keyword_flags_seen_aok_bit',`fixnumshift+3')        
+	
+_spentry(keyword_bind)
+	/* %imm1 := number of bytes of putative keyword/value args on the  */
+	/* stack (total %nargs minus the %imm0 non-keyword args).  */
+	__(movl %nargs,%imm1_l)
+	__(subq %imm0,%imm1)
+	__(jbe local_label(no_keyword_values))
+	/* Odd number of keyword-ish words: cons them into a list and  */
+	/* signal an XBADKEYS error with that list.  */
+	__(btq $word_shift,%imm1)
+	__(jnc local_label(even))
+	__(movl $nil_value,%arg_z_l)
+	__(movq %imm1,%nargs_q)
+	__(testl %nargs,%nargs)
+	__(jmp 1f)
+0:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))
+	__(subl $node_size,%nargs)
+1:	__(jnz 0b)
+	__(movl $XBADKEYS,%arg_y_l)
+	__(set_nargs(2))
+	__(jmp _SPksignalerr)
+	/* Now that we're sure that we have an even number of keywords and values  */
+	/* (in %imm1), copy all pairs to the temp stack   */
+local_label(even):
+	/* Get the keyword vector into arg_x, and its length into arg_y.  */
+	__(movl function_data_offset(%fn),%imm0_l)
+	__(movq function_data_offset(%fn,%imm0,node_size),%arg_x)
+	__(vector_length(%arg_x,%arg_y))
+        __(testq %arg_y,%arg_y)
+        __(jne 1f)
+        __(btq $keyword_flags_aok_bit,%temp1)
+        __(jnc 1f)
+	/* Empty keyword vector and &allow-other-keys: discard the pairs  */
+	/* from the stack (unless &rest still wants them) and return.  */
+        __(btq $keyword_flags_rest_bit,%temp1)
+        __(jc 0f)
+        __(addq %imm1,%rsp)
+0:      
+        __(jmp *%ra0)
+1:      
+       	__(lea tsp_frame.fixed_overhead(%imm1),%arg_z)
+	__(TSP_Alloc_Var(%arg_z,%imm0))
+2:	__(subq $node_size,%arg_z)
+	__(pop (%arg_z))
+	__(cmpq %arg_z,%imm0)
+	__(jne 2b)
+	/* Push arg_y pairs of NILs.   */
+	__(movq %arg_y,%imm0)
+	__(jmp 4f)
+3:	__(push $nil_value)
+	__(push $nil_value)
+4:	__(subq $fixnumone,%arg_y)
+	__(jge 3b)
+	/* Push the %saveN registers, so that we can use them in this loop   */
+	/* Also, borrow %arg_y for a bit */
+	__(push %arg_y)
+	__(push %save2)
+	__(push %save1)
+	__(push %save0)
+	__(leaq 4*node_size(%rsp,%imm0,2),%save0)
+	/* %save0 points to the 0th value/supplied-p pair   */
+	__(leaq (%arg_z,%imm1),%save1)
+	/* %save1 is the end of the provided keyword/value pairs (the old %tsp).   */
+	__(movq %imm0,%save2)
+	/* %save2 is the length of the keyword vector   */
+	/* Outer loop: for each provided keyword/value pair, search the  */
+	/* keyword vector for a match; %imm0 scans the vector.  */
+5:	__(movq (%arg_z),%arg_y)	/* %arg_y is current keyword   */
+	__(xorl %imm0_l,%imm0_l)
+	/* :allow-other-keys is special: the first occurrence (tracked via  */
+	/* keyword_flags_seen_aok_bit) with a non-NIL value sets the aok flag.  */
+        __(cmpq $nrs.kallowotherkeys,%arg_y)
+        __(jne local_label(next_keyvect_entry))
+        __(btsq $keyword_flags_seen_aok_bit,%temp1)
+        __(jc local_label(next_keyvect_entry))
+        __(cmpb $fulltag_nil,node_size(%arg_z))
+	__(je local_label(next_keyvect_entry))
+	__(btsq $keyword_flags_aok_bit,%temp1)
+	__(jmp local_label(next_keyvect_entry))
+6:	__(cmpq misc_data_offset(%arg_x,%imm0),%arg_y)
+	__(jne 7f)
+	/* Got a match; have we already seen this keyword ?   */
+	__(negq %imm0)
+	__(cmpb $fulltag_nil,-node_size*2(%save0,%imm0,2))
+	__(jne 9f)	/* already seen keyword, ignore this value   */
+	__(movq node_size(%arg_z),%arg_y)
+	__(movq %arg_y,-node_size(%save0,%imm0,2))
+	__(movl $t_value,-node_size*2(%save0,%imm0,2))
+	__(jmp 9f)
+7:	__(addq $node_size,%imm0)
+local_label(next_keyvect_entry):	
+	__(cmpq %imm0,%save2)
+	__(jne 6b)
+	/* Didn't match anything in the keyword vector. Is the keyword  */
+	/* :allow-other-keys ?   */
+	__(cmpq $nrs.kallowotherkeys,%arg_y)
+	__(je 9f)               /* :allow-other-keys is never "unknown" */
+8:	__(btsq $keyword_flags_unknown_keys_bit,%temp1)
+9:	__(addq $dnode_size,%arg_z)
+	__(cmpq %arg_z,%save1)
+	__(jne 5b)
+	__(pop %save0)
+	__(pop %save1)
+	__(pop %save2)
+	__(pop %arg_y)
+	/* If the function takes an &rest arg, or if we got an unrecognized  */
+	/* keyword and don't allow that, copy the incoming keyword/value  */
+	/* pairs from the temp stack back to the value stack   */
+	__(btq $keyword_flags_rest_bit,%temp1)
+	__(jc 1f)
+	__(btq $keyword_flags_unknown_keys_bit,%temp1)
+	__(jnc 0f)
+	__(btq $keyword_flags_aok_bit,%temp1)
+	__(jnc 1f)
+	/* pop the temp frame   */
+0:	__(discard_temp_frame(%imm1))
+	__(jmp *%ra0)
+	/* Copy the keyword/value pairs from the tsp back to sp, either because  */
+	/* the function takes an &rest arg or because we need to signal an  */
+	/* "unknown keywords" error   */
+1:	__(movq rcontext(tcr.save_tsp),%arg_z)
+	__(mov (%arg_z),%arg_y)
+	__(jmp 3f)
+2:	__(push (%arg_z))
+	__(push node_size(%arg_z))
+3:	__(addq $dnode_size,%arg_z)
+	__(cmpq %arg_z,%arg_y)
+	__(jne 2b)
+	__(discard_temp_frame(%imm0))
+	__(btq $keyword_flags_unknown_keys_bit,%temp1)
+	__(jnc 9f)
+	__(btq $keyword_flags_aok_bit,%temp1)
+	__(jc 9f)
+	/* Signal an "unknown keywords" error   */
+	/* Cons the %imm1 bytes of key/value pairs into a list for the error.  */
+	__(movq %imm1,%nargs_q)
+	__(testl %nargs,%nargs)
+        __(movl $nil_value,%arg_z_l)
+	__(jmp 5f)
+4:	__(pop %arg_y)
+	__(Cons(%arg_y,%arg_z,%arg_z))
+	__(subl $node_size,%nargs)
+5:	__(jnz 4b)
+	__(movl $XBADKEYS,%arg_y_l)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+9:	__(jmp *%ra0)
+	
+/* No keyword values were provided.  Access the keyword vector (which is the 0th  */
+/*  constant in %fn), determine its length N, and push N	pairs of NILs.   */
+/* N could be 0 ...  */
+	
+local_label(no_keyword_values):		
+	__(movl function_data_offset(%fn),%imm0_l)
+	__(movq function_data_offset(%fn,%imm0,node_size),%arg_x)
+	__(movl $nil_value,%arg_z_l)
+	__(vector_length(%arg_x,%arg_y))
+	__(jmp 1f)
+0:	__(push %arg_z)
+	__(push %arg_z)
+1:	__(subq $fixnumone,%arg_y)
+	__(jge 0b)
+	__(jmp *%ra0)		
+_endsubp(keyword_bind)
+
+
+
+_spentry(ksignalerr)
+	/* Signal a lisp error: tail-call the error-dispatch function  */
+	/* named by nrs.errdisp; the error code and data are already in  */
+	/* the argument registers / on the stack.  */
+	__(movq $nrs.errdisp,%fname)
+	__(jump_fname)	
+_endsubp(ksignalerr)
+
+_spentry(stack_rest_arg)
+	/* &rest arg, no required/optional args: zero the fixed-arg count  */
+	/* in %imm0, push the register args, and stack-cons the rest list.  */
+	__(xorl %imm0_l,%imm0_l)
+	__(push_argregs())
+	__(jmp _SPstack_cons_rest_arg)
+_endsubp(stack_rest_arg)
+
+_spentry(req_stack_rest_arg)
+	/* Like stack_rest_arg, but some required args precede the &rest  */
+	/* arg; %imm0 (fixed-arg count) is presumably set by the caller.  */
+	__(push_argregs())
+	__(jmp _SPstack_cons_rest_arg)
+_endsubp(req_stack_rest_arg)
+
+_spentry(stack_cons_rest_arg)
+	/* Cons the (%nargs - %imm0) trailing stacked args into an &rest  */
+	/* list allocated on the temp stack, and push that list.  Falls  */
+	/* back to heap-consing if the list would exceed tstack_alloc_limit.  */
+	__(movl %nargs,%imm1_l)
+	__(subl %imm0_l,%imm1_l)
+	__(movl $nil_value,%arg_z_l)
+	__(jle 2f)	/* empty list ; make an empty TSP frame   */
+	__(addq %imm1,%imm1)
+	__(cmpq $(tstack_alloc_limit-dnode_size),%imm1)
+	__(ja 3f)	/* make empty frame, then heap-cons   */
+	__(dnode_align(%imm1,tsp_frame.fixed_overhead,%imm0))
+	__(TSP_Alloc_Var(%imm0,%temp1))
+	__(addq $fulltag_cons,%temp1)
+	/* Pop each arg and prepend it to the list being built in %arg_z.  */
+1:	__(pop %arg_x)
+	__(_rplacd(%temp1,%arg_z))
+	__(_rplaca(%temp1,%arg_x))
+	__(movq %temp1,%arg_z)
+	__(addq $cons.size,%temp1)
+	__(subq $dnode_size,%imm1)
+	__(jne 1b)
+	__(push %arg_z)
+	__(jmp *%ra0)
+	
+/* Length 0, make empty frame  */
+	
+2:
+	__(TSP_Alloc_Fixed(0,%temp1))
+	__(push %arg_z)
+	__(jmp *%ra0)
+	
+/* Too big to stack-cons, but make an empty frame before heap-consing  */
+	
+3:		
+	__(TSP_Alloc_Fixed(0,%temp1))
+	__(jmp _SPheap_cons_rest_arg)
+_endsubp(stack_cons_rest_arg)
+
+
+
+/* Empty placeholder: this subprim slot is defined but unimplemented  */
+/* (falls straight through to whatever follows the entry).  */
+_spentry(getxlong)
+_endsubp(getxlong)
+
+/* Have to be a little careful here: the caller may or may not have pushed  */
+/*   an empty frame, and we may or may not have needed one.  We can't easily  */
+/*   tell whether or not a frame will be needed (if the caller didn't reserve  */
+/*   a frame, whether or not we need one depends on the length of the list  */
+/*   in arg_z.  So, if the caller didn't push a frame, we do so ; once everything's  */
+/*   been spread, we discard the reserved frame (regardless of who pushed it)  */
+/*   if all args fit in registers.   */
+_spentry(spreadargz)
+	/* Spread the list in %arg_z into call arguments: push each element,  */
+	/* then pop up to nargregs of them back into the arg registers.  */
+	__(testl %nargs,%nargs)
+	__(jne 0f)
+	__(push $reserved_frame_marker)
+	__(push $reserved_frame_marker)
+0:	__(movq %arg_z,%arg_y)	/* save in case of error   */
+	__(xorl %imm0_l,%imm0_l)
+	__(compare_reg_to_nil(%arg_z))
+	__(je 2f)
+	/* %imm0 counts bytes pushed; the 16-bit add's sign flag (tested  */
+	/* by "js 8f" below) cheaply detects an absurdly long/circular list.  */
+1:	__(extract_fulltag(%arg_z,%imm1))
+	__(cmpb $fulltag_cons,%imm1_b)
+	__(jne 9f)
+	__(addw $node_size,%imm0_w)
+        __(_car(%arg_z,%arg_x))
+	__(_cdr(%arg_z,%arg_z))
+        __(js 8f)
+	__(compare_reg_to_nil(%arg_z))
+	__(push %arg_x)
+	__(jne 1b)
+2:	__(addw %imm0_w,%nargs_w)
+	__(jne 4f)
+3:	__(addq $2*node_size,%rsp)
+	__(jmp *%ra0)
+	/* Pop the last 1..3 pushed args back into registers; if everything  */
+	/* fit in registers, also discard the reserved frame (label 3).  */
+4:	__(cmpl $1*node_size,%nargs)
+	__(pop %arg_z)
+	__(je 3b)
+	__(cmpl $2*node_size,%nargs)
+	__(pop %arg_y)
+	__(je 3b)
+	__(cmpl $3*node_size,%nargs)
+	__(pop %arg_x)
+	__(je 3b)
+	__(jmp *%ra0)
+/* Discard everything that's been pushed already, complain   */
+
+8:     	__(lea (%rsp,%imm0),%rsp)
+	__(movq %arg_y,%arg_z)	/* recover original   */
+	__(movq $XTMINPS,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+/* Discard everything that's been pushed already, complain   */
+9:	__(lea (%rsp,%imm0),%rsp)
+	__(movq %arg_y,%arg_z)	/* recover original   */
+	__(movq $XNOSPREAD,%arg_y)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endsubp(spreadargz)
+
+/* Caller built its own frame when it was entered.  If all outgoing args  */
+/* are in registers, we can discard that frame; otherwise, we copy outgoing  */
+/* relative to it and restore %rbp/%ra0   */
+_spentry(tfuncallgen)
+	/* Tail-funcall: if any args are on the stack, slide them down  */
+	/* adjacent to the caller's frame, restore %ra0/%rbp, and funcall.  */
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe 9f)
+	__(lea -nargregs*node_size(%rsp,%nargs_q),%imm0)
+	__(xorl %imm1_l,%imm1_l)
+	/* We can use %ra0 as a temporary here, since the real return address  */
+	/* is on the stack   */
+0:	__(movq -node_size(%imm0),%ra0)
+	__(movq %ra0,-node_size(%rbp,%imm1))
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(cmpq %imm0,%rsp)
+	__(jne 0b)
+	__(lea (%rbp,%imm1),%rsp)
+	__(movq 8(%rbp),%ra0)
+	__(movq (%rbp),%rbp)
+        __(pushq %ra0)
+	__(do_funcall())
+        /* All args in regs; exactly the same as the tfuncallvsp case   */
+9:		
+	__(leave)
+	__(do_funcall())
+_endsubp(tfuncallgen)
+
+/* Some args were pushed; move them down in the frame   */
+/* Some args were pushed; move them down in the frame   */
+_spentry(tfuncallslide)
+	/* Tail-funcall where the caller knows stacked args exist: slide  */
+	/* them down to the frame, restore %ra0/%rbp, then funcall.  */
+	__(lea -nargregs*node_size(%rsp,%nargs_q),%imm0)
+	__(xorl %imm1_l,%imm1_l)
+	/* We can use %ra0 as a temporary here, since the real return address  */
+	/* is on the stack   */
+0:	__(movq -node_size(%imm0),%ra0)
+	__(movq %ra0,-node_size(%rbp,%imm1))
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(cmpq %imm0,%rsp)
+	__(jne 0b)
+	__(lea (%rbp,%imm1),%rsp)
+	__(movq 8(%rbp),%ra0)
+	__(movq (%rbp),%rbp)
+        __(push %ra0)
+	__(do_funcall())	
+_endsubp(tfuncallslide)
+
+/* No args were pushed; recover saved context & do funcall 	  */
+_spentry(tfuncallvsp)
+	/* Tail-funcall, all args in registers: just discard the current  */
+	/* frame and funcall.  */
+	__(leave)
+	__(do_funcall())
+_endsubp(tfuncallvsp)
+
+_spentry(tcallsymgen)
+	/* Tail-call a named function (symbol in %fname); same frame  */
+	/* gymnastics as tfuncallgen, but ends with jump_fname.  */
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe 9f)
+	__(lea -nargregs*node_size(%rsp,%nargs_q),%imm0)
+	__(xorl %imm1_l,%imm1_l)
+	/* We can use %ra0 as a temporary here, since the real return address  */
+	/* is on the stack   */
+0:	__(movq -node_size(%imm0),%ra0)
+	__(movq %ra0,-node_size(%rbp,%imm1))
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(cmpq %imm0,%rsp)
+	__(jne 0b)
+	__(lea (%rbp,%imm1),%rsp)
+	__(movq 8(%rbp),%ra0)
+	__(movq (%rbp),%rbp)
+        __(pushq %ra0)
+	__(jump_fname())
+/* All args in regs; exactly the same as the tcallsymvsp case   */
+9:		
+	__(leave)
+	__(jump_fname())
+_endsubp(tcallsymgen)
+
+_spentry(tcallsymslide)
+	/* Tail-call named function, stacked args known present: slide  */
+	/* them down, restore %ra0/%rbp, jump via %fname.  */
+	__(lea -nargregs*node_size(%rsp,%nargs_q),%imm0)
+	__(xorl %imm1_l,%imm1_l)
+	/* We can use %ra0 as a temporary here, since the real return address  */
+	/* is on the stack   */
+0:	__(movq -node_size(%imm0),%ra0)
+	__(movq %ra0,-node_size(%rbp,%imm1))
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(cmpq %imm0,%rsp)
+	__(jne 0b)
+	__(lea (%rbp,%imm1),%rsp)
+	__(movq 8(%rbp),%ra0)
+	__(movq 0(%rbp),%rbp)
+        __(pushq %ra0)
+	__(jump_fname())
+_endsubp(tcallsymslide)
+
+_spentry(tcallsymvsp)
+	/* Tail-call named function, all args in registers.  */
+	__(leave)
+	__(jump_fname())
+_endsubp(tcallsymvsp)
+
+_spentry(tcallnfngen)
+	/* Tail-call a known function object (in %temp0): move it to %fn,  */
+	/* slide stacked args (if any) down to the frame, and jump to it.  */
+	__(cmpl $nargregs*node_size,%nargs)
+	__(jbe 9f)
+	__(lea -nargregs*node_size(%rsp,%nargs_q),%imm0)
+	__(xorl %imm1_l,%imm1_l)
+	/* We can use %ra0 as a temporary here, since the real return address  */
+	/* is on the stack   */
+0:	__(movq -node_size(%imm0),%ra0)
+	__(movq %ra0,-node_size(%rbp,%imm1))
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(cmpq %imm0,%rsp)
+	__(jne 0b)
+	__(movq %temp0,%fn)
+	__(lea (%rbp,%imm1),%rsp)
+	__(movq lisp_frame.savera0(%rbp),%ra0)
+	__(movq lisp_frame.backlink(%rbp),%rbp)
+        __(pushq %ra0)
+	__(jmp *%fn)
+/* All args in regs; exactly the same as the tcallnfnvsp case   */
+9:		
+	__(movq %temp0,%fn)
+	__(leave)
+	__(jmp *%fn)
+_endsubp(tcallnfngen)
+
+_spentry(tcallnfnslide)
+	/* Tail-call known function (%temp0), stacked args known present.  */
+	__(lea -nargregs*node_size(%rsp,%nargs_q),%imm0)
+	__(xorl %imm1_l,%imm1_l)
+	/* We can use %ra0 as a temporary here, since the real return address  */
+	/* is on the stack   */
+0:	__(movq -node_size(%imm0),%ra0)
+	__(movq %ra0,-node_size(%rbp,%imm1))
+	__(subq $node_size,%imm0)
+	__(subq $node_size,%imm1)
+	__(cmpq %imm0,%rsp)
+	__(jne 0b)
+	__(movq %temp0,%fn)
+	__(lea (%rbp,%imm1),%rsp)
+	__(movq lisp_frame.savera0(%rbp),%ra0)
+	__(movq lisp_frame.backlink(%rbp),%rbp)
+        __(pushq %ra0)
+	__(jmp *%fn)
+_endsubp(tcallnfnslide)
+
+_spentry(tcallnfnvsp)
+	/* Tail-call known function (%temp0), all args in registers.  */
+	__(movq %temp0,%fn)
+	__(leave)
+	__(jmp *%fn)
+_endsubp(tcallnfnvsp)
+
+
+/* Make a "raw" area on the foreign stack, stack-cons a macptr to point to it,   */
+/*   and return the macptr.  Size (in bytes, boxed) is in arg_z on entry; macptr  */
+/*   in arg_z on exit.   */
+_spentry(makestackblock)
+	/* Allocate a raw area of (unboxed arg_z) bytes plus a macptr on  */
+	/* the foreign stack; return the macptr in %arg_z.  */
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+macptr.size,%imm0))
+	__(cmpq $tstack_alloc_limit,%imm0)
+	__(jae 1f)
+        __ifdef(`WINDOWS')
+         __(windows_cstack_probe(%imm0,%arg_z))
+        __endif
+	/* Carve the frame off tcr.foreign_sp; link it to the old sp.  */
+        __(movq rcontext(tcr.foreign_sp),%imm1)
+        __(subq %imm0,rcontext(tcr.foreign_sp))
+        __(movq rcontext(tcr.foreign_sp),%arg_z)
+	__(movq %imm1,(%arg_z))
+        __(movq %rbp,csp_frame.save_rbp(%arg_z))
+	/* Initialize the macptr: header, address of the raw area, and  */
+	/* zeroed domain/type slots.  */
+	__(lea macptr.size+tsp_frame.fixed_overhead(%arg_z),%imm0)
+	__(movq $macptr_header,tsp_frame.fixed_overhead(%arg_z))
+	__(addq $fulltag_misc+tsp_frame.fixed_overhead,%arg_z)
+	__(movq %imm0,macptr.address(%arg_z))
+	__(movsd %fpzero,macptr.domain(%arg_z))
+	__(movsd %fpzero,macptr.type(%arg_z))
+	__(ret)
+	/* Too big for the foreign stack: make an empty frame, then call  */
+	/* out to allocate a gcable pointer on the heap.  */
+1:	__(movq rcontext(tcr.foreign_sp),%imm1)
+        __(subq $dnode_size,rcontext(tcr.foreign_sp))
+        __(movq rcontext(tcr.foreign_sp),%imm0)
+	__(movq %imm1,(%imm0))
+        __(movq %rbp,csp_frame.save_rbp(%imm0))
+	__(set_nargs(1))
+	__(movq $nrs.new_gcable_ptr,%fname)
+	__(jump_fname())
+_endsubp(makestackblock)
+
+_spentry(makestackblock0)
+	/* Like makestackblock, but zero-fills the raw area before returning.  */
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(dnode_align(%imm0,tsp_frame.fixed_overhead+macptr.size,%imm0))
+	__(cmpq $tstack_alloc_limit,%imm0)
+	__(jae 9f)
+        __ifdef(`WINDOWS')
+         __(windows_cstack_probe(%imm0,%arg_z))
+        __endif        
+        __(movq rcontext(tcr.foreign_sp),%imm1)
+        __(subq %imm0,rcontext(tcr.foreign_sp))
+        __(movq rcontext(tcr.foreign_sp),%arg_z)
+	__(movq %imm1,(%arg_z))
+        __(movq %rbp,csp_frame.save_rbp(%arg_z))
+	__(lea macptr.size+tsp_frame.fixed_overhead(%arg_z),%imm0)
+	__(movq $macptr_header,tsp_frame.fixed_overhead(%arg_z))
+	__(addq $fulltag_misc+tsp_frame.fixed_overhead,%arg_z)
+	__(movq %imm0,macptr.address(%arg_z))
+	__(movsd %fpzero,macptr.domain(%arg_z))
+	__(movsd %fpzero,macptr.type(%arg_z))
+	__(jmp 2f)
+	/* Zero the block, 16 bytes at a time, from the data start (%imm0)  */
+	/* up to the old foreign sp (%imm1).  */
+1:	__(movapd %fpzero,(%imm0))
+	__(addq $dnode_size,%imm0)
+2:	__(cmpq %imm0,%imm1)
+	__(jne 1b)		
+	__(repret)
+9:	__(movq rcontext(tcr.foreign_sp),%imm1)
+        __(subq $dnode_size,rcontext(tcr.foreign_sp))
+        __(movq rcontext(tcr.foreign_sp),%imm0)
+	__(movq %imm1,(%imm0))
+        __(movq %rbp,csp_frame.save_rbp(%imm0))
+	__(set_nargs(1))
+	__(movq $nrs.new_gcable_ptr,%fname)
+	__(jump_fname())
+_endsubp(makestackblock0)
+
+_spentry(makestacklist)
+	/* Make a list of length %arg_y (a non-negative fixnum) whose  */
+	/* elements are all %arg_z, consed on the temp stack when small  */
+	/* enough, otherwise heap-consed behind an empty TSP frame.  */
+        __(movq $((1<<63)|fixnummask),%imm0)
+        __(testq %imm0,%arg_y)
+        __(jne 9f)
+	__(movq %arg_y,%imm0)
+	__(addq %imm0,%imm0)
+	__(rcmpq(%imm0,$tstack_alloc_limit))
+	__(movl $nil_value,%temp1_l) 
+	__(jae 2f)
+	__(addq $tsp_frame.fixed_overhead,%imm0)
+	__(TSP_Alloc_Var(%imm0,%temp0))
+	__(addq $fulltag_cons,%temp0)
+	__(jmp 1f)
+0:	__(_rplaca(%temp0,%arg_z))
+	__(_rplacd(%temp0,%temp1))
+	__(movq %temp0,%temp1)
+	__(addq $cons.size,%temp0)
+1:	__(subq $fixnumone,%arg_y)
+	__(jge 0b)
+	__(movq %temp1,%arg_z)
+	__(ret)
+	/* Too long to stack-cons: empty TSP frame, then heap-cons.  */
+2:	__(TSP_Alloc_Fixed(0,%imm0))
+	__(jmp 4f)
+3:	__(Cons(%arg_z,%temp1,%temp1))
+4:	__(subq $fixnumone,%arg_y)				
+	__(jge 3b)
+	__(movq %temp1,%arg_z)
+	__(ret)
+9:      __(uuo_error_reg_not_type(Rarg_y,error_object_not_unsigned_byte))
+_endsubp(makestacklist)
+
+/* subtype (boxed) vpushed before initial values. (Had better be a   */
+/* node header subtag.) Nargs set to count of things vpushed. 	  */
+_spentry(stkgvector)
+	/* Build a gvector on the temp stack from %nargs stacked values;  */
+	/* the (boxed) subtype was pushed first.  Result in %arg_z.  */
+	__(lea -fixnum_one(%nargs_q),%imm0)
+	__(lea (%rsp,%imm0),%arg_x)
+	__(movq %imm0,%arg_y)
+	/* Assemble the vector header: element count in the upper bits,  */
+	/* unboxed subtag in the low bits.  */
+	__(shlq $num_subtag_bits-fixnumshift,%imm0)
+	__(movq (%arg_x), %imm1)
+	__(shrq $fixnumshift,%imm1)
+	__(orq %imm1,%imm0)	/* imm0 = header, %arg_y = unaligned size   */
+	__(dnode_align(%arg_y,(tsp_frame.fixed_overhead+node_size),%imm1))
+	__(TSP_Alloc_Var(%imm1,%arg_z))
+	__(movq %imm0,(%arg_z))
+	__(addq $fulltag_misc,%arg_z)
+	/* Pop the stacked initial values into the vector, last-first.  */
+	__(lea -node_size(%nargs_q),%imm0)
+	__(jmp 2f)
+1:	__(pop misc_data_offset(%arg_z,%imm0))
+2:	__(subq $node_size,%imm0)
+	__(jge 1b)
+	__(addq $node_size,%rsp)
+	__(jmp *%ra0)	
+_endsubp(stkgvector)
+
+_spentry(misc_alloc)
+	/* Heap-allocate a misc (uvector) object: %arg_y = element count  */
+	/* (fixnum, must fit in 56 bits), %arg_z = subtag (fixnum).  */
+	/* Result in %arg_z.  */
+	__(movq $~(((1<<56)-1)<<fixnumshift),%imm0)
+	__(testq %imm0,%arg_y)
+	__(jne local_label(misc_alloc_not_u56))
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(movq %arg_y,%temp0)
+	__(shl $num_subtag_bits-fixnumshift,%temp0)
+	__(orq %temp0,%imm0)		/* %imm0 now = header   */
+	/* Dispatch on the subtag's fulltag to compute the byte size of  */
+	/* the data in %imm1.  */
+	__(movb $fulltagmask,%imm1_b)
+	__(andb %imm0_b,%imm1_b)
+	__(cmpb $fulltag_nodeheader_0,%imm1_b)
+	__(je local_label(misc_alloc_64))
+	__(cmpb $fulltag_nodeheader_1,%imm1_b)
+	__(je local_label(misc_alloc_64))
+	__(cmpb $ivector_class_64_bit,%imm1_b)
+	__(jz local_label(misc_alloc_64))
+	__(cmpb $ivector_class_32_bit,%imm1_b)
+	__(jz local_label(misc_alloc_32))
+	__(unbox_fixnum(%arg_y,%imm1))
+	/* ivector_class_other_bit: 16, 8, or 1 ...   */
+	__(cmpb $subtag_bit_vector,%imm0_b)
+	__(jne local_label(misc_alloc_8))
+	__(addq $7,%imm1)
+	__(shrq $3,%imm1)
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_8):	
+	__(cmpb $subtag_simple_base_string,%imm0_b)
+	__(jae local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_16):	
+	__(shlq %imm1)
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_32):
+	/* 32-bit ivector   */
+	__(unbox_fixnum(%arg_y,%imm1))
+	__(shlq $2,%imm1)
+	__(jmp local_label(misc_alloc_alloc_vector))
+local_label(misc_alloc_64):
+	/* 64-bit ivector or gvector 	  */
+	__(movq %arg_y,%imm1)
+local_label(misc_alloc_alloc_vector):	
+	__(dnode_align(%imm1,node_size,%imm1))
+	__(Misc_Alloc(%arg_z))
+	__(ret)
+local_label(misc_alloc_not_u56):
+	__(uuo_error_reg_not_type(Rarg_y,error_object_not_unsigned_byte_56))
+_endsubp(misc_alloc)
+
+
+/* Destructure the arglist in arg_reg against the lambda-list info  */
+/* encoded in %nargs: low byte = required count, next byte = &optional  */
+/* count, plus initopt/restp/keyp/aok/seen_aok flag bits.  whole_reg  */
+/* holds the original list for error reporting; the entry %rsp is saved  */
+/* in %mm0 so the stack can be unwound before signalling.  */
+_startfn(C(destbind1))
+	/* Save entry %rsp in case of error   */
+	__(movd %rsp,%mm0)
+	/* Extract required arg count.   */
+	__(movzbl %nargs_b,%imm0_l)
+        __(testl %imm0_l,%imm0_l)
+	__(je local_label(opt))		/* skip if no required args   */
+local_label(req_loop):	
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(toofew))
+	__(extract_lisptag(%arg_reg,%imm1))
+	__(cmpb $tag_list,%imm1_b)
+	__(jne local_label(badlist))
+	__(subb $1,%imm0_b)
+	__(pushq cons.car(%arg_reg))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jne local_label(req_loop))
+local_label(opt):	
+	__(movw %nargs_w,%imm0_w)
+	__(shrw $8,%imm0_w)
+	__(je local_label(rest_keys))
+	__(btl $initopt_bit,%nargs)
+	__(jc local_label(opt_supp))
+	/* 'simple' &optionals:	 no supplied-p, default to nil.   */
+local_label(simple_opt_loop):
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(default_simple_opt))
+	__(extract_lisptag(%arg_reg,%imm1))
+	__(cmpb $tag_list,%imm1_b)
+	__(jne local_label(badlist))
+	__(subb $1,%imm0_b)
+	__(pushq cons.car(%arg_reg))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jne local_label(simple_opt_loop))
+	__(jmp local_label(rest_keys))
+local_label(default_simple_opt):
+	__(subb $1,%imm0_b)
+	__(pushq $nil_value)
+	__(jne local_label(default_simple_opt))
+	__(jmp local_label(rest_keys))
+	/* &optionals with supplied-p: push value and T while args remain,  */
+	/* NIL/NIL once the list runs out.  */
+local_label(opt_supp):
+	__(extract_lisptag(%arg_reg,%imm1))
+	/* Bug fix: test the remaining arglist (%arg_reg) for exhaustion,  */
+	/* as the other loops here do -- %arg_z is unrelated at this point.  */
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(default_hard_opt))
+	__(cmpb $tag_list,%imm1_b)
+	__(jne local_label(badlist))
+	__(subb $1,%imm0_b)
+	__(pushq cons.car(%arg_reg))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(push $t_value)
+	__(jne local_label(opt_supp))
+	__(jmp local_label(rest_keys))
+local_label(default_hard_opt):
+	__(subb $1,%imm0_b)
+	__(push $nil_value)
+	__(push $nil_value)
+	__(jne local_label(default_hard_opt))	
+local_label(rest_keys):	
+	__(btl $restp_bit,%nargs)
+	__(jc local_label(have_rest))
+	__(btl $keyp_bit,%nargs)
+	__(jc local_label(have_keys))
+	__(compare_reg_to_nil(%arg_reg))
+	__(jne local_label(toomany))
+	__(jmp *%ra0)
+local_label(have_rest):
+	__(pushq %arg_reg)
+	__(btl $keyp_bit,%nargs)
+	__(jc local_label(have_keys))
+	__(jmp *%ra0)		
+	/* Ensure that arg_reg contains a proper,even-length list.  */
+	/* Insist that its length is <= 512 (as a cheap circularity check.)   */
+local_label(have_keys):
+	__(movw $256,%imm0_w)
+	__(movq %arg_reg,%arg_y)
+local_label(count_keys_loop):	
+	__(compare_reg_to_nil(%arg_y))
+	__(je local_label(counted_keys))
+	__(subw $1,%imm0_w)
+	__(jl local_label(toomany))
+	__(extract_lisptag(%arg_y,%imm1))
+	__(cmpb $tag_list,%imm1_b)
+	__(jne local_label(badlist))
+	__(_cdr(%arg_y,%arg_y))
+	__(extract_fulltag(%arg_y,%imm1))
+	__(cmpb $fulltag_cons,%imm1_b)
+	__(jne local_label(badlist))
+	__(_cdr(%arg_y,%arg_y))
+	__(jmp local_label(count_keys_loop))
+local_label(counted_keys):		
+	/* We've got a proper, even-length list of key/value pairs in  */
+	/* arg_reg. For each keyword var in the lambda-list, push a pair  */
+	/* of NILs on the vstack.   */
+	
+	__(movl %nargs,%imm1_l)
+	__(shrl $16,%imm1_l)
+	__(movzbl %imm1_b,%imm0_l)
+	__(movq %rsp,%arg_y)
+	__(jmp local_label(push_pair_test))	
+local_label(push_pair_loop):
+	__(push $nil_value)
+	__(push $nil_value)
+local_label(push_pair_test):	
+	__(subb $1,%imm1_b)
+	__(jge local_label(push_pair_loop))
+	/* Push the %saveN registers, so that we can use them in this loop   */
+	/* Also, borrow %arg_z */
+	__(push %save0)
+	__(push %save1)
+	__(push %save2)
+	__(push %arg_z)
+	/* save0 points to the 0th value/supplied-p pair   */
+	__(movq %arg_y,%save0)
+	/* save1 is the length of the keyword vector   */
+	__(vector_length(%arg_x,%save1))
+	/* save2 is the current keyword   */
+	/* arg_z is the value of the current keyword   */
+	__(xorl %imm0_l,%imm0_l)	/* count unknown keywords seen   */
+local_label(match_keys_loop):
+	__(compare_reg_to_nil(%arg_reg))
+	__(je local_label(matched_keys))
+	__(_car(%arg_reg,%save2))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(_car(%arg_reg,%arg_z))
+	__(_cdr(%arg_reg,%arg_reg))
+	__(xorl %arg_y_l,%arg_y_l)
+	__(jmp local_label(match_test))
+local_label(match_loop):
+	__(cmpq misc_data_offset(%arg_x,%arg_y),%save2)
+	__(je local_label(matched))
+	__(addq $node_size,%arg_y)
+local_label(match_test):
+	__(cmpq %arg_y,%save1)
+	__(jne local_label(match_loop))
+	/* No match.  Note unknown keyword, check for :allow-other-keys   */
+	__(addl $1,%imm0_l)
+	__(cmpq $nrs.kallowotherkeys,%save2)
+	__(jne local_label(match_keys_loop))
+	__(subl $1,%imm0_l)
+	__(btsl $seen_aok_bit,%nargs)
+	__(jc local_label(match_keys_loop))
+	/* First time we've seen :allow-other-keys.  Maybe set aok_bit.   */
+	__(compare_reg_to_nil(%arg_z))
+	__(je local_label(match_keys_loop))
+	__(btsl $aok_bit,%nargs)
+	__(jmp local_label(match_keys_loop))
+	/* Got a match.  Worry about :allow-other-keys here, too.   */
+local_label(matched):
+	__(negq %arg_y)
+	__(cmpb $fulltag_nil,-node_size*2(%save0,%arg_y,2))
+	__(jne local_label(match_keys_loop))
+	__(movq %arg_z,-node_size(%save0,%arg_y,2))
+	__(movl $t_value,-node_size*2(%save0,%arg_y,2))
+	__(cmpq $nrs.kallowotherkeys,%save2)
+	__(jne local_label(match_keys_loop))
+	__(btsl $seen_aok_bit,%nargs)
+	__(jnc local_label(match_keys_loop))
+	__(compare_reg_to_nil(%arg_z))
+	__(je local_label(match_keys_loop))
+	__(btsl $aok_bit,%nargs)
+	__(jmp local_label(match_keys_loop))
+local_label(matched_keys):		
+	__(pop %arg_z)
+	__(pop %save2)
+	__(pop %save1)
+	__(pop %save0)
+	__(testl %imm0_l,%imm0_l)
+	__(je local_label(keys_ok)) 
+	__(btl $aok_bit,%nargs)
+	__(jnc local_label(badkeys))
+local_label(keys_ok):	
+	__(jmp *%ra0)
+	/* Some unrecognized keywords.  Complain generically about   */
+	/* invalid keywords.   */
+local_label(badkeys):
+	__(movq $XBADKEYS,%arg_y)
+	__(jmp local_label(destructure_error))
+local_label(toomany):
+	__(movq $XCALLTOOMANY,%arg_y)
+	__(jmp local_label(destructure_error))
+local_label(toofew):
+	__(movq $XCALLTOOFEW,%arg_y)
+	__(jmp local_label(destructure_error))
+local_label(badlist):
+	__(movq $XCALLNOMATCH,%arg_y)
+	/* jmp local_label(destructure_error)   */
+local_label(destructure_error):
+	__(movd %mm0,%rsp)		/* undo everything done to the stack   */
+	__(movq %whole_reg,%arg_z)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jmp _SPksignalerr)
+_endfn(C(destbind1))	
+
+_spentry(macro_bind)
+	/* Destructure a macro call form: remember the whole form in  */
+	/* whole_reg, skip its car (the macro name), and destructure the  */
+	/* rest via destbind1.  A non-list form signals XCALLNOMATCH.  */
+	__(movq %arg_reg,%whole_reg)
+	__(extract_lisptag(%arg_reg,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jne 1f)
+	__(_cdr(%arg_reg,%arg_reg))
+	__(jmp C(destbind1))
+1:	__(movq $XCALLNOMATCH,%arg_y)
+	__(movq %whole_reg,%arg_z)
+	__(set_nargs(2))
+        __(push %ra0)        
+	__(jmp _SPksignalerr)
+_endsubp(macro_bind)
+
+_spentry(destructuring_bind)
+	/* DESTRUCTURING-BIND entry: arglist already in arg_reg; save it  */
+	/* as the "whole" for error reporting and destructure it.  */
+	__(movq %arg_reg,%whole_reg)
+	__(jmp C(destbind1))
+_endsubp(destructuring_bind)
+
+_spentry(destructuring_bind_inner)
+	/* Inner (nested) destructuring: the "whole" form arrives in  */
+	/* %arg_z rather than arg_reg.  */
+	__(movq %arg_z,%whole_reg)
+	__(jmp C(destbind1))
+_endsubp(destructuring_bind_inner)
+
+	
+
+
+/* Empty placeholder: subprim slot defined but unimplemented.  */
+_spentry(vpopargregs)
+_endsubp(vpopargregs)
+
+/* If arg_z is an integer, return in imm0 something whose sign  */
+/* is the same as arg_z's.  If not an integer, error.   */
+_spentry(integer_sign)
+	/* Fixnum: the fixnum itself has arg_z's sign; return it in %imm0.  */
+	__(testb $tagmask,%arg_z_b)
+	__(movq %arg_z,%imm0)
+	__(je 8f)
+	__(extract_typecode(%arg_z,%imm0))
+	__(cmpb $subtag_bignum,%imm0_b)
+	__(jne 9f)
+	/* Bignum: sign-extend the most significant 32-bit digit.  */
+	__(getvheader(%arg_z,%imm0))
+	__(shr $num_subtag_bits,%imm0)
+	__(movslq misc_data_offset-4(%arg_z,%imm0,4),%imm0)
+8:	__(repret)
+9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_integer))
+_endsubp(integer_sign)
+
+/* "slide" nargs worth of values up the stack.  IMM0 contains   */
+/* the difference between the current RSP and the target.   */
+_spentry(mvslide)
+	/* %temp0 scans the source (top of the values), %imm0 the  */
+	/* destination; copy %nargs bytes downward, then set %rsp to the  */
+	/* new top.  */
+	__(movl %nargs,%imm1_l)
+	__(lea (%rsp,%nargs_q),%temp0)
+	__(testq %imm1,%imm1)
+	__(lea (%temp0,%imm0),%imm0)
+	__(je 2f)
+1:	
+	__(subq $node_size,%temp0)
+	__(movq (%temp0),%temp1)
+	__(subq $node_size,%imm0)
+	__(movq %temp1,(%imm0))
+	__(subq $node_size,%imm1)
+	__(jne 1b)
+2:	__(movq %imm0,%rsp)
+	__(jmp *%ra0)	
+_endsubp(mvslide)
+
+_spentry(save_values)
+	/* Save the %nargs values currently on the stack into a new temp-  */
+	/* stack frame (count + link word + values), popping them off %rsp.  */
+	__(movq rcontext(tcr.save_tsp),%imm1)
+/* common exit: nargs = values in this set, imm1 = ptr to tsp before call to save_values   */
+local_label(save_values_to_tsp):
+	__(movq rcontext(tcr.save_tsp),%arg_x)
+	__(dnode_align(%nargs_q,tsp_frame.fixed_overhead+(2*node_size),%imm0)) /* count, link   */
+	__(TSP_Alloc_Var(%imm0,%arg_z))
+	__(movq rcontext(tcr.save_tsp),%imm0)
+	__(movq %imm1,(%imm0))
+	__(movq %nargs_q,(%arg_z))
+	__(movq %arg_x,node_size(%arg_z))
+	/* Copy the values from the stack into the frame, high to low.  */
+	__(leaq 2*node_size(%arg_z,%nargs_q),%arg_y)
+	__(leaq (%rsp,%nargs_q),%imm0)
+	__(cmpq %imm0,%rsp)
+	__(jmp 2f)
+1:	__(subq $node_size,%imm0)
+	__(movq (%imm0),%arg_z)
+	__(subq $node_size,%arg_y)
+	__(cmpq %imm0,%rsp)
+	__(movq %arg_z,(%arg_y))
+2:	__(jne 1b)
+	__(add %nargs_q,%rsp)
+	__(jmp *%ra0)			
+_endsubp(save_values)
+
+/* Add the multiple values that are on top of the vstack to the set  */
+/* saved in the top tsp frame, popping them off of the vstack in the  */
+/* process.  It is an error (a bad one) if the TSP contains something  */
+/* other than a previously saved set of multiple-values.  */
+/* Since adding to the TSP may cause a new TSP segment to be allocated,  */
+/* each add_values call adds another linked element to the list of  */
+/* values. This makes recover_values harder.   */
+_spentry(add_values)
+	/* If any values are present, append them (as another linked  */
+	/* segment) to the saved set in the top tsp frame; otherwise no-op.  */
+	__(testl %nargs,%nargs)
+	__(movq rcontext(tcr.save_tsp),%imm1)
+	__(movq (%imm1),%imm1)
+	__(jne local_label(save_values_to_tsp))
+	__(jmp *%ra0)
+_endsubp(add_values)
+
+/* push the values in the value set atop the sp, incrementing nargs.  */
+/* Discard the tsp frame; leave values atop the sp.   */
+	
+_spentry(recover_values)
+	/* First, walk the segments reversing the pointer to previous  */
+	/* segment pointers Can tell the end because that previous  */
+	/* segment pointer is the prev tsp pointer   */
+	__(movq rcontext(tcr.save_tsp),%temp1)
+	__(movq %temp1,%arg_x)	/* current segment   */
+	__(movq %temp1,%arg_y)	/* last segment   */
+	__(movq tsp_frame.backlink(%temp1),%arg_z)	/* previous tsp   */
+local_label(walkloop):
+	__(movq tsp_frame.fixed_overhead+node_size(%arg_x),%temp0)
+	__(cmpq %temp0,%arg_z)	/* last segment ?   */
+	__(movq %arg_y,tsp_frame.fixed_overhead+node_size(%arg_x))
+	__(movq %arg_x,%arg_y)	/* last segment <- current segment   */
+	__(movq %temp0,%arg_x)	/* current segment <- next segment   */
+	__(jne local_label(walkloop))
+
+	/* the final segment pointer is now in %arg_y  */
+	/* walk backwards, pushing values on the stack and incrementing %nargs   */
+local_label(pushloop):
+	__(movq tsp_frame.data_offset(%arg_y),%imm0)	/* nargs in segment   */
+	__(testq %imm0,%imm0)
+	__(leaq tsp_frame.data_offset+(2*node_size)(%arg_y,%imm0),%temp0)
+	__(leaq (%nargs_q,%imm0),%nargs_q)
+	__(jmp 2f)
+1:	__(pushq -node_size(%temp0))
+	__(subq $node_size,%temp0)
+	__(subq $fixnum_one,%imm0)
+2:	__(jne 1b)
+	__(cmpq %arg_y,%temp1)
+	__(movq tsp_frame.data_offset+node_size(%arg_y),%arg_y)
+	__(jne local_label(pushloop))
+	/* Discard all of the saved-values frames at once by resetting  */
+	/* both tsp pointers to the frame below them.  */
+	__(movq (%temp1),%temp1)
+        __(movq %temp1,rcontext(tcr.save_tsp))
+        __(movq %temp1,rcontext(tcr.next_tsp))        
+	__(jmp *%ra0)		
+_endsubp(recover_values)
+
+/* Exactly like recover_values, but it's necessary to reserve an outgoing  */
+/* frame if any values (which will be used as outgoing arguments) will  */
+/* wind up on the stack.  We can assume that %nargs contains 0 (and  */
+/* that no other arguments have been pushed) on entry.   */
+                
+_spentry(recover_values_for_mvcall)
+	/* First, walk the segments reversing the pointer to previous  */
+	/* segment pointers Can tell the end because that previous  */
+	/* segment pointer is the prev tsp pointer   */
+        __(xorl %nargs,%nargs)
+	__(movq rcontext(tcr.save_tsp),%temp1)
+	__(movq %temp1,%arg_x)	/* current segment   */
+	__(movq %temp1,%arg_y)	/* last segment   */
+	__(movq tsp_frame.backlink(%temp1),%arg_z)	/* previous tsp   */
+local_label(walkloop_mvcall):
+	__(movq tsp_frame.fixed_overhead+node_size(%arg_x),%temp0)
+        __(addq tsp_frame.data_offset(%arg_x),%nargs_q)	
+	__(cmpq %temp0,%arg_z)	/* last segment ?   */
+	__(movq %arg_y,tsp_frame.fixed_overhead+node_size(%arg_x))
+	__(movq %arg_x,%arg_y)	/* last segment <- current segment   */
+	__(movq %temp0,%arg_x)	/* current segment <- next segment   */
+	__(jne local_label(walkloop_mvcall))
+
+	/* Total value count is now known; reserve an outgoing frame if  */
+	/* any values will wind up on the stack.  */
+        __(cmpl $nargregs*node_size,%nargs)
+        __(jbe local_label(pushloop_mvcall))
+        __(push $reserved_frame_marker)
+        __(push $reserved_frame_marker)
+
+	/* the final segment pointer is now in %arg_y  */
+	/* walk backwards, pushing values on the stack and incrementing %nargs   */
+local_label(pushloop_mvcall):
+	__(movq tsp_frame.data_offset(%arg_y),%imm0)	/* nargs in segment   */
+	__(testq %imm0,%imm0)
+	__(leaq tsp_frame.data_offset+(2*node_size)(%arg_y,%imm0),%temp0)
+	__(jmp 2f)
+1:	__(pushq -node_size(%temp0))
+	__(subq $node_size,%temp0)
+	__(subq $fixnum_one,%imm0)
+2:	__(jne 1b)
+	__(cmpq %arg_y,%temp1)
+	__(movq tsp_frame.data_offset+node_size(%arg_y),%arg_y)
+	__(jne local_label(pushloop_mvcall))
+	__(movq (%temp1),%temp1)
+        __(movq %temp1,rcontext(tcr.save_tsp))
+        __(movq %temp1,rcontext(tcr.next_tsp))        
+	__(jmp *%ra0)		
+_endsubp(recover_values_for_mvcall)
+        				
+_spentry(reset)
+	/* Deliberately halt: this subprim is not expected to be reached.  */
+	__(hlt)
+_endsubp(reset)
+
+
+
+_spentry(misc_alloc_init)
+	/* Allocate a misc object (via _SPmisc_alloc), then tail-call the  */
+	/* lisp function nrs.init_misc to initialize it with the saved  */
+	/* initial value.  Args: %arg_x = subtag, %arg_y = count,  */
+	/* %arg_z = initial value.  */
+	__(push %rbp)
+	__(movq %rsp,%rbp)
+	__(push %arg_z)
+	__(movq %arg_y,%arg_z)
+	__(movq %arg_x,%arg_y)
+	__(lea local_label(misc_alloc_init_back)(%rip),%ra0)
+        __(push %ra0)
+	__(jmp _SPmisc_alloc)
+__(tra(local_label(misc_alloc_init_back)))
+	__(pop %arg_y)
+	__(leave)
+	__(movq $nrs.init_misc,%fname)
+	__(set_nargs(2))
+	__(jump_fname())	
+_endsubp(misc_alloc_init)
+
+_spentry(stack_misc_alloc_init)
+	/* Like misc_alloc_init, but allocates on the (temp) stack via  */
+	/* _SPstack_misc_alloc before calling nrs.init_misc.  */
+	__(push %rbp)
+	__(movq %rsp,%rbp)
+	__(push %arg_z)
+	__(movq %arg_y,%arg_z)
+	__(movq %arg_x,%arg_y)
+	__(lea local_label(stack_misc_alloc_init_back)(%rip),%ra0)
+        __(push %ra0)
+	__(jmp _SPstack_misc_alloc)
+__(tra(local_label(stack_misc_alloc_init_back)))
+	__(pop %arg_y)
+	__(leave)
+	__(movq $nrs.init_misc,%fname)
+	__(set_nargs(2))
+	__(jump_fname())	
+_endsubp(stack_misc_alloc_init)
+
+
+
	.globl C(popj)
/* Return through a standard %rbp frame: restore caller's %rbp and  */
/* return.  Also exported to C code under the name "popj".   */
_spentry(popj)
C(popj):
	__(leave)
        __(ret)
_endsubp(popj)
+
+
+
/* Return the unsigned 64-bit value of %arg_z in %imm0, or signal  */
/* error_object_not_u64.  Accepts a non-negative fixnum, a 2-digit  */
/* bignum whose value is non-negative, or a 3-digit bignum whose  */
/* top digit is zero.   */
_spentry(getu64)
	__(movq $~(target_most_positive_fixnum << fixnumshift),%imm0)
	__(testq %arg_z,%imm0)	/* any bits outside non-negative fixnum range ? */
	__(movq %arg_z,%imm0)
	__(jne 1f)
	__(sarq $fixnumshift,%imm0)	/* unbox the fixnum */
	__(ret)
1:	__(andb $tagmask,%imm0_b)
	__(cmpb $tag_misc,%imm0_b)
	__(jne 9f)
	__(movb misc_subtag_offset(%arg_z),%imm0_b)
	__(cmpb $subtag_bignum,%imm0_b)
	__(jne 9f)
	__(movq misc_header_offset(%arg_z),%imm0)
	__(cmpq $three_digit_bignum_header,%imm0)
	__(je 3f)
	__(cmpq $two_digit_bignum_header,%imm0)
	__(jne 9f)
	__(movq misc_data_offset(%arg_z),%imm0)
	__(testq %imm0,%imm0)
	__(js 9f)		/* 2-digit bignum must be non-negative */
	__(repret)
3:	__(movq misc_data_offset(%arg_z),%imm0)
	__(cmpl $0,misc_data_offset+8(%arg_z))	/* top digit must be zero */
	__(jne 9f)
	__(repret)
9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_u64))
_endsubp(getu64)
+
/* Return the signed 64-bit value of %arg_z in %imm0, or signal  */
/* error_object_not_s64.  Accepts a fixnum or a 2-digit bignum.   */
_spentry(gets64)
	__(movq %arg_z,%imm0)
	__(sarq $fixnumshift,%imm0)	/* speculatively unbox as fixnum */
	__(testb $fixnummask,%arg_z_b)
	__(je 8f)
1:	__(movb %arg_z_b,%imm0_b)
	__(andb $tagmask,%imm0_b)
	__(cmpb $tag_misc,%imm0_b)
	__(jne 9f)
	__(movb misc_subtag_offset(%arg_z),%imm0_b)
	__(cmpb $subtag_bignum,%imm0_b)
	__(jne 9f)
	__(movq misc_header_offset(%arg_z),%imm0)
	__(cmpq $two_digit_bignum_header,%imm0)
	/* load before the branch: MOV doesn't disturb the CMP's flags */
	__(movq misc_data_offset(%arg_z),%imm0)
	__(jne 9f)
8:	__(repret)
9:	__(uuo_error_reg_not_type(Rarg_z,error_object_not_s64))
_endsubp(gets64)
+
/* Box the unsigned 64-bit value in %imm0 into %arg_z, as a fixnum  */
/* if it fits, else as a 2- or 3-digit bignum (3 digits when the top  */
/* bit is set, so the extra zero digit keeps the value positive).  */
/* Clobbers %imm1 and %mm0.   */
_spentry(makeu64)
	__(movq %imm0,%imm1)
	__(shlq $fixnumshift+1,%imm1)
	__(movq %imm1,%arg_z)	/* Tagged as a fixnum, 2x    */
	__(shrq $fixnumshift+1,%imm1)	/* recover the bits that survived */
	__(shrq %arg_z)		/* halve: now a properly boxed fixnum */
	__(cmpq %imm0,%imm1)
	__(je 9f)		/* no bits lost: fixnum result */
	__(testq %imm0,%imm0)
	__(movd %imm0,%mm0)	/* stash value across the allocation */
	__(js 3f)		/* top bit set: need an extra zero digit */
	/* Make a 2-digit bignum.   */
	__(movl $two_digit_bignum_header,%imm0_l)
	__(movl $aligned_bignum_size(2),%imm1_l)
	__(Misc_Alloc(%arg_z))
	__(movq %mm0,misc_data_offset(%arg_z))
	__(ret)
3:	__(movl $three_digit_bignum_header,%imm0_l)
	__(movl $aligned_bignum_size(3),%imm1_l)
	__(Misc_Alloc(%arg_z))
	__(movq %mm0,misc_data_offset(%arg_z))
9:	__(repret)
_endsubp(makeu64)
+
/* on entry: arg_z = symbol.  On exit, arg_z = value (possibly  */
/* unbound_marker), arg_y = symbol.  Returns the thread-local  */
/* binding from this thread's TLB if the symbol has an active one,  */
/* otherwise the symbol's global vcell.   */
_spentry(specref)
	__(movq symbol.binding_index(%arg_z),%imm0)
	__(cmp rcontext(tcr.tlb_limit),%imm0)
	__(movq rcontext(tcr.tlb_pointer),%imm1)
	__(movq %arg_z,%arg_y)
	__(jae 7f)		/* index beyond this thread's TLB */
	__(movq (%imm1,%imm0),%arg_z)
	__(cmpb $no_thread_local_binding_marker,%arg_z_b)
	__(jne 8f)
7:	__(movq symbol.vcell(%arg_y),%arg_z)	/* global value */
8:	__(repret)		
_endsubp(specref)
+
/* arg_y = special symbol, arg_z = new value.  Store into the  */
/* thread-local binding if one is active, else fall into _SPgvset  */
/* to set the symbol's global vcell (with write-barrier handling).   */
_spentry(specset)
	__(movq symbol.binding_index(%arg_y),%imm0)
	__(cmp rcontext(tcr.tlb_limit),%imm0)
	__(movq rcontext(tcr.tlb_pointer),%imm1)
	__(jae 1f)		/* index beyond this thread's TLB */
	__(movq (%imm1,%imm0),%arg_x)
	__(cmpb $no_thread_local_binding_marker,%arg_x_b)
	__(je 1f)
	__(movq %arg_z,(%imm1,%imm0))	/* store thread-locally */
	__(ret)
1:	__(lea fulltag_misc-fulltag_symbol(%arg_y),%arg_x)	/* symbol as a misc object */
	__(movq $1<<fixnumshift,%arg_y)	/* fixnum 1: presumably the vcell slot index — confirm */
	__(jmp _SPgvset)
_endsubp(specset)
+
/* Like _SPspecref, but signals an unbound-variable error if the  */
/* resulting value is the unbound marker.  On exit, arg_y = symbol,  */
/* arg_z = value.   */
_spentry(specrefcheck)
	__(movq symbol.binding_index(%arg_z),%imm0)
	__(cmp rcontext(tcr.tlb_limit),%imm0)
	__(movq rcontext(tcr.tlb_pointer),%imm1)
	__(movq %arg_z,%arg_y)
	__(jae 7f)		/* index beyond this thread's TLB */
	__(movq (%imm1,%imm0),%arg_z)
	__(cmpb $no_thread_local_binding_marker,%arg_z_b)
	__(cmoveq symbol.vcell(%arg_y),%arg_z)	/* no local binding: use global */
	__(cmpb $unbound_marker,%arg_z_b)
	__(je 9f)
8:      __(repret)
7:      __(cmpb $unbound_marker,symbol.vcell(%arg_y))
        __(movq symbol.vcell(%arg_y),%arg_z)
        __(je 9f)
        __(repret)
9:      __(uuo_error_reg_unbound(Rarg_y))
_endsubp(specrefcheck)
+
/* No code: apparently unused on x8664.  NOTE(review): there is no  */
/* RET here, so actually jumping to this entry would fall through  */
/* into the next one — confirm nothing reaches it.   */
_spentry(restoreintlevel)
_endsubp(restoreintlevel)
+
/* Unused on x8664 (a signed 32-bit value always fits in a fixnum);  */
/* HLT makes any stray call fault immediately.   */
_spentry(makes32)
	__(hlt)
_endsubp(makes32)
+
/* Unused on x8664 (an unsigned 32-bit value always fits in a  */
/* fixnum); HLT makes any stray call fault immediately.   */
_spentry(makeu32)
	__(hlt)
_endsubp(makeu32)
+
/* Unused on x8664; HLT makes any stray call fault immediately.   */
_spentry(gets32)
	__(hlt)
_endsubp(gets32)
+
/* Unused on x8664; HLT makes any stray call fault immediately.   */
_spentry(getu32)
	__(hlt)
_endsubp(getu32)
+
+
/* No code: apparently unused on x8664.  NOTE(review): no RET, so a  */
/* jump here would fall through — confirm nothing reaches it.   */
_spentry(mvpasssym)
_endsubp(mvpasssym)
+
+
/* Undo the most recent special binding: restore the saved value to  */
/* the symbol's TLB slot and pop the binding record off tcr.db_link.   */
_spentry(unbind)
	__(movq rcontext(tcr.db_link),%imm1)
	__(movq rcontext(tcr.tlb_pointer),%arg_x)
	__(movq binding.sym(%imm1),%temp1)	/* used as a byte offset into the TLB */
	__(movq binding.val(%imm1),%arg_y)	/* saved (shadowed) value */
	__(movq binding.link(%imm1),%imm1)
	__(movq %arg_y,(%arg_x,%temp1))
	__(movq %imm1,rcontext(tcr.db_link))
	__(ret)	
_endsubp(unbind)
+
/* Undo the %imm0 (raw count, assumed > 0) most recent special  */
/* bindings, restoring each saved value to its TLB slot.   */
_spentry(unbind_n)
	__(movq rcontext(tcr.db_link),%imm1)
	__(movq rcontext(tcr.tlb_pointer),%arg_x)
1:		
	__(movq binding.sym(%imm1),%temp1)	/* TLB byte offset */
	__(movq binding.val(%imm1),%arg_y)
	__(movq binding.link(%imm1),%imm1)
	__(movq %arg_y,(%arg_x,%temp1))
	__(subq $1,%imm0)
	__(jne 1b)
	__(movq %imm1,rcontext(tcr.db_link))
	__(ret)	
_endsubp(unbind_n)
+
/* Undo special bindings until tcr.db_link reaches the target link  */
/* value in %imm0 (assumed to be a proper ancestor of the current  */
/* db_link, so the loop terminates).   */
_spentry(unbind_to)
	__(movq rcontext(tcr.db_link),%imm1)
	__(movq rcontext(tcr.tlb_pointer),%arg_x)
1:		
	__(movq binding.sym(%imm1),%temp1)	/* TLB byte offset */
	__(movq binding.val(%imm1),%arg_y)
	__(movq binding.link(%imm1),%imm1)
	__(movq %arg_y,(%arg_x,%temp1))
	__(cmpq %imm1,%imm0)
	__(jne 1b)
	__(movq %imm1,rcontext(tcr.db_link))
	__(ret)	
_endsubp(unbind_to)
+
+
/* Bind CCL::*INTERRUPT-LEVEL* to 0.  If its value had been negative, check   */
/* for pending interrupts after doing so.  Pushes a binding record  */
/* (value, TLB index, old db_link) on the value stack and returns  */
/* via %ra0.   */
	
_spentry(bind_interrupt_level_0)
	__(movq rcontext(tcr.tlb_pointer),%temp1)
	__(cmpq $0,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))	/* sign of old level; PUSH/MOV preserve flags */
	__(push INTERRUPT_LEVEL_BINDING_INDEX(%temp1))	/* binding record: saved value, */
	__(push $INTERRUPT_LEVEL_BINDING_INDEX)		/* TLB index, */
	__(push rcontext(tcr.db_link))			/* previous link */
	__(movq %rsp,rcontext(tcr.db_link))
	__(movq $0,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
	__(js 1f)
0:	__(jmp *%ra0)
	/* Interrupt level was negative; interrupt may be pending   */
1:	__(check_pending_enabled_interrupt(2f))
2:	__(jmp *%ra0)
_endsubp(bind_interrupt_level_0)
+	
+
/* Bind CCL::*INTERRUPT-LEVEL* to the fixnum -1.  (This has the effect  */
/* of disabling interrupts.)  Pushes a binding record as above and  */
/* returns via %ra0.   */

_spentry(bind_interrupt_level_m1)
	__(movq rcontext(tcr.tlb_pointer),%temp1)
	__(push INTERRUPT_LEVEL_BINDING_INDEX(%temp1))	/* saved value */
	__(push $INTERRUPT_LEVEL_BINDING_INDEX)		/* TLB index */
	__(push rcontext(tcr.db_link))			/* previous link */
	__(movq %rsp,rcontext(tcr.db_link))
	__(movq $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
	__(jmp *%ra0)
_endsubp(bind_interrupt_level_m1)
+
/* Bind CCL::*INTERRUPT-LEVEL* to the value in arg_z.  If that value's 0,  */
/* do what _SPbind_interrupt_level_0 does (including the pending-  */
/* interrupt check).  Returns via %ra0.   */
_spentry(bind_interrupt_level)
	__(testq %arg_z,%arg_z)
	__(movq rcontext(tcr.tlb_pointer),%temp1)
	__(jz _SPbind_interrupt_level_0)
	__(push INTERRUPT_LEVEL_BINDING_INDEX(%temp1))	/* saved value */
	__(push $INTERRUPT_LEVEL_BINDING_INDEX)		/* TLB index */
	__(push rcontext(tcr.db_link))			/* previous link */
	__(movq %rsp,rcontext(tcr.db_link))
	__(movq %arg_z,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
	__(jmp *%ra0)
_endsubp(bind_interrupt_level)
+
/* Unbind CCL::*INTERRUPT-LEVEL*.  If the value changes from negative to  */
/* non-negative, check for pending interrupts.  If a suspend request  */
/* was deferred while interrupts were disabled, honor it here before  */
/* re-enabling (see label 5).   */
	
_spentry(unbind_interrupt_level)
        __(btq $TCR_FLAG_BIT_PENDING_SUSPEND,rcontext(tcr.flags))
	__(movq rcontext(tcr.db_link),%imm1)
	__(movq rcontext(tcr.tlb_pointer),%arg_x)
	__(movq INTERRUPT_LEVEL_BINDING_INDEX(%arg_x),%imm0)	/* current level */
        __(jc 5f)		/* a suspend request is pending */
0:      __(testq %imm0,%imm0)
	__(movq binding.val(%imm1),%temp0)	/* value being restored */
	__(movq binding.link(%imm1),%imm1)
	__(movq %temp0,INTERRUPT_LEVEL_BINDING_INDEX(%arg_x))
 	__(movq %imm1,rcontext(tcr.db_link))
	__(js 3f)		/* old level was negative */
2:	__(repret)
3:	__(testq %temp0,%temp0)
	__(js 2b)		/* restored level still negative: nothing to do */
	__(check_pending_enabled_interrupt(4f))
4:	__(repret)
5:       /* Missed a suspend request; force suspend now if we're restoring
          interrupt level to -1 or greater */
        __(cmpq $-2<<fixnumshift,%imm0)
        __(jne 0b)		/* only the level -2 case defers suspends */
	__(movq binding.val(%imm1),%temp0)
        __(cmpq %imm0,%temp0)
        __(je 0b)		/* restoring to -2 as well: keep deferring */
        __(movq $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%arg_x))
        __(suspend_now())
        __(jmp 0b)
_endsubp(unbind_interrupt_level)
+
+	
/* Undo the special bindings made by PROGV.  The binding count (a  */
/* fixnum) is in the TSP frame beneath the .SPnthrowXXX values  */
/* frame; unbind that many if non-zero.   */
_spentry(progvrestore)
	__(movq rcontext(tcr.save_tsp),%imm0)
	__(movq tsp_frame.backlink(%imm0),%imm0) /* ignore .SPnthrowXXX values frame   */
	__(movq tsp_frame.data_offset(%imm0),%imm0)
	__(shrq $fixnumshift,%imm0)	/* unbox count; sets ZF */
	__(jne _SPunbind_n)
	__(repret)
_endsubp(progvrestore)
+	
+
/* %arg_z <- %arg_y + %arg_z.  Do the fixnum case - including overflow -  */
/* inline.  Call out otherwise.   */
_spentry(builtin_plus)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums iff ORed tag bits are 0 */
	__(jne 1f)
	__(addq %arg_y,%arg_z)
	__(jo C(fix_one_bit_overflow))	/* sum overflowed by one bit: box a bignum */
	__(repret)
1:	__(jump_builtin(_builtin_plus,2))
_endsubp(builtin_plus)
+	
+
/* %arg_z <- %arg_y - %arg_z.  (The XCHG below makes the subtraction  */
/* compute first-arg minus second-arg.)  Do the fixnum case -  */
/* including overflow - inline.  Call out otherwise.   */
_spentry(builtin_minus)			
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(xchgq %arg_y,%arg_z)
	__(subq %arg_y,%arg_z)
	__(jo C(fix_one_bit_overflow))
	__(repret)
1:	__(jump_builtin(_builtin_minus,2))
_endsubp(builtin_minus)
+
/* %arg_z <- %arg_z * %arg_y.  Do the fixnum case - including overflow -  */
/* inline.  Call out otherwise.   */
_spentry(builtin_times)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 2f)
	__(unbox_fixnum(%arg_z,%imm0))
	/* 128-bit fixnum result in %imm1:%imm0. Overflow set if %imm1  */
	/* is significant   */
	__(imul %arg_y)		/* unboxed z * boxed y: product is boxed */
	__(jo 1f)
	__(mov %imm0,%arg_z)
	__(ret)
1:	__(unbox_fixnum(%arg_z,%imm0))	/* overflowed: redo fully unboxed */
	__(unbox_fixnum(%arg_y,%imm1))
	__(imul %imm1)
	__(jmp C(makes128))	/* box the 128-bit product */
2:	__(jump_builtin(_builtin_times,2))
_endsubp(builtin_times)
+
/* %arg_z <- (/ %arg_y %arg_z).  No inline fast path: always call  */
/* out to the lisp builtin.   */
_spentry(builtin_div)
	__(jump_builtin(_builtin_div,2))
_endsubp(builtin_div)
+
/* %arg_z <- (= %arg_y %arg_z).  Fixnum comparison inline; call out  */
/* for other numeric types.   */
_spentry(builtin_eq)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(rcmpq(%arg_z,%arg_y))
	__(condition_to_boolean(e,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_eq,2))
_endsubp(builtin_eq)
+	
/* %arg_z <- (/= %arg_y %arg_z).  Fixnum comparison inline; call out  */
/* for other numeric types.   */
_spentry(builtin_ne)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(rcmpq(%arg_z,%arg_y))
	__(condition_to_boolean(ne,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_ne,2))
_endsubp(builtin_ne)
+	
/* %arg_z <- (> %arg_y %arg_z).  Fixnum comparison inline; call out  */
/* for other numeric types.   */
_spentry(builtin_gt)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(rcmpq(%arg_y,%arg_z))
	__(condition_to_boolean(g,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_gt,2))
_endsubp(builtin_gt)
+
/* %arg_z <- (>= %arg_y %arg_z).  Fixnum comparison inline; call out  */
/* for other numeric types.   */
_spentry(builtin_ge)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(rcmpq(%arg_y,%arg_z))
	__(condition_to_boolean(ge,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_ge,2))
_endsubp(builtin_ge)
+	
/* %arg_z <- (< %arg_y %arg_z).  Fixnum comparison inline; call out  */
/* for other numeric types.   */
_spentry(builtin_lt)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(rcmpq(%arg_y,%arg_z))
	__(condition_to_boolean(l,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_lt,2))
_endsubp(builtin_lt)
+
/* %arg_z <- (<= %arg_y %arg_z).  Fixnum comparison inline; call out  */
/* for other numeric types.   */
_spentry(builtin_le)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(rcmpq(%arg_y,%arg_z))
	__(condition_to_boolean(le,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_le,2))
_endsubp(builtin_le)
+
/* %arg_z <- (EQL %arg_y %arg_z).  EQ objects are trivially EQL;  */
/* otherwise only two misc-tagged objects with the same subtag can  */
/* be EQL, and those are compared by the lisp builtin.   */
_spentry(builtin_eql)
	__(cmpq %arg_y,%arg_z)
	__(je 1f)
	/* Not EQ.  Could only possibly be EQL if both are tag-misc  */
	/* and both have the same subtag   */
	__(extract_lisptag(%arg_y,%imm0))
	__(extract_lisptag(%arg_z,%imm1))
	__(cmpb $tag_misc,%imm0_b)
	__(jne 2f)
	__(cmpb %imm0_b,%imm1_b)
	__(jne 2f)
	__(extract_subtag(%arg_y,%imm0_b))
	__(extract_subtag(%arg_z,%imm1_b))
	__(cmpb %imm0_b,%imm1_b)
	__(jne 2f)
	__(jump_builtin(_builtin_eql,2))	/* same subtag: compare contents */
1:	__(movl $t_value,%arg_z_l)
	__(ret)
2:	__(movl $nil_value,%arg_z_l)
	__(ret)	
_endsubp(builtin_eql)
+
/* %arg_z <- (LENGTH %arg_z): inline for simple vectors, vector  */
/* headers, and lists.  The list loop counts with a boxed count in  */
/* %imm2 and uses a fast/slow pointer pair so dotted or circular  */
/* lists fall out to the lisp builtin (which signals the error).   */
_spentry(builtin_length)
	__(extract_lisptag(%arg_z,%imm0))
	__(cmpb $tag_list,%imm0_b)
	__(jz 2f)
	__(cmpb $tag_misc,%imm0_b)
	__(jnz 8f)
	__(extract_subtag(%arg_z,%imm0_b))
	__(rcmpb(%imm0_b,$min_vector_subtag))
	__(jb 8f)
	__(je 1f)
	/* (simple-array * (*))   */
	__(movq %arg_z,%arg_y)
	__(vector_length(%arg_y,%arg_z))
	__(ret)
1:	/* vector header   */
	__(movq vectorH.logsize(%arg_z),%arg_z)
	__(ret)
2:	/* list.  Maybe null, maybe dotted or circular.   */
	__(movq $-fixnumone,%imm2)	/* boxed count, pre-incremented below */
	__(movq %arg_z,%temp0)	/* fast pointer   */
	__(movq %arg_z,%temp1)  /* slow pointer   */
3:	__(extract_lisptag(%temp0,%imm0))	
	__(compare_reg_to_nil(%temp0))
	__(leaq fixnumone(%imm2),%imm2)	/* count += 1; LEA leaves flags alone */
	__(je 9f)
	__(cmpb $tag_list,%imm0_b)
	__(jne 8f)		/* improper list: let the builtin complain */
	__(extract_lisptag(%temp1,%imm1))
	__(testb $fixnumone,%imm2_b)	/* advance slow pointer every other step */
	__(_cdr(%temp0,%temp0))
	__(je 3b)
	__(cmpb $tag_list,%imm1_b)
	__(jne 8f)
	__(_cdr(%temp1,%temp1))
	__(cmpq %temp0,%temp1)	/* pointers met: circular list */
	__(jne 3b)
8:	
	__(jump_builtin(_builtin_length,1))
9:	
	__(movq %imm2,%arg_z)
	__(ret)		
_endsubp(builtin_length)
+
+	
/* Sequence-type dispatch on %arg_z: return T for a list, NIL for a  */
/* vector (misc-tagged with subtag >= min_vector_subtag), and call  */
/* out for anything else.   */
_spentry(builtin_seqtype)
	__(extract_lisptag(%arg_z,%imm0))
	__(cmpb $tag_list,%imm0_b)
	__(jz 1f)
	__(cmpb $tag_misc,%imm0_b)
	__(jne 2f)
	__(movb misc_subtag_offset(%arg_z),%imm0_b)
	__(rcmpb(%imm0_b,$min_vector_subtag))
	__(jb 2f)
	__(movl $nil_value,%arg_z_l)	/* vector */
	__(ret)
1:	__(movl $t_value,%arg_z_l)	/* list */
	__(ret)
2:	
	__(jump_builtin(_builtin_seqtype,1))
_endsubp(builtin_seqtype)
+
/* (ASSQ %arg_y %arg_z): return the first pair in alist %arg_z whose  */
/* CAR is EQ to %arg_y, or NIL.  NIL elements are skipped; any other  */
/* non-cons element, or a non-list tail, signals a not-list error.   */
_spentry(builtin_assq)
	__(cmpb $fulltag_nil,%arg_z_b)
	__(jz 5f)
1:	__(movb $tagmask,%imm0_b)
	__(andb %arg_z_b,%imm0_b)
	__(cmpb $tag_list,%imm0_b)
	__(jnz 2f)
	__(_car(%arg_z,%arg_x))	/* current element */
	__(_cdr(%arg_z,%arg_z))
	__(cmpb $fulltag_nil,%arg_x_b)
	__(jz 4f)		/* skip NIL elements */
	__(movb $tagmask,%imm0_b)
	__(andb %arg_x_b,%imm0_b)
	__(cmpb $tag_list,%imm0_b)
	__(jnz 3f)
	__(_car(%arg_x,%temp0))
	__(cmpq %temp0,%arg_y)
	__(jnz 4f)
	__(movq %arg_x,%arg_z)	/* found: return the pair */
	__(ret)
4:	__(cmpb $fulltag_nil,%arg_z_b)
5:	__(jnz 1b)
	__(repret)		/* exhausted: return NIL (in %arg_z) */
2:      __(uuo_error_reg_not_list(Rarg_z))
3:      __(uuo_error_reg_not_list(Rarg_x))        
_endsubp(builtin_assq)	
+
/* (MEMQ %arg_y %arg_z): return the first tail of list %arg_z whose  */
/* CAR is EQ to %arg_y, or NIL.  A non-list tail signals an error.   */
_spentry(builtin_memq)
	__(cmpb $fulltag_nil,%arg_z_b)
	__(jmp 3f)
1:	__(movb $tagmask,%imm0_b)
	__(andb %arg_z_b,%imm0_b)
	__(cmpb $tag_list,%imm0_b)
	__(jnz 2f)
	__(_car(%arg_z,%arg_x))
	__(_cdr(%arg_z,%temp0))
	__(cmpq %arg_x,%arg_y)
	__(jz 4f)		/* found: current tail is in %arg_z */
	__(cmpb $fulltag_nil,%temp0_b)
	__(movq %temp0,%arg_z)
3:	__(jnz 1b)
4:	__(repret)				
2:      __(uuo_error_reg_not_list(Rarg_z))
_endsubp(builtin_memq)
+
/* Number of significant payload bits in a fixnum: bit indices at or  */
/* above this see only the (replicated) sign bit in builtin_logbitp.   */
        __ifdef(`X8664')
logbitp_max_bit = 61
        __else
logbitp_max_bit = 30
        __endif
+	
/* (LOGBITP %arg_y %arg_z) for fixnum index and fixnum integer: test  */
/* the bit directly in the boxed representation, clamping out-of-  */
/* range indices to the sign bit.  Negative indices and non-fixnums  */
/* call out.   */
_spentry(builtin_logbitp)
	__(movb %arg_z_b,%imm0_b)
	__(orb %arg_y_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jnz 1f)
	__(unbox_fixnum(%arg_y,%imm0))	/* bit index; sets SF */
        __(movl $logbitp_max_bit-1+fixnumshift,%imm1_l)
        __(js 1f)               /* bit number negative */
	__(addb $fixnumshift,%imm0_b)	/* index into the boxed word */
	__(cmpq $logbitp_max_bit<<fixnumshift,%arg_y)
	__(cmovael %imm1_l,%imm0_l)	/* clamp to the sign bit */
	__(bt %imm0,%arg_z)
	__(condition_to_boolean(b,%imm0,%arg_z))
	__(ret)
1:	__(jump_builtin(_builtin_logbitp,2))
_endsubp(builtin_logbitp)
+
/* %arg_z <- (LOGIOR %arg_y %arg_z).  Fixnums can be ORed while  */
/* boxed (tag bits OR to zero); call out otherwise.   */
_spentry(builtin_logior)
	__(movb %arg_y_b,%imm0_b)
	__(orb %arg_z_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(orq %arg_y,%arg_z)
	__(ret)
1:	
	__(jump_builtin(_builtin_logior,2))
		
_endsubp(builtin_logior)
+
/* %arg_z <- (LOGAND %arg_y %arg_z).  Fixnums can be ANDed while  */
/* boxed (tags are zero); call out otherwise.   */
_spentry(builtin_logand)
	__(movb %arg_y_b,%imm0_b)
	__(orb %arg_z_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(andq %arg_y,%arg_z)
	__(ret)
1:		
	__(jump_builtin(_builtin_logand,2))
_endsubp(builtin_logand)
+
/* %arg_z <- (- %arg_z).  Fixnum case, including the one-bit  */
/* overflow of (- most-negative-fixnum), inline; call out otherwise.   */
_spentry(builtin_negate)
	__(testb $fixnummask,%arg_z_b)
	__(jne 1f)
	__(negq %arg_z)
	__(jo C(fix_one_bit_overflow))
	__(repret)
1:		
	__(jump_builtin(_builtin_negate,1))	
_endsubp(builtin_negate)
+
/* %arg_z <- (LOGXOR %arg_y %arg_z).  Fixnums can be XORed while  */
/* boxed (tags cancel); call out otherwise.   */
_spentry(builtin_logxor)
	__(movb %arg_y_b,%imm0_b)
	__(orb %arg_z_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 1f)
	__(xorq %arg_y,%arg_z)
	__(ret)
1:		
	__(jump_builtin(_builtin_logxor,2))
_endsubp(builtin_logxor)
+
+
/* (ASET %arg_x %arg_y %arg_z) for one-dimensional arrays: if %arg_x  */
/* is a vector (typecode above min_vector_subtag), dispatch to  */
/* _SPsubtag_misc_set with the boxed typecode in %temp0 (presumably  */
/* for the callee's benefit — confirm); else call out.   */
_spentry(builtin_aset1)
	__(extract_typecode(%arg_x,%imm0))
	__(box_fixnum(%imm0,%temp0))
	__(cmpb $min_vector_subtag,%imm0_b)
	__(ja _SPsubtag_misc_set)
	__(jump_builtin(_builtin_aset1,3))
_endsubp(builtin_aset1)
+
+
/* %arg_z <- (ASH %arg_y %arg_z) for fixnum value and fixnum shift  */
/* count.  Right shifts and zero shifts stay fixnums; left shifts  */
/* build a 128-bit result in %imm1:%imm0 and box it via makes128.  */
/* Anything else calls out.   */
_spentry(builtin_ash)
	__(movb %arg_y_b,%imm0_b)
	__(orb %arg_z_b,%imm0_b)
	__(testb $fixnummask,%imm0_b)	/* both fixnums ? */
	__(jne 9f)
	__(unbox_fixnum(%arg_y,%imm1))	/* value */
	__(unbox_fixnum(%arg_z,%imm0))	/* shift count */
	/* Z flag set if zero ASH shift count   */
	__(jnz 1f)
	__(movq %arg_y,%arg_z)	/* shift by 0   */
	__(ret)
1:	__(jns 3f)
	__(rcmpq(%imm0,$-63))
	__(jg 2f)
	__(sar $63,%imm1)	/* shifted out entirely: result is 0 or -1 */
	__(box_fixnum(%imm1,%arg_z))
	__(ret)
2:	/* Right-shift by small fixnum   */
	__(negb %imm0_b)
	__(movzbl %imm0_b,%ecx)
	__(sar %cl,%imm1)
	__(box_fixnum(%imm1,%arg_z))
	__(ret)
3:      /* Left shift by fixnum. We can't shift by more than 63 bits, though  */
	/* shifting by 64 is actually easy.   */
	__(rcmpq(%imm0,$64))
	__(jg 9f)
	__(jne 4f)
	/* left-shift by 64-bits exactly   */
	__(xorl %imm0_l,%imm0_l)	/* low 64 bits of the result are zero */
	__(jmp C(makes128))
4:	/* left-shift by 1..63 bits.  Safe to move shift count to %rcx/%cl   */
	__(movzbl %imm0_b,%ecx)	 /* zero-extending mov   */
	__(movq %imm1,%imm0)
	__(sarq $63,%imm1)	/* sign-extend value into the high half */
	__(js 5f)
	__(shld %cl,%imm0,%imm1)
	__(shl %cl,%imm0)
	__(jmp C(makes128))
	/* NOTE(review): the branch below is identical to the one above;  */
	/* the js/5f split appears vestigial.   */
5:	__(shld %cl,%imm0,%imm1)
	__(shl %cl,%imm0)
	__(jmp C(makes128))
9:	
	__(jump_builtin(_builtin_ash,2))
_endsubp(builtin_ash)
+
/* (AREF %arg_y %arg_z) for one-dimensional arrays: if %arg_y is a  */
/* vector (typecode above min_vector_subtag), dispatch to  */
/* _SPsubtag_misc_ref with the boxed typecode in %arg_x (loaded  */
/* without disturbing the CMP's flags); else call out.   */
_spentry(builtin_aref1)
	__(extract_typecode(%arg_y,%imm0))
	__(cmpb $min_vector_subtag,%imm0_b)
	__(box_fixnum_no_flags(%imm0,%arg_x))
	__(ja _SPsubtag_misc_ref)
	__(jump_builtin(_builtin_aref1,2))
_endsubp(builtin_aref1)
+
+/* Arg_z is either a MACPTR containing the function address or a boxed fixnum.  */
+/*   %imm0.b (aka %al) contains the number (0-7) of args passed in FP regs.  */
+/*   On entry, the foreign stack contains a frame containing at least 8 words:  */
+
+/*   * -> aligned on 16-byte boundary  */
+/*  *backlink	<-	foreign %rsp		  */
+/*   unused  */
+/*   scalar arg 0		passed in %rdi  */
+/*   scalar arg 1         passed in %rsi  */
+/*   scalar arg 2		passed in %rdx  */
+/*   scalar arg 3		passed in %rcx  */
+/*   scalar arg 4		passed in %r8  */
+/*   scalar arg 5		passed in %r9  */
+/*  *address of first memory arg  */
+/*   ...  */
+/*   possible scratch space  */
+/*  *previous %rsp value  */
+
+/*   Any floating-point args will have been loaded into %xmm0-%xmm7 by the caller.  */
+/*   When this returns, the foreign %rsp will contain its previous value, and  */
+/*   the function result will be in %rax (and possibly %rdx) or %xmm0 (+ %xmm1).  */
+
/* Call a foreign function; see the frame-layout comment above.  */
/* Saves the lisp registers, records the lisp stack/frame pointers  */
/* in the TCR, switches to the foreign stack and MXCSR, pops the  */
/* pre-marshalled argument words into the C argument registers,  */
/* calls the entrypoint in %r11, then restores lisp context, clears  */
/* the node registers, and checks for pending interrupts.  On  */
/* Darwin it also participates in ObjC 2.0 exception unwinding via  */
/* the LSDA emitted below.   */
_spentry(ffcall)
LocalLabelPrefix`'ffcall:
        /* Unbox %arg_z.  It's either a fixnum or macptr (or bignum) ;
          if not a fixnum, get the first word */
        __(unbox_fixnum(%arg_z,%imm1))
	__(testb $fixnummask,%arg_z_b)
        __(je 0f)
        __(movq macptr.address(%arg_z),%imm1)
0:              
	/* Save lisp registers   */
        __(push %rbp)
	__(movq %rsp,%rbp)
	__(push %temp0)
	__(push %temp1)
	__(push %temp2)
	__(push %arg_x)
	__(push %arg_y)
	__(push %arg_z)
	__(push %fn)
	__ifndef(`TCR_IN_GPR')
	__(push %save3)  
	__endif
	__(push %save2)
	__(push %save1)
	__(push %save0)       /* 10 or 11 registers pushed after %rbp */
	__(movq %rsp,rcontext(tcr.save_vsp))
        __(movq %rbp,rcontext(tcr.save_rbp))
	__(movq $TCR_STATE_FOREIGN,rcontext(tcr.valence))
        __(movq rcontext(tcr.foreign_sp),%rsp)	/* now on the foreign stack */
	__(stmxcsr rcontext(tcr.lisp_mxcsr))
	__(emms)
	__(ldmxcsr rcontext(tcr.foreign_mxcsr))
	__(movq (%rsp),%rbp)
        __ifdef(`DARWIN_GS_HACK')
         /* At this point, %imm1=%rdx is live (contains
            the entrypoint) and %imm0.b=%al contains
            info about xmm register arguments; the lisp registers are
            all saved, and the foreign arguments are
            on the foreign stack (about to be popped
            off).  Save the linear TCR address in %save0/%r15
            so that we can restore it later, and preserve
            the entrypoint somewhere where C won't bash it.
            Note that dereferencing the entrypoint from
            foreign code has never been safe (unless it's
            a fixnum */
         __(save_tcr_linear(%csave0))
         __(movq %imm1,%csave1)
         __(movq %imm0,%csave2)
         __(set_foreign_gs_base())
         __(movq %csave1,%imm1)
         __(movq %csave2,%imm0)
        __endif
	__ifdef(`TCR_IN_GPR')
	/* Preserve TCR pointer */
	__(movq %rcontext_reg, %csave0)
	__endif
LocalLabelPrefix`'ffcall_setup: 
	__(addq $2*node_size,%rsp)	/* skip backlink and unused word */
        __(movq %imm1,%r11)
        __ifdef(`WINDOWS')
         /* Leave 0x20 bytes of register spill area on stack */
         __(movq (%rsp),%carg0)
         __(movq 8(%rsp),%carg1)
         __(movq 16(%rsp),%carg2)
         __(movq 24(%rsp),%carg3)
        __else
	 __(pop %carg0)
	 __(pop %carg1)
	 __(pop %carg2)
	 __(pop %carg3)
	 __(pop %carg4)
	 __(pop %carg5)
	__endif
LocalLabelPrefix`'ffcall_setup_end: 
LocalLabelPrefix`'ffcall_call:
	__(call *%r11)
LocalLabelPrefix`'ffcall_call_end:               
	__ifdef(`WINDOWS')
	__(add $0x20,%rsp)
	__endif
	__(movq %rbp,%rsp)
        __ifdef(`DARWIN_GS_HACK')
         /* %rax/%rdx contains the return value (maybe), %csave1 still
            contains the linear tcr address.  Preserve %rax/%rdx here. */
         __(movq %rax,%csave1)
         __(movq %rdx,%csave2)
         __(set_gs_base(%csave0))
         __(movq %csave1,%rax)
         __(movq %csave2,%rdx)
        __endif
	__ifdef(`TCR_IN_GPR')
	__(movq %csave0, %rcontext_reg)
	__endif
	__(movq %rsp,rcontext(tcr.foreign_sp))
	/* Clear all node registers so the GC sees no stale pointers */
	__ifndef(`TCR_IN_GPR')
	__(clr %save3)
	__endif
	__(clr %save2)
	__(clr %save1)
	__(clr %save0)
	__(clr %arg_z)
	__(clr %arg_y)
	__(clr %arg_x)
	__(clr %temp2)
	__(clr %temp1)
	__(clr %temp0)
	__(clr %fn)
	__(pxor %fpzero,%fpzero)
        __(cmpb $0,C(bogus_fp_exceptions)(%rip))
        __(je 0f)
        __(movl %arg_x_l,rcontext(tcr.ffi_exception))
        __(jmp 1f)
0:      __(stmxcsr rcontext(tcr.ffi_exception))	/* record FP status from the call */
1:      __(movq rcontext(tcr.save_vsp),%rsp)
        __(movq rcontext(tcr.save_rbp),%rbp)
	__(movq $TCR_STATE_LISP,rcontext(tcr.valence))
	__(pop %save0)
	__(pop %save1)
	__(pop %save2)
	__ifndef(`TCR_IN_GPR')
	__(pop %save3)
	__endif
	__(pop %fn)
	__(pop %arg_z)
	__(pop %arg_y)
	__(pop %arg_x)
	__(pop %temp2)
	__(pop %temp1)
	__(ldmxcsr rcontext(tcr.lisp_mxcsr))
	__(check_pending_interrupt(%temp0))
	__(pop %temp0)
        __(leave)
	__ifdef(`DARWIN')
	__(btrq $TCR_FLAG_BIT_FOREIGN_EXCEPTION,rcontext(tcr.flags))
	__(jc 0f)
	__endif
	__(ret)
	__ifdef(`DARWIN')
0:
	/* Unboxed foreign exception (likely an NSException) in %imm0. */
	/* Box it, then signal a lisp error. */
	__(movq %imm0,%imm2)
	__(movq $macptr_header,%rax)
	__(Misc_Alloc_Fixed(%arg_z,macptr.size))
	__(movq %imm2,macptr.address(%arg_z))
	__(movq $XFOREIGNEXCEPTION,%arg_y)
	__(set_nargs(2))
	__(jmp _SPksignalerr)
	__endif
        __ifdef(`DARWIN')        
        /* Handle exceptions, for ObjC 2.0 */
LocalLabelPrefix`'ffcallLandingPad:      
        __(movq %rax,%save1)
        __(cmpq $1,%rdx)
        __(je 1f)
        __(movq %rax,%rdi)
LocalLabelPrefix`'ffcallUnwindResume:            
       	__(call *lisp_global(unwind_resume))
LocalLabelPrefix`'ffcallUnwindResume_end:         
1:      __(movq %save1,%rdi)
LocalLabelPrefix`'ffcallBeginCatch:              
        __(call *lisp_global(objc2_begin_catch))
LocalLabelPrefix`'ffcallBeginCatch_end:          
        __(movq (%rax),%save1) /* indirection is necessary because we don't provide type info in lsda */
LocalLabelPrefix`'ffcallEndCatch:                
        __(call *lisp_global(objc2_end_catch))
LocalLabelPrefix`'ffcallEndCatch_end:            
	__(ref_global(get_tcr,%rax))
	__(movq $1,%rdi)
	__(call *%rax)
	__(btsq $TCR_FLAG_BIT_FOREIGN_EXCEPTION,tcr.flags(%rax))
	__(movq %save1,%rax)
	__(jmp LocalLabelPrefix`'ffcall_call_end)
LocalLabelPrefix`'ffcall_end:   
        __endif
_endsubp(ffcall)
+
        __ifdef(`DARWIN')
	/* DWARF LSDA (language-specific data area) describing the  */
	/* call-site regions of ffcall above, so ObjC 2.0 exceptions  */
	/* unwind into ffcallLandingPad.   */
	.section __DATA,__gcc_except_tab
GCC_except_table0:
	.align 3
LLSDA1:
	.byte	0xff	/* @LPStart format (omit) */
	.byte	0x0	/* @TType format (absolute) */
	.byte	0x4d	/* uleb128 0x4d; @TType base offset */
	.byte	0x3	/* call-site format (udata4) */
	.byte	0x41	/* uleb128 0x41; Call-site table length */
	
	.long Lffcall_setup-Lffcall	/* region 0 start */
	.long Lffcall_setup_end-Lffcall_setup	/* length */
	.long	0x0	/* landing pad */
	.byte	0x0	/* uleb128 0x0; action */
        
	.long Lffcall_call-Lffcall	/* region 1 start */
	.long Lffcall_call_end-Lffcall_call	/* length */
	.long LffcallLandingPad-Lffcall	/* landing pad */
	.byte	0x1	/* uleb128 0x1; action */
        
	.long LffcallUnwindResume-Lffcall	/* region 2 start */
	.long LffcallUnwindResume_end-LffcallUnwindResume	/* length */
	.long	0x0	/* landing pad */
	.byte	0x0	/* uleb128 0x0; action */
	
	.long LffcallBeginCatch-Lffcall	/* region 3 start */
	.long LffcallBeginCatch_end-LffcallBeginCatch	/* length */
	.long 0	/* landing pad */
	.byte	0x0	/* uleb128 0x0; action */
        
	.long LffcallEndCatch-Lffcall
	.long LffcallEndCatch_end-LffcallEndCatch	/* length */
	.long	0x0	/* landing pad */
	.byte	0x0	/* uleb128 0x0; action */
	.byte	0x1	/* Action record table */
	.byte	0x0
	.align 3
	.quad	0       /* _OBJC_EHTYPE_$_NSException */
        .text
        __endif
+
/* Like _SPffcall above, but %arg_y is a macptr into which the raw  */
/* foreign return registers (%rax, %rdx, %xmm0, %xmm1) are stored  */
/* after the call.  (The only code change here vs. the original is  */
/* the _endsubp name, which previously said "ffcall_returning_  */
/* registers" and didn't match the _spentry name.)   */
_spentry(ffcall_return_registers)
LocalLabelPrefix`'ffcall_return_registers:                
        /* Unbox %arg_z.  It's either a fixnum or macptr (or bignum) ;
          if not a fixnum, get the first word */
        __(unbox_fixnum(%arg_z,%imm1))
	__(testb $fixnummask,%arg_z_b)
        __(je 0f)
        __(movq macptr.address(%arg_z),%imm1)
0:              
	/* Save lisp registers   */
        __(push %rbp)
        __(movq %rsp,%rbp)
	__(push %temp0)
	__(push %temp1)
	__(push %temp2)
	__(push %arg_x)
	__(push %arg_y)
	__(push %arg_z)
	__ifndef(`TCR_IN_GPR')
	__(push %save3)
	__endif
	__(push %save2)
	__(push %save1)
	__(push %save0)
        __(movq macptr.address(%arg_y),%csave0)  /* %rbx non-volatile */
	__(push %fn)
	__(movq %rsp,rcontext(tcr.save_vsp))
        __(movq %rbp,rcontext(tcr.save_rbp))
	__(movq $TCR_STATE_FOREIGN,rcontext(tcr.valence))
        __(movq rcontext(tcr.foreign_sp),%rsp)	/* now on the foreign stack */
	__(stmxcsr rcontext(tcr.lisp_mxcsr))
	__(emms)
	__(ldmxcsr rcontext(tcr.foreign_mxcsr))
	__(movq (%rsp),%rbp)
        __ifdef(`DARWIN_GS_HACK')
         /* At this point, %imm1=%rdx is live (contains
            the entrypoint) and %imm0.b=%al contains
            xmm argument info; the lisp registers are
            all saved, and the foreign arguments are
            on the foreign stack (about to be popped
            off).  Save the linear TCR address in %csave1/%r12
            so that we can restore it later, and preserve
            the entrypoint somewhere where C won't bash it.
            Note that dereferencing the entrypoint from
            foreign code has never been safe (unless it's
            a fixnum */
         __(save_tcr_linear(%csave1))
         __(movq %imm0,%csave2)
         __(movq %imm1,%csave3)
         __(set_foreign_gs_base())
         __(movq %csave2,%imm0)
         __(movq %csave3,%imm1)
        __endif
	__ifdef(`TCR_IN_GPR')
	/* Preserve TCR pointer */
	__(movq %rcontext_reg, %csave1)
	__endif
        __(movq %imm1,%r11)
LocalLabelPrefix`'ffcall_return_registers_setup: 
	__(addq $2*node_size,%rsp)	/* skip backlink and unused word */
	__(pop %carg0)
	__(pop %carg1)
	__(pop %carg2)
	__(pop %carg3)
	__ifdef(`WINDOWS')
	__(sub $0x20, %rsp) /* Make room for arg register spill */
	__else
	__(pop %carg4)
	__(pop %carg5)
	__endif
LocalLabelPrefix`'ffcall_return_registers_setup_end: 
LocalLabelPrefix`'ffcall_return_registers_call:
	__(call *%r11)
LocalLabelPrefix`'ffcall_return_registers_call_end:
	__ifdef(`WINDOWS')
	__(add $0x20, %rsp)
	__endif
	/* Store the raw return registers through the saved macptr */
        __(movq %rax,(%csave0))
        __(movq %rdx,8(%csave0))
        __(movsd %xmm0,16(%csave0))
        __(movsd %xmm1,24(%csave0))
	__(movq %rbp,%rsp)
        __ifdef(`DARWIN_GS_HACK')
         /* %rax/%rdx contains the return value (maybe), %save0 still
            contains the linear tcr address.  Preserve %rax/%rdx here. */
         __(set_gs_base(%csave1))
         __(movq (%csave0),%rax)
         __(movq 8(%csave0),%rdx)
         __(movsd 16(%csave0),%xmm0)
         __(movsd 24(%csave0),%xmm1)
        __endif
	__ifdef(`TCR_IN_GPR')
	__(movq %csave1, %rcontext_reg)
	__endif
	__(movq %rsp,rcontext(tcr.foreign_sp))        
	/* Clear all node registers so the GC sees no stale pointers */
	__ifndef(`TCR_IN_GPR')
	__(clr %save3)
	__endif
	__(clr %save2)
	__(clr %save1)
	__(clr %save0)
	__(clr %arg_z)
	__(clr %arg_y)
	__(clr %arg_x)
	__(clr %temp2)
	__(clr %temp1)
	__(clr %temp0)
	__(clr %fn)
	__(pxor %fpzero,%fpzero)
        __(cmpb $0,C(bogus_fp_exceptions)(%rip))
        __(je 0f)
        __(movl %arg_x_l,rcontext(tcr.ffi_exception))
        __(jmp 1f)
0:      __(stmxcsr rcontext(tcr.ffi_exception))	/* record FP status from the call */
1:      __(movq rcontext(tcr.save_vsp),%rsp)
        __(movq rcontext(tcr.save_rbp),%rbp)
	__(movq $TCR_STATE_LISP,rcontext(tcr.valence))
	__(pop %fn)
	__(pop %save0)
	__(pop %save1)
	__(pop %save2)
	__ifndef(`TCR_IN_GPR')
	__(pop %save3)
	__endif
	__(pop %arg_z)
	__(pop %arg_y)
	__(pop %arg_x)
	__(pop %temp2)
	__(pop %temp1)
	__(ldmxcsr rcontext(tcr.lisp_mxcsr))
	__(check_pending_interrupt(%temp0))
	__(pop %temp0)
        __(leave)
	__ifdef(`DARWIN')
	__(btrq $TCR_FLAG_BIT_FOREIGN_EXCEPTION,rcontext(tcr.flags))
	__(jc 0f)
	__endif
        __(ret)
	__ifdef(`DARWIN')
0:
	/* Unboxed foreign exception (likely an NSException) in %imm0. */
	/* Box it, then signal a lisp error. */
	__(movq %imm0,%imm2)
	__(movq $macptr_header,%rax)
	__(Misc_Alloc_Fixed(%arg_z,macptr.size))
	__(movq %imm2,macptr.address(%arg_z))
	__(movq $XFOREIGNEXCEPTION,%arg_y)
	__(set_nargs(2))
	__(jmp _SPksignalerr)
	__endif
        __ifdef(`DARWIN')        
        /* Handle exceptions, for ObjC 2.0 */
LocalLabelPrefix`'ffcall_return_registersLandingPad:      
        __(movq %rax,%save1)
        __(cmpq $1,%rdx)
        __(je 1f)
        __(movq %rax,%rdi)
LocalLabelPrefix`'ffcall_return_registersUnwindResume:            
       	__(call *lisp_global(unwind_resume))
LocalLabelPrefix`'ffcall_return_registersUnwindResume_end:         
1:      __(movq %save1,%rdi)
LocalLabelPrefix`'ffcall_return_registersBeginCatch:              
        __(call *lisp_global(objc2_begin_catch))
LocalLabelPrefix`'ffcall_return_registersBeginCatch_end:          
        __(movq (%rax),%save1) /* indirection is necessary because we don't provide type info in lsda */
LocalLabelPrefix`'ffcall_return_registersEndCatch:                
        __(call *lisp_global(objc2_end_catch))
LocalLabelPrefix`'ffcall_return_registersEndCatch_end:            
	__(ref_global(get_tcr,%rax))
	__(movq $1,%rdi)
	__(call *%rax)
	__(btsq $TCR_FLAG_BIT_FOREIGN_EXCEPTION,tcr.flags(%rax))
	__(movq %save1,%rax)
	__(jmp LocalLabelPrefix`'ffcall_return_registers_call_end)
LocalLabelPrefix`'ffcall_return_registers_end:   
        __endif
_endsubp(ffcall_return_registers)
+
+        __ifdef(`DARWIN') /* LSDA (ObjC 2.0 exception table) for ffcall_returning_registers */
+	.section __DATA,__gcc_except_tab
+GCC_except_table1:
+	.align 3
+LLSDA2: /* referenced as the LSDA from _SPffcall_return_registers.eh below */
+	.byte	0xff	/* @LPStart format (omit) */
+	.byte	0x0	/* @TType format (absolute) */
+	.byte	0x4d	/* uleb128 0x4d; @TType base offset */
+	.byte	0x3	/* call-site format (udata4) */
+	.byte	0x41	/* uleb128 0x41; Call-site table length */
+	
+	.long Lffcall_return_registers_setup-Lffcall_return_registers	/* region 0 start */
+	.long Lffcall_return_registers_setup_end-Lffcall_return_registers_setup	/* length */
+	.long	0x0	/* landing pad */
+	.byte	0x0	/* uleb128 0x0; action */
+        
+	.long Lffcall_return_registers_call-Lffcall_return_registers	/* region 1 start */
+	.long Lffcall_return_registers_call_end-Lffcall_return_registers_call	/* length */
+	.long Lffcall_return_registersLandingPad-Lffcall_return_registers	/* landing pad */
+	.byte	0x1	/* uleb128 0x1; action */
+        
+	.long Lffcall_return_registersUnwindResume-Lffcall_return_registers	/* region 2 start */
+	.long Lffcall_return_registersUnwindResume_end-Lffcall_return_registersUnwindResume	/* length */
+	.long	0x0	/* landing pad */
+	.byte	0x0	/* uleb128 0x0; action */
+	
+	.long Lffcall_return_registersBeginCatch-Lffcall_return_registers	/* region 3 start */
+	.long Lffcall_return_registersBeginCatch_end-Lffcall_return_registersBeginCatch	/* length */
+	.long 0	/* landing pad */
+	.byte	0x0	/* uleb128 0x0; action */
+        
+	.long Lffcall_return_registersEndCatch-Lffcall_return_registers	/* region 4 start */
+	.long Lffcall_return_registersEndCatch_end-Lffcall_return_registersEndCatch	/* length */
+	.long	0x0	/* landing pad */
+	.byte	0x0	/* uleb128 0x0; action */
+	.byte	0x1	/* Action record table */
+	.byte	0x0
+	.align 3
+	.quad	0       /* _OBJC_EHTYPE_$_NSException */
+        .text
+        __endif
+                
+_spentry(syscall)	/* %arg_z = boxed syscall number; syscall args were pushed on the foreign stack */
+	/* Save lisp registers   */
+	__(push %rbp)
+	__(movq %rsp,%rbp)
+	__(push %temp0)
+	__(push %temp1)
+	__(push %temp2)
+	__(push %arg_x)
+	__(push %arg_y)
+	__(push %arg_z)
+        __ifndef(`TCR_IN_GPR')
+	 __(push %save3)
+        __endif
+	__(push %save2)
+	__(push %save1)
+	__(push %save0)
+	__(push %fn)
+	__(movq %rsp,rcontext(tcr.save_vsp))
+        __(movq %rbp,rcontext(tcr.save_rbp))
+	__(movq $TCR_STATE_FOREIGN,rcontext(tcr.valence))
+        __(movq rcontext(tcr.foreign_sp),%rsp)	/* switch to the foreign stack */
+	__(emms)	/* exit MMX state before running foreign code */
+	__(movq (%rsp),%rbp)
+	__(addq $2*node_size,%rsp)
+        __ifdef(`TCR_IN_GPR')
+         __(movq %rcontext_reg,%csave0)	/* preserve the TCR register across the syscall */
+        __endif
+        __ifdef(`WINDOWS')
+         __(pop %carg0)
+         __(pop %carg1)
+         __(pop %carg2)
+         __(pop %carg3)
+         __(subq $0x20,%rsp)
+         __(orq $-1,%cret)	/* no raw syscalls on Windows: unconditionally return -1 */
+         __(addq $0x20,%rsp)
+        __else
+	 __(unbox_fixnum(%arg_z,%rax))	/* syscall number -> %rax */
+	 __(pop %rdi)
+	 __(pop %rsi)
+	 __(pop %rdx)
+	 __(pop %r10)		/*  syscalls take 4th param in %r10, not %rcx   */
+	 __(pop %r8)
+	 __(pop %r9)
+	 __(syscall)
+         __ifdef(`SYSCALL_SETS_CARRY_ON_ERROR')
+          __(jnc 0f)
+          __(negq %rax)	/* normalize to the return-negated-errno convention */
+0:      
+         __endif
+        __endif
+        __ifdef(`TCR_IN_GPR')
+         __(movq %csave0,%rcontext_reg)
+        __endif
+	__(movq %rbp,%rsp)
+	__(movq %rsp,rcontext(tcr.foreign_sp))
+        __ifndef(`TCR_IN_GPR')
+	 __(clr %save3)
+        __endif
+	__(clr %save2)	/* NOTE(review): node regs presumably zeroed so stale foreign bits can't look like lisp objects — confirm */
+	__(clr %save1)
+	__(clr %save0)
+	__(clr %arg_z)
+	__(clr %arg_y)
+	__(clr %arg_x)
+	__(clr %temp2)
+	__(clr %temp1)
+	__(clr %temp0)
+	__(clr %fn)
+	__(pxor %fpzero,%fpzero)
+	__(movq rcontext(tcr.save_vsp),%rsp)	/* back onto the lisp value stack */
+        __(movq rcontext(tcr.save_rbp),%rbp)
+	__(movq $TCR_STATE_LISP,rcontext(tcr.valence))
+	__(pop %fn)
+	__(pop %save0)
+	__(pop %save1)
+	__(pop %save2)
+        __ifndef(`TCR_IN_GPR')
+	 __(pop %save3)
+        __endif
+	__(pop %arg_z)
+	__(pop %arg_y)
+	__(pop %arg_x)
+	__(pop %temp2)
+	__(pop %temp1)
+	__(check_pending_interrupt(%temp0))
+	__(pop %temp0)
+        __(leave)
+	__(ret)
+_endsubp(syscall)		
+
+/* We need to reserve a frame here if (a) nothing else was already pushed and (b) */
+/*   we push something (e.g., more than 3 args in the lexpr) 	  */
+_spentry(spread_lexprz)	/* %arg_z = lexpr: first word is a fixnum byte count, args follow */
+	new_local_labels()
+	__(movq (%arg_z),%imm0)	/* %imm0 = lexpr arg count (as fixnum byte count) */
+	__(testl %nargs,%nargs) /* anything pushed by caller ? */
+        __(leaq node_size(%arg_z,%imm0),%imm1)	/* %imm1 = just past the last lexpr arg */
+        __(jne 0f)              /* yes, caller has already created frame. */
+        __(cmpw $(nargregs*node_size),%imm0_w) /* will we push anything ? */
+        __(jbe 0f)
+        __(push $reserved_frame_marker)
+        __(push $reserved_frame_marker)
+0:      __(addw %imm0_w,%nargs_w)
+        __(cmpw $(nargregs*node_size),%imm0_w)
+        __(jae 9f)
+        __(cmpw $(2*node_size),%imm0_w)
+        __(je 2f)
+        __(testw %imm0_w,%imm0_w)
+        __(jne 1f)
+        /* lexpr count was 0; vpop the args that */
+        /* were pushed by the caller */
+        __(testl %nargs,%nargs)
+        __(je local_label(all_args_popped))
+        __(pop %arg_z)
+local_label(maybe_pop_yx):              
+        __(cmpl $(1*node_size),%nargs)
+        __(je local_label(all_args_popped))
+        __(pop %arg_y)
+        __(cmpl $(2*node_size),%nargs)
+        __(je local_label(all_args_popped))
+local_label(pop_arg_x):         
+        __(pop %arg_x)
+local_label(all_args_popped):   
+        /* If all args fit in registers but some were pushed */
+        /* by the caller, discard the reserved frame that the caller */
+        /* pushed.         */
+        __(cmpw %imm0_w,%nargs_w)
+        __(je local_label(go))
+        __(cmpl $(nargregs*node_size),%nargs)
+        __(ja local_label(go))
+        __(addq $(2*node_size),%rsp)
+local_label(go):        
+        __(jmp *%ra0)        
+	/* vpush args from the lexpr until we have only */
+	/* three left, then assign them to arg_x, arg_y, */
+	/* and arg_z. */ 
+8:      __(cmpw $(4*node_size),%imm0_w)
+        __(lea -1*node_size(%imm0),%imm0)
+        __(push -node_size(%imm1))
+        __(lea -1*node_size(%imm1),%imm1)
+9:      __(jne 8b)
+        __(movq -node_size*1(%imm1),%arg_x)
+        __(movq -node_size*2(%imm1),%arg_y)
+        __(movq -node_size*3(%imm1),%arg_z)
+        __(jmp *%ra0)
+
+	/* lexpr count is two: set arg_y, arg_z from the */
+	/* lexpr, maybe vpop arg_x */
+2:      __(cmpl $(2*node_size),%nargs)
+        __(movq -node_size*1(%imm1),%arg_y)
+        __(movq -node_size*2(%imm1),%arg_z)
+        __(jne local_label(pop_arg_x))
+        __(jmp *%ra0)
+	/* lexpr count is one: set arg_z from the lexpr, */
+	/* maybe vpop arg_y, arg_x  */
+1:      __(movq -node_size*1(%imm1),%arg_z)
+        __(jmp local_label(maybe_pop_yx))
+_endsubp(spread_lexprz)
+	
+
+
+
+/* Callback index in %r11 */
+_spentry(callback)	/* enter lisp from foreign code; %r11 = callback index (see comment above) */
+	__(push %rbp)
+	__(movq %rsp,%rbp)
+	/* C scalar args   */
+	__(push %carg0)	/* -8(%rbp)   */
+	__(push %carg1)
+	__(push %carg2)
+	__(push %carg3)
+	__ifndef(`WINDOWS')
+	__(push %carg4)
+	__(push %carg5)
+	__endif
+	/* FP arg regs   */
+	__ifdef(`WINDOWS')
+	__(subq $4*8,%rsp)
+	__(movq %xmm0,3*8(%rsp))	/* -40(%rbp) */
+	__(movq %xmm1,2*8(%rsp))
+	__(movq %xmm2,1*8(%rsp))
+	__(movq %xmm3,0*8(%rsp))
+	__else
+	__(subq $8*8,%rsp)
+	__(movq %xmm0,7*8(%rsp))	/* -56(%rbp) */
+	__(movq %xmm1,6*8(%rsp))
+	__(movq %xmm2,5*8(%rsp))
+	__(movq %xmm3,4*8(%rsp))
+	__(movq %xmm4,3*8(%rsp))
+	__(movq %xmm5,2*8(%rsp))
+	__(movq %xmm6,1*8(%rsp))
+	__(movq %xmm7,0*8(%rsp))
+	__endif
+	__ifndef(`WINDOWS')
+	__endif
+	/* C NVRs   */
+	__(push %csave0)
+	__(push %csave1)
+	__(push %csave2)
+	__(push %csave3)
+	__(push %csave4)
+	__ifdef(`WINDOWS')
+	__(push %csave5)
+	__(push %csave6)
+	__endif
+	__(push %rbp)
+	__(movq %r11,%csave0)	/* callback index; read back below after the get_tcr call */
+        __ifdef(`HAVE_TLS')
+	 /* TCR initialized for lisp ?   */
+	 __ifndef(`TCR_IN_GPR') /* FIXME */
+	 __(movq %fs:current_tcr@TPOFF+tcr.linear,%rax)
+	 __(testq %rax,%rax)
+	 __(jne 1f)
+	 __endif
+        __endif
+	__(ref_global(get_tcr,%rax))
+	__(movq $1,%carg0)
+	__ifdef(`WINDOWS')
+	__(sub $0x20, %rsp)
+	__endif
+	__(call *%rax)
+	__ifdef(`WINDOWS')
+	__(add $0x20, %rsp)
+        __endif
+        __ifdef(`TCR_IN_GPR')
+	__(movq %rax, %rcontext_reg)
+	__endif	
+        __ifdef(`DARWIN_GS_HACK')
+         /* linear TCR address is now in %rax; callback index was
+            saved in %csave0 a moment ago. */
+         __(set_gs_base(%rax))
+        __endif
+1:	/* Align foreign stack for lisp   */
+        __(pushq rcontext(tcr.save_rbp)) /* mark cstack frame's "owner" */
+	__(pushq rcontext(tcr.foreign_sp))
+	/* init lisp registers   */
+	__(movq %csave0,%rax)	/* recover the callback index */
+	__(movq %rsp,rcontext(tcr.foreign_sp))
+	__ifndef(`TCR_IN_GPR')
+	__(clr %save3)
+	__endif
+	__(clr %save2)
+	__(clr %save1)
+	__(clr %save0)
+	__(clr %arg_z)
+	__(clr %arg_y)
+	__(clr %arg_x)
+	__(clr %temp2)
+	__(clr %temp1)
+	__(clr %temp0)
+	__(clr %fn)
+	__(pxor %fpzero,%fpzero)
+	__(movq rcontext(tcr.save_vsp),%rsp)
+	__(box_fixnum(%rax,%arg_y))	/* arg_y = boxed callback index */
+	__(movq %rbp,%arg_z)	/* arg_z = frame holding the foreign arg registers */
+        __(movq rcontext(tcr.save_rbp),%rbp)
+	__(movq $TCR_STATE_LISP,rcontext(tcr.valence))
+        __(movq (%rsp),%save0)
+        __(movq 8(%rsp),%save1)
+        __(movq 16(%rsp),%save2)
+        __ifndef(`TCR_IN_GPR')
+         __(movq 24(%rsp),%save3)
+        __endif
+        __(stmxcsr rcontext(tcr.foreign_mxcsr))
+        __(andb $~mxcsr_all_exceptions,rcontext(tcr.foreign_mxcsr))
+	__(ldmxcsr rcontext(tcr.lisp_mxcsr))	/* swap in lisp's FP control state */
+	__(movq $nrs.callbacks,%fname)	/* call the lisp callback dispatcher with (index frame) */
+	__(lea local_label(back_from_callback)(%rip),%ra0)
+	__(set_nargs(2))
+        __(push %ra0)
+	__(jump_fname())
+__(tra(local_label(back_from_callback)))
+	__(movq %rsp,rcontext(tcr.save_vsp))
+        __(movq %rbp,rcontext(tcr.save_rbp))
+        __(movq rcontext(tcr.foreign_sp),%rsp)
+	__(stmxcsr rcontext(tcr.lisp_mxcsr))
+	__(movq $TCR_STATE_FOREIGN,rcontext(tcr.valence))
+	__(emms)
+	__(pop rcontext(tcr.foreign_sp))
+        __(addq $node_size,%rsp)	/* discard the save_rbp pushed at 1: above */
+        __(ldmxcsr rcontext(tcr.foreign_mxcsr))
+        __ifdef(`DARWIN_GS_HACK')
+         /* Lucky us; nothing is live here */
+         __(set_foreign_gs_base())
+        __endif
+	__(pop %rbp)
+	__ifdef(`WINDOWS')
+	__(pop %csave6)
+	__(pop %csave5)
+	__endif
+	__(pop %csave4)
+	__(pop %csave3)
+	__(pop %csave2)
+	__(pop %csave1)
+	__(pop %csave0)
+	__(movq -8(%rbp),%rax)	/* NOTE(review): lisp side presumably stored return values into these arg slots — confirm */
+        __(movq -16(%rbp),%rdx)
+	__(movq -24(%rbp),%xmm0)
+        __(movq -32(%rbp),%xmm1)
+	__(leave)
+	__(ret)		
+_endsubp(callback)
+
+/* arg_x = array, arg_y = i, arg_z = j. Typecheck everything.
+   We don't know whether the array is alleged to be simple or
+   not, and don't know anything about the element type.  */
+        	
+_spentry(aref2)	/* arg_x = array, arg_y = i, arg_z = j (see comment above) */
+        __(testb $fixnummask,%arg_y_b)
+        __(jne 0f)
+        
+        __(testb $fixnummask,%arg_z_b)
+        __(jne 1f)
+        __(extract_typecode(%arg_x,%imm0))
+        __(cmpb $subtag_arrayH,%imm0_b)
+        __(jne 2f)
+        __(cmpq $2<<fixnumshift,arrayH.rank(%arg_x))	/* must be exactly rank 2 */
+        __(jne 2f)
+        __(cmpq arrayH.dim0(%arg_x),%arg_y)
+        __(jae 3f)
+        __(movq arrayH.dim0+node_size(%arg_x),%imm0)	/* %imm0 = dim1 */
+        __(cmpq %imm0,%arg_z)
+        __(jae 4f)
+        __(unbox_fixnum(%imm0,%imm0))
+        __(mulq %arg_y)         /* imm0 <- imm0 * arg_y */
+        __(addq %imm0,%arg_z)	/* arg_z = row-major index i*dim1+j, as a fixnum */
+        __(movq %arg_x,%arg_y)
+6:      __(addq arrayH.displacement(%arg_y),%arg_z)	/* follow displaced-to chain */
+        __(movq arrayH.data_vector(%arg_y),%arg_y)
+        __(extract_subtag(%arg_y,%imm1_b))
+        __(cmpb $subtag_vectorH,%imm1_b)
+        __(ja C(misc_ref_common))	/* reached a simple vector: do the actual ref */
+        __(jmp 6b)
+0:      __(uuo_error_reg_not_fixnum(Rarg_y))
+1:      __(uuo_error_reg_not_fixnum(Rarg_z))
+2:      __(uuo_error_reg_not_type(Rarg_x,error_object_not_array_2d))
+3:      __(uuo_error_array_bounds(Rarg_y,Rarg_x))
+4:      __(uuo_error_array_bounds(Rarg_z,Rarg_x))
+        
+_endsubp(aref2)
+
+/* %temp0 = array, %arg_x = i,%arg_y = j, %arg_z = k */
+_spentry(aref3)	/* temp0 = array, arg_x = i, arg_y = j, arg_z = k (see comment above) */
+        __(testb $fixnummask,%arg_x_b)
+        __(jne 0f)
+        __(testb $fixnummask,%arg_y_b)
+        __(jne 1f)
+        __(testb $fixnummask,%arg_z_b)
+        __(jne 2f)
+        __(extract_typecode(%temp0,%imm0))
+        __(cmpb $subtag_arrayH,%imm0_b)
+        __(jne 3f)
+        __(cmpq $3<<fixnumshift,arrayH.rank(%temp0))	/* must be exactly rank 3 */
+        __(jne 3f)
+        __(cmpq arrayH.dim0(%temp0),%arg_x)
+        __(jae 5f)
+        __(movq arrayH.dim0+node_size(%temp0),%imm0)	/* %imm0 = dim1 */
+        __(cmpq %imm0,%arg_y)
+        __(jae 6f)
+        __(unbox_fixnum(%imm0,%imm0))
+        __(movq arrayH.dim0+(node_size*2)(%temp0),%imm1)	/* %imm1 = dim2 */
+        __(cmpq %imm1,%arg_z)
+        __(jae 7f)
+        __(unbox_fixnum(%imm1,%imm1))
+        __(imulq %imm1,%arg_y)	/* arg_y = j*dim2 */
+        __(mulq %imm1)	/* imm0 = dim1*dim2 */
+        __(imulq %imm0,%arg_x)	/* arg_x = i*dim1*dim2 */
+        __(addq %arg_x,%arg_z)
+        __(addq %arg_y,%arg_z)	/* arg_z = row-major index (i*dim1+j)*dim2+k */
+        __(movq %temp0,%arg_y)
+8:      __(addq arrayH.displacement(%arg_y),%arg_z)	/* follow displaced-to chain */
+        __(movq arrayH.data_vector(%arg_y),%arg_y)
+        __(extract_subtag(%arg_y,%imm1_b))
+        __(cmpb $subtag_vectorH,%imm1_b)
+        __(ja C(misc_ref_common))	/* reached a simple vector: do the actual ref */
+        __(jmp 8b)
+0:      __(uuo_error_reg_not_fixnum(Rarg_x))
+1:      __(uuo_error_reg_not_fixnum(Rarg_y))	
+2:      __(uuo_error_reg_not_fixnum(Rarg_z))
+3:      __(uuo_error_reg_not_type(Rtemp0,error_object_not_array_3d))
+5:      __(uuo_error_array_bounds(Rarg_x,Rtemp0))
+6:      __(uuo_error_array_bounds(Rarg_y,Rtemp0))
+7:      __(uuo_error_array_bounds(Rarg_z,Rtemp0))
+        
+_endsubp(aref3)
+        
+/* As with aref2, but temp0 = array, arg_x = i, arg_y = j, arg_z = new_value */
+_spentry(aset2)	/* temp0 = array, arg_x = i, arg_y = j, arg_z = new value (see comment above) */
+        __(testb $fixnummask,%arg_x_b)
+        __(jne 0f)
+        __(testb $fixnummask,%arg_y_b)
+        __(jne 1f)
+        __(extract_typecode(%temp0,%imm0))
+        __(cmpb $subtag_arrayH,%imm0_b)
+        __(jne 2f)
+        __(cmpq $2<<fixnumshift,arrayH.rank(%temp0))	/* must be exactly rank 2 */
+        __(jne 2f)
+        __(cmpq arrayH.dim0(%temp0),%arg_x)
+        __(jae 4f)
+        __(movq arrayH.dim0+node_size(%temp0),%imm0)	/* %imm0 = dim1 */
+        __(cmpq %imm0,%arg_y)
+        __(jae 5f)
+        __(unbox_fixnum(%imm0,%imm0))
+        __(mulq %arg_x)         /* imm0 <- imm0 * arg_x */
+        __(addq %imm0,%arg_y)	/* arg_y = row-major index i*dim1+j, as a fixnum */
+        __(movq %temp0,%arg_x)
+6:      __(addq arrayH.displacement(%arg_x),%arg_y)	/* follow displaced-to chain */
+        __(movq arrayH.data_vector(%arg_x),%arg_x)
+        __(extract_subtag(%arg_x,%imm1_b))
+        __(cmpb $subtag_vectorH,%imm1_b)
+        __(ja C(misc_set_common))	/* reached a simple vector: do the actual store */
+        __(jmp 6b)
+0:      __(uuo_error_reg_not_fixnum(Rarg_x))
+1:      __(uuo_error_reg_not_fixnum(Rarg_y))
+2:      __(uuo_error_reg_not_type(Rtemp0,error_object_not_array_2d))
+4:      __(uuo_error_array_bounds(Rarg_x,Rtemp0))
+5:      __(uuo_error_array_bounds(Rarg_y,Rtemp0))
+_endsubp(aset2)
+
+/* %temp1 = array, %temp0 = i, %arg_x = j, %arg_y = k, %arg_z = newval. */
+
+_spentry(aset3)	/* temp1 = array, temp0 = i, arg_x = j, arg_y = k, arg_z = new value */
+        __(testb $fixnummask,%temp0_b)
+        __(jne 0f)
+        __(testb $fixnummask,%arg_x_b)
+        __(jne 1f)
+        __(testb $fixnummask,%arg_y_b)
+        __(jne 2f)
+        __(extract_typecode(%temp1,%imm0))
+        __(cmpb $subtag_arrayH,%imm0_b)
+        __(jne 3f)
+        __(cmpq $3<<fixnumshift,arrayH.rank(%temp1))	/* must be exactly rank 3 */
+        __(jne 3f)
+        __(cmpq arrayH.dim0(%temp1),%temp0)
+        __(jae 5f)
+        __(movq arrayH.dim0+node_size(%temp1),%imm0)	/* %imm0 = dim1 */
+        __(cmpq %imm0,%arg_x)
+        __(jae 6f)
+        __(unbox_fixnum(%imm0,%imm0))
+        __(movq arrayH.dim0+(node_size*2)(%temp1),%imm1)	/* %imm1 = dim2 */
+        __(cmpq %imm1,%arg_y)
+        __(jae 7f)
+        __(unbox_fixnum(%imm1,%imm1))
+        __(imulq %imm1,%arg_x)	/* arg_x = j*dim2 */
+        __(mulq %imm1)	/* imm0 = dim1*dim2 */
+        __(imulq %imm0,%temp0)	/* temp0 = i*dim1*dim2 */
+        __(addq %temp0,%arg_y)
+        __(addq %arg_x,%arg_y)	/* arg_y = row-major index (i*dim1+j)*dim2+k */
+        __(movq %temp1,%arg_x)
+8:      __(addq arrayH.displacement(%arg_x),%arg_y)	/* follow displaced-to chain */
+        __(movq arrayH.data_vector(%arg_x),%arg_x)
+        __(extract_subtag(%arg_x,%imm1_b))
+        __(cmpb $subtag_vectorH,%imm1_b)
+        __(ja C(misc_set_common))	/* reached a simple vector: do the actual store */
+        __(jmp 8b)
+	
+0:      __(uuo_error_reg_not_fixnum(Rtemp0))
+1:      __(uuo_error_reg_not_fixnum(Rarg_x))
+2:      __(uuo_error_reg_not_fixnum(Rarg_y))
+3:      __(uuo_error_reg_not_type(Rtemp1,error_object_not_array_3d))
+5:      __(uuo_error_array_bounds(Rtemp0,Rtemp1))
+6:      __(uuo_error_array_bounds(Rarg_x,Rtemp1))
+        /* (a duplicate copy of the "6:" stub was removed here) */
+7:      __(uuo_error_array_bounds(Rarg_y,Rtemp1))
+	
+_endsubp(aset3)
+
+        
+
+
+/* Prepend all but the first five (4 words of code, inner fn) and last   */
+/* (lfbits) elements of %fn to the "arglist".   */
+	
+_spentry(call_closure)	/* see comment above: splice the closure's inherited args into the call */
+        new_local_labels()
+        __(subq $fulltag_function-fulltag_misc,%fn)	/* view the closure as a misc-tagged vector */
+        __(vector_length(%fn,%imm0))
+	
+        __(subq $6<<fixnumshift,%imm0)  /* imm0 = inherited arg count   */
+        __(lea (%nargs_q,%imm0),%imm1)
+        __(cmpl $nargregs<<fixnumshift,%imm1_l)
+        __(jna local_label(regs_only))
+        __(pop %ra0)
+        __(cmpl $nargregs<<fixnumshift,%nargs)
+        __(jna local_label(no_insert))
+	
+/* Some arguments have already been pushed.  Push imm0's worth   */
+/* of NILs, copy those arguments that have already been vpushed from   */
+/* the old TOS to the new, then insert all of the inherited args   */
+/* and go to the function.  */
+	
+        __(movq %imm0,%imm1)
+local_label(push_nil_loop):     
+        __(push $nil_value)
+        __(sub $fixnumone,%imm1)
+        __(jne local_label(push_nil_loop))
+	
+/* Need to use arg regs as temporaries here.    */
+        __(movq %rsp,%temp1)
+        __(push %arg_z)
+        __(push %arg_y)
+        __(push %arg_x)
+        __(lea 3*node_size(%rsp,%imm0),%arg_x)
+        __(lea -nargregs<<fixnumshift(%nargs_q),%arg_y)
+local_label(copy_already_loop): 
+        __(movq (%arg_x),%arg_z)
+        __(addq $fixnumone,%arg_x)
+        __(movq %arg_z,(%temp1))
+        __(addq $fixnumone,%temp1)
+        __(subq $fixnumone,%arg_y)
+        __(jne local_label(copy_already_loop))
+	
+        __(movl $5<<fixnumshift,%imm1_l) /* skip code, new fn   */
+local_label(insert_loop):               
+        __(movq misc_data_offset(%fn,%imm1),%arg_z)
+        __(addq $node_size,%imm1)
+        __(addl $fixnum_one,%nargs)
+        __(subq $node_size,%arg_x)
+        __(movq %arg_z,(%arg_x))
+        __(subq $fixnum_one,%imm0)
+        __(jne local_label(insert_loop))
+
+        /* Recover the argument registers, pushed earlier   */
+        __(pop %arg_x)
+        __(pop %arg_y)
+        __(pop %arg_z)
+        __(jmp local_label(go))
+
+/* Here if nothing was pushed by the caller.  If we're  */
+/* going to push anything, we have to reserve a stack  */
+/* frame first. (We'll need to push something if the  */
+/* sum of %nargs and %imm0 is greater than nargregs)   */
+	
+local_label(no_insert):
+        __(lea (%nargs_q,%imm0),%imm1)
+        __(cmpq $nargregs<<fixnumshift,%imm1)
+        __(jna local_label(no_insert_no_frame))
+        /* Reserve space for a stack frame   */
+        __(push $reserved_frame_marker)
+        __(push $reserved_frame_marker)
+local_label(no_insert_no_frame):        
+	/* nargregs or fewer args were already vpushed.   */
+	/* if exactly nargregs, vpush remaining inherited vars.   */
+        __(cmpl $nargregs<<fixnumshift,%nargs)
+        __(movl $5<<fixnumshift,%imm1_l) /* skip code, new fn   */
+        __(leaq 5<<fixnumshift(%imm0),%temp1)
+        __(jnz local_label(set_regs))
+local_label(vpush_remaining):  
+        __(push misc_data_offset(%fn,%imm1))
+        __(addq $node_size,%imm1)
+        __(addl $fixnumone,%nargs)
+        __(subq $node_size,%imm0)
+        __(jnz local_label(vpush_remaining))
+        __(jmp local_label(go))
+local_label(set_regs):
+	/* if nargs was > 1 (and we know that it was < 3), it must have   */
+	/* been 2.  Set arg_x, then vpush the remaining args.   */
+        __(cmpl $fixnumone,%nargs)
+        __(jle local_label(set_y_z))
+local_label(set_arg_x): 
+        __(subq $node_size,%temp1)
+        __(movq misc_data_offset(%fn,%temp1),%arg_x)
+        __(addl $fixnumone,%nargs)
+        __(subq $fixnumone,%imm0)
+        __(jne local_label(vpush_remaining))
+        __(jmp local_label(go))
+	/* Maybe set arg_y or arg_z, preceding args   */
+local_label(set_y_z):
+        __(jne local_label(set_arg_z))
+	/* Set arg_y, maybe arg_x, preceding args   */
+local_label(set_arg_y): 
+        __(subq $node_size,%temp1)
+        __(movq misc_data_offset(%fn,%temp1),%arg_y)
+        __(addl $fixnumone,%nargs)
+        __(subq $fixnum_one,%imm0)
+        __(jnz local_label(set_arg_x))
+        __(jmp local_label(go))
+local_label(set_arg_z): 
+        __(subq $node_size,%temp1)
+        __(movq misc_data_offset(%fn,%temp1),%arg_z)
+        __(addl $fixnumone,%nargs)
+        __(subq $fixnum_one,%imm0)
+        __(jne local_label(set_arg_y))
+local_label(go):        
+        __(movq misc_data_offset+(4*node_size)(%fn),%fn)	/* inner function */
+        __(push %ra0)
+        __(jmp *%fn)
+local_label(regs_only):
+        __(leaq 5<<fixnumshift(%imm0),%temp1)
+        __(testl %nargs,%nargs)
+        __(jne local_label(some_args))
+        __(cmpw $node_size,%imm0_w)	/* was %imm0: cmpw needs the 16-bit register form */
+        __(movq misc_data_offset-node_size(%fn,%temp1),%arg_z)
+        __(je local_label(rgo))
+        __(cmpw $2*node_size,%imm0_w)	/* was %imm0: cmpw needs the 16-bit register form */
+        __(movq misc_data_offset-(node_size*2)(%fn,%temp1),%arg_y)
+        __(je local_label(rgo))
+        __(movq misc_data_offset-(node_size*3)(%fn,%temp1),%arg_x)
+local_label(rgo):
+        __(addw %imm0_w,%nargs_w)
+        __(jmp *misc_data_offset+(4*node_size)(%fn))
+local_label(some_args):         
+        __(cmpl $2*node_size,%nargs)
+        __(jz local_label(rtwo))
+        /* One arg was passed, could be one or two inherited args */
+        __(cmpw $node_size,%imm0_w)
+        __(movq misc_data_offset-node_size(%fn,%temp1),%arg_y)
+        __(je local_label(rgo))
+        __(movq misc_data_offset-(node_size*2)(%fn,%temp1),%arg_x)
+        __(jmp local_label(rgo))
+local_label(rtwo):     
+        __(movq misc_data_offset-node_size(%fn,%temp1),%arg_x)
+        __(jmp local_label(rgo))
+_endsubp(call_closure)
+                                        
+        
+_spentry(poweropen_callbackX)	/* PPC-only subprims: empty on x86-64; slots kept so the subprim table stays uniform */
+_endsubp(poweropen_callbackX)
+	
+	
+_spentry(poweropen_ffcallX)
+_endsubp(poweropen_ffcallX)
+        	
+_spentry(poweropen_syscall)
+_endsubp(poweropen_syscall)
+
+_spentry(eabi_ff_call)
+_endsubp(eabi_ff_call)
+
+_spentry(eabi_callback)
+_endsubp(eabi_callback)
+
+
+/* Unused subprims (and often not used on PPC either); hlt traps any stray call.  */
+_spentry(callbuiltin)
+	__(hlt)
+_endsubp(callbuiltin)
+
+_spentry(callbuiltin0)
+	__(hlt)
+_endsubp(callbuiltin0)
+
+_spentry(callbuiltin1)
+	__(hlt)
+_endsubp(callbuiltin1)
+
+_spentry(callbuiltin2)
+	__(hlt)
+_endsubp(callbuiltin2)
+
+_spentry(callbuiltin3)
+	__(hlt)
+_endsubp(callbuiltin3)
+	
+_spentry(restorefullcontext)
+	__(hlt)
+_endsubp(restorefullcontext)
+
+_spentry(savecontextvsp)
+	__(hlt)
+_endsubp(savecontextvsp)
+
+_spentry(savecontext0)
+	__(hlt)
+_endsubp(savecontext0)
+
+_spentry(restorecontext)
+	__(hlt)
+_endsubp(restorecontext)
+
+_spentry(stkconsyz)
+	__(hlt)
+_endsubp(stkconsyz)
+
+_spentry(stkvcell0)
+	__(hlt)
+_endsubp(stkvcell0)
+
+_spentry(stkvcellvsp)
+	__(hlt)
+_endsubp(stkvcellvsp)
+
+_spentry(breakpoint)
+        __(hlt)
+_endsubp(breakpoint)
+
+
+        __ifdef(`DARWIN')
+        .if 1
+	.globl  C(lisp_objc_personality)
+C(lisp_objc_personality):	/* forwards to the real ObjC 2.0 personality routine */
+	jmp *lisp_global(objc_2_personality)
+	
+	.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
+EH_frame1:	/* CIE shared by the two FDEs below */
+	.set L$set$12,LECIE1-LSCIE1
+	.long L$set$12	/* Length of Common Information Entry */
+LSCIE1:
+	.long	0x0	/* CIE Identifier Tag */
+	.byte	0x1	/* CIE Version */
+	.ascii "zPLR\0"	/* CIE Augmentation */
+	.byte	0x1	/* uleb128 0x1; CIE Code Alignment Factor */
+	.byte	0x78	/* sleb128 -8; CIE Data Alignment Factor */
+	.byte	0x10	/* CIE RA Column */
+	.byte	0x7	/* uleb128 0x7; augmentation data length */
+	.byte	0x9b	/* personality encoding (indirect pcrel sdata4) */
+	.long	_lisp_objc_personality+4@GOTPCREL
+	.byte	0x10	/* LSDA Encoding (pcrel) */
+	.byte	0x10	/* FDE Encoding (pcrel) */
+	.byte	0xc	/* DW_CFA_def_cfa */
+	.byte	0x7	/* uleb128 0x7 */
+	.byte	0x8	/* uleb128 0x8 */
+	.byte	0x90	/* DW_CFA_offset, column 0x10 */
+	.byte	0x1	/* uleb128 0x1 */
+	.align 3
+LECIE1:
+        .globl _SPffcall.eh
+_SPffcall.eh:	/* FDE for the ffcall subprim */
+        .long LEFDEffcall-LSFDEffcall
+LSFDEffcall:      
+        .long LSFDEffcall-EH_frame1 /* FDE CIE offset */
+        .quad Lffcall-. /* FDE Initial Location */
+        .quad Lffcall_end-Lffcall /* FDE address range */
+        .byte 8 /* uleb128 0x8; Augmentation size */
+        .quad LLSDA1-.           /* Language Specific Data Area */
+	.byte	0x4	/* DW_CFA_advance_loc4 */
+	.long Lffcall_setup-Lffcall
+	.byte	0xe	/* DW_CFA_def_cfa_offset */
+	.byte	0x10	/* uleb128 0x10 */
+	.byte	0x86	/* DW_CFA_offset, column 0x6 */
+	.byte	0x2	/* uleb128 0x2 */
+	.byte	0x4	/* DW_CFA_advance_loc4 */
+	.long Lffcall_setup_end-Lffcall_setup
+	.byte	0xd	/* DW_CFA_def_cfa_register */
+	.byte	0x6	/* uleb128 0x6 */
+	.byte	0x4	/* DW_CFA_advance_loc4 */
+	.long Lffcall_call_end-Lffcall_call
+	.byte	0x83	/* DW_CFA_offset, column 0x3 */
+	.byte	0x3	/* uleb128 0x3 */
+	.align 3
+LEFDEffcall:
+        .globl _SPffcall_return_registers.eh
+_SPffcall_return_registers.eh:	/* FDE for the ffcall_returning_registers subprim */
+        .long LEFDEffcall_return_registers-LSFDEffcall_return_registers
+LSFDEffcall_return_registers:      
+        .long LSFDEffcall_return_registers-EH_frame1 /* FDE CIE offset */
+        .quad Lffcall_return_registers-. /* FDE Initial Location */
+        .quad Lffcall_return_registers_end-Lffcall_return_registers /* FDE address range */
+        .byte 8 /* uleb128 0x8; Augmentation size */
+        .quad LLSDA2-.           /* Language Specific Data Area */
+	.byte	0x4	/* DW_CFA_advance_loc4 */
+	.long Lffcall_return_registers_setup-Lffcall_return_registers
+	.byte	0xe	/* DW_CFA_def_cfa_offset */
+	.byte	0x10	/* uleb128 0x10 */
+	.byte	0x86	/* DW_CFA_offset, column 0x6 */
+	.byte	0x2	/* uleb128 0x2 */
+	.byte	0x4	/* DW_CFA_advance_loc4 */
+	.long Lffcall_return_registers_setup_end-Lffcall_return_registers_setup
+	.byte	0xd	/* DW_CFA_def_cfa_register */
+	.byte	0x6	/* uleb128 0x6 */
+	.byte	0x4	/* DW_CFA_advance_loc4 */
+	.long Lffcall_return_registers_call_end-Lffcall_return_registers_call
+	.byte	0x83	/* DW_CFA_offset, column 0x3 */
+	.byte	0x3	/* uleb128 0x3 */
+	.align 3
+LEFDEffcall_return_registers:
+        .text
+        .endif
+        __endif
+        
+_spentry(unused_5)	/* placeholder slot; should never be entered */
+        __(hlt)
+Xspentry_end:	/* end of the subprims; exported as spentry_end below */
+_endsubp(unused_5)
+        
+        .data
+        .globl C(spentry_start)
+        .globl C(spentry_end)
+C(spentry_start):       .quad Xspentry_start
+C(spentry_end):         .quad Xspentry_end
Index: /branches/arm/lisp-kernel/x86-spjump32.s
===================================================================
--- /branches/arm/lisp-kernel/x86-spjump32.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-spjump32.s	(revision 13357)
@@ -0,0 +1,193 @@
+/*   Copyright (C) 2005-2009 Clozure Associates */
+/*   This file is part of Clozure CL.    */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+	
+        include(lisp.s)
+define(`_spjump',`
+        .p2align 2
+        .globl _SP$1
+_exportfn(j_SP$1)
+          __(.long _SP$1)	/* 32-bit absolute address of the subprim */
+_endfn
+')
+	_beginfile
+        __ifdef(`DARWIN')
+        .space 0x3000,0	/* padding before the table */
+        __endif
+        __ifdef(`WIN_32')
+        .space 0x5000-0x1000,0	/* padding before the table */
+        __endif
+         .globl C(spjump_start)
+C(spjump_start):	/* NOTE(review): entry order appears to match x86-spjump64.s — confirm before reordering */
+
+        _spjump(jmpsym)
+        _spjump(jmpnfn)
+        _spjump(funcall)
+        _spjump(mkcatch1v)
+        _spjump(mkunwind)
+        _spjump(mkcatchmv)
+        _spjump(throw)
+        _spjump(nthrowvalues)
+        _spjump(nthrow1value)
+        _spjump(bind)
+        _spjump(bind_self)
+        _spjump(bind_nil)
+        _spjump(bind_self_boundp_check)
+        _spjump(rplaca)
+        _spjump(rplacd)
+        _spjump(conslist)
+        _spjump(conslist_star)
+        _spjump(stkconslist)
+        _spjump(stkconslist_star)
+        _spjump(mkstackv)
+        _spjump(subtag_misc_ref)
+        _spjump(setqsym)
+        _spjump(progvsave)
+        _spjump(stack_misc_alloc)
+        _spjump(gvector)
+        _spjump(nvalret)
+        _spjump(mvpass)
+        _spjump(recover_values_for_mvcall)
+        _spjump(nthvalue)
+        _spjump(values)
+        _spjump(default_optional_args)
+        _spjump(opt_supplied_p)
+        _spjump(heap_rest_arg)
+        _spjump(req_heap_rest_arg)
+        _spjump(heap_cons_rest_arg)
+        _spjump(simple_keywords)
+        _spjump(keyword_args)
+        _spjump(keyword_bind)
+        _spjump(ffcall)
+        _spjump(aref2)
+        _spjump(ksignalerr)
+        _spjump(stack_rest_arg)
+        _spjump(req_stack_rest_arg)
+        _spjump(stack_cons_rest_arg)
+        _spjump(poweropen_callbackX)        
+        _spjump(call_closure)        
+        _spjump(getxlong)
+        _spjump(spreadargz)
+        _spjump(tfuncallgen)
+        _spjump(tfuncallslide)
+        _spjump(tfuncallvsp)
+        _spjump(tcallsymgen)
+        _spjump(tcallsymslide)
+        _spjump(tcallsymvsp)
+        _spjump(tcallnfngen)
+        _spjump(tcallnfnslide)
+        _spjump(tcallnfnvsp)
+        _spjump(misc_ref)
+        _spjump(misc_set)
+        _spjump(stkconsyz)
+        _spjump(stkvcell0)
+        _spjump(stkvcellvsp)      
+        _spjump(makestackblock)
+        _spjump(makestackblock0)
+        _spjump(makestacklist)
+        _spjump(stkgvector)
+        _spjump(misc_alloc)
+        _spjump(poweropen_ffcallX)
+        _spjump(gvset)
+        _spjump(macro_bind)
+        _spjump(destructuring_bind)
+        _spjump(destructuring_bind_inner)
+        _spjump(recover_values)
+        _spjump(vpopargregs)
+        _spjump(integer_sign)
+        _spjump(subtag_misc_set)
+        _spjump(spread_lexprz)
+        _spjump(store_node_conditional)
+        _spjump(reset)
+        _spjump(mvslide)
+        _spjump(save_values)
+        _spjump(add_values)
+        _spjump(callback)
+        _spjump(misc_alloc_init)
+        _spjump(stack_misc_alloc_init)
+        _spjump(set_hash_key)
+        _spjump(aset2)
+        _spjump(callbuiltin)
+        _spjump(callbuiltin0)
+        _spjump(callbuiltin1)
+        _spjump(callbuiltin2)
+        _spjump(callbuiltin3)
+        _spjump(popj)
+        _spjump(restorefullcontext)
+        _spjump(savecontextvsp)
+        _spjump(savecontext0)
+        _spjump(restorecontext)
+        _spjump(lexpr_entry)
+        _spjump(syscall2)
+        _spjump(builtin_plus)
+        _spjump(builtin_minus)
+        _spjump(builtin_times)
+        _spjump(builtin_div)
+        _spjump(builtin_eq)
+        _spjump(builtin_ne)
+        _spjump(builtin_gt)
+        _spjump(builtin_ge)
+        _spjump(builtin_lt)
+        _spjump(builtin_le)
+        _spjump(builtin_eql)
+        _spjump(builtin_length)
+        _spjump(builtin_seqtype)
+        _spjump(builtin_assq)
+        _spjump(builtin_memq)
+        _spjump(builtin_logbitp)
+        _spjump(builtin_logior)
+        _spjump(builtin_logand)
+        _spjump(builtin_ash)
+        _spjump(builtin_negate)
+        _spjump(builtin_logxor)
+        _spjump(builtin_aref1)
+        _spjump(builtin_aset1)
+        _spjump(breakpoint)
+        _spjump(eabi_ff_call)
+        _spjump(eabi_callback)
+        _spjump(syscall)
+        _spjump(getu64)
+        _spjump(gets64)
+        _spjump(makeu64)
+        _spjump(makes64)
+        _spjump(specref)
+        _spjump(specset)
+        _spjump(specrefcheck)
+        _spjump(restoreintlevel)
+        _spjump(makes32)
+        _spjump(makeu32)
+        _spjump(gets32)
+        _spjump(getu32)
+        _spjump(fix_overflow)
+        _spjump(mvpasssym)
+        _spjump(aref3)
+        _spjump(aset3)
+        _spjump(ffcall_return_registers)
+        _spjump(aset1)
+        _spjump(set_hash_key_conditional)
+        _spjump(unbind_interrupt_level)
+        _spjump(unbind)
+        _spjump(unbind_n)
+        _spjump(unbind_to)
+        _spjump(bind_interrupt_level_m1)
+        _spjump(bind_interrupt_level)
+        _spjump(bind_interrupt_level_0)
+        _spjump(progvrestore)
+        _spjump(nmkunwind)
+         .globl C(spjump_end)
+C(spjump_end):
+	.org C(spjump_start)+0x1000	/* pad the table to exactly 0x1000 bytes */
+	
+        _endfile
+		
Index: /branches/arm/lisp-kernel/x86-spjump64.s
===================================================================
--- /branches/arm/lisp-kernel/x86-spjump64.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-spjump64.s	(revision 13357)
@@ -0,0 +1,190 @@
+/*   Copyright (C) 2005-2009 Clozure Associates */
+/*   This file is part of Clozure CL.    */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public  */
+/*   License , known as the LLGPL and distributed with Clozure CL as the  */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,  */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these  */
+/*   conflict, the preamble takes precedence.    */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."  */
+
+/*   The LLGPL is also available online at  */
+/*   http://opensource.franz.com/preamble.html  */
+
+	
+        include(lisp.s)
+define(`_spjump',`
+        .p2align 3
+        .globl _SP$1
+_exportfn(j_SP$1)
+          __(.quad _SP$1)
+_endfn
+')
+	_beginfile
+        __ifdef(`DARWIN')
+        .space 0x5000,0
+        __endif
+         .globl C(spjump_start)
+C(spjump_start):
+
+        _spjump(jmpsym)
+        _spjump(jmpnfn)
+        _spjump(funcall)
+        _spjump(mkcatch1v)
+        _spjump(mkunwind)
+        _spjump(mkcatchmv)
+        _spjump(throw)
+        _spjump(nthrowvalues)
+        _spjump(nthrow1value)
+        _spjump(bind)
+        _spjump(bind_self)
+        _spjump(bind_nil)
+        _spjump(bind_self_boundp_check)
+        _spjump(rplaca)
+        _spjump(rplacd)
+        _spjump(conslist)
+        _spjump(conslist_star)
+        _spjump(stkconslist)
+        _spjump(stkconslist_star)
+        _spjump(mkstackv)
+        _spjump(subtag_misc_ref)
+        _spjump(setqsym)
+        _spjump(progvsave)
+        _spjump(stack_misc_alloc)
+        _spjump(gvector)
+        _spjump(nvalret)
+        _spjump(mvpass)
+        _spjump(recover_values_for_mvcall)
+        _spjump(nthvalue)
+        _spjump(values)
+        _spjump(default_optional_args)
+        _spjump(opt_supplied_p)
+        _spjump(heap_rest_arg)
+        _spjump(req_heap_rest_arg)
+        _spjump(heap_cons_rest_arg)
+        _spjump(simple_keywords)
+        _spjump(keyword_args)
+        _spjump(keyword_bind)
+        _spjump(ffcall)
+        _spjump(aref2)
+        _spjump(ksignalerr)
+        _spjump(stack_rest_arg)
+        _spjump(req_stack_rest_arg)
+        _spjump(stack_cons_rest_arg)
+        _spjump(poweropen_callbackX)        
+        _spjump(call_closure)        
+        _spjump(getxlong)
+        _spjump(spreadargz)
+        _spjump(tfuncallgen)
+        _spjump(tfuncallslide)
+        _spjump(tfuncallvsp)
+        _spjump(tcallsymgen)
+        _spjump(tcallsymslide)
+        _spjump(tcallsymvsp)
+        _spjump(tcallnfngen)
+        _spjump(tcallnfnslide)
+        _spjump(tcallnfnvsp)
+        _spjump(misc_ref)
+        _spjump(misc_set)
+        _spjump(stkconsyz)
+        _spjump(stkvcell0)
+        _spjump(stkvcellvsp)      
+        _spjump(makestackblock)
+        _spjump(makestackblock0)
+        _spjump(makestacklist)
+        _spjump(stkgvector)
+        _spjump(misc_alloc)
+        _spjump(poweropen_ffcallX)
+        _spjump(gvset)
+        _spjump(macro_bind)
+        _spjump(destructuring_bind)
+        _spjump(destructuring_bind_inner)
+        _spjump(recover_values)
+        _spjump(vpopargregs)
+        _spjump(integer_sign)
+        _spjump(subtag_misc_set)
+        _spjump(spread_lexprz)
+        _spjump(store_node_conditional)
+        _spjump(reset)
+        _spjump(mvslide)
+        _spjump(save_values)
+        _spjump(add_values)
+        _spjump(callback)
+        _spjump(misc_alloc_init)
+        _spjump(stack_misc_alloc_init)
+        _spjump(set_hash_key)
+        _spjump(aset2)
+        _spjump(callbuiltin)
+        _spjump(callbuiltin0)
+        _spjump(callbuiltin1)
+        _spjump(callbuiltin2)
+        _spjump(callbuiltin3)
+        _spjump(popj)
+        _spjump(restorefullcontext)
+        _spjump(savecontextvsp)
+        _spjump(savecontext0)
+        _spjump(restorecontext)
+        _spjump(lexpr_entry)
+        _spjump(poweropen_syscall)
+        _spjump(builtin_plus)
+        _spjump(builtin_minus)
+        _spjump(builtin_times)
+        _spjump(builtin_div)
+        _spjump(builtin_eq)
+        _spjump(builtin_ne)
+        _spjump(builtin_gt)
+        _spjump(builtin_ge)
+        _spjump(builtin_lt)
+        _spjump(builtin_le)
+        _spjump(builtin_eql)
+        _spjump(builtin_length)
+        _spjump(builtin_seqtype)
+        _spjump(builtin_assq)
+        _spjump(builtin_memq)
+        _spjump(builtin_logbitp)
+        _spjump(builtin_logior)
+        _spjump(builtin_logand)
+        _spjump(builtin_ash)
+        _spjump(builtin_negate)
+        _spjump(builtin_logxor)
+        _spjump(builtin_aref1)
+        _spjump(builtin_aset1)
+        _spjump(breakpoint)
+        _spjump(eabi_ff_call)
+        _spjump(eabi_callback)
+        _spjump(syscall)
+        _spjump(getu64)
+        _spjump(gets64)
+        _spjump(makeu64)
+        _spjump(makes64)
+        _spjump(specref)
+        _spjump(specset)
+        _spjump(specrefcheck)
+        _spjump(restoreintlevel)
+        _spjump(makes32)
+        _spjump(makeu32)
+        _spjump(gets32)
+        _spjump(getu32)
+        _spjump(fix_overflow)
+        _spjump(mvpasssym)
+        _spjump(aref3)
+        _spjump(aset3)
+        _spjump(ffcall_return_registers)
+        _spjump(unused_5)
+        _spjump(set_hash_key_conditional)
+        _spjump(unbind_interrupt_level)
+        _spjump(unbind)
+        _spjump(unbind_n)
+        _spjump(unbind_to)
+        _spjump(bind_interrupt_level_m1)
+        _spjump(bind_interrupt_level)
+        _spjump(bind_interrupt_level_0)
+        _spjump(progvrestore)
+        _spjump(nmkunwind)
+         .globl C(spjump_end)
+C(spjump_end):
+	/* Pad the jump table to 0x1000 bytes from its start, matching
+	   the 32-bit version.  A bare ".org 0x1000" is an offset from
+	   the start of the section and cannot assemble on Darwin, where
+	   the ".space 0x5000" above already places spjump_start past
+	   that offset (".org" may never move backwards). */
+	.org C(spjump_start)+0x1000
+	
+        _endfile
+		
Index: /branches/arm/lisp-kernel/x86-subprims32.s
===================================================================
--- /branches/arm/lisp-kernel/x86-subprims32.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-subprims32.s	(revision 13357)
@@ -0,0 +1,126 @@
+        include(lisp.s)
+	_beginfile
+	
+	.globl _SPmkcatch1v
+	.globl _SPnthrow1value
+	
+/* This is called from a c-style context and calls a lisp function.*/
+/* This does the moral equivalent of*/
+/*   (loop */
+/*	(let* ((fn (%function_on_top_of_lisp_stack)))*/
+/*	  (if fn*/
+/*            (catch %toplevel-catch%*/
+/*	       (funcall fn))*/
+/*            (return nil))))*/
+
+
+_exportfn(toplevel_loop)
+Xsubprims_start:        	
+	__(push %ebp)
+	__(movl %esp,%ebp)
+	/* Switch to the lisp stack */
+	__(movl %esp,rcontext(tcr.foreign_sp))
+	__(movl rcontext(tcr.save_vsp),%esp)
+	__(push $0)
+	__(mov %esp,%ebp)
+	__(cmpl $0,C(GCDebug))
+	__(je 1f)
+        __(ref_global(initial_tcr,%imm0))
+        __(cmpl rcontext(tcr.linear),%imm0)
+        __(jne 1f)
+	__(clr %imm0)
+	__(uuo_error_gc_trap)
+1:
+	__(jmp local_label(test))
+local_label(loop):
+	__(ref_nrs_value(toplcatch,%arg_z))
+	__(movl `$'local_label(back_from_catch),%ra0)
+	__(movl `$'local_label(test),%xfn)
+        __(push %ra0)
+	__(jmp _SPmkcatch1v)
+__(tra(local_label(back_from_catch)))
+	__(movl %arg_y,%temp0)
+	__(pushl `$'local_label(back_from_funcall))
+	__(set_nargs(0))
+	__(jmp _SPfuncall)
+__(tra(local_label(back_from_funcall)))
+	__(movl $fixnumone,%imm0)
+	__(movl `$'local_label(test),%ra0)
+	__(jmp _SPnthrow1value)
+__(tra(local_label(test)))
+	__(movl 4(%ebp),%arg_y)
+	__(cmpl $nil_value,%arg_y)
+	__(jnz local_label(loop))
+local_label(back_to_c):
+	__(movl rcontext(tcr.foreign_sp),%esp)
+	__(movl %esp,%ebp)
+	__(leave)
+	__(ret)
+
+/* This is called from C code when a thread (including the initial thread) */
+/* starts execution.  (Historically, it also provided a primitive way of */
+/* "resettting" a thread in the event of catastrophic failure, but this */
+/* hasn't worked in a long time.) */
+/* For compatibility with PPC code, this is called with the first foreign */
+/* argument pointing to the thread's TCR and the second foreign argument */
+/*  being a Boolean which indicates whether the thread should try to */
+/* "reset" itself or start running lisp code. */
+/* The reset/panic code doesn't work. */
+
+_exportfn(C(start_lisp))
+	__(push %ebp)
+	__(movl %esp, %ebp)
+	__(push %edi)
+	__(push %esi)
+	__(push %ebx)
+	__(mov 8(%ebp), %ebx)	/* get tcr */
+        __(cmpb $0,C(rcontext_readonly))
+        __(jne 0f)
+        __(movw tcr.ldt_selector(%ebx), %rcontext_reg)
+0:              
+        __(movl 8(%ebp),%eax)
+        __(cmpl rcontext(tcr.linear),%eax)
+        __(je 1f)
+        __(hlt)
+1:              
+        .if c_stack_16_byte_aligned
+	__(sub $12, %esp) 	/* stack now 16-byte aligned */
+        .else
+        __(andl $~15,%esp)
+        .endif
+	__(clr %arg_z)
+	__(clr %arg_y)	
+	__(clr %temp0)
+	__(clr %temp1)
+	__(clr %fn)
+	__(pxor %fpzero, %fpzero)
+	__(stmxcsr rcontext(tcr.foreign_mxcsr))
+	__(andb $~mxcsr_all_exceptions,rcontext(tcr.foreign_mxcsr))
+        __(ldmxcsr rcontext(tcr.lisp_mxcsr))
+	__(movl $TCR_STATE_LISP, rcontext(tcr.valence))
+	__(call toplevel_loop)
+	__(movl $TCR_STATE_FOREIGN, rcontext(tcr.valence))
+	__(emms)
+        __(leal -3*node_size(%ebp),%esp)
+	__(pop %ebx)
+	__(pop %esi)
+	__(pop %edi)
+	__(ldmxcsr rcontext(tcr.foreign_mxcsr))
+        __ifdef(`WIN32_ES_HACK')
+         __(push %ds)
+         __(pop %es)
+        __endif
+	__(movl $nil_value, %eax)
+	__(leave)
+	__(ret)
+Xsubprims_end:           
+_endfn
+
+        .data
+        .globl C(subprims_start)
+        .globl C(subprims_end)
+C(subprims_start):      .long Xsubprims_start
+C(subprims_end):        .long Xsubprims_end
+        .text
+
+
Index: /branches/arm/lisp-kernel/x86-subprims64.s
===================================================================
--- /branches/arm/lisp-kernel/x86-subprims64.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-subprims64.s	(revision 13357)
@@ -0,0 +1,156 @@
+/*   Copyright (C) 2005-2009 Clozure Associates*/
+/*   This file is part of Clozure CL.  */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public*/
+/*   License , known as the LLGPL and distributed with Clozure CL as the*/
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,*/
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these*/
+/*   conflict, the preamble takes precedence.  */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY."*/
+
+/*   The LLGPL is also available online at*/
+/*   http://opensource.franz.com/preamble.html*/
+
+
+	include(lisp.s)
+	_beginfile
+
+	.globl _SPmkcatch1v
+	.globl _SPnthrow1value
+
+
+/* This is called from a c-style context and calls a lisp function.*/
+/* This does the moral equivalent of*/
+/*   (loop */
+/*	(let* ((fn (%function_on_top_of_lisp_stack)))*/
+/*	  (if fn*/
+/*            (catch %toplevel-catch%*/
+/*	       (funcall fn))*/
+/*            (return nil))))*/
+
+
+_exportfn(toplevel_loop)
+Xsubprims_start:        	
+	__(push %rbp)
+	__(movq %rsp,%rbp)
+	/* Switch to the lisp stack */
+        __(push $0)
+        __(push $0)
+	__(movq %rsp,rcontext(tcr.foreign_sp))
+	__(movq rcontext(tcr.save_vsp),%rsp)
+	__(push $0)
+	__(movq %rsp,%rbp)
+        
+        __(TSP_Alloc_Fixed(0,%temp0))
+        __(movsd %fpzero,tsp_frame.save_rbp(%temp0)) /* sentinel */
+	__(jmp local_label(test))
+local_label(loop):
+	__(ref_nrs_value(toplcatch,%arg_z))
+	__(leaq local_label(back_from_catch)(%rip),%ra0)
+	__(leaq local_label(test)(%rip),%xfn)
+        __(push %ra0)
+	__(jmp _SPmkcatch1v)
+__(tra(local_label(back_from_catch)))
+	__(movq %arg_x,%temp0)
+	__(leaq local_label(back_from_funcall)(%rip),%ra0)
+        __(push %ra0)
+	__(set_nargs(0))
+	__(jmp _SPfuncall)
+__(tra(local_label(back_from_funcall)))
+	__(movl $fixnumone,%imm0_l)
+	__(leaq local_label(test)(%rip),%ra0)
+	__(jmp _SPnthrow1value)	
+__(tra(local_label(test)))
+	__(movq 8(%rbp),%arg_x)
+	__(cmpq $nil_value,%arg_x)
+	__(jnz local_label(loop))
+local_label(back_to_c):
+        __(discard_temp_frame(%imm0))
+	__(movq rcontext(tcr.foreign_sp),%rsp)
+        __(addq $dnode_size,%rsp)
+	__(movq %rsp,%rbp)
+	__(leave)
+	__(ret)
+
+/* This is called from C code when a thread (including the initial thread) */
+/* starts execution.  (Historically, it also provided a primitive way of */
+/* "resettting" a thread in the event of catastrophic failure, but this */
+/* hasn't worked in a long time.) */
+/* For compatibility with PPC code, this is called with the first foreign */
+/* argument pointing to the thread's TCR and the second foreign argument */
+/*  being a Boolean which indicates whether the thread should try to */
+/* "reset" itself or start running lisp code.  Both of these arguments */
+/* are currently ignored (the TCR is maintained in a segment register and */
+/*  the reset/panic code doesn't work ...), except on Windows, where we use */
+/* the first arg to set up the TCR register */	
+   
+_exportfn(C(start_lisp))
+	__(push %rbp)
+	__(movq %rsp,%rbp)
+	__(push %csave0)
+	__(push %csave1)
+	__(push %csave2)
+	__(push %csave3)
+	__(push %csave4)
+	__ifdef(`WINDOWS')
+	__(push %csave5)
+	__(push %csave6)
+        __endif
+        __ifdef(`TCR_IN_GPR')
+	__(movq %carg0,%rcontext_reg)
+	__endif
+        __ifdef(`DARWIN_GS_HACK')
+         __(set_gs_base())
+        __endif
+	__(sub $8,%rsp)	/* %rsp is now 16-byte aligned  */
+	/* Put harmless values in lisp node registers  */
+	__(clr %arg_z)
+	__(clr %arg_y)
+	__(clr %arg_x)
+	__(clr %temp0)
+	__(clr %temp1)
+	__(clr %temp2)
+	__(clr %fn)
+        /*	__(clr %ra0) */ /* %ra0 == %temp2, now zeroed above */
+	__(clr %save0)
+	__(clr %save1)
+	__(clr %save2)
+	__ifndef(`TCR_IN_GPR') /* no %save3, r11 is %rcontext_reg */
+	__(clr %save3)
+	__endif
+	__(pxor %fpzero,%fpzero)	/* fpzero = 0.0`d0' */
+        __(stmxcsr rcontext(tcr.foreign_mxcsr))
+        __(andb $~mxcsr_all_exceptions,rcontext(tcr.foreign_mxcsr))
+        __(ldmxcsr rcontext(tcr.lisp_mxcsr))
+	__(movq $TCR_STATE_LISP,rcontext(tcr.valence))
+	__(call toplevel_loop)
+	__(movq $TCR_STATE_FOREIGN,rcontext(tcr.valence))
+	__(emms)
+	__(addq $8,%rsp)	/* discard alignment word */
+	__ifdef(`WINDOWS')
+	__(pop %csave6)
+	__(pop %csave5)
+	__endif
+	__(pop %csave4)
+	__(pop %csave3)
+	__(pop %csave2)
+	__(pop %csave1)
+	__(pop %csave0)
+        __(ldmxcsr rcontext(tcr.foreign_mxcsr))
+        __ifdef(`DARWIN_GS_HACK')
+         __(set_foreign_gs_base)
+        __endif
+	__(movl $nil_value,%eax)
+	__(leave)
+	__(ret)
+Xsubprims_end:           
+_endfn
+
+        .data
+        .globl C(subprims_start)
+        .globl C(subprims_end)
+C(subprims_start):      .quad Xsubprims_start
+C(subprims_end):        .quad Xsubprims_end
+        .text
+                                
Index: /branches/arm/lisp-kernel/x86-uuo.s
===================================================================
--- /branches/arm/lisp-kernel/x86-uuo.s	(revision 13357)
+++ /branches/arm/lisp-kernel/x86-uuo.s	(revision 13357)
@@ -0,0 +1,104 @@
+/*   Copyright (C) 2005-2009 Clozure Associates */
+/*   This file is part of Clozure CL.   */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence.   */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+define(`uuo_error_too_few_args',`
+        int `$'0xc0
+')
+
+define(`uuo_error_too_many_args',`
+        int `$'0xc1
+')
+
+define(`uuo_error_wrong_number_of_args',`
+        int `$'0xc2
+')
+
+
+define(`uuo_error_gc_trap',`
+        int `$'0xc4
+')                        
+
+
+define(`uuo_error_debug_trap',`
+        int `$'0xca
+')                        
+        
+                                        
+/* If we're allocating a CONS, the tcr's save_allocptr slot will be */
+/* tagged as a cons.  Otherwise, it'll be tagged as fulltag_misc, */
+/* and we have to look at the immediate registers to determine what's */
+/* being allocated. */
+define(`uuo_alloc',`
+	int `$'0xc5
+')
+				
+define(`uuo_error_not_callable',`
+        int `$'0xc6
+')
+
+
+define(`xuuo',`
+	ud2a
+	.byte $1
+')
+	
+define(`tlb_too_small',`
+	xuuo(1)
+')
+
+define(`interrupt_now',`
+	xuuo(2)
+')		
+
+define(`suspend_now',`
+	xuuo(3)
+')		
+
+define(`uuo_error_reg_not_fixnum',`
+	int `$'0xf0|$1
+')	
+	
+define(`uuo_error_reg_not_list',`
+	int `$'0xe0|$1
+')
+
+define(`uuo_error_reg_not_tag',`
+	int `$'0xd0|$1
+	.byte $2
+')			
+
+define(`uuo_error_reg_not_type',`
+	int `$'0xb0|$1
+	.byte $2
+')
+
+/* (A second, byte-identical m4 definition of uuo_error_reg_not_fixnum
+   appeared here; removed as a duplicate of the definition above.) */
+		
+define(`uuo_error_reg_unbound',`
+	int `$'0x90|$1
+')	
+
+define(`uuo_error_vector_bounds',`
+	int `$'0xc8
+	.byte ($1<<4)|($2)
+')	
+
+define(`uuo_error_array_bounds',`
+	int `$'0xcb
+	.byte ($1<<4)|($2)
+')	
+
Index: /branches/arm/lisp-kernel/x86_print.c
===================================================================
--- /branches/arm/lisp-kernel/x86_print.c	(revision 13357)
+++ /branches/arm/lisp-kernel/x86_print.c	(revision 13357)
@@ -0,0 +1,608 @@
+/*
+   Copyright (C) 2005-2009, Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <setjmp.h>
+
+#include "lisp.h"
+#include "area.h"
+#include "lisp-exceptions.h"
+#include "lisp_globals.h"
+
+void
+sprint_lisp_object(LispObj, int);
+
+#define PBUFLEN 252
+
+char printbuf[PBUFLEN + 4];
+int bufpos = 0;
+
+jmp_buf escape;
+
+/* Append c to the global printbuf.  On overflow, longjmp back to the
+   setjmp in print_lisp_object, which terminates the output with "...". */
+void
+add_char(char c)
+{
+  if (bufpos >= PBUFLEN) {
+    longjmp(escape, 1);
+  } else {
+    printbuf[bufpos++] = c;
+  }
+}
+
+/* Append exactly len bytes of s (s need not be NUL-terminated). */
+void
+add_string(char *s, int len) 
+{
+  while(len--) {
+    add_char(*s++);
+  }
+}
+
+/* Append the contents of a lisp simple-base-string.  Each
+   lisp_char_code element is truncated to a C char. */
+void
+add_lisp_base_string(LispObj str)
+{
+  lisp_char_code *src = (lisp_char_code *)  (ptr_from_lispobj(str + misc_data_offset));
+  natural i, n = header_element_count(header_of(str));
+
+  for (i=0; i < n; i++) {
+    add_char((char)(*src++));
+  }
+}
+
+/* Append a NUL-terminated C string.  NOTE(review): uses strlen, but
+   <string.h> is not included by this file directly -- presumably it is
+   pulled in via "lisp.h"; verify. */
+void
+add_c_string(char *s)
+{
+  add_string(s, strlen(s));
+}
+
+char numbuf[64], *digits = "0123456789ABCDEF";
+
+
+/* Recursive helper: print n in decimal, most significant digit first.
+   "first" is true only for the outermost call, so a lone zero prints
+   as "0" while leading zeros of larger numbers are suppressed. */
+void
+sprint_unsigned_decimal_aux(natural n, Boolean first)
+{
+  if (n == 0) {
+    if (first) {
+      add_char('0');
+    }
+  } else {
+    sprint_unsigned_decimal_aux(n/10, false);
+    add_char(digits[n%10]);
+  }
+}
+
+/* Print n in unsigned decimal. */
+void
+sprint_unsigned_decimal(natural n)
+{
+  sprint_unsigned_decimal_aux(n, true);
+}
+
+/* Print n in signed decimal.  The magnitude is computed in unsigned
+   arithmetic because "n = -n" has undefined behavior (signed overflow)
+   when n is the most negative signed_natural. */
+void
+sprint_signed_decimal(signed_natural n)
+{
+  natural magnitude = (natural)n;
+
+  if (n < 0) {
+    add_char('-');
+    magnitude = (natural)0 - magnitude;
+  }
+  sprint_unsigned_decimal(magnitude);
+}
+
+
+/* Print n as "#x" followed by a full-width, zero-padded hexadecimal
+   rendering (16 digits on 64-bit words, 8 on 32-bit). */
+void
+sprint_unsigned_hex(natural n)
+{
+  int i, 
+    ndigits =
+#if WORD_SIZE == 64
+    16
+#else
+    8
+#endif
+    ;
+
+  add_c_string("#x");
+  for (i = 0; i < ndigits; i++) {
+    add_char(digits[(n>>(4*(ndigits-(i+1))))&15]);
+  }
+}
+
+/* Print a (possibly dotted) list in standard notation, recursing on
+   each element with the caller's remaining depth budget. */
+void
+sprint_list(LispObj o, int depth)
+{
+  LispObj the_cdr;
+  
+  add_char('(');
+  while(1) {
+    if (o != lisp_nil) {
+      sprint_lisp_object(ptr_to_lispobj(car(o)), depth);
+      the_cdr = ptr_to_lispobj(cdr(o));
+      if (the_cdr != lisp_nil) {
+        add_char(' ');
+        if (fulltag_of(the_cdr) == fulltag_cons) {
+          o = the_cdr;
+          continue;
+        }
+        add_c_string(". ");
+        sprint_lisp_object(the_cdr, depth);
+        break;
+      }
+    }
+    break;
+  }
+  add_char(')');
+}
+
+/* 
+  Print a list of method specializers, using the class name instead of the class object.
+*/
+
+/* Like sprint_list, but an element tagged fulltag_misc is assumed to
+   be a class object and is printed via its name rather than as the
+   raw class. */
+void
+sprint_specializers_list(LispObj o, int depth)
+{
+  LispObj the_cdr, the_car;
+  
+  add_char('(');
+  while(1) {
+    if (o != lisp_nil) {
+      the_car = car(o);
+      if (fulltag_of(the_car) == fulltag_misc) {
+        /* presumably deref(class,3) is the class's slot-vector and
+           slot 4 holds the class name -- TODO confirm against the
+           class layout */
+        sprint_lisp_object(deref(deref(the_car,3), 4), depth);
+      } else {
+        sprint_lisp_object(the_car, depth);
+      }
+      the_cdr = cdr(o);
+      if (the_cdr != lisp_nil) {
+        add_char(' ');
+        if (fulltag_of(the_cdr) == fulltag_cons) {
+          o = the_cdr;
+          continue;
+        }
+        add_c_string(". ");
+        sprint_lisp_object(the_cdr, depth);
+        break;
+      }
+    }
+    break;
+  }
+  add_char(')');
+}
+
+/* Map a vector subtag to a human-readable element-type name, or ""
+   for subtags not listed here. */
+char *
+vector_subtag_name(unsigned subtag)
+{
+  switch (subtag) {
+  case subtag_bit_vector:
+    return "BIT-VECTOR";
+    break;
+  case subtag_instance:
+    return "INSTANCE";
+    break;
+  case subtag_bignum:
+    return "BIGNUM";
+    break;
+  case subtag_u8_vector:
+    return "(UNSIGNED-BYTE 8)";
+    break;
+  case subtag_s8_vector:
+    return "(SIGNED-BYTE 8)";
+    break;
+  case subtag_u16_vector:
+    return "(UNSIGNED-BYTE 16)";
+    break;
+  case subtag_s16_vector:
+    return "(SIGNED-BYTE 16)";
+    break;
+  case subtag_u32_vector:
+    return "(UNSIGNED-BYTE 32)";
+    break;
+  case subtag_s32_vector:
+    return "(SIGNED-BYTE 32)";
+    break;
+#ifdef X8664
+  case subtag_u64_vector:
+    return "(UNSIGNED-BYTE 64)";
+    break;
+  case subtag_s64_vector:
+    return "(SIGNED-BYTE 64)";
+    break;
+#endif
+  case subtag_package:
+    return "PACKAGE";
+    break;
+  case subtag_slot_vector:
+    return "SLOT-VECTOR";
+    break;
+  default:
+    return "";
+    break;
+  }
+}
+
+
+/* Fallback printer for vectors with no specialized representation:
+   "#<N-element vector subtag = #xSS @#xADDR (TYPE-NAME)>". */
+void
+sprint_random_vector(LispObj o, unsigned subtag, natural elements)
+{
+  add_c_string("#<");
+  sprint_unsigned_decimal(elements);
+  add_c_string("-element vector subtag = #x");
+  add_char(digits[subtag>>4]);
+  add_char(digits[subtag&15]);
+  add_c_string(" @");
+  sprint_unsigned_hex(o);
+  add_c_string(" (");
+  add_c_string(vector_subtag_name(subtag));
+  add_c_string(")>");
+}
+
+/* Print a symbol's pname, prefixed with ':' when its home package is
+   the keyword package.  The package slot may hold a cons, in which
+   case its car is the package. */
+void
+sprint_symbol(LispObj o)
+{
+  lispsymbol *rawsym = (lispsymbol *) ptr_from_lispobj(untag(o));
+  LispObj 
+    pname = rawsym->pname,
+    package = rawsym->package_predicate;
+
+  if (fulltag_of(package) == fulltag_cons) {
+    package = car(package);
+  }
+
+  if (package == nrs_KEYWORD_PACKAGE.vcell) {
+    add_char(':');
+  }
+  add_lisp_base_string(pname);
+}
+
+#ifdef X8632
+/* Return the nth (1-based) immediate constant of an x8632 function
+   object.  The first 16-bit word at misc_data_offset appears to give
+   the count of 32-bit code/immediate words preceding the constants --
+   TODO confirm against the x8632 function layout. */
+LispObj
+nth_immediate(LispObj o, unsigned n)
+{
+  u16_t imm_word_count = *(u16_t *)(o + misc_data_offset);
+  natural *constants = (natural *)((char *)o + misc_data_offset + (imm_word_count << 2));
+  LispObj result = (LispObj)(constants[n-1]);
+
+  return result;
+}
+#endif
+
+/* Print a function object as "#<... #xADDR>", distinguishing
+   anonymous functions, method-functions (with qualifiers and
+   specializers), generic functions, and plain named functions.  The
+   name, when present, lives one slot before the lfbits word at the
+   end of the function vector. */
+void
+sprint_function(LispObj o, int depth)
+{
+  LispObj lfbits, header, name = lisp_nil;
+  natural elements;
+
+  header = header_of(o);
+  elements = header_element_count(header);
+  lfbits = deref(o, elements);
+
+  if ((lfbits & lfbits_noname_mask) == 0) {
+    name = deref(o, elements-1);
+  }
+  
+  add_c_string("#<");
+  if (name == lisp_nil) {
+    add_c_string("Anonymous Function ");
+  } else {
+    if (lfbits & lfbits_method_mask) {
+      LispObj 
+	slot_vector = deref(name,3),
+        method_name = deref(slot_vector, 6),
+        method_qualifiers = deref(slot_vector, 2),
+        method_specializers = deref(slot_vector, 3);
+      add_c_string("Method-Function ");
+      sprint_lisp_object(method_name, depth);
+      add_char(' ');
+      if (method_qualifiers != lisp_nil) {
+        /* print a single qualifier bare, a list of them as a list */
+        if (cdr(method_qualifiers) == lisp_nil) {
+          sprint_lisp_object(car(method_qualifiers), depth);
+        } else {
+          sprint_lisp_object(method_qualifiers, depth);
+        }
+        add_char(' ');
+      }
+      sprint_specializers_list(method_specializers, depth);
+      add_char(' ');
+    } else if (lfbits & lfbits_gfn_mask) {
+      LispObj gf_slots;
+      LispObj gf_name;
+
+      add_c_string("Generic Function ");
+
+#ifdef X8632
+      gf_slots = nth_immediate(o, 2);
+      gf_name = deref(gf_slots, 2);
+      sprint_lisp_object(gf_name, depth);
+      add_char(' ');
+#endif
+    } else {
+      add_c_string("Function ");
+      sprint_lisp_object(name, depth);
+      add_char(' ');
+    }
+  }
+  sprint_unsigned_hex(o);
+  add_char('>');
+}
+
+/* Print a tagged return address.  If the address is followed by the
+   "recover function from RIP" instruction sequence, the owning
+   function can be reconstructed and is printed together with the
+   displacement of the return address into it; otherwise the raw
+   address is printed as "(tra ?)". */
+void
+sprint_tra(LispObj o, int depth)
+{
+#ifdef X8664
+  signed sdisp;
+  unsigned disp = 0;
+  LispObj f = 0;
+
+  if ((*((unsigned short *)o) == RECOVER_FN_FROM_RIP_WORD0) &&
+      (*((unsigned char *)(o+2)) == RECOVER_FN_FROM_RIP_BYTE2)) {
+    sdisp = (*(int *) (o+3));
+    f = RECOVER_FN_FROM_RIP_LENGTH+o+sdisp;
+    disp = o-f;
+  }
+
+  if (fulltag_of(f) == fulltag_function) {
+    add_c_string("tagged return address: ");
+    sprint_function(f, depth);
+    add_c_string(" + ");
+    sprint_unsigned_decimal(disp);
+  } else {
+    add_c_string("(tra ?) : ");
+    sprint_unsigned_hex(o);
+  }
+#else
+  LispObj f = 0;
+  unsigned disp = 0;
+
+  if (*(unsigned char *)o == RECOVER_FN_OPCODE) {
+    f = (LispObj)(*((natural *)(o + 1)));
+    disp = o - f;
+  }
+
+  if (f && header_subtag(header_of(f)) == subtag_function) {
+    add_c_string("tagged return address: ");
+    sprint_function(f, depth);
+    add_c_string(" + ");
+    sprint_unsigned_decimal(disp);
+  } else {
+    add_c_string("(tra ?) : ");
+    sprint_unsigned_hex(o);
+  }
+#endif
+}
+	       
+/* Print a node ("gvector") object, dispatching on its subtag:
+   functions, symbols, structs, simple vectors, and instances get
+   specialized formats; anything else falls back to
+   sprint_random_vector. */
+void
+sprint_gvector(LispObj o, int depth)
+{
+  LispObj header = header_of(o);
+  unsigned 
+    elements = header_element_count(header),
+    subtag = header_subtag(header);
+    
+  switch(subtag) {
+  case subtag_function:
+    sprint_function(o, depth);
+    break;
+    
+  case subtag_symbol:
+    sprint_symbol(o);
+    break;
+    
+  case subtag_struct:
+  case subtag_istruct:
+    add_c_string("#<");
+    sprint_lisp_object(deref(o,1), depth);
+    add_c_string(" @");
+    sprint_unsigned_hex(o);
+    add_c_string(">");
+    break;
+   
+  case subtag_simple_vector:
+    {
+      int i;
+      add_c_string("#(");
+      for(i = 1; i <= elements; i++) {
+        if (i > 1) {
+          add_char(' ');
+        }
+        sprint_lisp_object(deref(o, i), depth);
+      }
+      add_char(')');
+      break;
+    }
+
+  case subtag_instance:
+    {
+      LispObj class_or_hash = deref(o,1);
+      
+      /* a fixnum in slot 1 means the instance's class slot holds a
+         hash value, not a class, so print it generically */
+      if (tag_of(class_or_hash) == tag_fixnum) {
+	sprint_random_vector(o, subtag, elements);
+      } else {
+	add_c_string("#<CLASS ");
+	sprint_lisp_object(class_or_hash, depth);
+	add_c_string(" @");
+	sprint_unsigned_hex(o);
+	add_c_string(">");
+      }
+      break;
+    }
+
+	
+      
+  default:
+    sprint_random_vector(o, subtag, elements);
+    break;
+  }
+}
+
+/* Print an immediate-data ("ivector") object: strings literally,
+   one-digit bignums as signed and small two-digit bignums as unsigned
+   integers, macptrs by their address; everything else falls through
+   to sprint_random_vector. */
+void
+sprint_ivector(LispObj o)
+{
+  LispObj header = header_of(o);
+  unsigned 
+    elements = header_element_count(header),
+    subtag = header_subtag(header);
+    
+  switch(subtag) {
+  case subtag_simple_base_string:
+    add_char('"');
+    add_lisp_base_string(o);
+    add_char('"');
+    return;
+    
+  case subtag_bignum:
+    if (elements == 1) {
+      sprint_signed_decimal((signed_natural)(deref(o, 1)));
+      return;
+    }
+    if ((elements == 2) && (deref(o, 2) == 0)) {
+      sprint_unsigned_decimal(deref(o, 1));
+      return;
+    }
+    break;
+    
+  case subtag_double_float:
+    /* NOTE(review): this prints nothing at all for double-floats (the
+       break leaves the switch and the function returns) -- confirm
+       whether that is intended or an unfinished case. */
+    break;
+
+  case subtag_macptr:
+    add_c_string("#<MACPTR ");
+    sprint_unsigned_hex(deref(o,1));
+    add_c_string(">");
+    break;
+
+  default:
+    sprint_random_vector(o, subtag, elements);
+  }
+}
+
+/* Print any uvector, dispatching on its header tag: immediate-data
+   vectors go to sprint_ivector, node vectors to sprint_gvector. */
+void
+sprint_vector(LispObj o, int depth)
+{
+  LispObj header = header_of(o);
+  
+  if (immheader_tag_p(fulltag_of(header))) {
+    sprint_ivector(o);
+  } else {
+    sprint_gvector(o, depth);
+  }
+}
+
+/* Print an arbitrary lisp object, dispatching on its fulltag.  depth
+   bounds the recursion through lists and vectors; once it is
+   exhausted a single '#' is printed instead of the object. */
+void
+sprint_lisp_object(LispObj o, int depth) 
+{
+  if (--depth < 0) {
+    add_char('#');
+  } else {
+    switch (fulltag_of(o)) {
+    case fulltag_even_fixnum:
+    case fulltag_odd_fixnum:
+      sprint_signed_decimal(unbox_fixnum(o));
+      break;
+    
+#ifdef X8664
+    case fulltag_immheader_0:
+    case fulltag_immheader_1:
+    case fulltag_immheader_2:
+    case fulltag_nodeheader_0:
+    case fulltag_nodeheader_1:
+#else
+    case fulltag_immheader:
+    case fulltag_nodeheader:
+#endif      
+      /* a bare header word should not normally be seen as an object */
+      add_c_string("#<header ? ");
+      sprint_unsigned_hex(o);
+      add_c_string(">");
+      break;
+
+#ifdef X8664
+    case fulltag_imm_0:
+    case fulltag_imm_1:
+#else
+    case fulltag_imm:
+#endif
+      if (o == unbound) {
+        add_c_string("#<Unbound>");
+      } else {
+        if (header_subtag(o) == subtag_character) {
+          unsigned c = (o >> charcode_shift);
+          add_c_string("#\\");
+          /* printable ASCII directly, anything else in octal */
+          if ((c >= ' ') && (c < 0x7f)) {
+            add_char(c);
+          } else {
+            sprintf(numbuf, "%#o", c);
+            add_c_string(numbuf);
+          }
+#ifdef X8664
+        } else if (header_subtag(o) == subtag_single_float) {
+          /* the float bits live in the upper half of the boxed word */
+          LispObj xx = o;
+          float f = ((float *)&xx)[1];
+          sprintf(numbuf, "%f", f);
+          add_c_string(numbuf);
+#endif
+        } else {
+
+          add_c_string("#<imm ");
+          sprint_unsigned_hex(o);
+          add_c_string(">");
+        }
+      }
+      break;
+
+#ifdef X8664
+    case fulltag_nil:
+#endif
+    case fulltag_cons:
+      sprint_list(o, depth);
+      break;
+     
+    case fulltag_misc:
+      sprint_vector(o, depth);
+      break;
+
+#ifdef X8664
+    case fulltag_symbol:
+      sprint_symbol(o);
+      break;
+
+    case fulltag_function:
+      sprint_function(o, depth);
+      break;
+#endif
+
+#ifdef X8664
+    case fulltag_tra_0:
+    case fulltag_tra_1:
+#else
+    case fulltag_tra:
+#endif
+      sprint_tra(o,depth);
+      break;
+    }
+  }
+}
+
+/* Render o into the static printbuf (recursion depth limit 5) and
+   return it.  If output overflows, the longjmp from add_char lands on
+   the setjmp here and the buffer is terminated with "...".  Not
+   reentrant: uses one global buffer and one global jmp_buf. */
+char *
+print_lisp_object(LispObj o)
+{
+  bufpos = 0;
+  if (setjmp(escape) == 0) {
+    sprint_lisp_object(o, 5);
+    printbuf[bufpos] = 0;
+  } else {
+    printbuf[PBUFLEN+0] = '.';
+    printbuf[PBUFLEN+1] = '.';
+    printbuf[PBUFLEN+2] = '.';
+    printbuf[PBUFLEN+3] = 0;
+  }
+  return printbuf;
+}
Index: /branches/arm/lisp-kernel/xlbt.c
===================================================================
--- /branches/arm/lisp-kernel/xlbt.c	(revision 13357)
+++ /branches/arm/lisp-kernel/xlbt.c	(revision 13357)
@@ -0,0 +1,171 @@
+/*
+   Copyright (C) 2005-2009 Clozure Associates
+   This file is part of Clozure CL.  
+
+   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
+   License , known as the LLGPL and distributed with Clozure CL as the
+   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
+   which is distributed with Clozure CL as the file "LGPL".  Where these
+   conflict, the preamble takes precedence.  
+
+   Clozure CL is referenced in the preamble as the "LIBRARY."
+
+   The LLGPL is also available online at
+   http://opensource.franz.com/preamble.html
+*/
+
+#include "lispdcmd.h"
+#include <stdio.h>
+
+
+
+/*
+ * Print one backtrace line for FRAME: the frame address, the return
+ * address, and (when it can be recovered) the containing function and
+ * the offset of the return address within it.  Prints nothing for a
+ * frame whose return address matches neither pattern below.
+ *
+ * NOTE(review): both Dprintf calls pass pointer/LispObj arguments to
+ * %08X / %016lX conversions; that relies on pointers and
+ * unsigned/unsigned-long having the same representation and is
+ * formally mismatched per C — consider %p or explicit casts.
+ */
+void
+print_lisp_frame(lisp_frame *frame)
+{
+  LispObj pc = frame->tra, fun=0;
+  int delta = 0;
+
+  /* If the saved return address is the multiple-value return
+     trampoline (RET1VALN), the real return address was spilled to
+     the frame's xtra slot. */
+  if (pc == lisp_global(RET1VALN)) {
+    pc = frame->xtra;
+  }
+#ifdef X8632
+  if (fulltag_of(pc) == fulltag_tra) {
+    /* A tagged return address is expected to point at a
+       RECOVER_FN sequence whose immediate operand is the
+       containing function object. */
+    if (*((unsigned char *)pc) == RECOVER_FN_OPCODE) {
+      natural n = *((natural *)(pc + 1));
+      fun = (LispObj)n;
+    }
+    if (fun && header_subtag(header_of(fun)) == subtag_function) {
+      delta = pc - fun;
+      Dprintf("(#x%08X) #x%08X : %s + %d", frame, pc, print_lisp_object(fun), delta);
+      return;
+    }
+  }
+  /* A zero return address marks an exception callback frame (xcf);
+     the function is recorded in the frame itself, but the offset of
+     the fault within it is unknown, hence "+ ??". */
+  if (pc == 0) {
+    fun = ((xcf *)frame)->nominal_function;
+    Dprintf("(#x%08X) #x%08X : %s + ??", frame, pc, print_lisp_object(fun));
+    return;
+  }
+#else
+  if (tag_of(pc) == tag_tra) {
+    /* On x8664 the RECOVER_FN sequence is RIP-relative: match its
+       first three bytes, then add the signed 32-bit displacement to
+       the address just past the sequence to find the function. */
+    if ((*((unsigned short *)pc) == RECOVER_FN_FROM_RIP_WORD0) &&
+        (*((unsigned char *)(pc+2)) == RECOVER_FN_FROM_RIP_BYTE2)) {
+      int sdisp = (*(int *) (pc+3));
+      fun = RECOVER_FN_FROM_RIP_LENGTH+pc+sdisp;
+    }
+    if (fulltag_of(fun) == fulltag_function) {
+      delta = pc - fun;
+      Dprintf("(#x%016lX) #x%016lX : %s + %d", frame, pc, print_lisp_object(fun), delta);
+      return;
+    }
+  }
+  /* Zero return address: exception callback frame, as above. */
+  if (pc == 0) {
+    fun = ((xcf *)frame)->nominal_function;
+    Dprintf("(#x%016lX) #x%016lX : %s + ??", frame, pc, print_lisp_object(fun));
+    return;
+  }
+#endif
+}
+
+/*
+ * Decide whether F looks like a genuine lisp stack frame, by
+ * inspecting its saved return address.  Recognized: a tagged return
+ * address, either lexpr-return trampoline, or zero (an exception
+ * callback frame).  A NULL frame pointer is never a lisp frame.
+ */
+Boolean
+lisp_frame_p(lisp_frame *f)
+{
+  LispObj ra;
+
+  if (!f) {
+    return false;
+  }
+  ra = f->tra;
+  /* The multiple-value return trampoline spills the real return
+     address into the xtra slot. */
+  if (ra == lisp_global(RET1VALN)) {
+    ra = f->xtra;
+  }
+#ifdef X8632
+  if (fulltag_of(ra) == fulltag_tra) {
+    return true;
+  }
+#else
+  if (tag_of(ra) == tag_tra) {
+    return true;
+  }
+#endif
+  return ((ra == lisp_global(LEXPR_RETURN)) ||
+          (ra == lisp_global(LEXPR_RETURN1V)) ||
+          (ra == 0));
+}
+
+/*
+ * Print a backtrace line for each lisp frame from START (inclusive)
+ * up to END (exclusive), following each frame's backlink.  Stops
+ * quietly at the first non-lisp frame with a zero backlink, reports
+ * a bogus frame otherwise, and reports a backlink that moves the
+ * wrong way (frames must ascend toward END).
+ *
+ * Fix: the diagnostics formerly printed pointers with %lx / %x,
+ * which is undefined behavior and truncates addresses where long or
+ * int is narrower than a pointer (e.g. 64-bit Windows); %p with a
+ * (void *) cast is the portable form.
+ */
+void
+walk_stack_frames(lisp_frame *start, lisp_frame *end) 
+{
+  lisp_frame *next;
+  Dprintf("\n");
+  while (start < end) {
+
+    if (lisp_frame_p(start)) {
+      print_lisp_frame(start);
+    } else {
+      if (start->backlink) {
+        fprintf(dbgout, "Bogus  frame %p\n", (void *)start);
+      }
+      return;
+    }
+    
+    next = start->backlink;
+    if (next == 0) {
+      /* A zero backlink terminates the chain; treat it as the end
+         of the walk range. */
+      next = end;
+    }
+    if (next < start) {
+      fprintf(dbgout, "Bad frame! (%p < %p)\n", (void *)next, (void *)start);
+      break;
+    }
+    start = next;
+  }
+}
+
+/*
+ * Describe TCR's interrupt state as a static string: "enabled" when
+ * the interrupt level is non-negative, otherwise "disabled" or
+ * "disabled(pending)" depending on whether an interrupt is waiting.
+ */
+char *
+interrupt_level_description(TCR *tcr)
+{
+  /* A negative interrupt level means interrupts are disabled. */
+  if (((signed_natural) TCR_INTERRUPT_LEVEL(tcr)) >= 0) {
+    return "enabled";
+  }
+  return tcr->interrupt_pending ? "disabled(pending)" : "disabled";
+}
+
+/*
+ * Print a lisp backtrace starting at CURRENT_FP.  If CURRENT_FP
+ * doesn't fall inside the current thread's value-stack area, fall
+ * back to the frame pointer the thread saved (tcr->save_fp); if that
+ * is out of range too, report the frame pointer as unknown instead
+ * of walking anything.
+ *
+ * Fix: removed the local cs_area, which was assigned from
+ * tcr->cs_area but never read (dead store).
+ */
+void
+plbt_sp(LispObj current_fp)
+{
+  area *vs_area;
+  TCR *tcr = (TCR *)get_tcr(true);
+  char *ilevel = interrupt_level_description(tcr);
+
+  vs_area = tcr->vs_area;
+  if ((((LispObj) ptr_to_lispobj(vs_area->low)) > current_fp) ||
+      (((LispObj) ptr_to_lispobj(vs_area->high)) < current_fp)) {
+    current_fp = (LispObj) (tcr->save_fp);
+  }
+  if ((((LispObj) ptr_to_lispobj(vs_area->low)) > current_fp) ||
+      (((LispObj) ptr_to_lispobj(vs_area->high)) < current_fp)) {
+    Dprintf("\nFrame pointer [#x" LISP "] in unknown area.", current_fp);
+  } else {
+    fprintf(dbgout, "current thread: tcr = 0x" LISP ", native thread ID = 0x" LISP ", interrupts %s\n", tcr, tcr->native_thread_id, ilevel);
+    walk_stack_frames((lisp_frame *) ptr_from_lispobj(current_fp), (lisp_frame *) (vs_area->high));
+    /*      walk_other_areas();*/
+  }
+}
+
+
+/*
+ * Print a lisp backtrace for the exception context XP, starting from
+ * the interrupted frame-pointer register (ebp on x8632, rbp
+ * otherwise).
+ */
+void
+plbt(ExceptionInformation *xp)
+{
+  LispObj fp;
+
+#ifdef X8632
+  fp = xpGPR(xp, Iebp);
+#else
+  fp = xpGPR(xp, Irbp);
+#endif
+  plbt_sp(fp);
+}
