Index: /branches/x8664-call/ccl/lisp-kernel/x86-spentry64.s
===================================================================
--- /branches/x8664-call/ccl/lisp-kernel/x86-spentry64.s	(revision 6315)
+++ /branches/x8664-call/ccl/lisp-kernel/x86-spentry64.s	(revision 6316)
@@ -37,4 +37,10 @@
 	jump_fname()
 ])
+
+define([pop_jump_builtin],[
+        pop %ra0                /* new convention keeps the return address on the stack */
+        jump_builtin($1,$2)     /* the old-convention builtin glue expects it in %ra0 */
+])               
+        
 
 _spentry(bad_funcall)	
@@ -58,5 +64,17 @@
 _endsubp(fix_overflow)
 
-	
+
+_spentry(nfix_overflow)        
+C(nfix_one_bit_overflow):	/* fixnum op in %arg_z overflowed by exactly one bit */
+	__(movq $two_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed([],aligned_bignum_size(2)))	/* fresh bignum arrives in %temp0 */
+	__(unbox_fixnum(%arg_z,%imm0))
+	__(movq $0xe000000000000000,%imm1)
+	__(mov %temp0,%arg_z)
+	__(xorq %imm1,%imm0)	/* flip the top bits to undo the one-bit wraparound */
+	__(movq %imm0,misc_data_offset(%arg_z))
+	__(ret)	
+_endsubp(nfix_overflow)
+        	
 /* Make a lisp integer (fixnum or two-digit bignum) from the signed  */
 /* 64-bit value in %imm0.   */
@@ -75,5 +93,19 @@
 0:	__(jmp *%ra0)
 _endsubp(makes64)	
-				
+
+_spentry(nmakes64)
+	__(movq %imm0,%imm1)
+	__(shlq $fixnumshift,%imm1)
+	__(movq %imm1,%arg_z)	/* candidate boxed fixnum */
+	__(sarq $fixnumshift,%imm1)
+	__(cmpq %imm1,%imm0)	/* box/unbox round trip lossless ? */
+	__(jz,pt 0f)		/* yes: the value fits in a fixnum */
+	__(movd %imm0,%mm0)	/* save the value; allocation clobbers %imm0 */
+	__(movq $two_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(2)))
+	__(movq %mm0,misc_data_offset(%arg_z))
+0:	__(ret)
+_endsubp(nmakes64)	
+        				
 
 /* %imm1:%imm0 constitute a signed integer, almost certainly a bignum.  */
@@ -119,4 +151,46 @@
 _endfn
 
+/* %imm1:%imm0 constitute a signed integer, almost certainly a bignum.  */
+/* Make a lisp integer out of those 128 bits ..   */
+	
+_startfn(C(nmakes128))
+	
+        /*  We're likely to have to make a bignum out of the integer in %imm1 and  */
+        /*  %imm0. We'll need to use %imm0 and %imm1 to cons the bignum, and  */
+        /*  will need to do some arithmetic (determining significant bigits)  */
+        /*  on %imm0 and %imm1 in order to know how large that bignum needs to be.  */
+        /*  Cache %imm0 and %imm1 in %mm0 and %mm1.   */
+   
+	__(movd %imm0,%mm0)
+	__(movd %imm1,%mm1)
+	
+        /* If %imm1 is just a sign extension of %imm0, make a 64-bit signed integer.   */
+	
+	__(sarq $63,%imm0) 	/* %imm0 = sign word of the low half */
+	__(cmpq %imm0,%imm1)
+	__(movd %mm0,%imm0)
+	__(je _SPnmakes64)
+	
+        /* Otherwise, if the high 32 bits of %imm1 are a sign-extension of the  */
+        /* low 32 bits of %imm1, make a 3-digit bignum.  If the upper 32 bits  */
+        /* of %imm1 are significant, make a 4 digit bignum   */
+	
+	__(movq %imm1,%imm0)
+	__(shlq $32,%imm0)
+	__(sarq $32,%imm0)	/* %imm0 = %imm1 sign-extended from 32 bits */
+	__(cmpq %imm0,%imm1)
+	__(jz 3f)
+	__(mov $four_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(4)))
+	__(movq %mm0,misc_data_offset(%arg_z))
+	__(movq %mm1,misc_data_offset+8(%arg_z))
+	__(ret)
+3:	__(mov $three_digit_bignum_header,%imm0)
+	__(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(3)))
+	__(movq %mm0,misc_data_offset(%arg_z))
+	__(movd %mm1,misc_data_offset+8(%arg_z))	/* only the low 32 bits of %mm1 are significant */
+	__(ret)
+_endfn
+        
 /* %imm1:%imm0 constitute an unsigned integer, almost certainly a bignum.  */
 /* Make a lisp integer out of those 128 bits ..  */
@@ -4604,5 +4678,466 @@
         __(jmp 8b)
 _endsubp(aset3)
+
         
+/* %arg_z <- %arg_y + %arg_z.  Do the fixnum case - including overflow -  */
+/* inline.  Call out otherwise.   */
+_spentry(nbuiltin_plus)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(addq %arg_y,%arg_z)		/* boxed add; overflow means a bignum result */
+	__(jo,pn C(nfix_one_bit_overflow))
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_plus,2))
+_endsubp(nbuiltin_plus)
+	
+
+/* %arg_z <- %arg_y - %arg_z.  Do the fixnum case - including overflow -  */
+/*  inline.  Call out otherwise.   */
+_spentry(nbuiltin_minus)			
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(xchgq %arg_y,%arg_z)		/* so the difference lands in %arg_z */
+	__(subq %arg_y,%arg_z)		/* %arg_z = original %arg_y - original %arg_z */
+	__(jo,pn C(nfix_one_bit_overflow))
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_minus,2))
+_endsubp(nbuiltin_minus)
+
+/* %arg_z <- %arg_z * %arg_y.  Do the fixnum case - including overflow -  */
+/* inline.  Call out otherwise.   */
+_spentry(nbuiltin_times)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 2f)
+	__(unbox_fixnum(%arg_z,%imm0))
+	/* 128-bit fixnum result in %imm1:%imm0. Overflow set if %imm1  */
+	/* is significant   */
+	__(imul %arg_y)		/* unboxed z times boxed y yields a boxed product */
+	__(jo 1f)
+	__(mov %imm0,%arg_z)
+	__(ret)
+1:	__(unbox_fixnum(%arg_z,%imm0))	/* overflowed: redo fully unboxed for a bignum */
+	__(unbox_fixnum(%arg_y,%imm1))
+	__(imul %imm1)
+	__(jmp C(nmakes128))
+2:	__(pop_jump_builtin(_builtin_times,2))
+_endsubp(nbuiltin_times)
+
+_spentry(nbuiltin_div)
+	__(pop_jump_builtin(_builtin_div,2))	/* no fast path: always call out */
+_endsubp(nbuiltin_div)
+/* %arg_z <- (= %arg_y %arg_z).	  */
+_spentry(nbuiltin_eq)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(rcmpq(%arg_z,%arg_y))
+	__(condition_to_boolean(e,%imm0,%arg_z))	/* T if equal, else NIL */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_eq,2))
+_endsubp(nbuiltin_eq)
+	
+/* %arg_z <- (/= %arg_y %arg_z).	  */
+_spentry(nbuiltin_ne)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(rcmpq(%arg_z,%arg_y))
+	__(condition_to_boolean(ne,%imm0,%arg_z))	/* T if not equal, else NIL */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_ne,2))
+_endsubp(nbuiltin_ne)
+	
+/* %arg_z <- (> %arg_y %arg_z).	  */
+_spentry(nbuiltin_gt)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(rcmpq(%arg_y,%arg_z))
+	__(condition_to_boolean(g,%imm0,%arg_z))	/* T if %arg_y > %arg_z */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_gt,2))
+_endsubp(nbuiltin_gt)
+
+/* %arg_z <- (>= %arg_y %arg_z).	  */
+_spentry(nbuiltin_ge)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(rcmpq(%arg_y,%arg_z))
+	__(condition_to_boolean(ge,%imm0,%arg_z))	/* T if %arg_y >= %arg_z */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_ge,2))
+_endsubp(nbuiltin_ge)
+	
+/* %arg_z <- (< %arg_y %arg_z).	  */
+_spentry(nbuiltin_lt)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(rcmpq(%arg_y,%arg_z))
+	__(condition_to_boolean(l,%imm0,%arg_z))	/* T if %arg_y < %arg_z */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_lt,2))
+_endsubp(nbuiltin_lt)
+
+/* %arg_z <- (<= %arg_y %arg_z).   */
+_spentry(nbuiltin_le)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(rcmpq(%arg_y,%arg_z))
+	__(condition_to_boolean(le,%imm0,%arg_z))	/* T if %arg_y <= %arg_z */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_le,2))
+_endsubp(nbuiltin_le)
+
+_spentry(nbuiltin_eql)
+	__(cmpq %arg_y,%arg_z)
+	__(je 1f)			/* EQ implies EQL */
+	/* Not EQ.  Could only possibly be EQL if both are tag-misc  */
+	/* and both have the same subtag   */
+	__(extract_lisptag(%arg_y,%imm0))
+	__(extract_lisptag(%arg_z,%imm1))
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 2f)
+	__(cmpb %imm0_b,%imm1_b)
+	__(jne 2f)
+	__(extract_subtag(%arg_y,%imm0_b))
+	__(extract_subtag(%arg_z,%imm1_b))
+	__(cmpb %imm0_b,%imm1_b)
+	__(jne 2f)
+	__(pop_jump_builtin(_builtin_eql,2))	/* same subtag: compare contents out of line */
+1:	__(movl $t_value,%arg_z_l)
+	__(ret)
+2:	__(movl $nil_value,%arg_z_l)	/* definitely not EQL */
+	__(ret)	
+_endsubp(nbuiltin_eql)
+
+_spentry(nbuiltin_length)
+	__(extract_lisptag(%arg_z,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jz 2f)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jnz 8f)
+	__(extract_subtag(%arg_z,%imm0_b))
+	__(rcmpb(%imm0_b,$min_vector_subtag))
+	__(jb 8f)			/* not a vector: call out */
+	__(je 1f)
+	/* (simple-array * (*))   */
+	__(movq %arg_z,%arg_y)
+	__(vector_length(%arg_y,%arg_z))
+	__(ret)
+1:	/* vector header   */
+	__(movq vectorH.logsize(%arg_z),%arg_z)
+	__(ret)
+2:	/* list.  Maybe null, maybe dotted or circular.   */
+	__(movq $-fixnumone,%temp2)	/* boxed count, pre-decremented */
+	__(movq %arg_z,%temp0)	/* fast pointer   */
+	__(movq %arg_z,%temp1)  /* slow pointer   */
+3:	__(extract_lisptag(%temp0,%imm0))	
+	__(compare_reg_to_nil(%temp0))
+	__(leaq fixnumone(%temp2),%temp2)	/* count this cell; lea preserves flags */
+	__(je 9f)			/* hit NIL: proper list, count is done */
+	__(cmpb $tag_list,%imm0_b)
+	__(jne 8f)			/* improper list: let the builtin complain */
+	__(extract_lisptag(%temp1,%imm1))
+	__(testb $fixnumone,%temp2_b)	/* advance the slow pointer every other step */
+	__(_cdr(%temp0,%temp0))
+	__(je 3b)
+	__(cmpb $tag_list,%imm1_b)
+	__(jne 8f)
+	__(_cdr(%temp1,%temp1))
+	__(cmpq %temp0,%temp1)	/* pointers met: the list is circular */
+	__(jne 3b)
+8:	
+	__(pop_jump_builtin(_builtin_length,1))
+9:	
+	__(movq %temp2,%arg_z)
+	__(ret)		
+_endsubp(nbuiltin_length)
+
+	
+_spentry(nbuiltin_seqtype)
+	__(extract_lisptag(%arg_z,%imm0))
+	__(cmpb $tag_list,%imm0_b)
+	__(jz 1f)
+	__(cmpb $tag_misc,%imm0_b)
+	__(jne 2f)
+	__(movb misc_subtag_offset(%arg_z),%imm0_b)
+	__(rcmpb(%imm0_b,$min_vector_subtag))
+	__(jb 2f)			/* neither list nor vector: call out */
+	__(movl $nil_value,%arg_z_l)	/* vector */
+	__(ret)
+1:	__(movl $t_value,%arg_z_l)	/* list */
+	__(ret)
+2:	
+	__(pop_jump_builtin(_builtin_seqtype,1))
+_endsubp(nbuiltin_seqtype)
+
+_spentry(nbuiltin_assq)
+	__(cmpb $fulltag_nil,%arg_z_b)
+	__(jz 5f)			/* empty alist: fall through to ret with NIL */
+1:	__(movb $tagmask,%imm0_b)
+	__(andb %arg_z_b,%imm0_b)
+	__(cmpb $tag_list,%imm0_b)
+	__(jz,pt 2f)
+	__(uuo_error_reg_not_list(Rarg_z))
+2:	__(_car(%arg_z,%arg_x))		/* %arg_x = current alist entry */
+	__(_cdr(%arg_z,%arg_z))
+	__(cmpb $fulltag_nil,%arg_x_b)
+	__(jz 4f)			/* skip NIL entries */
+	__(movb $tagmask,%imm0_b)
+	__(andb %arg_x_b,%imm0_b)
+	__(cmpb $tag_list,%imm0_b)
+	__(jz,pt 3f)
+	__(uuo_error_reg_not_list(Rarg_x))
+3:	__(_car(%arg_x,%temp0))
+	__(cmpq %temp0,%arg_y)		/* entry key EQ to the sought key ? */
+	__(jnz 4f)
+	__(movq %arg_x,%arg_z)		/* found: return the entry */
+	__(ret)
+4:	__(cmpb $fulltag_nil,%arg_z_b)
+5:	__(jnz 1b)			/* loop while the tail is non-NIL */
+	__(ret)			
+_endsubp(nbuiltin_assq)	
+
+_spentry(nbuiltin_memq)
+	__(cmpb $fulltag_nil,%arg_z_b)
+	__(jmp 3f)			/* enter the loop at its termination test */
+1:	__(movb $tagmask,%imm0_b)
+	__(andb %arg_z_b,%imm0_b)
+	__(cmpb $tag_list,%imm0_b)
+	__(jz,pt 2f)
+	__(uuo_error_reg_not_list(Rarg_z))
+2:	__(_car(%arg_z,%arg_x))
+	__(_cdr(%arg_z,%temp0))
+	__(cmpq %arg_x,%arg_y)		/* car EQ to the sought item ? */
+	__(jz 4f)			/* yes: return the current tail */
+	__(cmpb $fulltag_nil,%temp0_b)
+	__(movq %temp0,%arg_z)
+3:	__(jnz 1b)			/* loop while the tail is non-NIL */
+4:	__(ret)				
+_endsubp(nbuiltin_memq)
+
+	
+_spentry(nbuiltin_logbitp)
+	__(movb %arg_z_b,%imm0_b)
+	__(orb %arg_y_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jnz 1f)
+	__(unbox_fixnum(%arg_y,%imm0))	/* bit number; sign flag tested below */
+        __(movl $logbitp_max_bit-1+fixnumshift,%imm1_l)
+        __(js 1f)               /* bit number negative */
+	__(addb $fixnumshift,%imm0_b)	/* index into the boxed fixnum */
+	__(cmpq $logbitp_max_bit<<fixnumshift,%arg_y)
+	__(cmovael %imm1_l,%imm0_l)	/* clamp too-large bit numbers so bt tests the sign */
+	__(bt %imm0,%arg_z)
+	__(condition_to_boolean(b,%imm0,%arg_z))	/* CF holds the tested bit */
+	__(ret)
+1:	__(pop_jump_builtin(_builtin_logbitp,2))
+_endsubp(nbuiltin_logbitp)
+
+_spentry(nbuiltin_logior)
+	__(movb %arg_y_b,%imm0_b)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(orq %arg_y,%arg_z)		/* OR of boxed fixnums is a boxed fixnum */
+	__(ret)
+1:	
+	__(pop_jump_builtin(_builtin_logior,2))
+		
+_endsubp(nbuiltin_logior)
+
+_spentry(nbuiltin_logand)
+	__(movb %arg_y_b,%imm0_b)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(andq %arg_y,%arg_z)		/* AND of boxed fixnums is a boxed fixnum */
+	__(ret)
+1:		
+	__(pop_jump_builtin(_builtin_logand,2))
+_endsubp(nbuiltin_logand)
+
+_spentry(nbuiltin_negate)
+	__(testb $fixnummask,%arg_z_b)	/* fixnum ? */
+	__(jne 1f)
+	__(negq %arg_z)
+	__(jo,pn C(nfix_one_bit_overflow))	/* only most-negative-fixnum overflows */
+	__(ret)
+1:		
+	__(pop_jump_builtin(_builtin_negate,1))	
+_endsubp(nbuiltin_negate)
+
+_spentry(nbuiltin_logxor)
+	__(movb %arg_y_b,%imm0_b)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 1f)
+	__(xorq %arg_y,%arg_z)		/* XOR of boxed fixnums is a boxed fixnum */
+	__(ret)
+1:		
+	__(pop_jump_builtin(_builtin_logxor,2))
+_endsubp(nbuiltin_logxor)
+
+
+_spentry(nbuiltin_aset1)
+        __(pop %ra0)            /* for now: the helpers below use the old convention */
+	__(extract_typecode(%arg_x,%imm0))
+	__(box_fixnum(%imm0,%temp0))
+	__(cmpb $min_vector_subtag,%imm0_b)
+	__(ja _SPsubtag_misc_set)	/* simple 1-d vector: fast path */
+	__(jump_builtin(_builtin_aset1,3))
+_endsubp(nbuiltin_aset1)
+
+/* We have to be a little careful here	%cl has to be used for  */
+/*   the (unboxed) shift count in all variable-length shifts, and  */
+/*   %temp2 = %rcx.  Zero all but the low 8 (or 6) bits of %rcx,  */
+/*   so that the shift count doesn't confuse the GC.  */
+
+_spentry(nbuiltin_ash)
+	__(movb %arg_y_b,%imm0_b)
+	__(orb %arg_z_b,%imm0_b)
+	__(testb $fixnummask,%imm0_b)	/* both args fixnums ? */
+	__(jne 9f)
+	__(unbox_fixnum(%arg_y,%imm1))
+	__(unbox_fixnum(%arg_z,%imm0))
+	/* Z flag set if zero ASH shift count   */
+	__(jnz 1f)
+	__(movq %arg_y,%arg_z)	/* shift by 0   */
+	__(ret)			/* was jmp *%ra0 - %ra0 is never loaded in this entry */
+1:	__(jns 3f)
+	__(rcmpq(%imm0,$-63))
+	__(jg 2f)
+	__(sar $63,%imm1)	/* count <= -64: only the sign survives */
+	__(box_fixnum(%imm1,%arg_z))
+	__(ret)
+2:	/* Right-shift by small fixnum   */
+	__(negb %imm0_b)
+	__(movzbl %imm0_b,%ecx)
+	__(sar %cl,%imm1)
+	__(xorl %ecx,%ecx)	/* clear %rcx (= %temp2) so the GC sees no junk */
+	__(box_fixnum(%imm1,%arg_z))
+	__(ret)
+3:      /* Left shift by fixnum. We cant shift by more than 63 bits, though  */
+	/* shifting by 64 is actually easy.   */
+	__(rcmpq(%imm0,$64))
+	__(jg 9f)
+	__(jne 4f)
+	/* left-shift by 64-bits exactly   */
+	__(xorl %imm0_l,%imm0_l)
+	__(jmp C(nmakes128))
+4:	/* left-shift by 1..63 bits.  Safe to move shift count to %rcx/%cl   */
+	__(movzbl %imm0_b,%ecx)	 /* zero-extending mov   */
+	__(movq %imm1,%imm0)
+	__(sarq $63,%imm1)	/* %imm1:%imm0 = value sign-extended to 128 bits */
+	__(js 5f)
+	__(shld %cl,%imm0,%imm1)
+	__(shl %cl,%imm0)
+	__(xorb %cl,%cl)
+	__(jmp C(nmakes128))
+5:	__(shld %cl,%imm0,%imm1)	/* negative value: same shift; nmakes128 keeps the sign */
+	__(shl %cl,%imm0)
+	__(xorb %cl,%cl)
+	__(jmp C(nmakes128))
+9:	
+	__(pop_jump_builtin(_builtin_ash,2))
+_endsubp(nbuiltin_ash)
+
+_spentry(nbuiltin_aref1)
+        __(pop %ra0)            /* for now: the helpers below use the old convention */
+	__(extract_typecode(%arg_y,%imm0))
+	__(cmpb $min_vector_subtag,%imm0_b)
+	__(box_fixnum_no_flags(%imm0,%arg_x))	/* preserve the cmp flags */
+	__(ja _SPsubtag_misc_ref)	/* simple 1-d vector: fast path */
+	__(jump_builtin(_builtin_aref1,2))
+_endsubp(nbuiltin_aref1)
+
+
+_spentry(nmakeu64)
+	__(movq %imm0,%imm1)
+	__(shlq $fixnumshift+1,%imm1)
+	__(movq %imm1,%arg_z)	/* Tagged as a fixnum, 2x    */
+	__(shrq $fixnumshift+1,%imm1)
+	__(shrq %arg_z)		/* halve: now a properly boxed fixnum */
+	__(cmpq %imm0,%imm1)	/* did the shifts lose any bits ? */
+	__(je 9f)
+	__(testq %imm0,%imm0)
+	__(movd %imm0,%mm0)	/* save the value; allocation clobbers %imm0 */
+	__(js 3f)
+	/* Make a 2-digit bignum.   */
+	__(movl $two_digit_bignum_header,%imm0_l)
+	__(movl $aligned_bignum_size(2),%imm1_l)
+	__(Misc_Alloc(%arg_z))
+	__(movq %mm0,misc_data_offset(%arg_z))
+	__(ret)			/* was jmp *%ra0 - %ra0 is never loaded in this entry */
+3:	__(movl $three_digit_bignum_header,%imm0_l)	/* sign bit set: extra zero digit keeps it unsigned */
+	__(movl $aligned_bignum_size(3),%imm1_l)
+	__(Misc_Alloc(%arg_z))
+	__(movq %mm0,misc_data_offset(%arg_z))
+9:	__(ret)
+_endsubp(nmakeu64)
+
+/* on entry: arg_z = symbol.  On exit, arg_z = value (possibly  */
+/* unbound_marker), arg_y = symbol   */
+_spentry(nspecref)
+	__(movq symbol.binding_index(%arg_z),%imm0)
+	__(cmp %rcontext:tcr.tlb_limit,%imm0)
+	__(movq %rcontext:tcr.tlb_pointer,%imm1)
+	__(movq %arg_z,%arg_y)
+	__(jae 7f)			/* binding index outside this thread's TLB */
+	__(movq (%imm1,%imm0),%arg_z)	/* thread-local value */
+	__(cmpb $no_thread_local_binding_marker,%arg_z_b)
+	__(jne 8f)
+7:	__(movq symbol.vcell(%arg_y),%arg_z)	/* fall back to the global value cell */
+8:	__(ret)		
+_endsubp(nspecref)
+
+/* arg_y = special symbol, arg_z = new value.           */
+_spentry(nspecset)
+	__(movq symbol.binding_index(%arg_y),%imm0)
+	__(cmp %rcontext:tcr.tlb_limit,%imm0)
+	__(movq %rcontext:tcr.tlb_pointer,%imm1)
+	__(jae 1f)			/* binding index outside this thread's TLB */
+	__(movq (%imm1,%imm0),%arg_x)
+	__(cmpb $no_thread_local_binding_marker,%arg_x_b)
+	__(je 1f)
+	__(movq %arg_z,(%imm1,%imm0))	/* store into the thread-local binding */
+	__(ret)
+1:	__(lea fulltag_misc-fulltag_symbol(%arg_y),%arg_x)	/* symbol viewed as a misc object */
+	__(movq $1<<fixnumshift,%arg_y)	/* boxed slot index for gvset */
+        __(pop %ra0)			/* gvset still uses the old convention */
+	__(jmp _SPgvset)
+_endsubp(nspecset)
+
+_spentry(nspecrefcheck)
+	__(movq symbol.binding_index(%arg_z),%imm0)
+	__(cmp %rcontext:tcr.tlb_limit,%imm0)
+	__(movq %rcontext:tcr.tlb_pointer,%imm1)
+	__(movq %arg_z,%arg_y)
+	__(jae 7f)			/* binding index outside this thread's TLB */
+	__(movq (%imm1,%imm0),%arg_z)	/* thread-local value */
+	__(cmpb $no_thread_local_binding_marker,%arg_z_b)
+	__(jne 8f)
+7:	__(movq symbol.vcell(%arg_y),%arg_z)	/* fall back to the global value cell */
+8:	__(cmpb $unbound_marker,%arg_z_b)	/* like nspecref, plus a boundp check */
+	__(jne,pt 9f)
+	__(uuo_error_reg_unbound(Rarg_y))
+9:	__(ret)		
+_endsubp(nspecrefcheck)
+                                
         
 _spentry(poweropen_callbackX)
