Index: /branches/arm/lisp-kernel/arm-asmutils.s
===================================================================
--- /branches/arm/lisp-kernel/arm-asmutils.s	(revision 13668)
+++ /branches/arm/lisp-kernel/arm-asmutils.s	(revision 13668)
@@ -0,0 +1,194 @@
+/*   Copyright (C) 2009 Clozure Associates */
+/*   Copyright (C) 1994-2001 Digitool, Inc */
+/*   This file is part of Clozure CL. */
+
+/*   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public */
+/*   License , known as the LLGPL and distributed with Clozure CL as the */
+/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
+/*   which is distributed with Clozure CL as the file "LGPL".  Where these */
+/*   conflict, the preamble takes precedence. */
+
+/*   Clozure CL is referenced in the preamble as the "LIBRARY." */
+
+/*   The LLGPL is also available online at */
+/*   http://opensource.franz.com/preamble.html */
+
+
+	
+
+	include(lisp.s)
+
+	_beginfile
+/*  Zero R4 cache lines, starting at address in R3.  Each line is assumed to be */
+/* R5 bytes wide. */
+_exportfn(C(zero_cache_lines))         /* NOTE(review): PPC body in arm-asmutils.s -- presumably an unported copy; confirm against the ARM port plan */
+	__(cmpri(cr0,r4,0))             /* zero lines requested? */
+	__(mtctr r4)                    /* CTR = number of lines to zero */
+	__(beqlr)                       /* count == 0: return immediately */
+1:
+	__(DCBZL(0,r3))                 /* zero the cache line addressed by r3 */
+	__(add r3,r3,r5)                /* advance by the line width passed in r5 */
+	__(bdnz 1b)                     /* decrement CTR, loop while nonzero */
+	__(blr)
+_endfn
+
+/*  Flush R4 cache lines, starting at address in R3.  Each line is */
+/* assumed to be R5 bytes wide. */
+_exportfn(C(flush_cache_lines))        /* NOTE(review): PPC body in arm-asmutils.s -- presumably an unported copy; confirm */
+	__(cmpri(cr0,r4,0))             /* zero lines requested? */
+	__(mtctr r4)                    /* CTR = line count for the first pass */
+        __(mr r6,r3)                    /* save start address for the second pass */
+	__(beqlr)                       /* count == 0: return immediately */
+1:
+	__(dcbst 0,r3)                  /* push the data-cache line to memory */
+        __(add r3,r3,r5)                /* advance by line width in r5 */
+        __(bdnz 1b)
+	__(sync)                /* wait until dcbst's get to memory */
+        __(mr r3,r6)                    /* rewind to the start address */
+        __(mtctr r4)                    /* CTR = line count for the second pass */
+2:      
+	__(icbi 0,r3)                   /* invalidate the instruction-cache line */
+	__(add r3,r3,r5)
+	__(bdnz 2b)
+        __(sync)                        /* order the icbi's ... */
+	__(isync)                       /* ... and discard prefetched instructions */
+	__(blr)
+/* The strange reference to "exp" is supposed to force the kernel to */
+/* load libm, so lisp code can use it.   Under Darwin, the functionality */
+/* of libm is contained in libsystem, along with libc & everything else. */
+
+        __ifndef(`DARWIN')
+        .data
+        __ifdef(`PPC64')
+        .quad exp                       /* 64-bit pointer-sized reference to exp */
+        __else
+        .long exp                       /* 32-bit reference to exp */
+        __endif
+        .text        
+        __endif
+_endfn
+
+_exportfn(C(touch_page))               /* write-probe the page at r3; presumably a fault between here and touch_page_end is recognized by a handler -- TODO confirm */
+        __(str(r3,0(r3)))               /* store something to *r3 (may fault) */
+        __(li r4,0)
+        __(str(r4,0(r3)))               /* store 0 back so the probe leaves the word zeroed */
+        __(li r3,1) /* can't assume that low 32 bits of r3 are non-zero */
+        .globl C(touch_page_end)
+C(touch_page_end):                      /* label marking the end of the faultable region */
+        __(blr)                         /* NOTE(review): PPC return; ARM would use bx lr -- confirm */
+_endfn
+                                
+_exportfn(C(current_stack_pointer))    /* return the caller's stack pointer */
+	__(mr r3,sp)                    /* NOTE(review): PPC mr/blr with result in r3; the ARM port presumably wants mov r0,sp / bx lr -- confirm */
+	__(blr)
+_endfn
+	
+_exportfn(C(count_leading_zeros))      /* return the number of leading zero bits in the 32-bit argument */
+        __(clz r0,r0)                   /* ARM clz: argument and result both in r0 */
+	__(bx lr)                       /* fix: the closing paren of the __() macro call was missing */
+_endfn
+
+_exportfn(C(noop))                     /* do nothing; just return to the caller */
+	__(bx lr)                       /* fix: blr is a PPC mnemonic; ARM returns via bx lr, as atomic_swap does */
+_endfn
+
+_exportfn(C(set_fpscr))                /* NOTE(review): PPC FPSCR code in arm-asmutils.s -- presumably unported; confirm */
+	__(stru(sp,-32(sp)))            /* push a 32-byte stack frame */
+	__(stw r3,12(sp))               /* store the new FPSCR bits as the low word of the double at 8(sp) */
+	__(lfd f0,8(sp))                /* reload that doubleword into f0 */
+	__(mtfsf 0xff,f0)               /* move all 8 FPSCR fields from f0 */
+	__(la sp,32(sp))                /* pop the frame */
+	__(blr)
+_endfn
+
+
+_exportfn(C(get_fpscr))                /* NOTE(review): PPC FPSCR code in arm-asmutils.s -- presumably unported; confirm */
+	__(stru(sp,-32(sp)))            /* push a 32-byte stack frame */
+        __(mffs f0)                     /* read FPSCR into f0 */
+        __(stfd f0,8(sp))               /* spill it to the stack ... */
+        __(lwz r3,12(sp))               /* ... and return its low 32 bits in r3 */
+	__(la sp,32(sp))                /* pop the frame */
+	__(blr)
+_endfn
+                
+
+
+
+
+/* Atomically store new value (r2) in *r0, if old value == expected (r1). */
+/* Return actual old value. */
+
+_exportfn(C(store_conditional))        /* NOTE(review): PPC lrarx/strcx body; comment above uses ARM r0-r2 names while the code still takes r3=addr, r4=expected, r5=new -- confirm during the port */
+        __(mr r6,r3)                    /* keep the address; r3 will hold the loaded value */
+1:      __(lrarx(r3,0,r6))              /* load-reserved current contents of *r6 */
+        __(cmpw r3,r4)                  /* matches the expected value? */
+        __(bne- 2f)                     /* no: discharge the reservation and return actual */
+        __(strcx(r5,0,r6))              /* store-conditional the new value */
+        __(bne- 1b)                     /* lost the reservation: retry */
+        __(isync)                       /* barrier after a successful store */
+        __(blr)                         /* return old value (== expected) in r3 */
+2:      __(li r0,RESERVATION_DISCHARGE)
+        __(strcx(r0,0,r0))              /* dummy store to drop the outstanding reservation */
+        __(blr)                         /* return the actual (mismatching) old value in r3 */
+_endfn
+
+/* Atomically store new_value(r1) in *r0 ;  return previous contents */
+/* of *r0. */
+
+_exportfn(C(atomic_swap))              /* exchange r1 with *r0; old contents returned in r0 */
+        __(swp r0,r1,[r0])              /* NOTE(review): swp is deprecated on newer ARM architectures -- presumably fine for this target; confirm */
+        __(bx lr)
+_endfn
+
+/* Logior the value in *r3 with the value in r4 (presumably a bitmask with exactly 1 */
+/* bit set.)  Return non-zero if any of the bits in that bitmask were already set. */
+        
+_exportfn(C(atomic_ior))               /* NOTE(review): PPC lrarx/strcx body -- presumably unported; confirm */
+        __(sync)                        /* barrier before entering the reservation loop */
+1:	__(lrarx(r5,0,r3))              /* load-reserved old value from *r3 */
+        __(or r6,r4,r5)                 /* set the mask bits from r4 */
+	__(strcx(r6,0,r3))              /* store-conditional the updated value */
+	__(bne- 1b)                     /* lost the reservation: retry */
+	__(isync)
+	__(and r3,r4,r5)                /* nonzero iff any mask bit was already set */
+	__(blr)
+_endfn
+
+
+/* Logand the value in *r3 with the value in r4 (presumably a bitmask with exactly 1 */
+/* bit set.)  Return the value now in *r3 (for some value of "now"). */
+
+_exportfn(C(atomic_and))               /* NOTE(review): PPC lrarx/strcx body -- presumably unported; confirm */
+        __(sync)                        /* barrier before entering the reservation loop */
+1:	__(lrarx(r5,0,r3))              /* load-reserved old value from *r3 */
+        __(and r6,r4,r5)                /* keep only the mask bits from r4 */
+	__(strcx(r6,0,r3))              /* store-conditional the updated value */
+	__(bne- 1b)                     /* lost the reservation: retry */
+	__(isync)
+	__(mr r3,r6)                    /* return the value just stored */
+	__(blr)
+_endfn
+                
+	
+        __ifdef(`DARWIN')
+_exportfn(C(enable_fp_exceptions))     /* .long 0 is an undefined instruction word; presumably traps to a handler that does the real work -- TODO confirm */
+        __(.long 0)
+        __(blr)
+_endfn
+        
+_exportfn(C(disable_fp_exceptions))    /* same trap-to-handler stub as enable_fp_exceptions */
+        __(.long 0)
+        __(blr)
+_endfn
+
+_exportfn(C(pseudo_sigreturn))         /* trap, then spin back into the trap if the handler ever returns here */
+	__(.long 0)
+	__(b C(pseudo_sigreturn))
+_endfn
+        __endif
+	
+	
+        /* fix: removed a stray _endfn here; every _exportfn above is already closed */
+
+
+	_endfile
