/*
   Copyright (C) 2005-2006 Clozure Associates and contributors
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/


        include(lisp.s)
        _beginfile

        .align 2
define([_spentry],[ifdef([__func_name],[_endfn],[])
        _exportfn(_SP$1)
        .line  __line__
])


define([_endsubp],[
        _endfn(_SP$1)
# __line__
])

define([jump_builtin],[
        ref_nrs_value(builtin_functions,%fname)
        set_nargs($2)
        vrefr(%fname,%fname,$1)
        jump_fname()
])

/* %arg_z has overflowed by one bit.  Make a bignum with 2 (32-bit) digits. */
_startfn(C(fix_one_bit_overflow))
        __(movq $two_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed([],aligned_bignum_size(2)))
        __(unbox_fixnum(%arg_z,%imm0))
        __(mov %temp0,%arg_z)
        __(xorq overflow_mask(%rip),%imm0)
        __(movq %imm0,misc_data_offset(%arg_z))
        __(jmp *%ra0)
overflow_mask:  .quad 0xe000000000000000
_endfn
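/* A rough C sketch of the recovery above (a hedged illustration, assuming
   fixnumshift == 3 and 61-bit fixnums as on x8664; the function name is
   made up):

       #include <stdint.h>

       int64_t true_result(int64_t wrapped_boxed) {
           int64_t unboxed = wrapped_boxed >> 3;   // arithmetic shift
           return unboxed ^ (int64_t)0xe000000000000000ull;
       }

   The arithmetic shift fills bits 61..63 with the (wrong) sign of the
   wrapped value; since the true result overflowed by exactly one bit,
   those three bits are exactly inverted, so XORing with the mask above
   recovers the value stored into the new bignum. */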

/* Make a lisp integer (fixnum or two-digit bignum) from the signed
   64-bit value in %imm0.  Box the value by shifting it left 3 bits;
   if shifting back right (arithmetically) doesn't recover the
   original value, the fixnum overflowed and we need a bignum.
*/
_spentry(makes64)
        __(movq %imm0,%imm1)
        __(shlq $fixnumshift,%imm1)
        __(movq %imm1,%arg_z)
        __(sarq $fixnumshift,%imm1)
        __(cmpq %imm1,%imm0)
        __(jz,pt 0f)
        __(jmp 1f)              /* lost bits: need a two-digit bignum */
0:      __(jmp *%ra0)
1:      __(movd %imm0,%mm0)
        __(movq $two_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(2)))
        __(movq %mm0,misc_data_offset(%arg_z))
        __(jmp *%ra0)
_endsubp(makes64)
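/* The fixnum-fit test above, as a small C model (illustrative only;
   assumes fixnumshift == 3):

       #include <stdint.h>

       int fits_in_fixnum(int64_t v) {
           int64_t boxed = (int64_t)((uint64_t)v << 3); // box the value
           return (boxed >> 3) == v;   // round-trip intact => it fits
       }

   If the round trip loses bits, the boxed value left in %arg_z is
   garbage and a two-digit bignum is consed instead. */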


/* %imm1:%imm0 constitute a signed integer, almost certainly a bignum.
   Make a lisp integer out of those 128 bits .. */
_startfn(C(makes128))
/* We're likely to have to make a bignum out of the integer in %imm1 and
   %imm0. We'll need to use %imm0 and %imm1 to cons the bignum, and
   will need to do some arithmetic (determining significant bigits)
   on %imm0 and %imm1 in order to know how large that bignum needs to be.
   Cache %imm0 and %imm1 in %mm0 and %mm1. */

        __(movd %imm0,%mm0)
        __(movd %imm1,%mm1)

/* If %imm1 is just a sign extension of %imm0, make a 64-bit signed integer. */

        __(sarq $63,%imm0)
        __(cmpq %imm0,%imm1)
        __(movd %mm0,%imm0)
        __(je _SPmakes64)

/* Otherwise, if the high 32 bits of %imm1 are a sign-extension of the
   low 32 bits of %imm1, make a 3-digit bignum.  If the upper 32 bits
   of %imm1 are significant, make a 4 digit bignum */
        __(movq %imm1,%imm0)
        __(shlq $32,%imm0)
        __(sarq $32,%imm0)
        __(cmpq %imm0,%imm1)
        __(jz 3f)
        __(mov $four_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(4)))
        __(movq %mm0,misc_data_offset(%arg_z))
        __(movq %mm1,misc_data_offset+8(%arg_z))
        __(jmp *%ra0)
3:      __(mov $three_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(3)))
        __(movq %mm0,misc_data_offset(%arg_z))
        __(movd %mm1,misc_data_offset+8(%arg_z))
        __(jmp *%ra0)
_endfn
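/* Digit-count selection in C, as a sketch (bignum digits are 32 bits;
   the function name and return convention are illustrative):

       #include <stdint.h>

       int signed128_digits(int64_t lo, int64_t hi) {
           if (hi == (lo >> 63)) return 0;   // just a 64-bit integer
           if (hi == (int32_t)hi) return 3;  // top half of hi is sign
           return 4;
       }

   The 0 case corresponds to the tail call to _SPmakes64 above. */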

/* %imm1:%imm0 constitute an unsigned integer, almost certainly a bignum.
   Make a lisp integer out of those 128 bits .. */
_startfn(C(makeu128))
/* We're likely to have to make a bignum out of the integer in %imm1 and
   %imm0. We'll need to use %imm0 and %imm1 to cons the bignum, and
   will need to do some arithmetic (determining significant bigits)
   on %imm0 and %imm1 in order to know how large that bignum needs to be.
   Cache %imm0 and %imm1 in %mm0 and %mm1. */

/* If the high word is 0, make an unsigned-byte 64 ... */
        __(testq %imm1,%imm1)
        __(jz _SPmakeu64)

        __(movd %imm0,%mm0)
        __(movd %imm1,%mm1)

        __(js 5f)               /* Sign bit set in %imm1. Need 5 digits */
        __(bsrq %imm1,%imm0)
        __(rcmpb(%imm0_b,$31))
        __(jae 4f)              /* Some high bits in %imm1.  Need 4 digits */
        __(testl %imm1_l,%imm1_l)
        __(movd %mm0,%imm0)
        __(jz _SPmakeu64)
        /* Need 3 digits */
        __(movq $three_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(3)))
        __(movq %mm0,misc_data_offset(%arg_z))
        __(movd %mm1,misc_data_offset+8(%arg_z))
        __(jmp *%ra0)
4:      __(movq $four_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(4)))
        __(jmp 6f)
5:      __(movq $five_digit_bignum_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,aligned_bignum_size(5)))
6:      __(movq %mm0,misc_data_offset(%arg_z))
        __(movq %mm1,misc_data_offset+8(%arg_z))
        __(jmpq *%ra0)
_endfn

/* %imm1.b = subtag, %arg_y = uvector, %arg_z = index.
   Bounds/type-checking done in caller */
_startfn(C(misc_ref_common))
        __(extract_fulltag(%imm1,%imm0))
        __(cmpb $ivector_class_64_bit,%imm0_b)
        __(je local_label(misc_ref_64))
        __(cmpb $ivector_class_32_bit,%imm0_b)
        __(je local_label(misc_ref_32))
        __(cmpb $ivector_class_other_bit,%imm0_b)
        __(je local_label(misc_ref_other))
        /* Node vector.  Functions are funny: the first N words
           are treated as (UNSIGNED-BYTE 64), where N is the low
           32 bits of the first word. */
        __(cmpb $subtag_function,%imm1_b)
        __(jne local_label(misc_ref_node))
        __(movl misc_data_offset(%arg_y),%imm0_l)
        __(shl $fixnumshift,%imm0)
        __(rcmpq(%arg_z,%imm0))
        __(jl local_label(misc_ref_u64))
local_label(misc_ref_node):
        __(movq misc_data_offset(%arg_y,%arg_z),%arg_z)
        __(jmp *%ra0)
local_label(misc_ref_u64):
        __(movq misc_data_offset(%arg_y,%arg_z),%imm0)
        __(jmp _SPmakeu64)
local_label(misc_ref_double_float_vector):
        __(movsd misc_data_offset(%arg_y,%arg_z),%fp1)
        __(movq $double_float_header,%imm0)
        __(Misc_Alloc_Fixed(%arg_z,double_float.size))
        __(movsd %fp1,double_float.value(%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_64):
        __(cmpb $subtag_double_float_vector,%imm1_b)
        __(je local_label(misc_ref_double_float_vector))
        __(cmpb $subtag_s64_vector,%imm1_b)
        __(jne local_label(misc_ref_u64))
local_label(misc_ref_s64):
        __(movq misc_data_offset(%arg_y,%arg_z),%imm0)
        __(jmp _SPmakes64)
local_label(misc_ref_u32):
        __(movl misc_data_offset(%arg_y,%imm0),%imm0_l)
        __(box_fixnum(%imm0,%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_s32):
        __(movslq misc_data_offset(%arg_y,%imm0),%imm0)
        __(box_fixnum(%imm0,%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_32):
        __(movq %arg_z,%imm0)
        __(shr $1,%imm0)
        __(cmpb $subtag_s32_vector,%imm1_b)
        __(je local_label(misc_ref_s32))
        __(cmpb $subtag_single_float_vector,%imm1_b)
        __(jne local_label(misc_ref_u32))
local_label(misc_ref_single_float_vector):
        __(movss misc_data_offset(%arg_y,%imm0),%fp1)
        __(movd %fp1,%imm0_l)
        __(shl $32,%imm0)
        __(lea subtag_single_float(%imm0),%arg_z)
        __(jmp *%ra0)
local_label(misc_ref_other):
        __(cmpb $subtag_u16_vector,%imm1_b)
        __(jle local_label(misc_ref_16))
        __(cmpb $subtag_bit_vector,%imm1_b)
        __(jz local_label(misc_ref_bit_vector))
        /* 8-bit case:  string, u8, s8 */
        __(movq %arg_z,%imm0)
        __(shr $3,%imm0)
        __(cmpb $subtag_s8_vector,%imm1_b)
        __(je local_label(misc_ref_s8))
        __(jl local_label(misc_ref_string))
local_label(misc_ref_u8):
        __(movzbl misc_data_offset(%arg_y,%imm0),%imm0_l)
        __(box_fixnum(%imm0,%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_s8):
        __(movsbq misc_data_offset(%arg_y,%imm0),%imm0)
        __(box_fixnum(%imm0,%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_string):
        __(movzbl misc_data_offset(%arg_y,%imm0),%imm0_l)
        __(shlq $charcode_shift,%imm0)
        __(leaq subtag_character(%imm0),%arg_z)
        __(jmp *%ra0)
local_label(misc_ref_16):
        __(movq %arg_z,%imm0)
        __(shrq $2,%imm0)
        __(cmpb $subtag_s16_vector,%imm1_b)
        __(je local_label(misc_ref_s16))
local_label(misc_ref_u16):
        __(movzwl misc_data_offset(%arg_y,%imm0),%imm0_l)
        __(box_fixnum(%imm0,%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_s16):
        __(movswq misc_data_offset(%arg_y,%imm0),%imm0)
        __(box_fixnum(%imm0,%arg_z))
        __(jmp *%ra0)
local_label(misc_ref_bit_vector):
        __(unbox_fixnum(%arg_z,%imm0))
        __(movl $63,%imm1_l)
        __(andb %imm0_b,%imm1_b)
        __(shrq $6,%imm0)
        __(btq %imm1,misc_data_offset(%arg_y,%imm0,8))
        __(setc %imm0_b)
        __(andl $fixnum_one,%imm0_l)
        __(movq %imm0,%arg_z)
        __(jmp *%ra0)
_endfn(C(misc_ref_common))
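/* The index scaling used above, modeled in C: a boxed (fixnum) index is
   n << 3, so the byte offset for each element width is a shift of the
   boxed value (sketch; names are illustrative):

       static long byte_offset(long boxed_index, int element_bytes) {
           switch (element_bytes) {
           case 8:  return boxed_index;        // 64-bit elements
           case 4:  return boxed_index >> 1;   // 32-bit
           case 2:  return boxed_index >> 2;   // 16-bit
           default: return boxed_index >> 3;   // 8-bit
           }
       }

   Bit vectors are handled separately: the unboxed index n selects bit
   (n & 63) of 64-bit word (n >> 6). */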


/* ret1valn returns "1 multiple value" when a called function does not */
/* return multiple values.  Its presence on the stack (as a return address) */
/* identifies the stack frame to code which returns multiple values. */

_exportfn(C(ret1valn))
        __(leaveq)
        __(pop %ra0)
        __(push %arg_z)
        __(set_nargs(1))
        __(jmpq *%ra0)
_endfn

        .globl C(popj)
C(popj):

_spentry(nvalret)
        .globl C(nvalret)
C(nvalret):
        __(ref_global(ret1val_addr,%temp1))
        __(cmpq lisp_frame.savera0(%rbp),%temp1)
        __(je 1f)
        __(testw %nargs,%nargs)
        __(movzwl %nargs,%nargs_l)
        __(movl $nil_value,%arg_z_l)
        __(cmovneq -node_size(%rsp,%nargs_q),%arg_z)
        __(leaveq)
        __(popq %ra0)
        __(jmp *%ra0)

/* actually need to return values; always need to copy */
1:      __(leaq 8(%rbp),%imm1)
        __(movq (%imm1),%ra0)
        __(movq 0(%rbp),%rbp)
        __(leaq (%rsp,%nargs_q),%temp0)
        __(xorl %imm0_l,%imm0_l)
        __(jmp 3f)
2:      __(movq -node_size(%temp0),%temp1)
        __(subq $node_size,%temp0)
        __(addq $node_size,%imm0)
        __(movq %temp1,-node_size(%imm1))
        __(subq $node_size,%imm1)
3:      __(cmpw %imm0_w,%nargs)
        __(jne 2b)
        __(movq %imm1,%rsp)
        __(jmp *%ra0)
_endsubp(nvalret)
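/* The copying loop above, in C (sketch only): nargs values at the top of
   the stack are moved, highest address first, to just below the caller's
   frame, exactly like:

       #include <stdint.h>

       static void copy_values_down(uint64_t *src_top, uint64_t *dst_top,
                                    long nwords) {
           while (nwords-- > 0)
               *--dst_top = *--src_top;
       }

   after which %rsp is pointed at the last word copied. */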

_spentry(jmpsym)
_endsubp(jmpsym)

_spentry(jmpnfn)
_endsubp(jmpnfn)

_spentry(funcall)
        __(do_funcall())
_endsubp(funcall)

_spentry(mkcatch1v)
        __(Make_Catch(0))
        __(jmp *%ra0)
_endsubp(mkcatch1v)

_spentry(mkunwind)
        __(movq $undefined,%arg_z)
        __(Make_Catch(fixnumone))
        __(jmp *%ra0)
_endsubp(mkunwind)

_spentry(mkcatchmv)
        __(Make_Catch(fixnumone))
        __(jmp *%ra0)
_endsubp(mkcatchmv)

_spentry(throw)
        __(movq %rcontext:tcr.catch_top,%imm1)
        __(xorl %imm0_l,%imm0_l)
        __(movzwl %nargs,%nargs_l)
        __(movq (%rsp,%nargs_q),%temp0) /* temp0 = tag */
        __(jmp local_label(_throw_test))
local_label(_throw_loop):
        __(cmpq %temp0,catch_frame.catch_tag(%imm1))
        __(je local_label(_throw_found))
        __(movq catch_frame.link(%imm1),%imm1)
        __(addq $fixnum_one,%imm0)
local_label(_throw_test):
        __(testq %imm1,%imm1)
        __(jne local_label(_throw_loop))
        __(uuo_error_reg_not_tag(Rtemp0,subtag_catch_frame))
        __(jmp _SPthrow)
local_label(_throw_found):
        __(testb $fulltagmask,catch_frame.mvflag(%imm1))
        __(jne local_label(_throw_multiple))
        __(testw %nargs,%nargs)
        __(movl $nil_value,%arg_z_l)
        __(je local_label(_throw_one_value))
        __(movq -node_size(%rsp,%nargs_q),%arg_z)
        __(add %nargs_q,%rsp)
local_label(_throw_one_value):
        __(lea local_label(_threw_one_value)(%rip),%ra0)
        __(jmp _SPnthrow1value)
__(tra(local_label(_threw_one_value)))
        __(movq %rcontext:tcr.catch_top,%temp0)
        __(movq catch_frame.db_link(%temp0),%imm1)
        __(cmpq %imm0,%imm1)
        __(jz local_label(_threw_one_value_dont_unbind))
        __(push %ra0)
        __(lea local_label(_threw_one_value_back_from_unbind)(%rip),%ra0)
        __(jmp _SPunbind_to)
__(tra(local_label(_threw_one_value_back_from_unbind)))
        __(pop %ra0)
local_label(_threw_one_value_dont_unbind):
        __(movq catch_frame.rbp(%temp0),%rbp)
        __(movq catch_frame.rsp(%temp0),%rsp)
        __(movq catch_frame.foreign_sp(%temp0),%imm0)
        __(movq catch_frame.xframe(%temp0),%imm1)
        __(movq %imm0,%rcontext:tcr.foreign_sp)
        __(movq %imm1,%rcontext:tcr.xframe)
        __(movq catch_frame.link(%temp0),%imm1)
        __(movq catch_frame._save0(%temp0),%save0)
        __(movq catch_frame._save1(%temp0),%save1)
        __(movq catch_frame._save2(%temp0),%save2)
        __(movq catch_frame._save3(%temp0),%save3)
        __(movq %imm1,%rcontext:tcr.catch_top)
        __(movq catch_frame.pc(%temp0),%ra0)
        __(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
        __(movq (%imm1),%tsp)
        __(movq %tsp,%next_tsp)
        __(jmp *%ra0)
local_label(_throw_multiple):
        __(lea local_label(_threw_multiple)(%rip),%ra0)
        __(jmp _SPnthrowvalues)
__(tra(local_label(_threw_multiple)))
        __(movq %rcontext:tcr.catch_top,%temp0)
        __(movq catch_frame.db_link(%temp0),%imm0)



_endsubp(throw)
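/* A C model of the tag search above (illustrative field and type names;
   the real catch frames live on the temp stack, threaded through
   tcr.catch_top):

       typedef struct catch_frame {
           struct catch_frame *link;
           void *tag;
       } catch_frame;

       // Returns a 1-based count of frames to unwind through, or -1 if
       // the tag isn't on the stack (the uuo above signals that error).
       static long frames_to_throw_through(catch_frame *top, void *tag) {
           long count = 1;
           for (catch_frame *f = top; f != NULL; f = f->link, count++)
               if (f->tag == tag) return count;
           return -1;
       }
*/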

/* This takes N multiple values atop the vstack. */
_spentry(nthrowvalues)
        __(movb $1,%rcontext:tcr.unwinding)
        __(movzwl %nargs,%nargs_l)
local_label(_nthrowv_nextframe):
        __(subq $fixnumone,%imm0)
        __(js local_label(_nthrowv_done))
        __(movd %imm0,%mm1)
        __(movq %rcontext:tcr.catch_top,%temp0)
        __(movq catch_frame.link(%temp0),%imm1)
        __(movq catch_frame.db_link(%temp0),%imm0)
        __(movq %imm1,%rcontext:tcr.catch_top)
        __(cmpq %imm0,%rcontext:tcr.db_link)
        __(jz local_label(_nthrowv_dont_unbind))
        __(push %ra0)
        __(leaq local_label(_nthrowv_back_from_unbind)(%rip),%ra0)
        __(jmp _SPunbind_to)
__(tra(local_label(_nthrowv_back_from_unbind)))

        __(pop %ra0)
local_label(_nthrowv_dont_unbind):
        __(cmpb $unbound_marker,catch_frame.catch_tag(%temp0))
        __(je local_label(_nthrowv_do_unwind))
/* A catch frame.  If the last one, restore context from there. */
        __(movd %mm1,%imm0)
        __(testq %imm0,%imm0)   /* last catch frame? */
        __(jz local_label(_nthrowv_skip))
        __(movq catch_frame.xframe(%temp0),%save0)
        __(movq %save0,%rcontext:tcr.xframe)
        __(leaq (%rsp,%nargs_q),%save1)
        __(movq catch_frame.rsp(%temp0),%save2)
        __(movq %nargs_q,%save0)
        __(jmp local_label(_nthrowv_push_test))
local_label(_nthrowv_push_loop):
        __(subq $node_size,%save1)
        __(subq $node_size,%save2)
        __(movq (%save1),%temp1)
        __(movq %temp1,(%save2))
local_label(_nthrowv_push_test):
        __(subq $node_size,%save0)
        __(jns local_label(_nthrowv_push_loop))
        __(movq %save2,%rsp)
        __(movq catch_frame.rbp(%temp0),%rbp)
        __(movq catch_frame._save3(%temp0),%save3)
        __(movq catch_frame._save2(%temp0),%save2)
        __(movq catch_frame._save1(%temp0),%save1)
        __(movq catch_frame._save0(%temp0),%save0)
        __(movq catch_frame.foreign_sp(%temp0),%imm1)
        __(movq %imm1,%rcontext:tcr.foreign_sp)
local_label(_nthrowv_skip):
        __(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
        __(movd %imm1,%tsp)
        __(movd %imm1,%next_tsp)
        __(movd %mm1,%imm0)
        __(jmp local_label(_nthrowv_nextframe))
local_label(_nthrowv_do_unwind):
/* This is harder.  Call the cleanup code with the multiple values and
    nargs, the throw count, and the caller's return address in a temp
    stack frame. */
        __(movq catch_frame.xframe(%temp0),%save0)
        __(movq %save0,%rcontext:tcr.xframe)
        __(leaq (%rsp,%nargs_q),%save1)
        __(push catch_frame._save0(%temp0))
        __(push catch_frame._save1(%temp0))
        __(push catch_frame._save2(%temp0))
        __(push catch_frame._save3(%temp0))
        __(push catch_frame.pc(%temp0))
        __(movq catch_frame.rbp(%temp0),%rbp)
        __(movq catch_frame.rsp(%temp0),%temp1)
        __(movq catch_frame.foreign_sp(%temp0),%imm0)
        __(movq %imm0,%rcontext:tcr.foreign_sp)
        /* Discard the catch frame, so we can build a temp frame */
        __(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
        __(movd %imm1,%tsp)
        __(movd %imm1,%next_tsp)
        /* tsp overhead, nargs, throw count, ra0 */
        __(dnode_align(%nargs_q,(tsp_frame.fixed_overhead+(3*node_size)),%imm0))
        __(TSP_Alloc_Var(%imm0,%imm1))
        __(movq %nargs_q,%temp0)
        __(movq %nargs_q,(%imm1))
        __(movq %ra0,node_size(%imm1))
        __(movq %mm1,node_size*2(%imm1))
        __(leaq node_size*3(%imm1),%imm1)
        __(jmp local_label(_nthrowv_tpushtest))
local_label(_nthrowv_tpushloop):
        __(movq -node_size(%save1),%temp0)
        __(subq $node_size,%save1)
        __(movq %temp0,(%imm1))
        __(addq $node_size,%imm1)
local_label(_nthrowv_tpushtest):
        __(subw $node_size,%nargs)
        __(jns local_label(_nthrowv_tpushloop))
        __(pop %xfn)
        __(pop %save3)
        __(pop %save2)
        __(pop %save1)
        __(pop %save0)
        __(movq %temp1,%rsp)
/* Ready to call cleanup code: set up tra, jmp to %xfn */
        __(leaq local_label(_nthrowv_called_cleanup)(%rip),%ra0)
        __(movb $0,%rcontext:tcr.unwinding)
        __(jmp *%xfn)
__(tra(local_label(_nthrowv_called_cleanup)))

        __(movb $1,%rcontext:tcr.unwinding)
        __(movd %tsp,%imm1)
        __(movq tsp_frame.data_offset+(0*node_size)(%imm1),%nargs_q)
        __(movq tsp_frame.data_offset+(1*node_size)(%imm1),%ra0)
        __(movq tsp_frame.data_offset+(2*node_size)(%imm1),%mm1)
        __(movq %nargs_q,%imm0)
        __(leaq node_size*3(%imm1),%imm1)
        __(jmp local_label(_nthrowv_tpoptest))
local_label(_nthrowv_tpoploop):
        __(push (%imm1))
        __(addq $node_size,%imm1)
local_label(_nthrowv_tpoptest):
        __(subq $node_size,%imm0)
        __(jns local_label(_nthrowv_tpoploop))
        __(movd %tsp,%imm1)
        __(movq (%imm1),%imm1)
        __(movd %imm1,%tsp)
        __(movd %imm1,%next_tsp)
        __(movd %mm1,%imm0)
        __(jmp local_label(_nthrowv_nextframe))
local_label(_nthrowv_done):
        __(movb $0,%rcontext:tcr.unwinding)
        __(movq %rcontext:tcr.tlb_pointer,%imm0)
        __(cmpq $0,INTERRUPT_LEVEL_BINDING_INDEX(%imm0))
        __(js local_label(_nthrowv_return))
        __(cmpq $0,%rcontext:tcr.interrupt_pending)
        __(je local_label(_nthrowv_return))
        __(interrupt_now())
local_label(_nthrowv_return):
        __(jmp *%ra0)
_endsubp(nthrowvalues)

/* This is a (slight) optimization.  When running an unwind-protect,
   save the single value and the throw count in the tstack frame.
   Note that this takes a single value in arg_z. */
_spentry(nthrow1value)
        __(movb $1,%rcontext:tcr.unwinding)
        __(movzwl %nargs,%nargs_l)
local_label(_nthrow1v_nextframe):
        __(subq $fixnumone,%imm0)
        __(js local_label(_nthrow1v_done))
        __(movd %imm0,%mm1)
        __(movq %rcontext:tcr.catch_top,%temp0)
        __(movq catch_frame.link(%temp0),%imm1)
        __(movq catch_frame.db_link(%temp0),%imm0)
        __(movq %imm1,%rcontext:tcr.catch_top)
        __(cmpq %imm0,%rcontext:tcr.db_link)
        __(jz local_label(_nthrow1v_dont_unbind))
        __(push %ra0)
        __(leaq local_label(_nthrow1v_back_from_unbind)(%rip),%ra0)
        __(jmp _SPunbind_to)
__(tra(local_label(_nthrow1v_back_from_unbind)))

        __(pop %ra0)
local_label(_nthrow1v_dont_unbind):
        __(cmpb $unbound_marker,catch_frame.catch_tag(%temp0))
        __(je local_label(_nthrow1v_do_unwind))
/* A catch frame.  If the last one, restore context from there. */
        __(movd %mm1,%imm0)
        __(testq %imm0,%imm0)   /* last catch frame? */
        __(jz local_label(_nthrow1v_skip))
        __(movq catch_frame.xframe(%temp0),%save0)
        __(movq %save0,%rcontext:tcr.xframe)
        __(leaq (%rsp,%nargs_q),%save1)
        __(movq catch_frame.rsp(%temp0),%save2)
        __(movq %nargs_q,%save0)
        __(jmp local_label(_nthrow1v_push_test))
local_label(_nthrow1v_push_loop):
        __(subq $node_size,%save1)
        __(subq $node_size,%save2)
        __(movq (%save1),%temp1)
        __(movq %temp1,(%save2))
local_label(_nthrow1v_push_test):
        __(subq $node_size,%save0)
        __(jns local_label(_nthrow1v_push_loop))
        __(movq %save2,%rsp)
        __(movq catch_frame.rbp(%temp0),%rbp)
        __(movq catch_frame._save3(%temp0),%save3)
        __(movq catch_frame._save2(%temp0),%save2)
        __(movq catch_frame._save1(%temp0),%save1)
        __(movq catch_frame._save0(%temp0),%save0)
        __(movq catch_frame.foreign_sp(%temp0),%imm1)
        __(movq %imm1,%rcontext:tcr.foreign_sp)
local_label(_nthrow1v_skip):
        __(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
        __(movd %imm1,%tsp)
        __(movd %imm1,%next_tsp)
        __(movd %mm1,%imm0)
        __(jmp local_label(_nthrow1v_nextframe))
local_label(_nthrow1v_do_unwind):
/* This is harder, but not as hard (not as much BLTing) as the
   multiple-value case. */
        __(movq catch_frame.xframe(%temp0),%save0)
        __(movq %save0,%rcontext:tcr.xframe)
        __(movq catch_frame._save0(%temp0),%save0)
        __(movq catch_frame._save1(%temp0),%save1)
        __(movq catch_frame._save2(%temp0),%save2)
        __(movq catch_frame._save3(%temp0),%save3)
        __(movq catch_frame.pc(%temp0),%xfn)
        __(movq catch_frame.rbp(%temp0),%rbp)
        __(movq catch_frame.rsp(%temp0),%rsp)
        __(movq catch_frame.foreign_sp(%temp0),%imm0)
        __(movq %imm0,%rcontext:tcr.foreign_sp)
        /* Discard the catch frame, so we can build a temp frame */
        __(lea -(tsp_frame.fixed_overhead+fulltag_misc)(%temp0),%imm1)
        __(movd %imm1,%tsp)
        __(movd %imm1,%next_tsp)
        /* tsp overhead, throw count, ra0, arg_z */
        __(dnode_align(%nargs_q,(tsp_frame.fixed_overhead+(3*node_size)),%imm0))
        __(TSP_Alloc_Fixed((2*node_size),%imm1))
        __(addq $tsp_frame.fixed_overhead,%imm1)
        __(movq %ra0,(%imm1))
        __(movq %mm1,node_size*1(%imm1))
        __(movq %arg_z,node_size*2(%imm1))
/* Ready to call cleanup code: set up tra, jmp to %xfn */
        __(leaq local_label(_nthrow1v_called_cleanup)(%rip),%ra0)
        __(movb $0,%rcontext:tcr.unwinding)
        __(jmp *%xfn)
__(tra(local_label(_nthrow1v_called_cleanup)))

        __(movb $1,%rcontext:tcr.unwinding)
        __(movd %tsp,%imm1)
        __(movq tsp_frame.data_offset+(0*node_size)(%imm1),%ra0)
        __(movq tsp_frame.data_offset+(1*node_size)(%imm1),%mm1)
        __(movq tsp_frame.data_offset+(2*node_size)(%imm1),%arg_z)
        __(movd %tsp,%imm1)
        __(movq (%imm1),%imm1)
        __(movd %imm1,%tsp)
        __(movd %imm1,%next_tsp)
        __(movd %mm1,%imm0)
        __(jmp local_label(_nthrow1v_nextframe))
local_label(_nthrow1v_done):
        __(movb $0,%rcontext:tcr.unwinding)
        __(movq %rcontext:tcr.tlb_pointer,%imm0)
        __(cmpq $0,INTERRUPT_LEVEL_BINDING_INDEX(%imm0))
        __(js local_label(_nthrow1v_return))
        __(cmpq $0,%rcontext:tcr.interrupt_pending)
        __(je local_label(_nthrow1v_return))
        __(interrupt_now())
local_label(_nthrow1v_return):
        __(jmp *%ra0)
_endsubp(nthrow1value)

/* This never affects the symbol's vcell */
/* Non-null symbol in arg_y, new value in arg_z */

_spentry(bind)
        __(movq symbol.binding_index(%arg_y),%temp0)
        __(cmpq %rcontext:tcr.tlb_limit,%temp0)
        __(jb,pt 0f)
        __(tlb_too_small())
0:      __(testq %temp0,%temp0)
        __(jz 9f)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(push (%temp1,%temp0))
        __(push %temp0)
        __(push %rcontext:tcr.db_link)
        __(movq %arg_z,(%temp1,%temp0))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
9:
        __(movq %arg_y,%arg_z)
        __(movq $XSYMNOBIND,%arg_y)
        __(set_nargs(2))
        __(jmp _SPksignalerr)
_endsubp(bind)
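/* The binding protocol above, modeled in C (a sketch; the tcr fields are
   real, the C types and helper name are made up).  Binding pushes the old
   value, the TLB offset, and the old db_link, then installs the new value
   and makes the three pushed words the new head of the binding chain:

       #include <stdint.h>

       typedef struct {
           char     *tlb_pointer;   // thread-local binding vector
           uint64_t *db_link;       // newest special binding
           uint64_t *sp;            // value-stack pointer
       } tcr_t;

       static void bind_special(tcr_t *tcr, uint64_t tlb_offset,
                                uint64_t new_value) {
           uint64_t *cell = (uint64_t *)(tcr->tlb_pointer + tlb_offset);
           *--tcr->sp = *cell;                  // old value
           *--tcr->sp = tlb_offset;             // which binding
           *--tcr->sp = (uint64_t)tcr->db_link; // previous db_link
           *cell = new_value;
           tcr->db_link = tcr->sp;
       }

   Unbinding (see _SPunbind below) pops the same three words in reverse. */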

/* arg_z = symbol: bind it to its current value */
_spentry(bind_self)
        __(movq symbol.binding_index(%arg_z),%temp0)
        __(cmpq %rcontext:tcr.tlb_limit,%temp0)
        __(jb,pt 0f)
        __(tlb_too_small())
0:      __(testq %temp0,%temp0)
        __(jz 9f)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(cmpb $no_thread_local_binding_marker,(%temp0,%temp1))
        __(jz 2f)
        __(push (%temp1,%temp0))
        __(push %temp0)
        __(push %rcontext:tcr.db_link)
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
2:      __(movq symbol.vcell(%arg_z),%arg_y)
        __(push (%temp1,%temp0))
        __(push %temp0)
        __(push %rcontext:tcr.db_link)
        __(movq %arg_y,(%temp1,%temp0))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
9:      __(movq $XSYMNOBIND,%arg_y)
        __(set_nargs(2))
        __(jmp _SPksignalerr)
_endsubp(bind_self)

_spentry(bind_nil)
        __(movq symbol.binding_index(%arg_z),%temp0)
        __(cmpq %rcontext:tcr.tlb_limit,%temp0)
        __(jb,pt 0f)
        __(tlb_too_small())
0:      __(testq %temp0,%temp0)
        __(jz 9f)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(push (%temp1,%temp0))
        __(push %temp0)
        __(push %rcontext:tcr.db_link)
        __(movq $nil_value,(%temp0,%temp1))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
9:      __(movq $XSYMNOBIND,%arg_y)
        __(set_nargs(2))
        __(jmp _SPksignalerr)
_endsubp(bind_nil)

_spentry(bind_self_boundp_check)
        __(movq symbol.binding_index(%arg_z),%temp0)
        __(cmpq %rcontext:tcr.tlb_limit,%temp0)
        __(jb,pt 0f)
        __(tlb_too_small())
0:      __(testq %temp0,%temp0)
        __(jz 9f)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(cmpb $no_thread_local_binding_marker,(%temp1,%temp0))
        __(je 2f)
        __(cmpb $unbound_marker,(%temp1,%temp0))
        __(je 8f)
        __(push (%temp1,%temp0))
        __(push %temp0)
        __(push %rcontext:tcr.db_link)
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
2:      __(movq symbol.vcell(%arg_z),%arg_y)
        __(cmpb $unbound_marker,%arg_y_b)
        __(jz 8f)
        __(push (%temp1,%temp0))
        __(push %temp0)
        __(push %rcontext:tcr.db_link)
        __(movq %arg_y,(%temp1,%temp0))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
8:      __(uuo_error_reg_unbound(Rarg_z))

9:      __(movq $XSYMNOBIND,%arg_y)
        __(set_nargs(2))
        __(jmp _SPksignalerr)
_endsubp(bind_self_boundp_check)

_spentry(conslist)
        __(movl $nil_value,%arg_z_l)
        __(testw %nargs,%nargs)
        __(jmp 2f)
1:      __(pop %arg_y)
        __(Cons(%arg_y,%arg_z,%arg_z))
        __(subw $node_size,%nargs)
2:      __(jnz 1b)
        __(jmp *%ra0)
_endsubp(conslist)

/* do list*: last arg in arg_z, all others pushed, nargs set to #args pushed.*/
/* Cons, one cons cell at a time.  Maybe optimize this later. */
_spentry(conslist_star)
        __(testw %nargs,%nargs)
        __(jmp 2f)
1:      __(pop %arg_y)
        __(Cons(%arg_y,%arg_z,%arg_z))
        __(subw $node_size,%nargs)
2:      __(jnz 1b)
        __(jmp *%ra0)
_endsubp(conslist_star)

_spentry(stkconslist)
_endsubp(stkconslist)

_spentry(stkconslist_star)
_endsubp(stkconslist_star)

_spentry(mkstackv)
_endsubp(mkstackv)

_spentry(subtag_misc_ref)
_endsubp(subtag_misc_ref)

        .globl C(egc_write_barrier_start)
C(egc_write_barrier_start):
/*
   The function pc_luser_xp() - which is used to ensure that suspended threads
   are suspended in a GC-safe way - has to treat these subprims (which implement
   the EGC write-barrier) specially.  Specifically, a store that might introduce
   an intergenerational reference (a young pointer stored in an old object) has
   to "memoize" that reference by setting a bit in the global "refbits" bitmap.
   This has to happen atomically, and has to happen atomically wrt GC.

   Note that updating a word in a bitmap is itself not atomic, unless we use
   interlocked loads and stores.
*/
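/* Schematically, the memoization step looks like this in C (a sketch;
   the refbits layout and bit ordering here are assumptions, not the
   kernel's exact representation):

       #include <stdatomic.h>
       #include <stdint.h>

       extern _Atomic uint64_t refbits[];
       extern uintptr_t heap_start;

       static void memoize_store(uintptr_t dest_addr) {
           uintptr_t dnode = (dest_addr - heap_start) >> 4; // 16-byte dnodes
           atomic_fetch_or(&refbits[dnode >> 6],
                           1ULL << (63 - (dnode & 63)));
       }

   The atomic OR stands in for whatever interlocked update the kernel
   actually performs when other threads may be running. */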

/*
  For RPLACA and RPLACD, things are fairly simple: regardless of where we are
  in the function, we can do the store (even if it's already been done) and
  calculate whether or not we need to set the bit out-of-line.  (Actually
  setting the bit needs to be done atomically, unless we're sure that other
  threads are suspended.)
  We can unconditionally set the suspended thread's RIP to its RA0.
*/

_spentry(rplaca)
        .globl C(egc_rplaca)
C(egc_rplaca):
_endsubp(rplaca)

_spentry(rplacd)
        .globl C(egc_rplacd)
C(egc_rplacd):
_endsubp(rplacd)

/*
  Storing into a gvector can be handled the same way as storing into a CONS.
*/

_spentry(gvset)
        .globl C(egc_gvset)
C(egc_gvset):
_endsubp(gvset)

/* This is a special case of storing into a gvector: if we need to memoize the store,
   record the address of the hash-table vector in the refmap, as well.
*/

_spentry(set_hash_key)
        .globl C(egc_set_hash_key)
C(egc_set_hash_key):
_endsubp(set_hash_key)

/*
  This is a little trickier: the first instruction clears the EQ bit in CR0; the only
  way that it can get set is if the conditional store succeeds.  So:
  a) if we're interrupted on the first instruction, or if we're interrupted on a subsequent
     instruction but CR0[EQ] is clear, the conditional store hasn't succeeded yet.  We don't
     have to adjust the PC in this case; when the thread's resumed, the conditional store
     will be (re-)attempted and will eventually either succeed or fail.
  b) if the CR0[EQ] bit is set (on some instruction other than the first), the handler can
     decide if/how to handle memoization.  The handler should set the PC to the LR, and
     set arg_z to T.
*/

_spentry(store_node_conditional)
        .globl C(egc_store_node_conditional)
C(egc_store_node_conditional):
       .globl C(egc_write_barrier_end)
C(egc_write_barrier_end):
_endsubp(store_node_conditional)

_spentry(setqsym)
_endsubp(setqsym)

_spentry(progvsave)
_endsubp(progvsave)

_spentry(stack_misc_alloc)
_endsubp(stack_misc_alloc)

_spentry(gvector)
_endsubp(gvector)

_spentry(mvpass)
_endsubp(mvpass)

_spentry(fitvals)
_endsubp(fitvals)

_spentry(nthvalue)
_endsubp(nthvalue)

_spentry(values)
_endsubp(values)

_spentry(default_optional_args)
_endsubp(default_optional_args)

_spentry(opt_supplied_p)
_endsubp(opt_supplied_p)

_spentry(heap_rest_arg)
_endsubp(heap_rest_arg)

_spentry(req_heap_rest_arg)
_endsubp(req_heap_rest_arg)

_spentry(heap_cons_rest_arg)
_endsubp(heap_cons_rest_arg)

_spentry(simple_keywords)
_endsubp(simple_keywords)

_spentry(keyword_args)
_endsubp(keyword_args)

_spentry(keyword_bind)
_endsubp(keyword_bind)

_spentry(poweropen_ffcall)
_endsubp(poweropen_ffcall)

_spentry(unused_0)
_endsubp(unused_0)

_spentry(ksignalerr)
_endsubp(ksignalerr)

_spentry(stack_rest_arg)
_endsubp(stack_rest_arg)

_spentry(req_stack_rest_arg)
_endsubp(req_stack_rest_arg)

_spentry(stack_cons_rest_arg)
_endsubp(stack_cons_rest_arg)

_spentry(poweropen_callbackX)
_endsubp(poweropen_callbackX)

/* Prepend all but the first three (2 words of code, inner fn) and last two */
/* (function name, lfbits) elements of %fn to the "arglist". */
_spentry(call_closure)
        __(subq $fulltag_function-fulltag_misc,%fn)
        __(header_length(%fn,%imm0))
        __(movzwl %nargs,%nargs_l)
        __(subq $5<<fixnumshift,%imm0)  /* imm0 = inherited arg count */
        __(cmpw $nargregs<<fixnumshift,%nargs)
        __(jna,pt local_label(no_insert))

        /* Some arguments have already been pushed.  Push imm0's worth */
        /* of NILs, copy those arguments that have already been vpushed from */
        /* the old TOS to the new, then insert all of the inherited args */
        /* and go to the function. */
        __(movq %imm0,%imm1)
local_label(push_nil_loop):
        __(push $nil_value)
        __(sub $fixnumone,%imm1)
        __(jne local_label(push_nil_loop))
        /* Need to use arg regs as temporaries here.  */
        __(movq %rsp,%temp0)
        __(push %arg_z)
        __(push %arg_y)
        __(push %arg_x)
        __(lea (%rsp,%imm0),%arg_x)
        __(lea -nargregs<<fixnumshift(%nargs_q),%arg_y)
local_label(copy_already_loop):
        __(movq (%arg_x),%arg_z)
        __(addq $fixnumone,%arg_x)
        __(movq %arg_z,(%temp0))
        __(addq $fixnumone,%temp0)
        __(subq $fixnumone,%arg_y)
        __(jne local_label(copy_already_loop))

        __(movl $3<<fixnumshift,%imm1_l) /* skip code, new fn */
local_label(insert_loop):
        __(movq misc_data_offset(%fn,%imm1),%arg_z)
        __(addq $node_size,%imm1)
        __(addw $fixnum_one,%nargs)
        __(subq $node_size,%arg_x)
        __(movq %arg_z,(%arg_x))
        __(subq $fixnum_one,%imm0)
        __(jne local_label(insert_loop))

        /* Recover the argument registers, pushed earlier */
        __(pop %arg_x)
        __(pop %arg_y)
        __(pop %arg_z)
        __(jmp local_label(go))

        /* Here if nothing was pushed by the caller.  If we're
           going to push anything, we have to reserve a stack
           frame first. (We'll need to push something if the
           sum of %nargs and %imm0 is greater than nargregs.) */
local_label(no_insert):
        __(lea (%nargs_q,%imm0),%imm1)
        __(cmpq $nargregs<<fixnumshift,%imm1)
        __(jna local_label(no_insert_no_frame))
        /* Reserve space for a stack frame */
        __(push $0)
        __(push $0)
local_label(no_insert_no_frame):
        /* nargregs or fewer args were already vpushed. */
        /* if exactly nargregs, vpush remaining inherited vars. */
        __(cmpw $nargregs<<fixnumshift,%nargs)
        __(movl $3<<fixnumshift,%imm1_l) /* skip code, new fn */
        __(leaq 3<<fixnumshift(%imm0),%temp0)
        __(jnz local_label(set_regs))
local_label(vpush_remaining):
        __(push misc_data_offset(%fn,%imm1))
        __(addq $node_size,%imm1)
        __(addw $fixnumone,%nargs)
        __(subq $node_size,%imm0)
        __(jnz local_label(vpush_remaining))
        __(jmp local_label(go))
local_label(set_regs):
        /* if nargs was > 1 (and we know that it was < 3), it must have */
        /* been 2.  Set arg_x, then vpush the remaining args. */
        __(cmpw $fixnumone,%nargs)
        __(jle local_label(set_y_z))
local_label(set_arg_x):
        __(subq $node_size,%temp0)
        __(movq misc_data_offset(%fn,%temp0),%arg_x)
        __(addw $fixnumone,%nargs)
        __(subq $fixnumone,%imm0)
        __(jne local_label(vpush_remaining))
        __(jmp local_label(go))
        /* Maybe set arg_y or arg_z, preceding args */
local_label(set_y_z):
        __(jne local_label(set_arg_z))
        /* Set arg_y, maybe arg_x, preceding args */
local_label(set_arg_y):
        __(subq $node_size,%temp0)
        __(movq misc_data_offset(%fn,%temp0),%arg_y)
        __(addw $fixnumone,%nargs)
        __(subq $fixnum_one,%imm0)
        __(jnz local_label(set_arg_x))
        __(jmp local_label(go))
local_label(set_arg_z):
        __(subq $node_size,%temp0)
        __(movq misc_data_offset(%fn,%temp0),%arg_z)
        __(addw $fixnumone,%nargs)
        __(subq $fixnum_one,%imm0)
        __(jne local_label(set_arg_y))

local_label(go):
        __(movq misc_data_offset+(2*node_size)(%fn),%fn)
        __(jmp *%fn)

_endsubp(call_closure)

_spentry(getxlong)
_endsubp(getxlong)

_spentry(spreadargz)
_endsubp(spreadargz)

_spentry(tfuncallgen)
_endsubp(tfuncallgen)

_spentry(tfuncallslide)
_endsubp(tfuncallslide)

_spentry(tfuncallvsp)
_endsubp(tfuncallvsp)

_spentry(tcallsymgen)
_endsubp(tcallsymgen)

_spentry(tcallsymslide)
_endsubp(tcallsymslide)

_spentry(tcallsymvsp)
_endsubp(tcallsymvsp)

_spentry(tcallnfngen)
_endsubp(tcallnfngen)

_spentry(tcallnfnslide)
_endsubp(tcallnfnslide)

_spentry(tcallnfnvsp)
_endsubp(tcallnfnvsp)

_spentry(misc_ref)
_endsubp(misc_ref)

_spentry(misc_set)
_endsubp(misc_set)

_spentry(stkconsyz)
_endsubp(stkconsyz)

_spentry(stkvcell0)
_endsubp(stkvcell0)

_spentry(stkvcellvsp)
_endsubp(stkvcellvsp)

_spentry(makestackblock)
_endsubp(makestackblock)

_spentry(makestackblock0)
_endsubp(makestackblock0)

_spentry(makestacklist)
_endsubp(makestacklist)

_spentry(stkgvector)
_endsubp(stkgvector)

_spentry(misc_alloc)
_endsubp(misc_alloc)

_spentry(poweropen_ffcallX)
_endsubp(poweropen_ffcallX)


_spentry(macro_bind)
_endsubp(macro_bind)

_spentry(destructuring_bind)
_endsubp(destructuring_bind)

_spentry(destructuring_bind_inner)
_endsubp(destructuring_bind_inner)

_spentry(recover_values)
_endsubp(recover_values)

_spentry(vpopargregs)
_endsubp(vpopargregs)

_spentry(integer_sign)
_endsubp(integer_sign)

_spentry(subtag_misc_set)
_endsubp(subtag_misc_set)

_spentry(spread_lexprz)
_endsubp(spread_lexprz)


_spentry(reset)
_endsubp(reset)

_spentry(mvslide)
_endsubp(mvslide)

_spentry(save_values)
_endsubp(save_values)

_spentry(add_values)
_endsubp(add_values)

_spentry(poweropen_callback)
_endsubp(poweropen_callback)

_spentry(misc_alloc_init)
_endsubp(misc_alloc_init)

_spentry(stack_misc_alloc_init)
_endsubp(stack_misc_alloc_init)


_spentry(unused_1)
_endsubp(unused_1)

_spentry(callbuiltin)
_endsubp(callbuiltin)

_spentry(callbuiltin0)
_endsubp(callbuiltin0)

_spentry(callbuiltin1)
_endsubp(callbuiltin1)

_spentry(callbuiltin2)
_endsubp(callbuiltin2)

_spentry(callbuiltin3)
_endsubp(callbuiltin3)

_spentry(popj)
_endsubp(popj)

_spentry(restorefullcontext)
_endsubp(restorefullcontext)

_spentry(savecontextvsp)
_endsubp(savecontextvsp)

_spentry(savecontext0)
_endsubp(savecontext0)

_spentry(restorecontext)
_endsubp(restorecontext)

_spentry(lexpr_entry)
_endsubp(lexpr_entry)

_spentry(poweropen_syscall)
_endsubp(poweropen_syscall)


_spentry(breakpoint)
_endsubp(breakpoint)

_spentry(eabi_ff_call)
_endsubp(eabi_ff_call)

_spentry(eabi_callback)
_endsubp(eabi_callback)

_spentry(eabi_syscall)
_endsubp(eabi_syscall)

_spentry(getu64)
_endsubp(getu64)

_spentry(gets64)
_endsubp(gets64)

_spentry(makeu64)
_endsubp(makeu64)

_spentry(specref)
_endsubp(specref)

_spentry(specset)
_endsubp(specset)

_spentry(specrefcheck)
_endsubp(specrefcheck)

_spentry(restoreintlevel)
_endsubp(restoreintlevel)

_spentry(makes32)
_endsubp(makes32)

_spentry(makeu32)
_endsubp(makeu32)

_spentry(gets32)
_endsubp(gets32)

_spentry(getu32)
_endsubp(getu32)

_spentry(fix_overflow)
_endsubp(fix_overflow)

_spentry(mvpasssym)
_endsubp(mvpasssym)

_spentry(unused_2)
_endsubp(unused_2)

_spentry(unused_3)
_endsubp(unused_3)

_spentry(unused_4)
_endsubp(unused_4)

_spentry(unused_5)
_endsubp(unused_5)

_spentry(unused_6)
_endsubp(unused_6)

_spentry(unbind)
        __(movq %rcontext:tcr.db_link,%imm1)
        __(movq %rcontext:tcr.tlb_pointer,%arg_x)
        __(movq binding.sym(%imm1),%temp1)
        __(movq binding.val(%imm1),%arg_y)
        __(movq binding.link(%imm1),%imm1)
        __(movq %arg_y,(%arg_x,%temp1))
        __(movq %imm1,%rcontext:tcr.db_link)
        __(jmp *%ra0)
_endsubp(unbind)

_spentry(unbind_n)
        __(movq %rcontext:tcr.db_link,%imm1)
        __(movq %rcontext:tcr.tlb_pointer,%arg_x)
1:
        __(movq binding.sym(%imm1),%temp1)
        __(movq binding.val(%imm1),%arg_y)
        __(movq binding.link(%imm1),%imm1)
        __(movq %arg_y,(%arg_x,%temp1))
        __(subq $1,%imm0)
        __(jne 1b)
        __(movq %imm1,%rcontext:tcr.db_link)
        __(jmp *%ra0)
_endsubp(unbind_n)

_spentry(unbind_to)
        __(movq %rcontext:tcr.db_link,%imm1)
        __(movq %rcontext:tcr.tlb_pointer,%arg_x)
1:
        __(movq binding.sym(%imm1),%temp1)
        __(movq binding.val(%imm1),%arg_y)
        __(movq binding.link(%imm1),%imm1)
        __(movq %arg_y,(%arg_x,%temp1))
        __(cmpq %imm1,%imm0)
        __(jne 1b)
        __(movq %imm1,%rcontext:tcr.db_link)
        __(jmp *%ra0)
_endsubp(unbind_to)


/* Bind CCL::*INTERRUPT-LEVEL* to 0.  If its value had been negative, check
   for pending interrupts after doing so. */

_spentry(bind_interrupt_level_0)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(cmpq $0,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(push INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(push $INTERRUPT_LEVEL_BINDING_INDEX)
        __(push %rcontext:tcr.db_link)
        __(movq $0,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(js,pn 1f)
0:      __(jmp *%ra0)
        /* Interrupt level was negative; interrupt may be pending */
1:      __(cmpq $0,%rcontext:tcr.interrupt_pending)
        __(movq $0,%rcontext:tcr.interrupt_pending)
        __(je 0b)
        __(interrupt_now())
        __(jmp *%ra0)
_endsubp(bind_interrupt_level_0)
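/* In C terms (a sketch, reusing the bind_special() sketch shown after
   _SPbind above; INTERRUPT_LEVEL_BINDING_INDEX is the real TLB offset,
   and tcr.interrupt_pending and interrupt_now() are the kernel names
   used in this file):

       static void bind_interrupt_level_0(tcr_t *tcr) {
           int64_t *cell = (int64_t *)(tcr->tlb_pointer +
                                       INTERRUPT_LEVEL_BINDING_INDEX);
           int64_t old = *cell;
           bind_special(tcr, INTERRUPT_LEVEL_BINDING_INDEX, 0);
           if (old < 0 && tcr->interrupt_pending) {
               tcr->interrupt_pending = 0;
               interrupt_now();    // take the deferred interrupt
           }
       }
*/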


/* Bind CCL::*INTERRUPT-LEVEL* to the fixnum -1.  (This has the effect
   of disabling interrupts.) */

_spentry(bind_interrupt_level_m1)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(push INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(push $INTERRUPT_LEVEL_BINDING_INDEX)
        __(push %rcontext:tcr.db_link)
        __(movq $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
_endsubp(bind_interrupt_level_m1)

/* Bind CCL::*INTERRUPT-LEVEL* to the value in arg_z.  If that value's 0,
   do what _SPbind_interrupt_level_0 does */
_spentry(bind_interrupt_level)
        __(testq %arg_z,%arg_z)
        __(movq %rcontext:tcr.tlb_pointer,%temp1)
        __(jz _SPbind_interrupt_level_0)
        __(push INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(push $INTERRUPT_LEVEL_BINDING_INDEX)
        __(push %rcontext:tcr.db_link)
        __(movq %arg_z,INTERRUPT_LEVEL_BINDING_INDEX(%temp1))
        __(movq %rsp,%rcontext:tcr.db_link)
        __(jmp *%ra0)
_endsubp(bind_interrupt_level)

/* Unbind CCL::*INTERRUPT-LEVEL*.  If the value changes from negative to
   non-negative, check for pending interrupts.  */

_spentry(unbind_interrupt_level)
        __(movq %rcontext:tcr.db_link,%imm1)
        __(movq %rcontext:tcr.tlb_pointer,%arg_x)
        __(movq INTERRUPT_LEVEL_BINDING_INDEX(%arg_x),%imm0)
        __(testq %imm0,%imm0)
        __(movq binding.val(%imm1),%temp0)
        __(movq binding.link(%imm1),%imm1)
        __(movq %temp0,INTERRUPT_LEVEL_BINDING_INDEX(%arg_x))
        __(movq %imm1,%rcontext:tcr.db_link)
        __(js,pn 1f)
0:      __(jmp *%ra0)
1:      __(testq %temp0,%temp0)
        __(js 0b)
        __(cmpq $0,%rcontext:tcr.interrupt_pending)
        __(movq $0,%rcontext:tcr.interrupt_pending)
        __(je 0b)
        __(interrupt_now())
        __(jmp *%ra0)
_endsubp(unbind_interrupt_level)


_spentry(progvrestore)
_endsubp(progvrestore)


/* %arg_z <- %arg_y + %arg_z.  Do the fixnum case - including overflow -
  inline.  Call out otherwise. */
_spentry(builtin_plus)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(addq %arg_y,%arg_z)
        __(jo,pn C(fix_one_bit_overflow))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_plus,2))
_endsubp(builtin_plus)
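/* The fixnum fast path above in C (sketch): fixnums have a zero 3-bit
   tag, so both operands are fixnums iff the OR of their low bytes has
   no tag bits set, and boxed addition is just integer addition with an
   overflow check:

       #include <stdint.h>

       static int fixnum_add(int64_t y, int64_t z, int64_t *sum) {
           if ((y | z) & 7)                    // fixnummask == 7
               return 0;                       // not both fixnums
           return !__builtin_add_overflow(y, z, sum);
       }

   __builtin_add_overflow is the gcc/clang idiom standing in for the jo
   test above; on overflow the kernel conses a bignum instead. */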


/* %arg_z <- %arg_z - %arg_y.  Do the fixnum case - including overflow -
  inline.  Call out otherwise. */
_spentry(builtin_minus)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(subq %arg_y,%arg_z)
        __(jo,pn C(fix_one_bit_overflow))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_minus,2))
_endsubp(builtin_minus)

/* %arg_z <- %arg_z * %arg_y.  Do the fixnum case - including overflow -
  inline.  Call out otherwise. */
_spentry(builtin_times)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 2f)
        __(unbox_fixnum(%arg_z,%imm0))
        /* 128-bit fixnum result in %imm1:%imm0. Overflow set if %imm1
           is significant */
        __(imul %arg_y)
        __(jo 1f)
        __(mov %imm0,%arg_z)
        __(jmp *%ra0)
1:      __(unbox_fixnum(%arg_z,%imm0))
        __(unbox_fixnum(%arg_y,%imm1))
        __(imul %imm1)
        __(jmp C(makes128))
2:      __(jump_builtin(_builtin_times,2))
_endsubp(builtin_times)

_spentry(builtin_div)
        __(jump_builtin(_builtin_div,2))

/* %arg_z <- (= %arg_y %arg_z). */
_spentry(builtin_eq)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(rcmpq(%arg_z,%arg_y))
        __(condition_to_boolean(e,%imm0,%arg_z))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_eq,2))
_endsubp(builtin_eq)

/* %arg_z <- (/= %arg_y %arg_z). */
_spentry(builtin_ne)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(rcmpq(%arg_z,%arg_y))
        __(condition_to_boolean(ne,%imm0,%arg_z))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_ne,2))
_endsubp(builtin_ne)

/* %arg_z <- (> %arg_y %arg_z). */
_spentry(builtin_gt)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(rcmpq(%arg_y,%arg_z))
        __(condition_to_boolean(g,%imm0,%arg_z))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_gt,2))
_endsubp(builtin_gt)

/* %arg_z <- (>= %arg_y %arg_z). */
_spentry(builtin_ge)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(rcmpq(%arg_y,%arg_z))
        __(condition_to_boolean(ge,%imm0,%arg_z))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_ge,2))
_endsubp(builtin_ge)

/* %arg_z <- (< %arg_y %arg_z). */
_spentry(builtin_lt)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(rcmpq(%arg_y,%arg_z))
        __(condition_to_boolean(l,%imm0,%arg_z))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_lt,2))
_endsubp(builtin_lt)

/* %arg_z <- (<= %arg_y %arg_z). */
_spentry(builtin_le)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(rcmpq(%arg_y,%arg_z))
        __(condition_to_boolean(le,%imm0,%arg_z))
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_le,2))
_endsubp(builtin_le)

_spentry(builtin_eql)
        __(cmpq %arg_y,%arg_z)
        __(je 1f)
        /* Not EQ.  Could only possibly be EQL if both are tag-misc
           and both have the same subtag */
        __(extract_lisptag(%arg_y,%imm0))
        __(extract_lisptag(%arg_z,%imm1))
        __(cmpb $tag_misc,%imm0_b)
        __(jne 2f)
        __(cmpb %imm0_b,%imm1_b)
        __(jne 2f)
        __(extract_subtag(%arg_y,%imm0_b))
        __(extract_subtag(%arg_z,%imm1_b))
        __(cmpb %imm0_b,%imm1_b)
        __(jne 2f)
        __(jump_builtin(_builtin_eql,2))
1:      __(movl $t_value,%arg_z_l)
        __(jmp *%ra0)
2:      __(movl $nil_value,%arg_z_l)
        __(jmp *%ra0)
_endsubp(builtin_eql)

_spentry(builtin_length)
        __(extract_lisptag(%arg_z,%imm0))
        __(cmpb $tag_list,%imm0_b)
        __(jz 2f)
        __(cmpb $tag_misc,%imm0_b)
        __(jnz 8f)
        __(extract_subtag(%arg_z,%imm0_b))
        __(rcmpb(%imm0_b,$min_vector_subtag))
        __(jb 8f)
        __(je 1f)
        /* (simple-array * (*)) */
        __(movq %arg_z,%arg_y)
        __(vector_length(%arg_y,%arg_z))
        __(jmp *%ra0)
1:      /* vector header */
        __(movq vectorH.logsize(%arg_z),%arg_z)
        __(jmp *%ra0)
2:      /* list.  Maybe null, maybe dotted or circular. */
        __(movq $-fixnumone,%temp2)
        __(movq %arg_z,%temp0)  /* fast pointer */
        __(movq %arg_z,%temp1)  /* slow pointer */
3:      __(extract_lisptag(%temp0,%imm0))
        __(cmpb $fulltag_nil,%temp0_b)
        __(leaq fixnumone(%temp2),%temp2)       /* lea preserves flags */
        __(je 9f)
        __(cmpb $tag_list,%imm0_b)
        __(jne 8f)
        __(extract_lisptag(%temp1,%imm1))
        __(testb $fixnumone,%temp2_b)
        __(_cdr(%temp0,%temp0))
        __(je 3b)
        __(cmpb $tag_list,%imm1_b)
        __(jne 8f)
        __(_cdr(%temp1,%temp1))
        __(cmpq %temp0,%temp1)
        __(jne 3b)
8:
        __(jump_builtin(_builtin_length,1))
9:
        __(movq %temp2,%arg_z)
        __(jmp *%ra0)
_endsubp(builtin_length)
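/* The list branch above is the classic two-pointer cycle check; in C
   (a sketch, ignoring dotted lists, which the out-of-line builtin
   handles):

       typedef struct cons { struct cons *cdr; } cons;

       static long list_length(cons *list) {
           long n = 0;
           cons *fast = list, *slow = list;
           while (fast != NULL) {
               fast = fast->cdr;
               n++;
               if ((n & 1) == 0) {           // advance slow every other step
                   slow = slow->cdr;
                   if (fast == slow) return -1;  // circular
               }
           }
           return n;
       }
*/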


_spentry(builtin_seqtype)
        __(extract_lisptag(%arg_z,%imm0))
        __(cmpb $tag_list,%imm0_b)
        __(jz 1f)
        __(cmpb $tag_misc,%imm0_b)
        __(cmovew misc_subtag_offset(%arg_z),%imm0_w)
        __(jne 2f)
        __(rcmpb(%imm0_b,$min_vector_subtag))
        __(jb 2f)
        __(movl $nil_value,%arg_z_l)
        __(jmp *%ra0)
1:      __(movl $t_value,%arg_z_l)
        __(jmp *%ra0)
2:
        __(jump_builtin(_builtin_seqtype,1))
_endsubp(builtin_seqtype)

_spentry(builtin_assq)
        __(cmpb $fulltag_nil,%arg_z_b)
        __(jz 5f)
1:      __(movb $tagmask,%imm0_b)
        __(andb %arg_z_b,%imm0_b)
        __(cmpb $tag_list,%imm0_b)
        __(jz,pt 2f)
        __(uuo_error_reg_not_list(Rarg_z))
2:      __(_car(%arg_z,%arg_x))
        __(_cdr(%arg_z,%arg_z))
        __(cmpb $fulltag_nil,%arg_x_b)
        __(jz 4f)
        __(movb $tagmask,%imm0_b)
        __(andb %arg_x_b,%imm0_b)
        __(cmpb $tag_list,%imm0_b)
        __(jz,pt 3f)
        __(uuo_error_reg_not_list(Rarg_x))
3:      __(_car(%arg_x,%temp0))
        __(cmpq %temp0,%arg_y)
        __(jnz 4f)
        __(movq %arg_x,%arg_z)
        __(jmp *%ra0)
4:      __(cmpb $fulltag_nil,%arg_z_b)
5:      __(jnz 1b)
        __(jmp *%ra0)
_endsubp(builtin_assq)

_spentry(builtin_memq)
        __(cmpb $fulltag_nil,%arg_z_b)
        __(jmp 3f)
1:      __(movb $tagmask,%imm0_b)
        __(andb %arg_z_b,%imm0_b)
        __(cmpb $tag_list,%imm0_b)
        __(jz,pt 2f)
        __(uuo_error_reg_not_list(Rarg_z))
2:      __(_car(%arg_z,%arg_x))
        __(_cdr(%arg_z,%temp0))
        __(cmpq %arg_x,%arg_y)
        __(jz 4f)
        __(cmpb $fulltag_nil,%temp0_b)
        __(movq %temp0,%arg_z)
3:      __(jnz 1b)
4:      __(jmp *%ra0)
_endsubp(builtin_memq)

        __ifdef([X8664])
logbitp_max_bit = 61
        __else
logbitp_max_bit = 30
        __endif

_spentry(builtin_logbitp)
        __(movb %arg_z_b,%imm0_b)
        __(orb %arg_y_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jnz 1f)
        __(cmpq $logbitp_max_bit<<fixnumshift,%arg_y)
        __(ja 1f)
        __(unbox_fixnum(%arg_y,%imm0))
        __(addb $fixnumshift,%imm0_b)
        __(bt %imm0,%arg_z)
        __(condition_to_boolean(b,%imm0,%arg_z))
/*
        __(setb %imm0_b)
        __(andb $t_offset,%imm0_b)
        __(lea nil_value(%imm0),%arg_z)
*/
        __(jmp *%ra0)
1:      __(jump_builtin(_builtin_logbitp,2))
_endsubp(builtin_logbitp)

_spentry(builtin_logior)
        __(movb %arg_y_b,%imm0_b)
        __(orb %arg_z_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(orq %arg_y,%arg_z)
        __(jmp *%ra0)
1:
        __(jump_builtin(_builtin_logior,2))

_endsubp(builtin_logior)

_spentry(builtin_logand)
        __(movb %arg_y_b,%imm0_b)
        __(orb %arg_z_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(andq %arg_y,%arg_z)
        __(jmp *%ra0)
1:
        __(jump_builtin(_builtin_logand,2))
_endsubp(builtin_logand)

_spentry(builtin_negate)
        __(testb $fixnummask,%arg_z_b)
        __(jne 1f)
        __(negq %arg_z)
        __(jo,pn C(fix_one_bit_overflow))
        __(jmp *%ra0)
1:
        __(jump_builtin(_builtin_negate,1))
_endsubp(builtin_negate)

_spentry(builtin_logxor)
        __(movb %arg_y_b,%imm0_b)
        __(orb %arg_z_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 1f)
        __(xorq %arg_y,%arg_z)
        __(jmp *%ra0)
1:
        __(jump_builtin(_builtin_logxor,2))
_endsubp(builtin_logxor)

_spentry(builtin_aref1)
_endsubp(builtin_aref1)

_spentry(builtin_aset1)
_endsubp(builtin_aset1)

/* We have to be a little careful here:  %cl has to be used for
   the (unboxed) shift count in all variable-length shifts, and
   %temp2 = %rcx.  Zero all but the low 8 (or 6) bits of %rcx,
   so that the shift count doesn't confuse the GC.
*/
_spentry(builtin_ash)
        __(movb %arg_y_b,%imm0_b)
        __(orb %arg_z_b,%imm0_b)
        __(testb $fixnummask,%imm0_b)
        __(jne 9f)
        __(unbox_fixnum(%arg_y,%imm1))
        __(unbox_fixnum(%arg_z,%imm0))
        /* Z flag set if zero ASH shift count */
        __(jnz 1f)
        __(movq %arg_y,%arg_z)  /* shift by 0 */
        __(jmp *%ra0)
1:      __(jns 3f)
        __(rcmpq(%imm0,$-63))
        __(jg 2f)
        __(sar $63,%imm1)
        __(box_fixnum(%imm1,%arg_z))
        __(jmp *%ra0)
2:      /* Right-shift by small fixnum */
        __(negb %imm0_b)
        __(movzbl %imm0_b,%ecx)
        __(sar %cl,%imm1)
        __(xorl %ecx,%ecx)
        __(box_fixnum(%imm1,%arg_z))
        __(jmp *%ra0)
3:    /* Left shift by fixnum. We can't shift by more than 63 bits, though
        shifting by 64 is actually easy. */
        __(rcmpq(%imm0,$64))
        __(jg 9f)
        __(jne 4f)
        /* left-shift by 64-bits exactly */
        __(xorl %imm0_l,%imm0_l)
        __(jmp C(makes128))
4:      /* left-shift by 1..63 bits.  Safe to move shift count to %rcx/%cl */
        __(movzbl %imm0_b,%ecx)  /* zero-extending mov */
        __(movq %imm1,%imm0)
        __(xorq %imm1,%imm1)
        __(testq %imm0,%imm0)
        __(js 5f)
        __(shld %cl,%imm0,%imm1)
        __(shl %cl,%imm0)
        __(xorb %cl,%cl)
        __(jmp C(makeu128))
5:      __(subq $1,%imm1)
        __(shld %cl,%imm0,%imm1)
        __(shl %cl,%imm0)
        __(xorb %cl,%cl)
        __(jmp C(makes128))
9:
        __(jump_builtin(_builtin_ash,2))
_endsubp(builtin_ash)
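/* The 1..63-bit left-shift path above builds a 128-bit result exactly
   the way this C sketch does (hi:lo correspond to %imm1:%imm0):

       #include <stdint.h>

       static void shift_left_128(int64_t value, unsigned count,
                                  int64_t *hi, uint64_t *lo) {
           uint64_t sign = (value < 0) ? ~0ULL : 0;  // pre-extend the sign
           *hi = (int64_t)((sign << count) |
                           ((uint64_t)value >> (64 - count)));
           *lo = (uint64_t)value << count;
       }

   valid for count in 1..63; count == 64 is special-cased above, and the
   result is handed to makes128/makeu128 to pick fixnum vs. bignum. */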