source: trunk/source/lisp-kernel/x86-asmutils32.s @ 11710

Last change on this file since 11710 was 11710, checked in by gb, 10 years ago

Try to handle the case of suspending a Windows thread that's in the
middle of restore_windows_context() more sanely/completely, especially
on win32.

Move the code that's concerned with this into a separate function
(pc_luser_restore_windows_context), which always overwrites some or
all of the context as of thread suspend with the context that the
suspended thread is trying to restore (e.g., whatever pc-lusering
we do always has the effect of causing the thread to resume at the
point where restore_windows_context() would have completed.)

Since restore_windows_context() is only used to return from an
exception, clear tcr->pending_exception_context when we've got
things to a known point.

Define ia32_iret_frame in x86-constants32.h.

Use iret to restore flags/cs:eip on ia32.

Ensure that the last thing that win32_restore_context() does before
the iret is to reload %rcx/%ecx from the context (kept in %rcx/%ecx),
so suspending in the middle of restore_lisp_context() - before the iret -
just has to find the context being restored in %rcx/%ecx (it doesn't
matter where we are in the process of restoring it.)

If we're at the iret, just emulate the (machine-specific) effects of
the iret.

Need to make sure that this isn't grossly broken on win64, and need
to look at interrupt (vice suspend) code.

File size: 8.5 KB
Line 
1/*   Copyright (C) 2005 Clozure Associates */
2/*   This file is part of OpenMCL.   */
3 
4/*   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public */
5/*   License , known as the LLGPL and distributed with OpenMCL as the */
6/*   file "LICENSE".  The LLGPL consists of a preamble and the LGPL, */
7/*   which is distributed with OpenMCL as the file "LGPL".  Where these */
8/*   conflict, the preamble takes precedence.   */
9 
10/*   OpenMCL is referenced in the preamble as the "LIBRARY." */
11 
12/*   The LLGPL is also available online at */
13/*   http://opensource.franz.com/preamble.html */
14
15
16       
17
18        include(lisp.s)
19
20        _beginfile
21
/* natural current_stack_pointer(void) */
/* Return the stack pointer as seen inside this leaf function, */
/* i.e. %esp pointing at our own return address. */
_exportfn(C(current_stack_pointer))
        __(movl %esp,%eax)
        __(ret)
_endfn
26                       
/* int count_leading_zeros(natural arg) */
/* bsr yields the bit index (0..31) of the highest set bit; since */
/* index^31 == 31-index for that range, the xor converts it to the */
/* number of leading zero bits. */
/* NOTE(review): bsr leaves %eax undefined when the source is 0, so */
/* the result is undefined for arg == 0 — presumably callers never */
/* pass 0; confirm. */
_exportfn(C(count_leading_zeros))
        __(bsr 4(%esp),%eax)
        __(xor $31,%eax)
        __(ret)
_endfn
32
/* void noop(void): do nothing.  (Useful as a harmless callable, */
/* e.g. a default or placeholder function pointer.) */
_exportfn(C(noop))
        __(ret)
_endfn
36
/* void set_mxcsr(natural new_mxcsr) */
/* Load the SSE control/status register from the argument word. */
_exportfn(C(set_mxcsr))
        __(ldmxcsr 4(%esp))
        __(ret)
_endfn
41       
/* natural get_mxcsr(void) */
/* Return the current SSE control/status register.  stmxcsr only */
/* stores to memory, so bounce the value through a stack slot. */
_exportfn(C(get_mxcsr))
        __(subl $4,%esp)        /* scratch dword on the stack */
        __(stmxcsr (%esp))      /* store MXCSR into it */
        __(movl (%esp),%eax)    /* result in %eax */
        __(addl $4,%esp)        /* discard the scratch slot */
        __(ret)
_endfn
48
/* NOTE(review): intentionally empty on ia32 — evidently FP context */
/* save is handled elsewhere (or not needed) on this platform. */
_exportfn(C(save_fp_context))
_endfn
51       
/* NOTE(review): intentionally empty on ia32 — see save_fp_context. */
_exportfn(C(restore_fp_context))
_endfn
54
55/*  Atomically store new in *p, if *p == old. */
56/*  Return actual old value. */
57/* natural store_conditional(natural *p, natural old, natural new) */
/* Compare-and-swap: cdecl args (ptr, old, new). */
_exportfn(C(store_conditional))
        __(movl 12(%esp),%edx)  /* new */
        __(movl 8(%esp),%eax)   /* old */
        __(movl 4(%esp),%ecx)   /* ptr */
        __(lock)
        __(cmpxchgl %edx,(%ecx)) /* if *ptr == old: *ptr = new; else %eax = *ptr */
        /* NOTE(review): on failure cmpxchg has already put the actual
           current *ptr into %eax, but the cmovne then overwrites it
           with 'new'.  So on failure the caller sees 'new', not the
           "actual old value" — distinguishable from success only
           because new != old in practice.  Left as-is; callers may
           rely on this exact convention. */
        __(cmovne %edx,%eax)
        __(ret)
_endfn
67
/*      Atomically store val in *p; return the previous */
/*      contents of *p. */
70/* signed_natural atomic_swap(signed_natural *p, signed_natural val) */
/* signed_natural atomic_swap(signed_natural *p, signed_natural val) */
/* Atomically exchange val with *p; return the previous *p. */
_exportfn(C(atomic_swap))
        __(movl 4(%esp),%ecx)   /* %ecx = p */
        __(movl 8(%esp),%eax)   /* %eax = val */
        __(lock)                /* xchg with memory is implicitly locked anyway */
        __(xchg %eax,(%ecx))    /* %eax = old *p */
        __(ret)
_endfn
78
79/*      Logior the value in *p with mask (presumably a */
80/*      bitmask with exactly 1 bit set.)  Return non-zero if any of */
81/*      the bits in that bitmask were already set. */
82/* natural atomic_ior(natural *p, natural mask) */
/* CAS retry loop: keep recomputing old|mask until the cmpxchg */
/* observes an unchanged *ptr and commits the new value. */
_exportfn(C(atomic_ior))
        __(movl 4(%esp),%edx)   /* ptr */
0:      __(movl (%edx),%eax)    /* %eax = current *ptr */
        __(movl %eax,%ecx)
        __(orl 8(%esp),%ecx)    /* %ecx = old | mask */
        __(lock)
        __(cmpxchg %ecx,(%edx)) /* commit iff *ptr still == %eax */
        __(jnz 0b)              /* lost a race: retry with fresh value */
        __(andl 8(%esp),%eax)   /* nonzero iff any mask bit was already set */
        __(ret)
_endfn
94       
95       
96/* Logand the value in *p with mask (presumably a bitmask with exactly 1 */
97/* bit set.)  Return the value now in *p (for some value of "now"). */
98/* natural atomic_and(natural *p, natural mask) */
/* CAS retry loop, same shape as atomic_ior above. */
_exportfn(C(atomic_and))
        __(movl 4(%esp),%edx)   /* ptr */
0:      __(movl (%edx),%eax)    /* %eax = current *ptr */
        __(movl %eax,%ecx)
        __(and 8(%esp),%ecx)    /* %ecx = old & mask */
        __(lock)
        __(cmpxchg %ecx,(%edx)) /* commit iff *ptr still == %eax */
        __(jnz 0b)              /* lost a race: retry */
        __(movl %ecx,%eax)      /* return the value we stored */
        __(ret)
_endfn
110
111
112        __ifdef([DARWIN])
/* hlt faults from user mode; presumably the kernel's Mach exception
   handler recognizes this PC as a pseudo-sigreturn request — confirm
   against the Darwin exception-handling code.  The jmp spins back in
   case execution is ever resumed past the hlt. */
_exportfn(C(pseudo_sigreturn))
        __(hlt)
        __(jmp C(pseudo_sigreturn))
_endfn
117        __endif   
118
119/* int cpuid (int code, int *pebx, int *pecx, int *pedx)  */
/* Execute cpuid for leaf 'code' (subleaf 0); return %eax, and store
   the %ebx/%ecx/%edx results through the three pointer args. */
_exportfn(C(cpuid))
        __(push %ebx)           /* %ebx is non-volatile */
        __(push %esi)           /* ditto here */
        /* two pushes above: incoming args are now 8 bytes further from
           %esp, hence the 12/16/20/24 offsets below */
        __(movl 12(%esp),%eax)  /* code (cpuid leaf) */
        __(xorl %ecx,%ecx)      /* subleaf 0, for leaves that read %ecx */
        __(cpuid)
        __(movl 16(%esp),%esi)  /* *pebx = %ebx */
        __(movl %ebx,(%esi))
        __(movl 20(%esp),%esi)  /* *pecx = %ecx */
        __(movl %ecx,(%esi))
        __(movl 24(%esp),%esi)  /* *pedx = %edx */
        __(movl %edx,(%esi))
        __(pop %esi)
        __(pop %ebx)
        __(ret)                 /* cpuid's %eax is the return value */
_endfn
136
137/* switch_to_foreign_stack(new_sp, func, arg_0, arg_1, arg_2)  */
138/*   Not fully general, but should get us off of the signal stack */
139/* Beware: on Darwin, GDB can get very confused by this code, and
140   doesn't really get unconfused until the target function - the
141   handler - has built its stack frame
142   The lone caller of this function passes 3 arguments (besides
143   the new stack pointer and the handler address.)
144   On platforms where the C stack must be 16-byte aligned, pushing
145   a 4th word helps make the stack aligned before the return
146   address is (re-)pushed.
147   On Linux, there are severe constraints on what the top of stack
148   can look like when rt_sigreturn (the code at the return address)
149   runs, and there aren't any constraints on stack alignment, so
150   we don't push the extra word on the new stack.*/
_exportfn(C(switch_to_foreign_stack))
        /* Pop our own arguments off the current (signal) stack, switch
           %esp to new_sp, then rebuild an equivalent call frame there
           and enter func as if it had been called normally. */
        __(addl $4,%esp)        /* discard return address, on wrong stack */
        __(pop %edi)            /* new esp */
        __(pop %esi)            /* handler */
        __(pop %eax)            /* arg_0 */
        __(pop %ebx)            /* arg_1 */
        __(pop %ecx)            /* arg_2 */
        __(mov %edi,%esp)       /* switch stacks */
        __(pop %edi)            /* Return address pushed by caller */
        __ifndef([LINUX])
        __(push $0)             /* For alignment. See comment above */
        __endif
        __(push %ecx)           /* arg_2 */
        __(push %ebx)           /* arg_1 */
        __(push %eax)           /* arg_0 */
        __(push %edi)           /* return address */
        __(jmp *%esi)           /* On some platforms, we don't really return */
_endfn
169
170        __ifdef([FREEBSD])
171        .globl C(sigreturn)
/* Tail-call the system's sigreturn under a FreeBSD-specific name. */
_exportfn(C(freebsd_sigreturn))
        __(jmp C(sigreturn))
_endfn
175        __endif
176
177        __ifdef([DARWIN])
178        .globl C(sigreturn)
_exportfn(C(darwin_sigreturn))
/* Need to set the sigreturn 'infostyle' argument, which is mostly
   undocumented.  On x8632 Darwin, sigtramp() sets it to 0x1e, and
   since we're trying to do what sigtramp() would do if we'd returned
   to it ... */
        __(movl $0x1e,%esi)     /* infostyle observed in Darwin's sigtramp() */
        __(jmp C(sigreturn))    /* tail-call; does not return */
_endfn
187        __endif       
188               
/* NOTE(review): stub — vector register state is not captured on */
/* ia32 here; presumably handled elsewhere or unneeded. */
_exportfn(C(get_vector_registers))
        __(ret)
_endfn
192
/* NOTE(review): stub — see get_vector_registers above it. */
_exportfn(C(put_vector_registers))
        __(ret)
_endfn
196
197        __ifdef([WIN_32])
/* restore_windows_context(CONTEXT *context, TCR *tcr, int old_valence) */
/* Restore the full thread state saved in a Windows CONTEXT record,
   finishing with an iret built from the context's EFlags/Eip.  The
   Xrestore_windows_context_{start,iret,end} labels bound the region;
   they're exported below so C code can recognize a thread that was
   suspended somewhere in here and finish the restoration for it. */
_exportfn(C(restore_windows_context))
Xrestore_windows_context_start:
        __(movl 4(%esp),%ecx)   /* context */
        __(movl 12(%esp),%edx)  /* old valence */
        __(movl 8(%esp),%eax)   /* tcr */
        __(movw tcr.ldt_selector(%eax), %rcontext_reg) /* re-establish TCR segment */
        __(movl %edx,rcontext(tcr.valence))
        /* Only used to return from an exception, so the pending
           exception context can be cleared now. */
        __(movl $0,rcontext(tcr.pending_exception_context))
        __(frstor win32_context.FloatSave(%ecx)) /* x87 state */
        /* Windows doesn't bother to align the context, so use
          'movupd' here */
        __(movupd win32_context.Xmm0(%ecx),%xmm0)
        __(movupd win32_context.Xmm1(%ecx),%xmm1)
        __(movupd win32_context.Xmm2(%ecx),%xmm2)
        __(movupd win32_context.Xmm3(%ecx),%xmm3)
        __(movupd win32_context.Xmm4(%ecx),%xmm4)
        __(movupd win32_context.Xmm5(%ecx),%xmm5)
        __(movupd win32_context.Xmm6(%ecx),%xmm6)
        __(movupd win32_context.Xmm7(%ecx),%xmm7)
        __(ldmxcsr win32_context.MXCSR(%ecx))
        __(movl win32_context.Ebp(%ecx),%ebp)
        __(movl win32_context.Edi(%ecx),%edi)
        __(movl win32_context.Esi(%ecx),%esi)
        __(movl win32_context.Edx(%ecx),%edx)
        __(movl win32_context.Ebx(%ecx),%ebx)
        __(movl win32_context.Eax(%ecx),%eax)
        __(movl win32_context.Esp(%ecx),%esp) /* now on the target stack */
        /* Build an ia32 iret frame: EFlags, %cs, then Eip. */
        __(pushl win32_context.EFlags(%ecx))
        __(pushl %cs)
        __(pushl win32_context.Eip(%ecx))       
        /* This must be the last thing before the iret, e.g., if we're
        interrupted before the iret, the context we're returning to here
        is still in %ecx.  If we're interrupted -at- the iret, then
        everything but that which the iret will restore has been restored. */
        __(movl win32_context.Ecx(%ecx),%ecx)
Xrestore_windows_context_iret:           
        __(iret)
Xrestore_windows_context_end:             
        __(nop)
_endfn
238       
/* windows_switch_to_foreign_stack(new_sp, handler, arg) */
/* Single-argument variant of switch_to_foreign_stack for win32. */
_exportfn(C(windows_switch_to_foreign_stack))
        __(pop %eax)            /* our return address */
        __(pop %ebx)            /* new %esp */
        __(pop %ecx)            /* handler */
        __(pop %edx)            /* arg */
        __(movl %ebx,%esp)      /* switch stacks */
        __(subl $0x10,%esp)     /* 16 bytes: arg slot + padding, presumably
                                   for stack alignment — confirm */
        __(movl %edx,(%esp))    /* arg = handler's first argument */
        __(push %eax)           /* fake a return address */
        __(jmp *%ecx)           /* enter handler as if called */
_endfn
250
        .data
/* Export the addresses bounding restore_windows_context() (and its
   iret) to C, so code handling thread suspension can tell whether a
   suspended thread's PC lies inside that critical region. */
        .globl C(restore_windows_context_start)
        .globl C(restore_windows_context_end)
        .globl C(restore_windows_context_iret)
C(restore_windows_context_start):  .long Xrestore_windows_context_start
C(restore_windows_context_end): .long Xrestore_windows_context_end
C(restore_windows_context_iret): .long Xrestore_windows_context_iret
        .text
259       
260        __ifdef([WIN32_ES_HACK])
261/* Something that we shouldn't return to */
/* hlt faults in user mode, so this is an address that can be
   "returned to" only to trap immediately. */
_exportfn(C(windows_halt))
        __(hlt)
_endfn
265        __endif
/* Make the string instructions (movs/stos/...) safe to use: clear DF,
   and under WIN32_ES_HACK also ensure %es == %ds.  NOTE(review): when
   the selectors differ we hlt first (trapping, presumably so the
   condition is observable to a handler/debugger — confirm) and then
   fix %es; when they already match, the store to %es at 0: is a
   harmless no-op reload. */
_exportfn(C(ensure_safe_for_string_operations))
        __ifdef([WIN32_ES_HACK])
        __(movw %es,%ax)
        __(movw %dx,%dx)
        __(cmpw %ax,%dx)
        __(jne 9f)              /* selectors differ: trap, then repair */
0:      __(movw %dx,%es)        /* %es = %ds */
        __endif
        __(cld)                 /* string ops ascend */
        __(ret)
        __ifdef([WIN32_ES_HACK])
9:      __(hlt)
        __(jmp 0b)
        __endif
_endfn
281        __endif
282        _endfile
283
Note: See TracBrowser for help on using the repository browser.