1 | /* |
---|
2 | Copyright (C) 1994-2001 Digitool, Inc |
---|
3 | This file is part of OpenMCL. |
---|
4 | |
---|
5 | OpenMCL is licensed under the terms of the Lisp Lesser GNU Public |
---|
6 | License , known as the LLGPL and distributed with OpenMCL as the |
---|
7 | file "LICENSE". The LLGPL consists of a preamble and the LGPL, |
---|
8 | which is distributed with OpenMCL as the file "LGPL". Where these |
---|
9 | conflict, the preamble takes precedence. |
---|
10 | |
---|
11 | OpenMCL is referenced in the preamble as the "LIBRARY." |
---|
12 | |
---|
13 | The LLGPL is also available online at |
---|
14 | http://opensource.franz.com/preamble.html |
---|
15 | */ |
---|
16 | |
---|
17 | #include "lisp.h" |
---|
18 | #include "lisp-exceptions.h" |
---|
19 | #include "lisp_globals.h" |
---|
20 | #include <ctype.h> |
---|
21 | #include <stdio.h> |
---|
22 | #include <stddef.h> |
---|
23 | #include <string.h> |
---|
24 | #include <stdarg.h> |
---|
25 | #include <errno.h> |
---|
26 | #include <stdio.h> |
---|
27 | #ifdef LINUX |
---|
28 | #include <strings.h> |
---|
29 | #include <sys/mman.h> |
---|
30 | #include <fpu_control.h> |
---|
31 | #include <linux/prctl.h> |
---|
32 | #endif |
---|
33 | |
---|
34 | #ifdef DARWIN |
---|
35 | #include <sys/mman.h> |
---|
36 | #define _FPU_RESERVED 0xffffff00 |
---|
37 | #ifndef SA_NODEFER |
---|
38 | #define SA_NODEFER 0 |
---|
39 | #endif |
---|
40 | #include <sysexits.h> |
---|
41 | |
---|
42 | /* a distinguished UUO at a distinguished address */ |
---|
43 | extern void pseudo_sigreturn(ExceptionInformation *); |
---|
44 | #endif |
---|
45 | |
---|
46 | |
---|
47 | #include "Threads.h" |
---|
48 | |
---|
49 | #define MSR_FE0_MASK (((unsigned)0x80000000)>>20) |
---|
50 | #define MSR_FE1_MASK (((unsigned)0x80000000)>>23) |
---|
51 | #define MSR_FE0_FE1_MASK (MSR_FE0_MASK|MSR_FE1_MASK) |
---|
52 | extern void enable_fp_exceptions(void); |
---|
53 | extern void disable_fp_exceptions(void); |
---|
54 | |
---|
55 | #ifdef LINUX |
---|
56 | /* Some relatively recent kernels support this interface. |
---|
57 | If this prctl isn't supported, assume that we're always |
---|
   running with exceptions enabled and "precise".
---|
59 | */ |
---|
60 | #ifndef PR_SET_FPEXC |
---|
61 | #define PR_SET_FPEXC 12 |
---|
62 | #endif |
---|
63 | #ifndef PR_FP_EXC_DISABLED |
---|
64 | #define PR_FP_EXC_DISABLED 0 |
---|
65 | #endif |
---|
66 | #ifndef PR_FP_EXC_PRECISE |
---|
67 | #define PR_FP_EXC_PRECISE 3 |
---|
68 | #endif |
---|
69 | |
---|
70 | void |
---|
71 | enable_fp_exceptions() |
---|
72 | { |
---|
73 | prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE); |
---|
74 | } |
---|
75 | |
---|
76 | void |
---|
77 | disable_fp_exceptions() |
---|
78 | { |
---|
79 | prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED); |
---|
80 | } |
---|
81 | |
---|
82 | #endif |
---|
83 | |
---|
84 | /* |
---|
85 | Handle exceptions. |
---|
86 | |
---|
87 | */ |
---|
88 | |
---|
89 | extern LispObj lisp_nil; |
---|
90 | |
---|
91 | extern natural lisp_heap_gc_threshold; |
---|
92 | extern Boolean grow_dynamic_area(natural); |
---|
93 | |
---|
94 | |
---|
95 | |
---|
96 | |
---|
97 | |
---|
98 | |
---|
/* VM page size and its base-2 log.  Initialized here to the common
   4KB value; presumably overridden at startup on platforms with a
   different page size -- TODO confirm against initialization code. */
int
page_size = 4096;

int
log2_page_size = 12;
---|
104 | |
---|
105 | |
---|
106 | |
---|
107 | |
---|
108 | |
---|
109 | /* |
---|
110 | If the PC is pointing to an allocation trap, the previous instruction |
---|
111 | must have decremented allocptr. Return the non-zero amount by which |
---|
112 | allocptr was decremented. |
---|
113 | */ |
---|
signed_natural
allocptr_displacement(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr = *program_counter, prev_instr = *(program_counter-1);

  if (instr == ALLOC_TRAP_INSTRUCTION) {
    /* Case 1: "subf allocptr,rX,allocptr": the displacement is the
       value of the register named in the RA field. */
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      return ((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    }
    /* Case 2: "addi allocptr,allocptr,-d": the displacement is the
       negation of the sign-extended low 16 bits of the instruction. */
    if (match_instr(prev_instr,
                    OP_MASK | RT_MASK | RA_MASK,
                    OP(major_opcode_ADDI) |
                    RT(allocptr) |
                    RA(allocptr))) {
      return (signed_natural) -((short) prev_instr);
    }
    /* An allocation trap not preceded by a recognized decrement of
       allocptr indicates a compiler/runtime inconsistency. */
    Bug(xp, "Can't determine allocation displacement");
  }
  /* Not at an allocation trap at all. */
  return 0;
}
---|
139 | |
---|
140 | |
---|
141 | /* |
---|
142 | A cons cell's been successfully allocated, but the allocptr's |
---|
143 | still tagged (as fulltag_cons, of course.) Emulate any instructions |
---|
144 | that might follow the allocation (stores to the car or cdr, an |
---|
145 | assignment to the "result" gpr) that take place while the allocptr's |
---|
146 | tag is non-zero, advancing over each such instruction. When we're |
---|
147 | done, the cons cell will be allocated and initialized, the result |
---|
148 | register will point to it, the allocptr will be untagged, and |
---|
149 | the PC will point past the instruction that clears the allocptr's |
---|
150 | tag. |
---|
151 | */ |
---|
void
finish_allocating_cons(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  /* allocptr is still tagged fulltag_cons; untag to get the cell. */
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  cons *c = (cons *)ptr_from_lispobj(untag(cur_allocptr));
  int target_reg;

  while (1) {
    instr = *program_counter++;

    /* The instruction that clears allocptr's tag ends the sequence:
       commit the untagged allocptr and resume after it. */
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }

    switch (instr & STORE_CXR_ALLOCPTR_MASK) {
    case STORE_CAR_ALLOCPTR_INSTRUCTION:
      /* Emulate a store into the new cell's car. */
      c->car = xpGPR(xp,RT_field(instr));
      break;
    case STORE_CDR_ALLOCPTR_INSTRUCTION:
      /* Emulate a store into the new cell's cdr. */
      c->cdr = xpGPR(xp,RT_field(instr));
      break;
    default:
      /* Assume that this is an assignment: {rt/ra} <- allocptr.
         There are several equivalent instruction forms
         that might have that effect; just assign to target here.
      */
      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
      break;
    }
  }
}
---|
192 | |
---|
193 | /* |
---|
194 | We were interrupted in the process of allocating a uvector; we |
---|
195 | survived the allocation trap, and allocptr is tagged as fulltag_misc. |
---|
196 | Emulate any instructions which store a header into the uvector, |
---|
197 | assign the value of allocptr to some other register, and clear |
---|
198 | allocptr's tag. Don't expect/allow any other instructions in |
---|
199 | this environment. |
---|
200 | */ |
---|
void
finish_allocating_uvector(ExceptionInformation *xp)
{
  pc program_counter = xpPC(xp);
  opcode instr;
  /* allocptr is still tagged fulltag_misc here. */
  LispObj cur_allocptr = xpGPR(xp, allocptr);
  int target_reg;

  while (1) {
    instr = *program_counter++;
    /* The tag-clearing instruction terminates the sequence. */
    if (instr == UNTAG_ALLOCPTR_INSTRUCTION) {
      xpGPR(xp, allocptr) = untag(cur_allocptr);
      xpPC(xp) = program_counter;
      return;
    }
    if ((instr & STORE_HEADER_ALLOCPTR_MASK) ==
        STORE_HEADER_ALLOCPTR_INSTRUCTION) {
      /* Emulate the store of the uvector's header word. */
      header_of(cur_allocptr) = xpGPR(xp, RT_field(instr));
    } else {
      /* assume that this is an assignment: {rt/ra} <- allocptr */

      if (major_opcode_p(instr,major_opcode_X31)) {
        target_reg = RA_field(instr);
      } else {
        target_reg = RT_field(instr);
      }
      xpGPR(xp,target_reg) = cur_allocptr;
    }
  }
}
---|
231 | |
---|
232 | |
---|
233 | Boolean |
---|
234 | allocate_object(ExceptionInformation *xp, |
---|
235 | natural bytes_needed, |
---|
236 | signed_natural disp_from_allocptr, |
---|
237 | TCR *tcr) |
---|
238 | { |
---|
239 | area *a = active_dynamic_area; |
---|
240 | |
---|
241 | /* Maybe do an EGC */ |
---|
242 | if (a->older && lisp_global(OLDEST_EPHEMERAL)) { |
---|
243 | if (((a->active)-(a->low)) >= a->threshold) { |
---|
244 | gc_from_xp(xp, 0L); |
---|
245 | } |
---|
246 | } |
---|
247 | |
---|
248 | /* Life is pretty simple if we can simply grab a segment |
---|
249 | without extending the heap. |
---|
250 | */ |
---|
251 | if (new_heap_segment(xp, bytes_needed, false, tcr)) { |
---|
252 | xpGPR(xp, allocptr) += disp_from_allocptr; |
---|
253 | #ifdef DEBUG |
---|
254 | fprintf(stderr, "New heap segment for #x%x, no GC: #x%x/#x%x, vsp = #x%x\n", |
---|
255 | tcr,xpGPR(xp,allocbase),tcr->last_allocptr, xpGPR(xp,vsp)); |
---|
256 | #endif |
---|
257 | return true; |
---|
258 | } |
---|
259 | |
---|
260 | /* It doesn't make sense to try a full GC if the object |
---|
261 | we're trying to allocate is larger than everything |
---|
262 | allocated so far. |
---|
263 | */ |
---|
264 | if ((lisp_global(HEAP_END)-lisp_global(HEAP_START)) > bytes_needed) { |
---|
265 | untenure_from_area(tenured_area); /* force a full GC */ |
---|
266 | gc_from_xp(xp, 0L); |
---|
267 | } |
---|
268 | |
---|
269 | /* Try again, growing the heap if necessary */ |
---|
270 | if (new_heap_segment(xp, bytes_needed, true, tcr)) { |
---|
271 | xpGPR(xp, allocptr) += disp_from_allocptr; |
---|
272 | #ifdef DEBUG |
---|
273 | fprintf(stderr, "New heap segment for #x%x after GC: #x%x/#x%x\n", |
---|
274 | tcr,xpGPR(xp,allocbase),tcr->last_allocptr); |
---|
275 | #endif |
---|
276 | return true; |
---|
277 | } |
---|
278 | |
---|
279 | return false; |
---|
280 | } |
---|
281 | |
---|
282 | #ifndef XNOMEM |
---|
283 | #define XNOMEM 10 |
---|
284 | #endif |
---|
285 | |
---|
286 | void |
---|
287 | update_bytes_allocated(TCR* tcr, void *cur_allocptr) |
---|
288 | { |
---|
289 | BytePtr |
---|
290 | last = (BytePtr) tcr->last_allocptr, |
---|
291 | current = (BytePtr) cur_allocptr; |
---|
292 | if (last && (cur_allocptr != ((void *)VOID_ALLOCPTR))) { |
---|
293 | tcr->bytes_allocated += last-current; |
---|
294 | } |
---|
295 | tcr->last_allocptr = 0; |
---|
296 | } |
---|
297 | |
---|
298 | void |
---|
299 | lisp_allocation_failure(ExceptionInformation *xp, TCR *tcr, natural bytes_needed) |
---|
300 | { |
---|
301 | /* Couldn't allocate the object. If it's smaller than some arbitrary |
---|
302 | size (say 128K bytes), signal a "chronically out-of-memory" condition; |
---|
303 | else signal a "allocation request failed" condition. |
---|
304 | */ |
---|
305 | xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR; |
---|
306 | handle_error(xp, bytes_needed < (128<<10) ? XNOMEM : error_alloc_failed, 0, 0, xpPC(xp)); |
---|
307 | } |
---|
308 | |
---|
309 | /* |
---|
310 | Allocate a large list, where "large" means "large enough to |
---|
311 | possibly trigger the EGC several times if this was done |
---|
312 | by individually allocating each CONS." The number of |
---|
  conses in question is in arg_z; on successful return,
---|
314 | the list will be in arg_z |
---|
315 | */ |
---|
316 | |
---|
317 | Boolean |
---|
318 | allocate_list(ExceptionInformation *xp, TCR *tcr) |
---|
319 | { |
---|
320 | natural |
---|
321 | nconses = (unbox_fixnum(xpGPR(xp,arg_z))), |
---|
322 | bytes_needed = (nconses << dnode_shift); |
---|
323 | LispObj |
---|
324 | prev = lisp_nil, |
---|
325 | current, |
---|
326 | initial = xpGPR(xp,arg_y); |
---|
327 | |
---|
328 | if (nconses == 0) { |
---|
329 | /* Silly case */ |
---|
330 | xpGPR(xp,arg_z) = lisp_nil; |
---|
331 | xpGPR(xp,allocptr) = lisp_nil; |
---|
332 | return true; |
---|
333 | } |
---|
334 | update_bytes_allocated(tcr, (void *)(void *) tcr->save_allocptr); |
---|
335 | if (allocate_object(xp,bytes_needed,(-bytes_needed)+fulltag_cons,tcr)) { |
---|
336 | for (current = xpGPR(xp,allocptr); |
---|
337 | nconses; |
---|
338 | prev = current, current+= dnode_size, nconses--) { |
---|
339 | deref(current,0) = prev; |
---|
340 | deref(current,1) = initial; |
---|
341 | } |
---|
342 | xpGPR(xp,arg_z) = prev; |
---|
343 | xpGPR(xp,arg_y) = xpGPR(xp,allocptr); |
---|
344 | xpGPR(xp,allocptr)-=fulltag_cons; |
---|
345 | } else { |
---|
346 | lisp_allocation_failure(xp,tcr,bytes_needed); |
---|
347 | } |
---|
348 | return true; |
---|
349 | } |
---|
350 | |
---|
/*
  Handle a trap taken at an allocation instruction.  The tag of
  allocptr says what kind of object was being allocated; the
  previous instruction (a subf or addi on allocptr) says how big
  it is.  Returns 0 on success (PC advanced past the trap), -1 if
  the trap can't be recognized or the allocation fails.
*/
OSStatus
handle_alloc_trap(ExceptionInformation *xp, TCR *tcr)
{
  pc program_counter;
  natural cur_allocptr, bytes_needed = 0;
  opcode prev_instr;
  signed_natural disp = 0;
  unsigned allocptr_tag;

  cur_allocptr = xpGPR(xp,allocptr);
  program_counter = xpPC(xp);
  prev_instr = *(program_counter-1);
  allocptr_tag = fulltag_of(cur_allocptr);

  switch (allocptr_tag) {
  case fulltag_cons:
    /* Allocating a cons cell: fixed size. */
    bytes_needed = sizeof(cons);
    disp = -sizeof(cons) + fulltag_cons;
    break;

  case fulltag_even_fixnum:
  case fulltag_odd_fixnum:
    break;

  case fulltag_misc:
    /* Allocating a uvector: recover the size from the preceding
       "subf allocptr,rX,allocptr" or "addi allocptr,allocptr,-d". */
    if (match_instr(prev_instr,
                    XO_MASK | RT_MASK | RB_MASK,
                    XO(major_opcode_X31,minor_opcode_SUBF, 0, 0) |
                    RT(allocptr) |
                    RB(allocptr))) {
      disp = -((signed_natural) xpGPR(xp, RA_field(prev_instr)));
    } else if (match_instr(prev_instr,
                           OP_MASK | RT_MASK | RA_MASK,
                           OP(major_opcode_ADDI) |
                           RT(allocptr) |
                           RA(allocptr))) {
      disp = (signed_natural) ((short) prev_instr);
    }
    if (disp) {
      bytes_needed = (-disp) + fulltag_misc;
      break;
    }
    /* else fall thru */
  default:
    return -1;
  }

  if (bytes_needed) {
    /* cur_allocptr-disp is where allocptr pointed before the
       decrement; account for bytes consumed up to that point. */
    update_bytes_allocated(tcr,((BytePtr)(cur_allocptr-disp)));
    if (allocate_object(xp, bytes_needed, disp, tcr)) {
#if 0
      fprintf(stderr, "alloc_trap in 0x%lx, new allocptr = 0x%lx\n",
              tcr, xpGPR(xp, allocptr));
#endif
      /* Resume past the trap instruction. */
      adjust_exception_pc(xp,4);
      return 0;
    }
    lisp_allocation_failure(xp,tcr,bytes_needed);
    return -1;
  }
  return -1;
}
---|
413 | |
---|
/* Counts of GC requests deferred because GC was inhibited; see
   gc_like_from_xp and handle_gc_trap. */
natural gc_deferred = 0, full_gc_deferred = 0;
---|
415 | |
---|
/*
  Handle a GC-service trap from lisp code.  The selector in imm0
  names the requested operation (EGC control/configuration, heap
  threshold manipulation, or one of the GC variants); imm1 carries
  a selector-specific argument.  Results are returned in imm0 or
  arg_z as appropriate, and the PC is advanced past the trap.
*/
OSStatus
handle_gc_trap(ExceptionInformation *xp, TCR *tcr)
{
  LispObj
    selector = xpGPR(xp,imm0),
    arg = xpGPR(xp,imm1);
  area *a = active_dynamic_area;
  Boolean egc_was_enabled = (a->older != NULL);
  natural gc_previously_deferred = gc_deferred;


  switch (selector) {
  case GC_TRAP_FUNCTION_EGC_CONTROL:
    /* Enable/disable the EGC; return T iff it had been enabled. */
    egc_control(arg != 0, a->active);
    xpGPR(xp,arg_z) = lisp_nil + (egc_was_enabled ? t_offset : 0);
    break;

  case GC_TRAP_FUNCTION_CONFIGURE_EGC:
    /* New generation thresholds arrive boxed in arg_x/arg_y/arg_z. */
    a->threshold = unbox_fixnum(xpGPR(xp, arg_x));
    g1_area->threshold = unbox_fixnum(xpGPR(xp, arg_y));
    g2_area->threshold = unbox_fixnum(xpGPR(xp, arg_z));
    xpGPR(xp,arg_z) = lisp_nil+t_offset;
    break;

  case GC_TRAP_FUNCTION_SET_LISP_HEAP_THRESHOLD:
    if (((signed_natural) arg) > 0) {
      /* Round the requested threshold up to a whole number of
         heap segments. */
      lisp_heap_gc_threshold =
        align_to_power_of_2((arg-1) +
                            (heap_segment_size - 1),
                            log2_heap_segment_size);
    }
    /* fall through */
  case GC_TRAP_FUNCTION_GET_LISP_HEAP_THRESHOLD:
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  case GC_TRAP_FUNCTION_USE_LISP_HEAP_THRESHOLD:
    /* Try to put the current threshold in effect.  This may
       need to disable/reenable the EGC. */
    untenure_from_area(tenured_area);
    resize_dynamic_heap(a->active,lisp_heap_gc_threshold);
    if (egc_was_enabled) {
      if ((a->high - a->active) >= a->threshold) {
        tenure_to_area(tenured_area);
      }
    }
    xpGPR(xp, imm0) = lisp_heap_gc_threshold;
    break;

  default:
    /* Some variety of GC.  Account for this thread's allocation
       before anything moves. */
    update_bytes_allocated(tcr, (void *) ptr_from_lispobj(xpGPR(xp, allocptr)));

    if (selector == GC_TRAP_FUNCTION_IMMEDIATE_GC) {
      if (!full_gc_deferred) {
        gc_from_xp(xp, 0L);
        break;
      }
      /* Tried to do a full GC when gc was disabled.  That failed,
         so try full GC now */
      selector = GC_TRAP_FUNCTION_GC;
    }

    if (egc_was_enabled) {
      /* Full GC runs with the EGC off; re-enabled below. */
      egc_control(false, (BytePtr) a->active);
    }
    gc_from_xp(xp, 0L);
    /* If the GC was deferred while we ran, remember that a full GC
       is still owed. */
    if (gc_deferred > gc_previously_deferred) {
      full_gc_deferred = 1;
    } else {
      full_gc_deferred = 0;
    }
    if (selector > GC_TRAP_FUNCTION_GC) {
      if (selector & GC_TRAP_FUNCTION_IMPURIFY) {
        impurify_from_xp(xp, 0L);
        /* nrs_GC_EVENT_STATUS_BITS.vcell |= gc_integrity_check_bit; */
        gc_from_xp(xp, 0L);
        release_readonly_area();
      }
      if (selector & GC_TRAP_FUNCTION_PURIFY) {
        purify_from_xp(xp, 0L);
        gc_from_xp(xp, 0L);
      }
      if (selector & GC_TRAP_FUNCTION_SAVE_APPLICATION) {
        OSErr err;
        extern OSErr save_application(unsigned);
        TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));
        area *vsarea = tcr->vs_area;

        /* Record the toplevel function from the value stack before
           saving the image. */
        nrs_TOPLFUNC.vcell = *((LispObj *)(vsarea->high)-1);
        err = save_application(arg);
        if (err == noErr) {
          _exit(0);
        }
        fatal_oserr(": save_application", err);
      }
      switch (selector) {
      case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
        /* Obsolete/no-op: always reports 0. */
        xpGPR(xp, imm0) = 0;
        break;

      case GC_TRAP_FUNCTION_FREEZE:
        /* Make everything currently allocated static. */
        a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
        tenured_area->static_dnodes = area_dnode(a->active, a->low);
        xpGPR(xp, imm0) = tenured_area->static_dnodes << dnode_shift;
        break;
      default:
        break;
      }
    }

    if (egc_was_enabled) {
      egc_control(true, NULL);
    }
    break;

  }

  adjust_exception_pc(xp,4);
  return 0;
}
---|
536 | |
---|
537 | |
---|
538 | |
---|
/*
  Report a soft stack overflow on the stack associated with "reg"
  by signaling a lisp error; the condition handler runs with the
  soft limit temporarily relaxed (see restore_soft_stack_limit).
*/
void
signal_stack_soft_overflow(ExceptionInformation *xp, unsigned reg)
{
  /* The cstack just overflowed.  Force the current thread's
     control stack to do so until all stacks are well under their overflow
     limits.
  */

#if 0
  lisp_global(CS_OVERFLOW_LIMIT) = CS_OVERFLOW_FORCE_LIMIT; /* force unsigned traps to fail */
#endif
  handle_error(xp, error_stack_overflow, reg, 0, xpPC(xp));
}
---|
552 | |
---|
553 | /* |
---|
554 | Lower (move toward 0) the "end" of the soft protected area associated |
---|
555 | with a by a page, if we can. |
---|
556 | */ |
---|
557 | |
---|
558 | void |
---|
559 | adjust_soft_protection_limit(area *a) |
---|
560 | { |
---|
561 | char *proposed_new_soft_limit = a->softlimit - 4096; |
---|
562 | protected_area_ptr p = a->softprot; |
---|
563 | |
---|
564 | if (proposed_new_soft_limit >= (p->start+16384)) { |
---|
565 | p->end = proposed_new_soft_limit; |
---|
566 | p->protsize = p->end-p->start; |
---|
567 | a->softlimit = proposed_new_soft_limit; |
---|
568 | } |
---|
569 | protect_area(p); |
---|
570 | } |
---|
571 | |
---|
572 | void |
---|
573 | restore_soft_stack_limit(unsigned stkreg) |
---|
574 | { |
---|
575 | area *a; |
---|
576 | TCR *tcr = get_tcr(true); |
---|
577 | |
---|
578 | switch (stkreg) { |
---|
579 | case sp: |
---|
580 | a = tcr->cs_area; |
---|
581 | if ((a->softlimit - 4096) > (a->hardlimit + 16384)) { |
---|
582 | a->softlimit -= 4096; |
---|
583 | } |
---|
584 | tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit); |
---|
585 | break; |
---|
586 | case vsp: |
---|
587 | a = tcr->vs_area; |
---|
588 | adjust_soft_protection_limit(a); |
---|
589 | break; |
---|
590 | case tsp: |
---|
591 | a = tcr->ts_area; |
---|
592 | adjust_soft_protection_limit(a); |
---|
593 | } |
---|
594 | } |
---|
595 | |
---|
596 | /* Maybe this'll work someday. We may have to do something to |
---|
597 | make the thread look like it's not handling an exception */ |
---|
void
reset_lisp_process(ExceptionInformation *xp)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp,rcontext));
  /* Unwind to the topmost catch frame on this thread. */
  catch_frame *last_catch = (catch_frame *) ptr_from_lispobj(untag(tcr->catch_top));

  /* Save the current allocation state out of the context. */
  tcr->save_allocptr = (void *) ptr_from_lispobj(xpGPR(xp, allocptr));
  tcr->save_allocbase = (void *) ptr_from_lispobj(xpGPR(xp, allocbase));

  /* Reset the value and temp stacks to where they were at the
     topmost catch. */
  tcr->save_vsp = (LispObj *) ptr_from_lispobj(((lisp_frame *)ptr_from_lispobj(last_catch->csp))->savevsp);
  tcr->save_tsp = (LispObj *) ptr_from_lispobj((LispObj) ptr_to_lispobj(last_catch)) - (2*node_size); /* account for TSP header */

  start_lisp(tcr, 1);
}
---|
612 | |
---|
613 | /* |
---|
614 | This doesn't GC; it returns true if it made enough room, false |
---|
615 | otherwise. |
---|
616 | If "extend" is true, it can try to extend the dynamic area to |
---|
617 | satisfy the request. |
---|
618 | */ |
---|
619 | |
---|
Boolean
new_heap_segment(ExceptionInformation *xp, natural need, Boolean extend, TCR *tcr)
{
  area *a;
  natural newlimit, oldlimit;
  natural log2_allocation_quantum = tcr->log2_allocation_quantum;

  a = active_dynamic_area;
  oldlimit = (natural) a->active;
  /* Both the current frontier and the request size are rounded up
     to the thread's allocation quantum. */
  newlimit = (align_to_power_of_2(oldlimit, log2_allocation_quantum) +
              align_to_power_of_2(need, log2_allocation_quantum));
  if (newlimit > (natural) (a->high)) {
    if (extend) {
      signed_natural inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
      natural extend_by = inhibit ? 0 : lisp_heap_gc_threshold;
      /* Try to extend by progressively smaller amounts, halving
         each time, giving up below 4MB. */
      do {
        if (resize_dynamic_heap(a->active, (newlimit-oldlimit)+extend_by)) {
          break;
        }
        extend_by = align_to_power_of_2(extend_by>>1, log2_allocation_quantum);
        if (extend_by < 4<<20) {
          return false;
        }
      } while (1);
    } else {
      return false;
    }
  }
  /* Hand [oldlimit, newlimit) to the thread: allocptr starts at the
     top (allocation proceeds downward toward allocbase). */
  a->active = (BytePtr) newlimit;
  tcr->last_allocptr = (void *)newlimit;
  xpGPR(xp,allocptr) = (LispObj) newlimit;
  xpGPR(xp,allocbase) = (LispObj) oldlimit;

  return true;
}
---|
655 | |
---|
656 | |
---|
657 | void |
---|
658 | update_area_active (area **aptr, BytePtr value) |
---|
659 | { |
---|
660 | area *a = *aptr; |
---|
661 | for (; a; a = a->older) { |
---|
662 | if ((a->low <= value) && (a->high >= value)) break; |
---|
663 | }; |
---|
664 | if (a == NULL) Bug(NULL, "Can't find active area"); |
---|
665 | a->active = value; |
---|
666 | *aptr = a; |
---|
667 | |
---|
668 | for (a = a->younger; a; a = a->younger) { |
---|
669 | a->active = a->high; |
---|
670 | } |
---|
671 | } |
---|
672 | |
---|
/*
  Bring a TCR into a state the GC can work with: record its stack
  pointers in the corresponding areas and account for its pending
  allocation.  "xp" is the context to read registers from (NULL if
  the thread is in a foreign-function call, in which case the saved
  values in the TCR are used); is_other_tcr is true when normalizing
  a thread other than the one running the GC.
*/
void
normalize_tcr(ExceptionInformation *xp, TCR *tcr, Boolean is_other_tcr)
{
  void *cur_allocptr = NULL;
  LispObj freeptr = 0;

  if (xp) {
    if (is_other_tcr) {
      /* Finish/back out any partially-emulated allocation in the
         other thread before reading its allocptr. */
      pc_luser_xp(xp, tcr, NULL);
      freeptr = xpGPR(xp, allocptr);
      if (fulltag_of(freeptr) == 0){
        cur_allocptr = (void *) ptr_from_lispobj(freeptr);
      }
    }
    update_area_active((area **)&tcr->cs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)));
    update_area_active((area **)&tcr->vs_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, vsp)));
    update_area_active((area **)&tcr->ts_area, (BytePtr) ptr_from_lispobj(xpGPR(xp, tsp)));
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in lisp code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, xpGPR(xp, vsp), xpGPR(xp, tsp));
    fprintf(stderr, "TCR 0x%x, allocbase/allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            xpGPR(xp, allocbase),
            xpGPR(xp, allocptr),
            xpPC(xp));
    fprintf(stderr, "TCR 0x%x, exception context = 0x%x\n",
            tcr,
            tcr->pending_exception_context);
#endif
  } else {
    /* In ff-call. No need to update cs_area */
    cur_allocptr = (void *) (tcr->save_allocptr);
#ifdef DEBUG
    fprintf(stderr, "TCR 0x%x in foreign code, vsp = 0x%lx, tsp = 0x%lx\n",
            tcr, tcr->save_vsp, tcr->save_tsp);
    fprintf(stderr, "TCR 0x%x, save_allocbase/save_allocptr were 0x%x/0x%x at #x%x\n",
            tcr,
            tcr->save_allocbase,
            tcr->save_allocptr,
            xpPC(xp));

#endif
    update_area_active((area **)&tcr->vs_area, (BytePtr) tcr->save_vsp);
    update_area_active((area **)&tcr->ts_area, (BytePtr) tcr->save_tsp);
  }


  /* Invalidate the saved allocation region... */
  tcr->save_allocptr = tcr->save_allocbase = (void *)VOID_ALLOCPTR;
  if (cur_allocptr) {
    /* ...after crediting bytes consumed from it... */
    update_bytes_allocated(tcr, cur_allocptr);
    if (freeptr) {
      /* ...and invalidate it in the context too. */
      xpGPR(xp, allocptr) = VOID_ALLOCPTR;
      xpGPR(xp, allocbase) = VOID_ALLOCPTR;
    }
  }
}
---|
729 | |
---|
/* The TCR of the thread currently running a GC-like operation, or
   NULL when none is in progress; set/cleared by gc_like_from_xp. */
TCR *gc_tcr = NULL;
---|
731 | |
---|
732 | /* Suspend and "normalize" other tcrs, then call a gc-like function |
---|
733 | in that context. Resume the other tcrs, then return what the |
---|
734 | function returned */ |
---|
735 | |
---|
int
gc_like_from_xp(ExceptionInformation *xp,
                int(*fun)(TCR *, signed_natural),
                signed_natural param)
{
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)), *other_tcr;
  int result;
  signed_natural inhibit;

  suspend_other_threads(true);
  inhibit = (signed_natural)(lisp_global(GC_INHIBIT_COUNT));
  if (inhibit != 0) {
    /* GC is inhibited.  Negate a positive count to record that a GC
       was requested while inhibited, defer, and bail out. */
    if (inhibit > 0) {
      lisp_global(GC_INHIBIT_COUNT) = (LispObj)(-inhibit);
    }
    resume_other_threads(true);
    gc_deferred++;
    return 0;
  }
  gc_deferred = 0;

  gc_tcr = tcr;

  /* The GC thread's own allocation region is invalidated first. */
  xpGPR(xp, allocptr) = VOID_ALLOCPTR;
  xpGPR(xp, allocbase) = VOID_ALLOCPTR;

  normalize_tcr(xp, tcr, false);


  /* Normalize every other thread, using the most appropriate
     context available for each. */
  for (other_tcr = tcr->next; other_tcr != tcr; other_tcr = other_tcr->next) {
    if (other_tcr->pending_exception_context) {
      other_tcr->gc_context = other_tcr->pending_exception_context;
    } else if (other_tcr->valence == TCR_STATE_LISP) {
      other_tcr->gc_context = other_tcr->suspend_context;
    } else {
      /* no pending exception, didn't suspend in lisp state:
         must have executed a synchronous ff-call.
      */
      other_tcr->gc_context = NULL;
    }
    normalize_tcr(other_tcr->gc_context, other_tcr, true);
  }



  result = fun(tcr, param);

  /* Clear every thread's gc_context, including our own. */
  other_tcr = tcr;
  do {
    other_tcr->gc_context = NULL;
    other_tcr = other_tcr->next;
  } while (other_tcr != tcr);

  gc_tcr = NULL;

  resume_other_threads(true);

  return result;

}
---|
796 | |
---|
797 | |
---|
798 | /* Returns #bytes freed by invoking GC */ |
---|
799 | |
---|
800 | int |
---|
801 | gc_from_tcr(TCR *tcr, signed_natural param) |
---|
802 | { |
---|
803 | area *a; |
---|
804 | BytePtr oldfree, newfree; |
---|
805 | BytePtr oldend, newend; |
---|
806 | |
---|
807 | #ifdef DEBUG |
---|
808 | fprintf(stderr, "Start GC in 0x%lx\n", tcr); |
---|
809 | #endif |
---|
810 | a = active_dynamic_area; |
---|
811 | oldend = a->high; |
---|
812 | oldfree = a->active; |
---|
813 | gc(tcr, param); |
---|
814 | newfree = a->active; |
---|
815 | newend = a->high; |
---|
816 | #if 0 |
---|
817 | fprintf(stderr, "End GC in 0x%lx\n", tcr); |
---|
818 | #endif |
---|
819 | return ((oldfree-newfree)+(newend-oldend)); |
---|
820 | } |
---|
821 | |
---|
822 | int |
---|
823 | gc_from_xp(ExceptionInformation *xp, signed_natural param) |
---|
824 | { |
---|
825 | int status = gc_like_from_xp(xp, gc_from_tcr, param); |
---|
826 | |
---|
827 | freeGCptrs(); |
---|
828 | return status; |
---|
829 | } |
---|
830 | |
---|
/* Run purify (move live data to read-only/static space) in a
   suspended-threads context; returns what purify returns. */
int
purify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, purify, param);
}
---|
836 | |
---|
/* Run impurify (move purified data back to the dynamic heap) in a
   suspended-threads context; returns what impurify returns. */
int
impurify_from_xp(ExceptionInformation *xp, signed_natural param)
{
  return gc_like_from_xp(xp, impurify, param);
}
---|
842 | |
---|
843 | |
---|
844 | |
---|
845 | |
---|
846 | |
---|
847 | |
---|
/* Dispatch table for protection faults.  Presumably indexed by the
   protected area's kind code: one spurious-fault entry, then soft-
   and hard-overflow entries for three stacks -- TODO confirm the
   index enum against lisp-exceptions.h. */
protection_handler
 * protection_handlers[] = {
  do_spurious_wp_fault,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_soft_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow,
  do_hard_stack_overflow
};
---|
858 | |
---|
859 | |
---|
/*
  Decide whether the memory fault described by xp/info was caused by
  a write (as opposed to a read or an execute fault).  Prefers the
  siginfo when available; otherwise falls back to the DSISR/TRAP
  registers in the context.
*/
Boolean
is_write_fault(ExceptionInformation *xp, siginfo_t *info)
{
  /* use the siginfo if it's available.  Some versions of Linux
     don't propagate the DSISR and TRAP fields correctly from
     64- to 32-bit handlers.
  */
  if (info) {
    /*
       To confuse matters still further, the value of SEGV_ACCERR
       varies quite a bit among LinuxPPC variants (the value defined
       in the header files varies, and the value actually set by
       the kernel also varies.  So far, we're only looking at the
       siginfo under Linux and Linux always seems to generate
       SIGSEGV, so check for SIGSEGV and check the low 16 bits
       of the si_code.
    */
    return ((info->si_signo == SIGSEGV) &&
            ((info->si_code & 0xff) == (SEGV_ACCERR & 0xff)));
  }
  /* No siginfo: bit 25 of DSISR indicates a store fault; also check
     that the trap was a data storage exception. */
  return(((xpDSISR(xp) & (1 << 25)) != 0) &&
         (xpTRAP(xp) ==
#ifdef LINUX
          0x0300
#endif
#ifdef DARWIN
          0x0300/0x100
#endif
          )
         );
#if 0
  /* Maybe worth keeping around; not sure if it's an exhaustive
     list of PPC instructions that could cause a WP fault */
  /* Some OSes lose track of the DSISR and DSR SPRs, or don't provide
     valid values of those SPRs in the context they provide to
     exception handlers.  Look at the opcode of the offending
     instruction & recognize 32-bit store operations */
  opcode instr = *(xpPC(xp));

  if (xp->regs->trap != 0x300) {
    return 0;
  }
  switch (instr >> 26) {
  case 47:                      /* STMW */
  case 36:                      /* STW */
  case 37:                      /* STWU */
    return 1;
  case 31:
    switch ((instr >> 1) & 1023) {
    case 151:                   /* STWX */
    case 183:                   /* STWUX */
      return 1;
    default:
      return 0;
    }
  default:
    return 0;
  }
#endif
}
---|
920 | |
---|
/* SIGSEGV/SIGBUS handler: classify the faulting address (safe-ref
   probe, touch_page probe, guarded lisp stack page, or the
   write-protected readonly section) and dispatch accordingly.
   Returns 0 if handled; -1 (after notifying lisp via CMAIN when the
   fault was taken in lisp code) otherwise. */
OSStatus
handle_protection_violation(ExceptionInformation *xp, siginfo_t *info, TCR *tcr, int old_valence)
{
  BytePtr addr;
  protected_area_ptr area;
  protection_handler *handler;
  extern Boolean touch_page(void *);
  extern void touch_page_end(void);

  /* Prefer the siginfo's fault address; fall back to the DAR SPR. */
  if (info) {
    addr = (BytePtr)(info->si_addr);
  } else {
    addr = (BytePtr) ((natural) (xpDAR(xp)));
  }

  if (addr && (addr == tcr->safe_ref_address)) {
    /* A "safe reference" probe faulted: skip the faulting
       instruction and return 0 in imm0 to report failure. */
    adjust_exception_pc(xp,4);

    xpGPR(xp,imm0) = 0;
    return 0;
  }

  if (xpPC(xp) == (pc)touch_page) {
    /* touch_page() faulted: return 0 in imm0 and resume at its
       known end point. */
    xpGPR(xp,imm0) = 0;
    xpPC(xp) = (pc)touch_page_end;
    return 0;
  }


  if (is_write_fault(xp,info)) {
    area = find_protected_area(addr);
    if (area != NULL) {
      /* A known protected area: dispatch on its kind. */
      handler = protection_handlers[area->why];
      return handler(xp, area, addr);
    } else {
      if ((addr >= readonly_area->low) &&
          (addr < readonly_area->active)) {
        /* A store to the readonly section: quietly unprotect the
           containing page and let the store be retried. */
        UnProtectMemory((LogicalAddress)(truncate_to_power_of_2(addr,log2_page_size)),
                        page_size);
        return 0;
      }
    }
  }
  if (old_valence == TCR_STATE_LISP) {
    /* Unhandled fault taken while running lisp code: report it to
       lisp (CMAIN) before declining. */
    callback_for_trap(nrs_CMAIN.vcell, xp, (pc)xpPC(xp), SIGBUS, (natural)addr, is_write_fault(xp,info));
  }
  return -1;
}
---|
969 | |
---|
970 | |
---|
971 | |
---|
972 | |
---|
973 | |
---|
/* A write hit a stack's hard guard page: the thread cannot continue
   on this stack, so abandon the computation in progress.  Always
   returns -1 ("not recoverable in place"). */
OSStatus
do_hard_stack_overflow(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(area,addr)
#endif
  reset_lisp_process(xp);
  return -1;
}
---|
983 | |
---|
984 | extern area* |
---|
985 | allocate_vstack(natural useable); /* This is in "pmcl-kernel.c" */ |
---|
986 | |
---|
987 | extern area* |
---|
988 | allocate_tstack(natural useable); /* This is in "pmcl-kernel.c" */ |
---|
989 | |
---|
#ifdef EXTEND_VSTACK
/* Return true iff SPPTR is the control-stack frame belonging to some
   catch frame on this thread's active catch chain. */
Boolean
catch_frame_p(lisp_frame *spPtr)
{
  catch_frame *frame = (catch_frame *) untag(lisp_global(CATCH_TOP));

  while (frame != NULL) {
    if (((lisp_frame *) frame->csp) == spPtr) {
      return true;
    }
    frame = (catch_frame *) untag(frame->link);
  }
  return false;
}
#endif
---|
1004 | |
---|
1005 | Boolean |
---|
1006 | unwind_protect_cleanup_frame_p(lisp_frame *spPtr) |
---|
1007 | { |
---|
1008 | if ((spPtr->savevsp == (LispObj)NULL) || /* The frame to where the unwind-protect will return */ |
---|
1009 | (((spPtr->backlink)->savevsp) == (LispObj)NULL)) { /* The frame that returns to the kernel from the cleanup form */ |
---|
1010 | return true; |
---|
1011 | } else { |
---|
1012 | return false; |
---|
1013 | } |
---|
1014 | } |
---|
1015 | |
---|
1016 | Boolean |
---|
1017 | lexpr_entry_frame_p(lisp_frame *spPtr) |
---|
1018 | { |
---|
1019 | LispObj savelr = spPtr->savelr; |
---|
1020 | LispObj lexpr_return = (LispObj) lisp_global(LEXPR_RETURN); |
---|
1021 | LispObj lexpr_return1v = (LispObj) lisp_global(LEXPR_RETURN1V); |
---|
1022 | LispObj ret1valn = (LispObj) lisp_global(RET1VALN); |
---|
1023 | |
---|
1024 | return |
---|
1025 | (savelr == lexpr_return1v) || |
---|
1026 | (savelr == lexpr_return) || |
---|
1027 | ((savelr == ret1valn) && |
---|
1028 | (((spPtr->backlink)->savelr) == lexpr_return)); |
---|
1029 | } |
---|
1030 | |
---|
1031 | Boolean |
---|
1032 | lisp_frame_p(lisp_frame *spPtr) |
---|
1033 | { |
---|
1034 | LispObj savefn; |
---|
1035 | /* We can't just look at the size of the stack frame under the EABI |
---|
1036 | calling sequence, but that's the first thing to check. */ |
---|
1037 | if (((lisp_frame *) spPtr->backlink) != (spPtr+1)) { |
---|
1038 | return false; |
---|
1039 | } |
---|
1040 | savefn = spPtr->savefn; |
---|
1041 | return (savefn == 0) || (fulltag_of(savefn) == fulltag_misc); |
---|
1042 | |
---|
1043 | } |
---|
1044 | |
---|
1045 | |
---|
/* Count of frames whose backlink wasn't adjacent, noticed in
   find_non_catch_frame_from_xp; kept mostly as a breakpoint target. */
int ffcall_overflow_count = 0;
---|
1047 | |
---|
1048 | /* Find a frame that is neither a catch frame nor one of the |
---|
1049 | lexpr_entry frames We don't check for non-lisp frames here because |
---|
1050 | we'll always stop before we get there due to a dummy lisp frame |
---|
1051 | pushed by .SPcallback that masks out the foreign frames. The one |
---|
1052 | exception is that there is a non-lisp frame without a valid VSP |
---|
1053 | while in the process of ppc-ff-call. We recognize that because its |
---|
1054 | savelr is NIL. If the saved VSP itself is 0 or the savevsp in the |
---|
1055 | next frame is 0, then we're executing an unwind-protect cleanup |
---|
1056 | form, and the top stack frame belongs to its (no longer extant) |
---|
1057 | catch frame. */ |
---|
1058 | |
---|
#ifdef EXTEND_VSTACK
/* Walk up the control stack from XP's SP until we find a frame that
   is a plain lisp frame: not a ppc-ff-call transition frame, not an
   unwind-protect cleanup frame, not a catch frame, and not a lexpr
   entry frame. */
lisp_frame *
find_non_catch_frame_from_xp (ExceptionInformation *xp)
{
  lisp_frame *frame = (lisp_frame *) xpGPR(xp, sp);

  if ((((natural) frame) + sizeof(lisp_frame)) != ((natural) (frame->backlink))) {
    ffcall_overflow_count++;	/* This is mostly so I can breakpoint here */
  }
  while (!lisp_frame_p(frame) ||	/* In the process of ppc-ff-call */
         unwind_protect_cleanup_frame_p(frame) ||
         catch_frame_p(frame) ||
         lexpr_entry_frame_p(frame)) {
    frame = frame->backlink;
  }
  return frame;
}
#endif
---|
1076 | |
---|
#ifdef EXTEND_VSTACK
/* Return true iff any link of the DB_LINK (special-binding) chain
   lies within area A's bounds. */
Boolean
db_link_chain_in_area_p (area *a)
{
  LispObj *low_bound = (LispObj *) a->low;
  LispObj *high_bound = (LispObj *) a->high;
  LispObj *link;

  for (link = (LispObj *) lisp_global(DB_LINK);
       link != NULL;
       link = (LispObj *) *link) {
    if ((link >= low_bound) && (link < high_bound)) {
      return true;
    }
  }
  return false;
}
#endif
---|
1090 | |
---|
1091 | |
---|
1092 | |
---|
1093 | |
---|
1094 | /* Note: CURRENT_VS (CURRENT_TS) is always either the area containing |
---|
1095 | the current value of VSP (TSP) or an older area. */ |
---|
1096 | |
---|
1097 | OSStatus |
---|
1098 | do_vsp_overflow (ExceptionInformation *xp, BytePtr addr) |
---|
1099 | { |
---|
1100 | TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)); |
---|
1101 | area *a = tcr->vs_area; |
---|
1102 | protected_area_ptr vsp_soft = a->softprot; |
---|
1103 | unprotect_area(vsp_soft); |
---|
1104 | signal_stack_soft_overflow(xp,vsp); |
---|
1105 | return 0; |
---|
1106 | } |
---|
1107 | |
---|
1108 | |
---|
1109 | OSStatus |
---|
1110 | do_tsp_overflow (ExceptionInformation *xp, BytePtr addr) |
---|
1111 | { |
---|
1112 | TCR* tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)); |
---|
1113 | area *a = tcr->ts_area; |
---|
1114 | protected_area_ptr tsp_soft = a->softprot; |
---|
1115 | unprotect_area(tsp_soft); |
---|
1116 | signal_stack_soft_overflow(xp,tsp); |
---|
1117 | return 0; |
---|
1118 | } |
---|
1119 | |
---|
1120 | OSStatus |
---|
1121 | do_soft_stack_overflow(ExceptionInformation *xp, protected_area_ptr prot_area, BytePtr addr) |
---|
1122 | { |
---|
1123 | /* Trying to write into a guard page on the vstack or tstack. |
---|
1124 | Allocate a new stack segment, emulate stwu and stwux for the TSP, and |
---|
1125 | signal an error_stack_overflow condition. |
---|
1126 | */ |
---|
1127 | lisp_protection_kind which = prot_area->why; |
---|
1128 | Boolean on_TSP = (which == kTSPsoftguard); |
---|
1129 | |
---|
1130 | if (on_TSP) { |
---|
1131 | return do_tsp_overflow(xp, addr); |
---|
1132 | } else { |
---|
1133 | return do_vsp_overflow(xp, addr); |
---|
1134 | } |
---|
1135 | } |
---|
1136 | |
---|
/* A write fault hit a protected area we have no recovery strategy
   for: decline (return -1) and let the caller's error path run. */
OSStatus
do_spurious_wp_fault(ExceptionInformation *xp, protected_area_ptr area, BytePtr addr)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(xp,area,addr)
#endif
  return -1;
}
---|
1145 | |
---|
1146 | |
---|
1147 | /* |
---|
1148 | We have a couple of choices here. We can simply unprotect the page |
---|
1149 | and let the store happen on return, or we can try to emulate writes |
---|
1150 | that we know will involve an intergenerational reference. Both are |
---|
1151 | correct as far as EGC constraints go, but the latter approach is |
---|
1152 | probably more efficient. (This only matters in the case where the |
---|
1153 | GC runs after this exception handler returns but before the write |
---|
1154 | actually happens. If we didn't emulate node stores here, the EGC |
---|
1155 | would scan the newly-written page, find nothing interesting, and |
---|
1156 | run to completion. This thread will try the write again after it |
---|
1157 | resumes, the page'll be re-protected, and we'll have taken this |
---|
1158 | fault twice. The whole scenario shouldn't happen very often, but |
---|
1159 | (having already taken a fault and committed to an mprotect syscall) |
---|
1160 | we might as well emulate stores involving intergenerational references, |
---|
1161 | since they're pretty easy to identify. |
---|
1162 | |
---|
1163 | Note that cases involving two or more threads writing to the same |
---|
1164 | page (before either of them can run this handler) is benign: one |
---|
1165 | invocation of the handler will just unprotect an unprotected page in |
---|
1166 | that case. |
---|
1167 | |
---|
1168 | If there are GCs (or any other suspensions of the thread between |
---|
1169 | the time that the write fault was detected and the time that the |
---|
1170 | exception lock is obtained) none of this stuff happens. |
---|
1171 | */ |
---|
1172 | |
---|
1173 | /* |
---|
1174 | Return true (and emulate the instruction) iff: |
---|
1175 | a) the fault was caused by an "stw rs,d(ra)" or "stwx rs,ra,rb" |
---|
1176 | instruction. |
---|
1177 | b) RS is a node register (>= fn) |
---|
1178 | c) RS is tagged as a cons or vector |
---|
1179 | d) RS is in some ephemeral generation. |
---|
1180 | This is slightly conservative, since RS may be no younger than the |
---|
1181 | EA being written to. |
---|
1182 | */ |
---|
/* Return true (and emulate the faulting store) iff the instruction
   at XP's PC is a word-aligned stw/stwx of a node register holding
   an ephemeral cons or misc object; see the block comment above for
   the full rationale. */
Boolean
is_ephemeral_node_store(ExceptionInformation *xp, BytePtr ea)
{
  /* Only word-aligned effective addresses can be node stores. */
  if (((ptr_to_lispobj(ea)) & 3) == 0) {
    opcode instr = *xpPC(xp);

    if (X_opcode_p(instr,major_opcode_X31,minor_opcode_STWX) ||
        major_opcode_p(instr, major_opcode_STW)) {
      LispObj
        rs = RS_field(instr),
        rsval = xpGPR(xp,rs),
        tag = fulltag_of(rsval);

      /* RS must be a node register (>= fn). */
      if (rs >= fn) {
        /* The stored value must be a tagged cons or misc object ... */
        if ((tag == fulltag_misc) || (tag == fulltag_cons)) {
          /* ... lying in an ephemeral generation: above the tenured
             area but inside the active dynamic area. */
          if (((BytePtr)ptr_from_lispobj(rsval) > tenured_area->high) &&
              ((BytePtr)ptr_from_lispobj(rsval) < active_dynamic_area->high)) {
            /* Perform the store here so the EGC sees the reference
               even if it runs before the thread retries the write. */
            *(LispObj *)ea = rsval;
            return true;
          }
        }
      }
    }
  }
  return false;
}
---|
1209 | |
---|
1210 | |
---|
1211 | |
---|
1212 | |
---|
1213 | |
---|
1214 | |
---|
1215 | |
---|
/* SIGFPE handler: a floating-point operation raised an enabled FP
   exception.  Clear the FPSCR, re-enable FP exceptions, record the
   faulting FPSCR status in the TCR for lisp, and signal the error
   for the offending FPU instruction. */
OSStatus
handle_sigfpe(ExceptionInformation *xp, TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();


  /* Save the pre-trap FPSCR (minus reserved bits) so lisp can see
     which exception(s) occurred. */
  tcr->lisp_fpscr.words.l = xpFPSCR(xp) & ~_FPU_RESERVED;

  /* 'handle_fpux_binop' scans back from the specified PC until it finds an FPU
     operation; there's an FPU operation right at the PC, so tell it to start
     looking one word beyond */
  return handle_fpux_binop(xp, (pc)((natural)(xpPC(xp))+4));
}
---|
1230 | |
---|
1231 | |
---|
/* Assume AltiVec is available unless told otherwise.
   NOTE(review): nothing in this chunk ever clears this flag --
   confirm where (if anywhere) real hardware detection sets it. */
int
altivec_present = 1;
---|
1234 | |
---|
1235 | |
---|
1236 | /* This only tries to implement the "optional" fsqrt and fsqrts |
---|
1237 | instructions, which were generally implemented on IBM hardware |
---|
1238 | but generally not available on Motorola/Freescale systems. |
---|
1239 | */ |
---|
/* Emulate fsqrt/fsqrts when the hardware doesn't implement them (see
   the comment above).  Returns 0 if the instruction was emulated and
   the PC advanced; -1 for anything else. */
OSStatus
handle_unimplemented_instruction(ExceptionInformation *xp,
                                 opcode instruction,
                                 TCR *tcr)
{
  (void) zero_fpscr(tcr);
  enable_fp_exceptions();
  /* the rc bit (bit 0 in the instruction) is supposed to cause
     some FPSCR bits to be copied to CR1.  OpenMCL doesn't generate
     fsqrt. or fsqrts.
  */
  /* Major opcode 63 (double) or 59 (single), extended opcode 22
     (bits 1..5 of the low 6 bits), ignoring the rc bit. */
  if (((major_opcode_p(instruction,major_opcode_FPU_DOUBLE)) ||
       (major_opcode_p(instruction,major_opcode_FPU_SINGLE))) &&
      ((instruction & ((1 << 6) -2)) == (22<<1))) {
    double b, d, sqrt(double);

    b = xpFPR(xp,RB_field(instruction));
    d = sqrt(b);
    /* Merge the hardware FPSCR's status bits into the context's
       FPSCR so the emulated op's exception state is visible. */
    xpFPSCR(xp) = ((xpFPSCR(xp) & ~_FPU_RESERVED) |
                   (get_fpscr() & _FPU_RESERVED));
    xpFPR(xp,RT_field(instruction)) = d;
    adjust_exception_pc(xp,4);
    return 0;
  }

  return -1;
}
---|
1267 | |
---|
1268 | OSStatus |
---|
1269 | PMCL_exception_handler(int xnum, |
---|
1270 | ExceptionInformation *xp, |
---|
1271 | TCR *tcr, |
---|
1272 | siginfo_t *info, |
---|
1273 | int old_valence) |
---|
1274 | { |
---|
1275 | OSStatus status = -1; |
---|
1276 | pc program_counter; |
---|
1277 | opcode instruction = 0; |
---|
1278 | |
---|
1279 | |
---|
1280 | program_counter = xpPC(xp); |
---|
1281 | |
---|
1282 | if ((xnum == SIGILL) | (xnum == SIGTRAP)) { |
---|
1283 | instruction = *program_counter; |
---|
1284 | } |
---|
1285 | |
---|
1286 | if (instruction == ALLOC_TRAP_INSTRUCTION) { |
---|
1287 | status = handle_alloc_trap(xp, tcr); |
---|
1288 | } else if ((xnum == SIGSEGV) || |
---|
1289 | (xnum == SIGBUS)) { |
---|
1290 | status = handle_protection_violation(xp, info, tcr, old_valence); |
---|
1291 | } else if (xnum == SIGFPE) { |
---|
1292 | status = handle_sigfpe(xp, tcr); |
---|
1293 | } else if ((xnum == SIGILL) || (xnum == SIGTRAP)) { |
---|
1294 | if (instruction == GC_TRAP_INSTRUCTION) { |
---|
1295 | status = handle_gc_trap(xp, tcr); |
---|
1296 | } else if (IS_UUO(instruction)) { |
---|
1297 | status = handle_uuo(xp, instruction, program_counter); |
---|
1298 | } else if (is_conditional_trap(instruction)) { |
---|
1299 | status = handle_trap(xp, instruction, program_counter, info); |
---|
1300 | } else { |
---|
1301 | status = handle_unimplemented_instruction(xp,instruction,tcr); |
---|
1302 | } |
---|
1303 | } else if (xnum == SIGNAL_FOR_PROCESS_INTERRUPT) { |
---|
1304 | tcr->interrupt_pending = 0; |
---|
1305 | callback_for_trap(nrs_CMAIN.vcell, xp, 0, TRI_instruction(TO_GT,nargs,0),0, 0); |
---|
1306 | status = 0; |
---|
1307 | } |
---|
1308 | |
---|
1309 | return status; |
---|
1310 | } |
---|
1311 | |
---|
/* Advance the context's saved PC by DELTA bytes.  xpPC() appears to
   be an lvalue of instruction-pointer type (4-byte elements), so
   adding (delta >> 2) elements moves it delta bytes; DELTA must be a
   multiple of 4. */
void
adjust_exception_pc(ExceptionInformation *xp, int delta)
{
  xpPC(xp) += (delta >> 2);
}
---|
1317 | |
---|
1318 | |
---|
1319 | /* |
---|
1320 | This wants to scan backwards until "where" points to an instruction |
---|
1321 | whose major opcode is either 63 (double-float) or 59 (single-float) |
---|
1322 | */ |
---|
1323 | |
---|
/* Scan backwards from WHERE (at most TRAP_LOOKUP_TRIES words,
   stopping if a code-vector header is reached) for the FPU operation
   that raised the exception; signal the corresponding lisp error and
   back the PC up to that instruction with FPSCR exceptions cleared. */
OSStatus
handle_fpux_binop(ExceptionInformation *xp, pc where)
{
  OSStatus err;
  opcode *there = (opcode *) where, instr, errnum = 0;
  int i = TRAP_LOOKUP_TRIES, delta = 0;

  while (i--) {
    instr = *--there;
    delta -= 4;
    if (codevec_hdr_p(instr)) {
      /* Ran off the front of the code vector: decline. */
      return -1;
    }
    if (major_opcode_p(instr, major_opcode_FPU_DOUBLE)) {
      errnum = error_FPU_exception_double;
      break;
    }

    if (major_opcode_p(instr, major_opcode_FPU_SINGLE)) {
      errnum = error_FPU_exception_short;
      break;
    }
  }

  /* NOTE(review): if the scan exhausts TRAP_LOOKUP_TRIES without a
     match, we fall through with errnum == 0 and delta ==
     -4*TRAP_LOOKUP_TRIES instead of declining -- confirm intended. */
  err = handle_error(xp, errnum, rcontext, 0, there);
  /* Yeah, we said "non-continuable".  In case we ever change that ... */

  /* Resume at the offending FPU instruction, with the FPSCR's
     exception bits cleared. */
  adjust_exception_pc(xp, delta);
  xpFPSCR(xp) &= 0x03fff;

  return err;

}
---|
1357 | |
---|
/* Handle a UUO (a reserved "unimplemented user operation" encoding).
   THE_UUO packs a minor opcode, a register field, and a 10-bit error
   number; dispatch on the minor opcode.  Returns 0 if handled, -1
   otherwise; on success the PC is advanced past the UUO unless the
   case has already arranged the PC itself (bump == 0). */
OSStatus
handle_uuo(ExceptionInformation *xp, opcode the_uuo, pc where)
{
#ifdef SUPPORT_PRAGMA_UNUSED
#pragma unused(where)
#endif
  /* Decode: register operand in bits 11..15, error number in bits
     16..25. */
  unsigned
    minor = UUO_MINOR(the_uuo),
    rb = 0x1f & (the_uuo >> 11),
    errnum = 0x3ff & (the_uuo >> 16);

  OSStatus status = -1;

  int bump = 4;                 /* bytes to advance the PC on success */

  switch (minor) {

  case UUO_ZERO_FPSCR:
    status = 0;
    xpFPSCR(xp) = 0;
    break;


  case UUO_INTERR:
    /* Thread-control requests encoded as "errors"; arg_z holds the
       target TCR.  Results are returned to lisp in imm0. */
    {
      TCR * target = (TCR *)xpGPR(xp,arg_z);
      status = 0;
      switch (errnum) {
      case error_propagate_suspend:
        break;
      case error_interrupt:
        xpGPR(xp,imm0) = (LispObj) raise_thread_interrupt(target);
        break;
      case error_suspend:
        xpGPR(xp,imm0) = (LispObj) lisp_suspend_tcr(target);
        break;
      case error_suspend_all:
        lisp_suspend_other_threads();
        break;
      case error_resume:
        xpGPR(xp,imm0) = (LispObj) lisp_resume_tcr(target);
        break;
      case error_resume_all:
        lisp_resume_other_threads();
        break;
      case error_kill:
        xpGPR(xp,imm0) = (LispObj)kill_tcr(target);
        break;
      case error_allocate_list:
        allocate_list(xp,get_tcr(true));
        break;
      default:
        /* A genuine (non-continuable) error: report it to lisp. */
        status = handle_error(xp, errnum, rb, 0, where);
        break;
      }
    }
    break;

  case UUO_INTCERR:
    /* Continuable error. */
    status = handle_error(xp, errnum, rb, 1, where);
    if (errnum == error_udf_call) {
      /* If lisp's returned from a continuable undefined-function call,
         it's put a code vector in the xp's PC.  Don't advance the
         PC ... */
      bump = 0;
    }
    break;

  case UUO_FPUX_BINOP:
    /* handle_fpux_binop adjusts the PC itself. */
    status = handle_fpux_binop(xp, where);
    bump = 0;
    break;

  default:
    status = -1;
    bump = 0;
  }

  if ((!status) && bump) {
    adjust_exception_pc(xp, bump);
  }
  return status;
}
---|
1441 | |
---|
/* If LISP_FUNCTION is a function object whose code vector contains
   WHERE, return that (tagged) code vector; otherwise return 0. */
natural
register_codevector_contains_pc (natural lisp_function, pc where)
{
  natural code_vector, size;

  if ((fulltag_of(lisp_function) == fulltag_misc) &&
      (header_subtag(header_of(lisp_function)) == subtag_function)) {
    code_vector = deref(lisp_function, 1);
    /* element count is in 32-bit words; convert to bytes */
    size = header_element_count(header_of(code_vector)) << 2;
    if ((untag(code_vector) < (natural)where) &&
        ((natural)where < (code_vector + size)))
      return(code_vector);
  }

  return(0);
}
---|
1458 | |
---|
1459 | /* Callback to lisp to handle a trap. Need to translate the |
---|
1460 | PC (where) into one of two forms of pairs: |
---|
1461 | |
---|
1462 | 1. If PC is in fn or nfn's code vector, use the register number |
---|
1463 | of fn or nfn and the index into that function's code vector. |
---|
1464 | 2. Otherwise use 0 and the pc itself |
---|
1465 | */ |
---|
/* See the comment above: translate WHERE into either (register
   number of fn/nfn, word index into its code vector) or (0, raw pc),
   then call back to lisp at CALLBACK_MACPTR with that pair and
   ARG1..ARG3. */
void
callback_for_trap (LispObj callback_macptr, ExceptionInformation *xp, pc where,
                   natural arg1, natural arg2, natural arg3)
{
  /* First try fn's code vector ... */
  natural code_vector = register_codevector_contains_pc(xpGPR(xp, fn), where);
  unsigned register_number = fn;
  natural index = (natural)where;

  if (code_vector == 0) {
    /* ... then nfn's. */
    register_number = nfn;
    code_vector = register_codevector_contains_pc(xpGPR(xp, nfn), where);
  }
  if (code_vector == 0)
    /* PC isn't in either function: pass register 0 and the raw PC. */
    register_number = 0;
  else
    /* Word index of the PC within the code vector's data. */
    index = ((natural)where - (code_vector + misc_data_offset)) >> 2;
  callback_to_lisp(callback_macptr, xp, register_number, index, arg1, arg2, arg3);
}
---|
1484 | |
---|
1485 | void |
---|
1486 | callback_to_lisp (LispObj callback_macptr, ExceptionInformation *xp, |
---|
1487 | natural arg1, natural arg2, natural arg3, natural arg4, natural arg5) |
---|
1488 | { |
---|
1489 | natural callback_ptr; |
---|
1490 | area *a; |
---|
1491 | |
---|
1492 | TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext)); |
---|
1493 | |
---|
1494 | /* Put the active stack pointer where .SPcallback expects it */ |
---|
1495 | a = tcr->cs_area; |
---|
1496 | a->active = (BytePtr) ptr_from_lispobj(xpGPR(xp, sp)); |
---|
1497 | |
---|
1498 | /* Copy globals from the exception frame to tcr */ |
---|
1499 | tcr->save_allocptr = (void *)ptr_from_lispobj(xpGPR(xp, allocptr)); |
---|
1500 | tcr->save_allocbase = (void *)ptr_from_lispobj(xpGPR(xp, allocbase)); |
---|
1501 | tcr->save_vsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, vsp)); |
---|
1502 | tcr->save_tsp = (LispObj*) ptr_from_lispobj(xpGPR(xp, tsp)); |
---|
1503 | |
---|
1504 | |
---|
1505 | |
---|
1506 | /* Call back. |
---|
1507 | Lisp will handle trampolining through some code that |
---|
1508 | will push lr/fn & pc/nfn stack frames for backtrace. |
---|
1509 | */ |
---|
1510 | callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address; |
---|
1511 | #ifdef DEBUG |
---|
1512 | fprintf(stderr, "0x%x releasing exception lock for callback\n", tcr); |
---|
1513 | #endif |
---|
1514 | UNLOCK(lisp_global(EXCEPTION_LOCK), tcr); |
---|
1515 | ((void (*)())callback_ptr) (xp, arg1, arg2, arg3, arg4, arg5); |
---|
1516 | LOCK(lisp_global(EXCEPTION_LOCK), tcr); |
---|
1517 | #ifdef DEBUG |
---|
1518 | fprintf(stderr, "0x%x acquired exception lock after callback\n", tcr); |
---|
1519 | #endif |
---|
1520 | |
---|
1521 | |
---|
1522 | |
---|
1523 | /* Copy GC registers back into exception frame */ |
---|
1524 | xpGPR(xp, allocbase) = (LispObj) ptr_to_lispobj(tcr->save_allocbase); |
---|
1525 | xpGPR(xp, allocptr) = (LispObj) ptr_to_lispobj(tcr->save_allocptr); |
---|
1526 | } |
---|
1527 | |
---|
1528 | area * |
---|
1529 | allocate_no_stack (natural size) |
---|
1530 | { |
---|
1531 | #ifdef SUPPORT_PRAGMA_UNUSED |
---|
1532 | #pragma unused(size) |
---|
1533 | #endif |
---|
1534 | |
---|
1535 | return (area *) NULL; |
---|
1536 | } |
---|
1537 | |
---|
1538 | |
---|
1539 | |
---|
1540 | |
---|
1541 | |
---|
1542 | |
---|
1543 | /* callback to (symbol-value cmain) if it is a macptr, |
---|
1544 | otherwise report cause and function name to console. |
---|
1545 | Returns noErr if exception handled OK */ |
---|
OSStatus
handle_trap(ExceptionInformation *xp, opcode the_trap, pc where, siginfo_t *info)
{
  LispObj cmain = nrs_CMAIN.vcell;
  TCR *tcr = TCR_FROM_TSD(xpGPR(xp, rcontext));

  /* If we got here, "the_trap" is either a TRI or a TR instruction.
     It's a TRI instruction iff its major opcode is major_opcode_TRI. */

  /* If it's a "trllt" instruction where RA == sp, it's a failed
     control stack overflow check.  In that case:

     a) We're in "yellow zone" mode if the value of the
     lisp_global(CS_OVERFLOW_LIMIT) is CS_OVERFLOW_FORCE_LIMIT.  If
     we're not already in yellow zone mode, attempt to create a new
     thread and continue execution on its stack.  If that fails, call
     signal_stack_soft_overflow to enter yellow zone mode and signal
     the condition to lisp.

     b) If we're already in "yellow zone" mode, then:

     1) if the SP is past the current control-stack area's hard
     overflow limit, signal a "hard" stack overflow error (e.g., throw
     to toplevel as quickly as possible.  If we aren't in "yellow zone"
     mode, attempt to continue on another thread first.

     2) if SP is "well" (> 4K) below its soft overflow limit, set
     lisp_global(CS_OVERFLOW_LIMIT) to its "real" value.  We're out of
     "yellow zone mode" in this case.

     3) Otherwise, do nothing.  We'll continue to trap every time
     something gets pushed on the control stack, so we should try to
     detect and handle all of these cases fairly quickly.  Of course,
     the trap overhead is going to slow things down quite a bit.
  */

  if (X_opcode_p(the_trap,major_opcode_X31,minor_opcode_TR) &&
      (RA_field(the_trap) == sp) &&
      (TO_field(the_trap) == TO_LO)) {
    area
      *CS_area = tcr->cs_area,
      *VS_area = tcr->vs_area;

    natural
      current_SP = xpGPR(xp,sp),
      current_VSP = xpGPR(xp,vsp);

    if (current_SP < (natural) (CS_area->hardlimit)) {
      /* If we're not in soft overflow mode yet, assume that the
         user has set the soft overflow size very small and try to
         continue on another thread before throwing to toplevel */
      if ((tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT)) {
        reset_lisp_process(xp);
      }
    } else {
      if (tcr->cs_limit == CS_OVERFLOW_FORCE_LIMIT) {
        /* If the control stack pointer is at least 4K away from its soft limit
           and the value stack pointer is at least 4K away from its soft limit,
           stop trapping.  Else keep trapping. */
        if ((current_SP > (natural) ((CS_area->softlimit)+4096)) &&
            (current_VSP > (natural) ((VS_area->softlimit)+4096))) {
          /* Leaving yellow zone: re-arm the VSP's soft guard page and
             restore the real control-stack soft limit. */
          protected_area_ptr vs_soft = VS_area->softprot;
          if (vs_soft->nprot == 0) {
            protect_area(vs_soft);
          }
          tcr->cs_limit = ptr_to_lispobj(CS_area->softlimit);
        }
      } else {
        /* Entering yellow zone: trap only at the hard limit from now
           on, and tell lisp about the soft overflow. */
        tcr->cs_limit = ptr_to_lispobj(CS_area->hardlimit);
        signal_stack_soft_overflow(xp, sp);
      }
    }

    adjust_exception_pc(xp, 4);
    return noErr;
  } else {
    if (the_trap == LISP_BREAK_INSTRUCTION) {
      /* Lisp breakpoint: resume at the saved LR, entering the kernel
         debugger with the (optional) message from r3. */
      char *message = (char *) ptr_from_lispobj(xpGPR(xp,3));
      set_xpPC(xp, xpLR(xp));
      if (message == NULL) {
        message = "Lisp Breakpoint";
      }
      lisp_Debugger(xp, info, debug_entry_dbg, false, message);
      return noErr;
    }
    if (the_trap == QUIET_LISP_BREAK_INSTRUCTION) {
      adjust_exception_pc(xp,4);
      lisp_Debugger(xp, info, debug_entry_dbg, false, "Lisp Breakpoint");
      return noErr;
    }
    /*
      twlle ra,rb is used to detect tlb overflow, where RA = current
      limit and RB = index to use.
    */
    if ((X_opcode_p(the_trap, 31, minor_opcode_TR)) &&
        (TO_field(the_trap) == (TO_LO|TO_EQ))) {
      if (extend_tcr_tlb(tcr, xp, RA_field(the_trap), RB_field(the_trap))) {
        return noErr;
      }
      return -1;
    }

    if ((fulltag_of(cmain) == fulltag_misc) &&
        (header_subtag(header_of(cmain)) == subtag_macptr)) {
      if (the_trap == TRI_instruction(TO_GT,nargs,0)) {
        /* reset interrupt_level, interrupt_pending */
        TCR_INTERRUPT_LEVEL(tcr) = 0;
        tcr->interrupt_pending = 0;
      }
#if 0
      fprintf(stderr, "About to do trap callback in 0x%x\n",tcr);
#endif
      /* cmain is established; report the trap to lisp and step past
         the trap instruction. */
      callback_for_trap(cmain, xp, where, (natural) the_trap, 0, 0);
      adjust_exception_pc(xp, 4);
      return(noErr);
    }
    return -1;
  }
}
---|
1665 | |
---|
1666 | |
---|
1667 | /* Look at up to TRAP_LOOKUP_TRIES instrs before trap instr for a pattern. |
---|
1668 | Stop if subtag_code_vector is encountered. */ |
---|
1669 | unsigned |
---|
1670 | scan_for_instr( unsigned target, unsigned mask, pc where ) |
---|
1671 | { |
---|
1672 | int i = TRAP_LOOKUP_TRIES; |
---|
1673 | |
---|
1674 | while( i-- ) { |
---|
1675 | unsigned instr = *(--where); |
---|
1676 | if ( codevec_hdr_p(instr) ) { |
---|
1677 | return 0; |
---|
1678 | } else if ( match_instr(instr, mask, target) ) { |
---|
1679 | return instr; |
---|
1680 | } |
---|
1681 | } |
---|
1682 | return 0; |
---|
1683 | } |
---|
1684 | |
---|
1685 | |
---|
/* Report MSG on stderr (flushed immediately) and return; execution
   continues in the caller. */
void non_fatal_error( char *msg )
{
  fprintf( stderr, "Non-fatal error: %s.\n", msg );
  fflush( stderr );
}
---|
1691 | |
---|
1692 | /* The main opcode. */ |
---|
1693 | |
---|
/* Return non-zero iff INSTR is a trap instruction that is
   conditional on register contents (i.e. might not fire); return 0
   for unconditional traps and for non-trap instructions, which are
   left for the debugger. */
int
is_conditional_trap(opcode instr)
{
  unsigned to = TO_field(instr);
  int is_tr = X_opcode_p(instr,major_opcode_X31,minor_opcode_TR);

#ifndef MACOS
  /* The two lisp breakpoint encodings are treated as "conditional"
     so they're routed to handle_trap. */
  if ((instr == LISP_BREAK_INSTRUCTION) ||
      (instr == QUIET_LISP_BREAK_INSTRUCTION)) {
    return 1;
  }
#endif
  if (is_tr || major_opcode_p(instr,major_opcode_TRI)) {
    /* A "tw/td" or "twi/tdi" instruction.  To be unconditional, the
       EQ bit must be set in the TO mask and either the register
       operands (if "tw") are the same or either both of the signed or
       both of the unsigned inequality bits must be set. */
    if (! (to & TO_EQ)) {
      return 1;			/* Won't trap on EQ: conditional */
    }
    if (is_tr && (RA_field(instr) == RB_field(instr))) {
      return 0;			/* Will trap on EQ, same regs: unconditional */
    }
    if (((to & (TO_LO|TO_HI)) == (TO_LO|TO_HI)) ||
        ((to & (TO_LT|TO_GT)) == (TO_LT|TO_GT))) {
      return 0;			/* Will trap on EQ and either (LT|GT) or (LO|HI) : unconditional */
    }
    return 1;			/* must be conditional */
  }
  return 0;			/* Not "tw/td" or "twi/tdi".  Let
				   debugger have it */
}
---|
1726 | |
---|
1727 | OSStatus |
---|
1728 | handle_error(ExceptionInformation *xp, unsigned errnum, unsigned rb, unsigned continuable, pc where) |
---|
1729 | { |
---|
1730 | LispObj errdisp = nrs_ERRDISP.vcell; |
---|
1731 | |
---|
1732 | if ((fulltag_of(errdisp) == fulltag_misc) && |
---|
1733 | (header_subtag(header_of(errdisp)) == subtag_macptr)) { |
---|
1734 | /* errdisp is a macptr, we can call back to lisp */ |
---|
1735 | callback_for_trap(errdisp, xp, where, errnum, rb, continuable); |
---|
1736 | return(0); |
---|
1737 | } |
---|
1738 | |
---|
1739 | return(-1); |
---|
1740 | } |
---|
1741 | |
---|
1742 | |
---|
/*
  Current thread has all signals masked.  Before unmasking them,
  make it appear that the current thread has been suspended.
  (This is to handle the case where another thread is trying
  to GC before this thread is able to seize the exception lock.)
*/
---|
/*
  Record this exception context in the TCR and mark the thread as
  waiting for the exception lock BEFORE unmasking signals, so that a
  suspending thread (e.g. the GC) sees a consistent state.  Returns
  the thread's previous valence so the caller can restore it later.
*/
int
prepare_to_wait_for_exception_lock(TCR *tcr, ExceptionInformation *context)
{
  int old_valence = tcr->valence;

  tcr->pending_exception_context = context;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;

  /* Only now is it safe to let other signals in. */
  ALLOW_EXCEPTIONS(context);
  return old_valence;
}
---|
1760 | |
---|
/*
  Block until the global exception lock is available, then push this
  exception context onto the TCR's xframe list (so the GC can find
  and update it) and mark the thread "foreign" - i.e. safe to
  suspend at an arbitrary point.  The order matters: the context
  must be reachable via tcr->xframe before the valence changes.
*/
void
wait_for_exception_lock_in_handler(TCR *tcr, 
				   ExceptionInformation *context,
				   xframe_list *xf)
{

  LOCK(lisp_global(EXCEPTION_LOCK), tcr);
#ifdef DEBUG
  fprintf(stderr, "0x%x has exception lock\n", tcr);
#endif
  xf->curr = context;
  xf->prev = tcr->xframe;
  tcr->xframe = xf;
  tcr->pending_exception_context = NULL;
  tcr->valence = TCR_STATE_FOREIGN;
}
---|
1777 | |
---|
/*
  Undo wait_for_exception_lock_in_handler: pop the top frame off the
  TCR's xframe list (its context becomes the pending exception
  context again), mark the thread as returning from an exception,
  and release the global exception lock.
*/
void
unlock_exception_lock_in_handler(TCR *tcr)
{
  tcr->pending_exception_context = tcr->xframe->curr;
  tcr->xframe = tcr->xframe->prev;
  tcr->valence = TCR_STATE_EXCEPTION_RETURN;
#ifdef DEBUG
  fprintf(stderr, "0x%x releasing exception lock\n", tcr);
#endif
  UNLOCK(lisp_global(EXCEPTION_LOCK),tcr);
}
---|
1789 | |
---|
1790 | /* |
---|
1791 | If an interrupt is pending on exception exit, try to ensure |
---|
1792 | that the thread sees it as soon as it's able to run. |
---|
1793 | */ |
---|
1794 | void |
---|
1795 | raise_pending_interrupt(TCR *tcr) |
---|
1796 | { |
---|
1797 | if (TCR_INTERRUPT_LEVEL(tcr) > 0) { |
---|
1798 | pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), SIGNAL_FOR_PROCESS_INTERRUPT); |
---|
1799 | } |
---|
1800 | } |
---|
1801 | |
---|
1802 | void |
---|
1803 | exit_signal_handler(TCR *tcr, int old_valence) |
---|
1804 | { |
---|
1805 | sigset_t mask; |
---|
1806 | sigfillset(&mask); |
---|
1807 | |
---|
1808 | pthread_sigmask(SIG_SETMASK,&mask, NULL); |
---|
1809 | tcr->valence = old_valence; |
---|
1810 | tcr->pending_exception_context = NULL; |
---|
1811 | } |
---|
1812 | |
---|
1813 | |
---|
/*
  Common handler for synchronous exceptions (SIGILL/SIGTRAP/SIGBUS/
  SIGSEGV/SIGFPE): serialize with the GC and other threads via the
  global exception lock, dispatch to PMCL_exception_handler, and drop
  into the kernel debugger if the exception can't be handled.

  NOTE(review): when use_mach_exception_handling is true, "tcr" and
  "old_valence" appear to be supplied by the Mach exception
  machinery; otherwise they're (re)computed here and the incoming
  values are ignored - confirm against the Mach callout path.
*/
void
signal_handler(int signum, siginfo_t *info, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  xframe_list xframe_link;

#ifdef DARWIN
  if (running_under_rosetta) {
    fprintf(stderr, "signal handler: signal = %d, pc = 0x%08x\n", signum, xpPC(context));
  }
#endif
  if (!use_mach_exception_handling) {

    tcr = (TCR *) get_interrupt_tcr(false);

    /* The signal handler's entered with all signals (notably the
       thread_suspend signal) blocked.  Don't allow any other signals
       (notably the thread_suspend signal) to preempt us until we've
       set the TCR's xframe slot to include the current exception
       context.
    */

    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
  }

  /* A suspend request arrived while this exception was being taken:
     honor it now by re-raising the suspend signal at ourselves. */
  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    pthread_kill(pthread_self(), thread_suspend_signal);
  }


  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  if ((noErr != PMCL_exception_handler(signum, context, tcr, info, old_valence))) {
    /* Unhandled: describe the exception and offer the debugger. */
    char msg[512];
    snprintf(msg, sizeof(msg), "Unhandled exception %d at 0x%lx, context->regs at #x%lx", signum, xpPC(context), (natural)xpGPRvector(context));
    if (lisp_Debugger(context, info, signum, false, msg)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    }
  }

  unlock_exception_lock_in_handler(tcr);

  /* This thread now looks like a thread that was suspended while
     executing lisp code.  If some other thread gets the exception
     lock and GCs, the context (this thread's suspend_context) will
     be updated.  (That's only of concern if it happens before we
     can return to the kernel/to the Mach exception handler).
  */
  if (!use_mach_exception_handling) {
    exit_signal_handler(tcr, old_valence);
    raise_pending_interrupt(tcr);
  }
}
---|
1866 | |
---|
1867 | /* |
---|
1868 | If it looks like we're in the middle of an atomic operation, make |
---|
1869 | it seem as if that operation is either complete or hasn't started |
---|
1870 | yet. |
---|
1871 | |
---|
1872 | The cases handled include: |
---|
1873 | |
---|
1874 | a) storing into a newly-allocated lisp frame on the stack. |
---|
1875 | b) marking a newly-allocated TSP frame as containing "raw" data. |
---|
1876 | c) consing: the GC has its own ideas about how this should be |
---|
1877 | handled, but other callers would be best advised to back |
---|
1878 | up or move forward, according to whether we're in the middle |
---|
1879 | of allocating a cons cell or allocating a uvector. |
---|
1880 | d) a STMW to the vsp |
---|
1881 | e) EGC write-barrier subprims. |
---|
1882 | */ |
---|
1883 | |
---|
1884 | extern opcode |
---|
1885 | egc_write_barrier_start, |
---|
1886 | egc_write_barrier_end, |
---|
1887 | egc_store_node_conditional, |
---|
1888 | egc_store_node_conditional_test, |
---|
1889 | egc_set_hash_key, |
---|
1890 | egc_gvset, |
---|
1891 | egc_rplaca, |
---|
1892 | egc_rplacd, |
---|
1893 | egc_set_hash_key_conditional, |
---|
1894 | egc_set_hash_key_conditional_test; |
---|
1895 | |
---|
1896 | |
---|
1897 | extern opcode ffcall_return_window, ffcall_return_window_end; |
---|
1898 | |
---|
1899 | void |
---|
1900 | pc_luser_xp(ExceptionInformation *xp, TCR *tcr, signed_natural *alloc_disp) |
---|
1901 | { |
---|
1902 | pc program_counter = xpPC(xp); |
---|
1903 | opcode instr = *program_counter; |
---|
1904 | lisp_frame *frame = (lisp_frame *)ptr_from_lispobj(xpGPR(xp,sp)); |
---|
1905 | LispObj cur_allocptr = xpGPR(xp, allocptr); |
---|
1906 | int allocptr_tag = fulltag_of(cur_allocptr); |
---|
1907 | |
---|
1908 | |
---|
1909 | |
---|
1910 | if ((program_counter < &egc_write_barrier_end) && |
---|
1911 | (program_counter >= &egc_write_barrier_start)) { |
---|
1912 | LispObj *ea = 0, val = 0, root = 0; |
---|
1913 | bitvector refbits = (bitvector)(lisp_global(REFBITS)); |
---|
1914 | Boolean need_store = true, need_check_memo = true, need_memoize_root = false; |
---|
1915 | |
---|
1916 | if (program_counter >= &egc_set_hash_key_conditional) { |
---|
1917 | if ((program_counter < &egc_set_hash_key_conditional_test) || |
---|
1918 | ((program_counter == &egc_set_hash_key_conditional_test) && |
---|
1919 | (! (xpCCR(xp) & 0x20000000)))) { |
---|
1920 | return; |
---|
1921 | } |
---|
1922 | need_store = false; |
---|
1923 | root = xpGPR(xp,arg_x); |
---|
1924 | ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset); |
---|
1925 | need_memoize_root = true; |
---|
1926 | } else if (program_counter >= &egc_store_node_conditional) { |
---|
1927 | if ((program_counter < &egc_store_node_conditional_test) || |
---|
1928 | ((program_counter == &egc_store_node_conditional_test) && |
---|
1929 | (! (xpCCR(xp) & 0x20000000)))) { |
---|
1930 | /* The conditional store either hasn't been attempted yet, or |
---|
1931 | has failed. No need to adjust the PC, or do memoization. */ |
---|
1932 | return; |
---|
1933 | } |
---|
1934 | ea = (LispObj*)(xpGPR(xp,arg_x) + xpGPR(xp,imm4)); |
---|
1935 | xpGPR(xp,arg_z) = t_value; |
---|
1936 | need_store = false; |
---|
1937 | } else if (program_counter >= &egc_set_hash_key) { |
---|
1938 | root = xpGPR(xp,arg_x); |
---|
1939 | val = xpGPR(xp,arg_z); |
---|
1940 | ea = (LispObj *) (root+xpGPR(xp,arg_y)+misc_data_offset); |
---|
1941 | need_memoize_root = true; |
---|
1942 | } else if (program_counter >= &egc_gvset) { |
---|
1943 | ea = (LispObj *) (xpGPR(xp,arg_x)+xpGPR(xp,arg_y)+misc_data_offset); |
---|
1944 | val = xpGPR(xp,arg_z); |
---|
1945 | } else if (program_counter >= &egc_rplacd) { |
---|
1946 | ea = (LispObj *) untag(xpGPR(xp,arg_y)); |
---|
1947 | val = xpGPR(xp,arg_z); |
---|
1948 | } else { /* egc_rplaca */ |
---|
1949 | ea = ((LispObj *) untag(xpGPR(xp,arg_y)))+1; |
---|
1950 | val = xpGPR(xp,arg_z); |
---|
1951 | } |
---|
1952 | if (need_store) { |
---|
1953 | *ea = val; |
---|
1954 | } |
---|
1955 | if (need_check_memo) { |
---|
1956 | natural bitnumber = area_dnode(ea, lisp_global(HEAP_START)); |
---|
1957 | if ((bitnumber < lisp_global(OLDSPACE_DNODE_COUNT)) && |
---|
1958 | ((LispObj)ea < val)) { |
---|
1959 | atomic_set_bit(refbits, bitnumber); |
---|
1960 | if (need_memoize_root) { |
---|
1961 | bitnumber = area_dnode(root, lisp_global(HEAP_START)); |
---|
1962 | atomic_set_bit(refbits, bitnumber); |
---|
1963 | } |
---|
1964 | } |
---|
1965 | } |
---|
1966 | set_xpPC(xp, xpLR(xp)); |
---|
1967 | return; |
---|
1968 | } |
---|
1969 | |
---|
1970 | |
---|
1971 | if (instr == MARK_TSP_FRAME_INSTRUCTION) { |
---|
1972 | LispObj tsp_val = xpGPR(xp,tsp); |
---|
1973 | |
---|
1974 | ((LispObj *)ptr_from_lispobj(tsp_val))[1] = tsp_val; |
---|
1975 | adjust_exception_pc(xp, 4); |
---|
1976 | return; |
---|
1977 | } |
---|
1978 | |
---|
1979 | if (frame->backlink == (frame+1)) { |
---|
1980 | if ( |
---|
1981 | #ifdef PPC64 |
---|
1982 | (major_opcode_p(instr, major_opcode_DS_STORE64)) && |
---|
1983 | (DS_VARIANT_FIELD(instr) == DS_STORE64_VARIANT_STD) && |
---|
1984 | #else |
---|
1985 | (major_opcode_p(instr, major_opcode_STW)) && |
---|
1986 | #endif |
---|
1987 | (RA_field(instr) == sp) && |
---|
1988 | /* There are a few places in the runtime that store into |
---|
1989 | a previously-allocated frame atop the stack when |
---|
1990 | throwing values around. We only care about the case |
---|
1991 | where the frame was newly allocated, in which case |
---|
1992 | there must have been a CREATE_LISP_FRAME_INSTRUCTION |
---|
1993 | a few instructions before the current program counter. |
---|
1994 | (The whole point here is that a newly allocated frame |
---|
1995 | might contain random values that we don't want the |
---|
1996 | GC to see; a previously allocated frame should already |
---|
1997 | be completely initialized.) |
---|
1998 | */ |
---|
1999 | ((program_counter[-1] == CREATE_LISP_FRAME_INSTRUCTION) || |
---|
2000 | (program_counter[-2] == CREATE_LISP_FRAME_INSTRUCTION) || |
---|
2001 | (program_counter[-3] == CREATE_LISP_FRAME_INSTRUCTION))) { |
---|
2002 | #ifdef PPC64 |
---|
2003 | int disp = DS_field(instr); |
---|
2004 | #else |
---|
2005 | int disp = D_field(instr); |
---|
2006 | #endif |
---|
2007 | |
---|
2008 | |
---|
2009 | if (disp < (4*node_size)) { |
---|
2010 | #if 0 |
---|
2011 | fprintf(stderr, "pc-luser: finish SP frame in 0x%x, disp = %d\n",tcr, disp); |
---|
2012 | #endif |
---|
2013 | frame->savevsp = 0; |
---|
2014 | if (disp < (3*node_size)) { |
---|
2015 | frame->savelr = 0; |
---|
2016 | if (disp == node_size) { |
---|
2017 | frame->savefn = 0; |
---|
2018 | } |
---|
2019 | } |
---|
2020 | } |
---|
2021 | return; |
---|
2022 | } |
---|
2023 | } |
---|
2024 | |
---|
2025 | if (allocptr_tag != tag_fixnum) { |
---|
2026 | signed_natural disp = allocptr_displacement(xp); |
---|
2027 | |
---|
2028 | if (disp) { |
---|
2029 | /* Being architecturally "at" the alloc trap doesn't tell |
---|
2030 | us much (in particular, it doesn't tell us whether |
---|
2031 | or not the thread has committed to taking the trap |
---|
2032 | and is waiting for the exception lock (or waiting |
---|
2033 | for the Mach exception thread to tell it how bad |
---|
2034 | things are) or is about to execute a conditional |
---|
2035 | trap. |
---|
2036 | Regardless of which case applies, we want the |
---|
2037 | other thread to take (or finish taking) the |
---|
2038 | trap, and we don't want it to consider its |
---|
2039 | current allocptr to be valid. |
---|
2040 | The difference between this case (suspend other |
---|
2041 | thread for GC) and the previous case (suspend |
---|
2042 | current thread for interrupt) is solely a |
---|
2043 | matter of what happens after we leave this |
---|
2044 | function: some non-current thread will stay |
---|
2045 | suspended until the GC finishes, then take |
---|
2046 | (or start processing) the alloc trap. The |
---|
2047 | current thread will go off and do PROCESS-INTERRUPT |
---|
2048 | or something, and may return from the interrupt |
---|
2049 | and need to finish the allocation that got interrupted. |
---|
2050 | */ |
---|
2051 | |
---|
2052 | if (alloc_disp) { |
---|
2053 | *alloc_disp = disp; |
---|
2054 | xpGPR(xp,allocptr) += disp; |
---|
2055 | /* Leave the PC at the alloc trap. When the interrupt |
---|
2056 | handler returns, it'll decrement allocptr by disp |
---|
2057 | and the trap may or may not be taken. |
---|
2058 | */ |
---|
2059 | } else { |
---|
2060 | update_bytes_allocated(tcr, (void *) ptr_from_lispobj(cur_allocptr + disp)); |
---|
2061 | xpGPR(xp, allocbase) = VOID_ALLOCPTR; |
---|
2062 | xpGPR(xp, allocptr) = VOID_ALLOCPTR - disp; |
---|
2063 | } |
---|
2064 | } else { |
---|
2065 | #ifdef DEBUG |
---|
2066 | fprintf(stderr, "tcr 0x%x is past alloc trap, finishing alloc at 0x%x\n", tcr, xpGPR(xp,allocptr)); |
---|
2067 | #endif |
---|
2068 | /* If we're already past the alloc_trap, finish allocating |
---|
2069 | the object. */ |
---|
2070 | if (allocptr_tag == fulltag_cons) { |
---|
2071 | finish_allocating_cons(xp); |
---|
2072 | #ifdef DEBUG |
---|
2073 | fprintf(stderr, "finish allocating cons in TCR = #x%x\n", |
---|
2074 | tcr); |
---|
2075 | #endif |
---|
2076 | } else { |
---|
2077 | if (allocptr_tag == fulltag_misc) { |
---|
2078 | #ifdef DEBUG |
---|
2079 | fprintf(stderr, "finish allocating uvector in TCR = #x%x\n", |
---|
2080 | tcr); |
---|
2081 | #endif |
---|
2082 | finish_allocating_uvector(xp); |
---|
2083 | } else { |
---|
2084 | Bug(xp, "what's being allocated here ?"); |
---|
2085 | } |
---|
2086 | } |
---|
2087 | /* Whatever we finished allocating, reset allocptr/allocbase to |
---|
2088 | VOID_ALLOCPTR */ |
---|
2089 | xpGPR(xp,allocptr) = xpGPR(xp,allocbase) = VOID_ALLOCPTR; |
---|
2090 | } |
---|
2091 | return; |
---|
2092 | } |
---|
2093 | |
---|
2094 | if ((instr & INIT_CATCH_FRAME_MASK) == INIT_CATCH_FRAME_INSTRUCTION) { |
---|
2095 | LispObj *frame = ptr_from_lispobj(untag(xpGPR(xp, nargs))); |
---|
2096 | int idx = ((int)((short)(D_field(instr))+fulltag_misc))>>fixnumshift; |
---|
2097 | #if 0 |
---|
2098 | fprintf(stderr, "pc-luser: CATCH frame in 0x%x, idx = %d\n",tcr, idx); |
---|
2099 | #endif |
---|
2100 | |
---|
2101 | for (;idx < sizeof(catch_frame)/sizeof(LispObj); idx++) { |
---|
2102 | deref(frame,idx) = 0; |
---|
2103 | } |
---|
2104 | ((LispObj *)(xpGPR(xp, tsp)))[1] = 0; |
---|
2105 | return; |
---|
2106 | } |
---|
2107 | |
---|
2108 | #ifndef PC64 |
---|
2109 | if ((major_opcode_p(instr, 47)) && /* 47 = stmw */ |
---|
2110 | (RA_field(instr) == vsp)) { |
---|
2111 | int r; |
---|
2112 | LispObj *vspptr = ptr_from_lispobj(xpGPR(xp,vsp)); |
---|
2113 | |
---|
2114 | for (r = RS_field(instr); r <= 31; r++) { |
---|
2115 | *vspptr++ = xpGPR(xp,r); |
---|
2116 | } |
---|
2117 | adjust_exception_pc(xp, 4); |
---|
2118 | } |
---|
2119 | #endif |
---|
2120 | } |
---|
2121 | |
---|
/*
  Handler for SIGNAL_FOR_PROCESS_INTERRUPT.  If interrupts are
  disabled (TCR_INTERRUPT_LEVEL < 0), just record that one's pending.
  Otherwise, if it's safe (in lisp code and not unwinding), back any
  in-progress allocation out of the context, take the exception lock,
  and dispatch to lisp via PMCL_exception_handler.
*/
void
interrupt_handler (int signum, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(false);
  if (tcr) {
    if (TCR_INTERRUPT_LEVEL(tcr) < 0) {
      /* Interrupts disabled: just remember that one is pending. */
      tcr->interrupt_pending = 1 << fixnumshift;
    } else {
      LispObj cmain = nrs_CMAIN.vcell;

      if ((fulltag_of(cmain) == fulltag_misc) &&
	  (header_subtag(header_of(cmain)) == subtag_macptr)) {
	/*
	   This thread can (allegedly) take an interrupt now.
	   It's tricky to do that if we're executing
	   foreign code (especially Linuxthreads code, much
	   of which isn't reentrant.)
           If we're unwinding the stack, we also want to defer
           the interrupt.
	*/
	if ((tcr->valence != TCR_STATE_LISP) ||
	    (tcr->unwinding != 0)) {
	  TCR_INTERRUPT_LEVEL(tcr) = (1 << fixnumshift);
	} else {
	  xframe_list xframe_link;
	  int old_valence;
	  signed_natural disp=0;

	  /* Make any half-finished atomic operation in the context
	     look complete/not-started; disp records an allocptr
	     adjustment to be undone below. */
	  pc_luser_xp(context, tcr, &disp);
	  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
	  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
#ifdef DEBUG
	  fprintf(stderr, "[0x%x acquired exception lock for interrupt]\n",tcr);
#endif
	  PMCL_exception_handler(signum, context, tcr, info, old_valence);
	  if (disp) {
	    /* Undo the allocptr bump pc_luser_xp applied above. */
	    xpGPR(context,allocptr) -= disp;
	  }
	  unlock_exception_lock_in_handler(tcr);
#ifdef DEBUG
	  fprintf(stderr, "[0x%x released exception lock for interrupt]\n",tcr);
#endif
	  exit_signal_handler(tcr, old_valence);
	}
      }
    }
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}
---|
2173 | |
---|
2174 | |
---|
2175 | |
---|
2176 | void |
---|
2177 | install_signal_handler(int signo, void *handler) |
---|
2178 | { |
---|
2179 | struct sigaction sa; |
---|
2180 | |
---|
2181 | sa.sa_sigaction = (void *)handler; |
---|
2182 | sigfillset(&sa.sa_mask); |
---|
2183 | sa.sa_flags = |
---|
2184 | 0 /* SA_RESTART */ |
---|
2185 | | SA_SIGINFO |
---|
2186 | #ifdef DARWIN |
---|
2187 | #ifdef PPC64 |
---|
2188 | | SA_64REGSET |
---|
2189 | #endif |
---|
2190 | #endif |
---|
2191 | ; |
---|
2192 | |
---|
2193 | sigaction(signo, &sa, NULL); |
---|
2194 | } |
---|
2195 | |
---|
/*
  Install the handlers for synchronous exceptions (skipped on Darwin
  when Mach exception handling is in use), then the process-interrupt
  handler, and ignore SIGPIPE.
*/
void
install_pmcl_exception_handlers()
{
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
#endif

  Boolean install_signal_handlers_for_exceptions =
#ifdef DARWIN
    !use_mach_exception_handling
#else
    true
#endif
    ;
  if (install_signal_handlers_for_exceptions) {
    extern int no_sigtrap;
    install_signal_handler(SIGILL, (void *)signal_handler);
    /* Leave SIGTRAP alone when no_sigtrap is 1 - presumably so an
       external debugger can own it; TODO confirm against the
       definition of no_sigtrap. */
    if (no_sigtrap != 1) {
      install_signal_handler(SIGTRAP, (void *)signal_handler);
    }
    install_signal_handler(SIGBUS, (void *)signal_handler);
    install_signal_handler(SIGSEGV, (void *)signal_handler);
    install_signal_handler(SIGFPE, (void *)signal_handler);
  }

  install_signal_handler(SIGNAL_FOR_PROCESS_INTERRUPT,
			 (void *)interrupt_handler);
  signal(SIGPIPE, SIG_IGN);
}
---|
2225 | |
---|
2226 | void |
---|
2227 | thread_kill_handler(int signum, siginfo_t info, ExceptionInformation *xp) |
---|
2228 | { |
---|
2229 | TCR *tcr = get_tcr(false); |
---|
2230 | area *a; |
---|
2231 | sigset_t mask; |
---|
2232 | |
---|
2233 | sigemptyset(&mask); |
---|
2234 | |
---|
2235 | if (tcr) { |
---|
2236 | tcr->valence = TCR_STATE_FOREIGN; |
---|
2237 | a = tcr->vs_area; |
---|
2238 | if (a) { |
---|
2239 | a->active = a->high; |
---|
2240 | } |
---|
2241 | a = tcr->ts_area; |
---|
2242 | if (a) { |
---|
2243 | a->active = a->high; |
---|
2244 | } |
---|
2245 | a = tcr->cs_area; |
---|
2246 | if (a) { |
---|
2247 | a->active = a->high; |
---|
2248 | } |
---|
2249 | } |
---|
2250 | |
---|
2251 | pthread_sigmask(SIG_SETMASK,&mask,NULL); |
---|
2252 | pthread_exit(NULL); |
---|
2253 | } |
---|
2254 | |
---|
2255 | void |
---|
2256 | thread_signal_setup() |
---|
2257 | { |
---|
2258 | thread_suspend_signal = SIG_SUSPEND_THREAD; |
---|
2259 | thread_kill_signal = SIG_KILL_THREAD; |
---|
2260 | |
---|
2261 | install_signal_handler(thread_suspend_signal, (void *) suspend_resume_handler); |
---|
2262 | install_signal_handler(thread_kill_signal, (void *)thread_kill_handler); |
---|
2263 | } |
---|
2264 | |
---|
2265 | |
---|
2266 | |
---|
2267 | void |
---|
2268 | unprotect_all_areas() |
---|
2269 | { |
---|
2270 | protected_area_ptr p; |
---|
2271 | |
---|
2272 | for(p = AllProtectedAreas, AllProtectedAreas = NULL; p; p = p->next) { |
---|
2273 | unprotect_area(p); |
---|
2274 | } |
---|
2275 | } |
---|
2276 | |
---|
2277 | /* |
---|
2278 | A binding subprim has just done "twlle limit_regno,idx_regno" and |
---|
2279 | the trap's been taken. Extend the tcr's tlb so that the index will |
---|
2280 | be in bounds and the new limit will be on a page boundary, filling |
---|
2281 | in the new page(s) with 'no_thread_local_binding_marker'. Update |
---|
2282 | the tcr fields and the registers in the xp and return true if this |
---|
2283 | all works, false otherwise. |
---|
2284 | |
---|
2285 | Note that the tlb was allocated via malloc, so realloc can do some |
---|
2286 | of the hard work. |
---|
2287 | */ |
---|
/*
  See the comment above: grow the TCR's thread-local-binding vector
  so INDEX is in bounds, initialize the new portion, and update both
  the TCR and the trapping thread's limit register.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
	       ExceptionInformation *xp,
	       unsigned limit_regno,
	       unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    /* round up to the next 4K (page) boundary */
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    /* realloc failed; the old tlb is still valid and unchanged. */
    return false;
  }

  /* Fill the newly-added bytes with the "no binding" marker. */
  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  xpGPR(xp, limit_regno) = new_limit;
  return true;
}
---|
2319 | |
---|
2320 | |
---|
2321 | |
---|
/* One-time startup initialization of the exception system: install
   the process's signal handlers. */
void
exception_init()
{
  install_pmcl_exception_handlers();
}
---|
2327 | |
---|
2328 | |
---|
2329 | |
---|
2330 | |
---|
2331 | |
---|
2332 | #ifdef DARWIN |
---|
2333 | |
---|
2334 | |
---|
2335 | #define TCR_FROM_EXCEPTION_PORT(p) ((TCR *)((natural)p)) |
---|
2336 | #define TCR_TO_EXCEPTION_PORT(tcr) ((mach_port_t)((natural)(tcr))) |
---|
2337 | |
---|
2338 | |
---|
2339 | |
---|
2340 | #define LISP_EXCEPTIONS_HANDLED_MASK \ |
---|
2341 | (EXC_MASK_SOFTWARE | EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC) |
---|
2342 | |
---|
2343 | /* (logcount LISP_EXCEPTIONS_HANDLED_MASK) */ |
---|
2344 | #define NUM_LISP_EXCEPTIONS_HANDLED 4 |
---|
2345 | |
---|
/* A thread's saved foreign Mach exception-port registrations:
   parallel arrays with one entry per handled exception, plus the
   count of valid entries.  Presumably captured before lisp installs
   its own ports so the originals can be restored - TODO confirm
   against the code that uses this (not visible in this chunk). */
typedef struct {
  int foreign_exception_port_count;
  exception_mask_t masks[NUM_LISP_EXCEPTIONS_HANDLED];
  mach_port_t ports[NUM_LISP_EXCEPTIONS_HANDLED];
  exception_behavior_t behaviors[NUM_LISP_EXCEPTIONS_HANDLED];
  thread_state_flavor_t flavors[NUM_LISP_EXCEPTIONS_HANDLED];
} MACH_foreign_exception_state;
---|
2353 | |
---|
2354 | |
---|
2355 | |
---|
2356 | |
---|
2357 | /* |
---|
  Mach's exception mechanism works a little better than its signal
  mechanism (and, not incidentally, it gets along with GDB a lot
  better.)
---|
2361 | |
---|
2362 | Initially, we install an exception handler to handle each native |
---|
2363 | thread's exceptions. This process involves creating a distinguished |
---|
2364 | thread which listens for kernel exception messages on a set of |
---|
2365 | 0 or more thread exception ports. As threads are created, they're |
---|
2366 | added to that port set; a thread's exception port is destroyed |
---|
2367 | (and therefore removed from the port set) when the thread exits. |
---|
2368 | |
---|
2369 | A few exceptions can be handled directly in the handler thread; |
---|
2370 | others require that we resume the user thread (and that the |
---|
2371 | exception thread resumes listening for exceptions.) The user |
---|
2372 | thread might eventually want to return to the original context |
---|
2373 | (possibly modified somewhat.) |
---|
2374 | |
---|
2375 | As it turns out, the simplest way to force the faulting user |
---|
2376 | thread to handle its own exceptions is to do pretty much what |
---|
  signal() does: the exception handling thread sets up a sigcontext
---|
2378 | on the user thread's stack and forces the user thread to resume |
---|
2379 | execution as if a signal handler had been called with that |
---|
2380 | context as an argument. We can use a distinguished UUO at a |
---|
2381 | distinguished address to do something like sigreturn(); that'll |
---|
2382 | have the effect of resuming the user thread's execution in |
---|
2383 | the (pseudo-) signal context. |
---|
2384 | |
---|
2385 | Since: |
---|
2386 | a) we have miles of code in C and in Lisp that knows how to |
---|
2387 | deal with Linux sigcontexts |
---|
2388 | b) Linux sigcontexts contain a little more useful information |
---|
2389 | (the DAR, DSISR, etc.) than their Darwin counterparts |
---|
2390 | c) we have to create a sigcontext ourselves when calling out |
---|
2391 | to the user thread: we aren't really generating a signal, just |
---|
2392 | leveraging existing signal-handling code. |
---|
2393 | |
---|
2394 | we create a Linux sigcontext struct. |
---|
2395 | |
---|
2396 | Simple ? Hopefully from the outside it is ... |
---|
2397 | |
---|
2398 | We want the process of passing a thread's own context to it to |
---|
2399 | appear to be atomic: in particular, we don't want the GC to suspend |
---|
2400 | a thread that's had an exception but has not yet had its user-level |
---|
2401 | exception handler called, and we don't want the thread's exception |
---|
2402 | context to be modified by a GC while the Mach handler thread is |
---|
2403 | copying it around. On Linux (and on Jaguar), we avoid this issue |
---|
2404 | because (a) the kernel sets up the user-level signal handler and |
---|
2405 | (b) the signal handler blocks signals (including the signal used |
---|
2406 | by the GC to suspend threads) until tcr->xframe is set up. |
---|
2407 | |
---|
2408 | The GC and the Mach server thread therefore contend for the lock |
---|
2409 | "mach_exception_lock". The Mach server thread holds the lock |
---|
2410 | when copying exception information between the kernel and the |
---|
2411 | user thread; the GC holds this lock during most of its execution |
---|
2412 | (delaying exception processing until it can be done without |
---|
2413 | GC interference.) |
---|
2414 | |
---|
2415 | */ |
---|
2416 | |
---|
2417 | #ifdef PPC64 |
---|
2418 | #define C_REDZONE_LEN 320 |
---|
2419 | #define C_STK_ALIGN 32 |
---|
2420 | #else |
---|
2421 | #define C_REDZONE_LEN 224 |
---|
2422 | #define C_STK_ALIGN 16 |
---|
2423 | #endif |
---|
2424 | #define C_PARAMSAVE_LEN 64 |
---|
2425 | #define C_LINKAGE_LEN 48 |
---|
2426 | |
---|
2427 | #define TRUNC_DOWN(a,b,c) (((((natural)a)-(b))/(c)) * (c)) |
---|
2428 | |
---|
2429 | void |
---|
2430 | fatal_mach_error(char *format, ...); |
---|
2431 | |
---|
/* Die with a fatal error if a Mach call failed.  Wrapped in
   do/while(0) so it behaves as a single statement (safe in unbraced
   if/else), with the status expression parenthesized in the test. */
#define MACH_CHECK_ERROR(context,x) do { if ((x) != KERN_SUCCESS) {fatal_mach_error("Mach error while %s : %d", context, x);} } while (0)
---|
2433 | |
---|
2434 | |
---|
/*
  Set the Mach thread THREAD's register state from the pseudo-signal
  context PSEUDOSIGCONTEXT, so that when the kernel resumes the
  thread it continues "in" that context.  Floating-point state is
  restored first, then the GPR/SPR thread state.
*/
void
restore_mach_thread_state(mach_port_t thread, ExceptionInformation *pseudosigcontext)
{
  kern_return_t kret;
  MCONTEXT_T mc = UC_MCONTEXT(pseudosigcontext);

  /* Set the thread's FP state from the pseudosigcontext */
  kret = thread_set_state(thread,
			  PPC_FLOAT_STATE,
			  (thread_state_t)&(mc->fs),
			  PPC_FLOAT_STATE_COUNT);

  MACH_CHECK_ERROR("setting thread FP state", kret);

  /* The thread'll be as good as new ... */
#ifdef PPC64
  kret = thread_set_state(thread,
			  PPC_THREAD_STATE64,
			  (thread_state_t)&(mc->ss),
			  PPC_THREAD_STATE64_COUNT);
#else
  kret = thread_set_state(thread,
			  MACHINE_THREAD_STATE,
			  (thread_state_t)&(mc->ss),
			  MACHINE_THREAD_STATE_COUNT);
#endif
  MACH_CHECK_ERROR("setting thread state", kret);
}
---|
2463 | |
---|
2464 | /* This code runs in the exception handling thread, in response |
---|
2465 | to an attempt to execute the UU0 at "pseudo_sigreturn" (e.g., |
---|
2466 | in response to a call to pseudo_sigreturn() from the specified |
---|
2467 | user thread. |
---|
2468 | Find that context (the user thread's R3 points to it), then |
---|
2469 | use that context to set the user thread's state. When this |
---|
2470 | function's caller returns, the Mach kernel will resume the |
---|
2471 | user thread. |
---|
2472 | */ |
---|
2473 | |
---|
/*
  See the comment above: runs in the exception handling thread, not
  the user thread.  Restore the user thread's registers from its
  pending exception context (clearing that context and marking the
  thread as back in lisp), so the kernel resumes it there.
*/
kern_return_t
do_pseudo_sigreturn(mach_port_t thread, TCR *tcr)
{
  ExceptionInformation *xp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "doing pseudo_sigreturn for 0x%x\n",tcr);
#endif
  xp = tcr->pending_exception_context;
  if (xp) {
    tcr->pending_exception_context = NULL;
    tcr->valence = TCR_STATE_LISP;
    restore_mach_thread_state(thread, xp);
    /* If an interrupt arrived during exception handling, poke the
       thread so it notices promptly. */
    raise_pending_interrupt(tcr);
  } else {
    Bug(NULL, "no xp here!\n");
  }
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "did pseudo_sigreturn for 0x%x\n",tcr);
#endif
  return KERN_SUCCESS;
}
---|
2496 | |
---|
/*
  Build a pseudo-signal context on the (suspended) user thread's own C
  stack: capture the thread's GPR/SPR, FP, and exception state into an
  MCONTEXT_T carved out below the current stack pointer, with an
  ExceptionInformation above it pointing at that mcontext.  If
  NEW_STACK_TOP is non-NULL, return the new (aligned, back-linked)
  stack pointer through it.  Runs in the exception handling thread.
*/
ExceptionInformation *
create_thread_context_frame(mach_port_t thread,
			    natural *new_stack_top)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;
  kern_return_t result;
  ExceptionInformation *pseudosigcontext;
  MCONTEXT_T mc;
  natural stackp, backlink;

  /* Capture the thread's general-purpose register state. */
#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
  result = thread_get_state(thread,
                            PPC_THREAD_STATE64,
                            (thread_state_t)&ts,
                            &thread_state_count);
#else
  thread_state_count = MACHINE_THREAD_STATE_COUNT;
  result = thread_get_state(thread,
                            PPC_THREAD_STATE,	/* GPRs, some SPRs  */
                            (thread_state_t)&ts,
                            &thread_state_count);
#endif

  if (result != KERN_SUCCESS) {
    get_tcr(true);
    Bug(NULL, "Exception thread can't obtain thread state, Mach result = %d", result);
  }
  stackp = ts.r1;
  backlink = stackp;            /* remember caller's r1 for the backlink */
  /* Skip the C red zone, then carve out the context structures. */
  stackp = TRUNC_DOWN(stackp, C_REDZONE_LEN, C_STK_ALIGN);
  stackp -= sizeof(*pseudosigcontext);
  pseudosigcontext = (ExceptionInformation *) ptr_from_lispobj(stackp);

  stackp = TRUNC_DOWN(stackp, sizeof(*mc), C_STK_ALIGN);
  mc = (MCONTEXT_T) ptr_from_lispobj(stackp);
  /* Copy the captured GPR state into the mcontext. */
  memmove(&(mc->ss),&ts,sizeof(ts));

  /* Capture floating-point state directly into the mcontext. */
  thread_state_count = PPC_FLOAT_STATE_COUNT;
  thread_get_state(thread,
		   PPC_FLOAT_STATE,
		   (thread_state_t)&(mc->fs),
		   &thread_state_count);


  /* Capture the exception state as well. */
#ifdef PPC64
  thread_state_count = PPC_EXCEPTION_STATE64_COUNT;
#else
  thread_state_count = PPC_EXCEPTION_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
		   PPC_EXCEPTION_STATE64,
#else
		   PPC_EXCEPTION_STATE,
#endif
		   (thread_state_t)&(mc->es),
		   &thread_state_count);


  UC_MCONTEXT(pseudosigcontext) = mc;
  /* Leave room for a C parameter-save area and linkage area, and store
     the backlink so the stack remains walkable. */
  stackp = TRUNC_DOWN(stackp, C_PARAMSAVE_LEN, C_STK_ALIGN);
  stackp -= C_LINKAGE_LEN;
  *(natural *)ptr_from_lispobj(stackp) = backlink;
  if (new_stack_top) {
    *new_stack_top = stackp;
  }
  return pseudosigcontext;
}
2571 | |
---|
2572 | /* |
---|
2573 | This code sets up the user thread so that it executes a "pseudo-signal |
---|
2574 | handler" function when it resumes. Create a linux sigcontext struct |
---|
2575 | on the thread's stack and pass it as an argument to the pseudo-signal |
---|
2576 | handler. |
---|
2577 | |
---|
2578 | Things are set up so that the handler "returns to" pseudo_sigreturn(), |
---|
2579 | which will restore the thread's context. |
---|
2580 | |
---|
2581 | If the handler invokes code that throws (or otherwise never sigreturn()'s |
---|
2582 | to the context), that's fine. |
---|
2583 | |
---|
2584 | Actually, check that: throw (and variants) may need to be careful and |
---|
2585 | pop the tcr's xframe list until it's younger than any frame being |
---|
2586 | entered. |
---|
2587 | */ |
---|
2588 | |
---|
/*
  Arrange (from the exception thread) for the suspended user thread to
  run HANDLER_ADDRESS(signum, context, tcr, old_valence) when resumed,
  with a pseudo-signal context saved on its stack and LR aimed at the
  pseudo_sigreturn UUO so a normal return restores the saved state.
  Always returns 0.
*/
int
setup_signal_frame(mach_port_t thread,
		   void *handler_address,
		   int signum,
		   int code,
		   TCR *tcr)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  ExceptionInformation *pseudosigcontext;
  int old_valence = tcr->valence;
  natural stackp;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr,"Setting up exception handling for 0x%x\n", tcr);
#endif
  /* Save the thread's current state in a context frame on its stack. */
  pseudosigcontext = create_thread_context_frame(thread, &stackp);
  pseudosigcontext->uc_onstack = 0;
  pseudosigcontext->uc_sigmask = (sigset_t) 0;
  tcr->pending_exception_context = pseudosigcontext;
  tcr->valence = TCR_STATE_EXCEPTION_WAIT;


  /*
     It seems like we've created a sigcontext on the thread's
     stack.  Set things up so that we call the handler (with appropriate
     args) when the thread's resumed.
  */
  /* NOTE(review): the CODE parameter is not passed to the handler;
     r3..r6 carry signum, context, tcr and old_valence only. */

  ts.srr0 = (natural) handler_address;
  /* Clear MSR FE0/FE1 so FP exceptions are disabled in the handler. */
  ts.srr1 = (int) xpMSR(pseudosigcontext) & ~MSR_FE0_FE1_MASK;
  ts.r1 = stackp;
  ts.r3 = signum;
  ts.r4 = (natural)pseudosigcontext;
  ts.r5 = (natural)tcr;
  ts.r6 = (natural)old_valence;
  /* "Return address" is the pseudo_sigreturn UUO. */
  ts.lr = (natural)pseudo_sigreturn;


#ifdef PPC64
  /* Preserve the thread-local-storage register. */
  ts.r13 = xpGPR(pseudosigcontext,13);
  thread_set_state(thread,
                   PPC_THREAD_STATE64,
                   (thread_state_t)&ts,
                   PPC_THREAD_STATE64_COUNT);
#else
  thread_set_state(thread,
		   MACHINE_THREAD_STATE,
		   (thread_state_t)&ts,
		   MACHINE_THREAD_STATE_COUNT);
#endif
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr,"Set up exception context for 0x%x at 0x%x\n", tcr, tcr->pending_exception_context);
#endif
  return 0;
}
2648 | |
---|
2649 | |
---|
/*
  Trampoline run on the interrupted thread (via setup_signal_frame):
  forward to signal_handler() with a NULL siginfo, passing the saved
  context, the TCR, and the thread's valence at exception time.
*/
void
pseudo_signal_handler(int signum, ExceptionInformation *context, TCR *tcr, int old_valence)
{
  signal_handler(signum, NULL, context, tcr, old_valence);
}
2655 | |
---|
2656 | |
---|
/*
  Set or clear the MSR FE0/FE1 (FP exception enable) bits in the
  suspended thread's state, then adjust srr0 so the thread resumes
  past the enable/disable UUO (or returns through a phony lisp frame
  pushed by the interrupt handler -- see the long comment below).
  Runs in the exception handling thread.  Always returns 0.
*/
int
thread_set_fp_exceptions_enabled(mach_port_t thread, Boolean enabled)
{
#ifdef PPC64
  ppc_thread_state64_t ts;
#else
  ppc_thread_state_t ts;
#endif
  mach_msg_type_number_t thread_state_count;

#ifdef PPC64
  thread_state_count = PPC_THREAD_STATE64_COUNT;
#else
  thread_state_count = PPC_THREAD_STATE_COUNT;
#endif
  thread_get_state(thread,
#ifdef PPC64
		   PPC_THREAD_STATE64,	/* GPRs, some SPRs  */
#else
		   PPC_THREAD_STATE,	/* GPRs, some SPRs  */
#endif
		   (thread_state_t)&ts,
		   &thread_state_count);
  if (enabled) {
    ts.srr1 |= MSR_FE0_FE1_MASK;
  } else {
    ts.srr1 &= ~MSR_FE0_FE1_MASK;
  }
  /*
    Hack-o-rama warning (isn't it about time for such a warning?):
    pthread_kill() seems to want to lose the MSR's FE0/FE1 bits.
    Our handler for lisp's use of pthread_kill() pushes a phony
    lisp frame on the stack and force the context to resume at
    the UUO in enable_fp_exceptions(); the "saveLR" field of that
    lisp frame contains the -real- address that process_interrupt
    should have returned to, and the fact that it's in a lisp
    frame should convince the GC to notice that address if it
    runs in the tiny time window between returning from our
    interrupt handler and ... here.
    If the top frame on the stack is a lisp frame, discard it
    and set ts.srr0 to the saveLR field in that frame.  Otherwise,
    just adjust ts.srr0 to skip over the UUO.
  */
  {
    lisp_frame *tos = (lisp_frame *)ts.r1,
      *next_frame = tos->backlink;

    /* The phony frame is minimal: its backlink points just past it. */
    if (tos == (next_frame -1)) {
      ts.srr0 = tos->savelr;
      ts.r1 = (LispObj) next_frame;
    } else {
      /* Skip the 4-byte UUO instruction. */
      ts.srr0 += 4;
    }
  }
  thread_set_state(thread,
#ifdef PPC64
		   PPC_THREAD_STATE64,	/* GPRs, some SPRs  */
#else
		   PPC_THREAD_STATE,	/* GPRs, some SPRs  */
#endif
		   (thread_state_t)&ts,
#ifdef PPC64
                   PPC_THREAD_STATE64_COUNT
#else
		   PPC_THREAD_STATE_COUNT
#endif
		   );

  return 0;
}
2727 | |
---|
2728 | /* |
---|
2729 | This function runs in the exception handling thread. It's |
---|
2730 | called (by this precise name) from the library function "exc_server()" |
---|
2731 | when the thread's exception ports are set up. (exc_server() is called |
---|
2732 | via mach_msg_server(), which is a function that waits for and dispatches |
---|
2733 | on exception messages from the Mach kernel.) |
---|
2734 | |
---|
2735 | This checks to see if the exception was caused by a pseudo_sigreturn() |
---|
2736 | UUO; if so, it arranges for the thread to have its state restored |
---|
2737 | from the specified context. |
---|
2738 | |
---|
2739 | Otherwise, it tries to map the exception to a signal number and |
---|
2740 | arranges that the thread run a "pseudo signal handler" to handle |
---|
2741 | the exception. |
---|
2742 | |
---|
2743 | Some exceptions could and should be handled here directly. |
---|
2744 | */ |
---|
2745 | |
---|
kern_return_t
catch_exception_raise(mach_port_t exception_port,
		      mach_port_t thread,
		      mach_port_t task,
		      exception_type_t exception,
		      exception_data_t code_vector,
		      mach_msg_type_number_t code_count)
{
  int signum = 0, code = *code_vector, code1;
  TCR *tcr = TCR_FROM_EXCEPTION_PORT(exception_port);
  kern_return_t kret;

#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "obtaining Mach exception lock in exception thread\n");
#endif

  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_EXCEPTION);
  }
  /* First check for the three distinguished UUOs (pseudo_sigreturn and
     the FP-exception enable/disable entry points); those are handled
     entirely in this thread.  NOTE(review): comparing code_vector[1]
     against function addresses via (int) casts assumes the addresses
     fit in an int. */
  if ((exception == EXC_BAD_INSTRUCTION) &&
      (code_vector[0] == EXC_PPC_UNIPL_INST) &&
      (((code1 = code_vector[1]) == (int)pseudo_sigreturn) ||
       (code1 == (int)enable_fp_exceptions) ||
       (code1 == (int)disable_fp_exceptions))) {
    if (code1 == (int)pseudo_sigreturn) {
      kret = do_pseudo_sigreturn(thread, tcr);
#if 0
      fprintf(stderr, "Exception return in 0x%x\n",tcr);
#endif

    } else if (code1 == (int)enable_fp_exceptions) {
      kret = thread_set_fp_exceptions_enabled(thread, true);
    } else kret = thread_set_fp_exceptions_enabled(thread, false);
  } else if (tcr->flags & (1<<TCR_FLAG_BIT_PROPAGATE_EXCEPTION)) {
    CLR_TCR_FLAG(tcr,TCR_FLAG_BIT_PROPAGATE_EXCEPTION);
    /* Return a non-KERN_SUCCESS value so the kernel treats the
       exception as unhandled and propagates it. */
    kret = 17;
  } else {
    /* Map the Mach exception to a signal number and arrange for the
       thread to run the pseudo-signal handler when resumed. */
    switch (exception) {
    case EXC_BAD_ACCESS:
      signum = SIGSEGV;
      break;

    case EXC_BAD_INSTRUCTION:
      signum = SIGILL;
      break;

    case EXC_SOFTWARE:
      if (code == EXC_PPC_TRAP) {
	signum = SIGTRAP;
      }
      break;

    case EXC_ARITHMETIC:
      signum = SIGFPE;
      break;

    default:
      break;
    }
    if (signum) {
      kret = setup_signal_frame(thread,
				(void *)pseudo_signal_handler,
				signum,
				code,
				tcr);
#if 0
      fprintf(stderr, "Setup pseudosignal handling in 0x%x\n",tcr);
#endif

    } else {
      /* Unmapped exception: report it as unhandled. */
      kret = 17;
    }
  }

  return kret;
}
2822 | |
---|
2823 | |
---|
2824 | |
---|
/*
  Layout of an incoming exception_raise request message (msgh_id 2401),
  matched field-for-field against what the kernel sends so that
  openmcl_exc_server() can decode it without the MIG-generated
  exc_server().  Do not reorder or resize fields.
*/
typedef struct {
  mach_msg_header_t Head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread;
  mach_msg_port_descriptor_t task;
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;
  mach_msg_type_number_t codeCnt;
  integer_t code[2];
  mach_msg_trailer_t trailer;
} exceptionRequest;
2838 | |
---|
2839 | |
---|
/*
  Minimal replacement for the MIG-generated exc_server(): validate that
  IN is an exception_raise request (msgh_id 2401), dispatch it to
  catch_exception_raise(), and build the reply in OUT.  Returns TRUE
  when a reply should be sent, FALSE on an unrecognized message id.
*/
boolean_t
openmcl_exc_server(mach_msg_header_t *in, mach_msg_header_t *out)
{
  static NDR_record_t _NDR = {0};
  kern_return_t handled;
  mig_reply_error_t *reply = (mig_reply_error_t *) out;
  exceptionRequest *req = (exceptionRequest *) in;

  reply->NDR = _NDR;

  out->msgh_bits = in->msgh_bits & MACH_MSGH_BITS_REMOTE_MASK;
  out->msgh_remote_port = in->msgh_remote_port;
  /* Header plus three 32-bit words (presumably the NDR record and
     RetCode of the mig_reply_error_t). */
  out->msgh_size = sizeof(mach_msg_header_t)+(3 * sizeof(unsigned));
  out->msgh_local_port = MACH_PORT_NULL;
  /* Reply id is conventionally request id + 100. */
  out->msgh_id = in->msgh_id+100;

  /* Could handle other exception flavors in the range 2401-2403 */


  if (in->msgh_id != 2401) {
    reply->RetCode = MIG_BAD_ID;
    return FALSE;
  }
  handled = catch_exception_raise(req->Head.msgh_local_port,
				  req->thread.name,
				  req->task.name,
				  req->exception,
				  req->code,
				  req->codeCnt);
  reply->RetCode = handled;
  return TRUE;
}
2872 | |
---|
2873 | /* |
---|
2874 | The initial function for an exception-handling thread. |
---|
2875 | */ |
---|
2876 | |
---|
2877 | void * |
---|
2878 | exception_handler_proc(void *arg) |
---|
2879 | { |
---|
2880 | extern boolean_t exc_server(); |
---|
2881 | mach_port_t p = TCR_TO_EXCEPTION_PORT(arg); |
---|
2882 | |
---|
2883 | mach_msg_server(openmcl_exc_server, 2048, p, 0); |
---|
2884 | /* Should never return. */ |
---|
2885 | abort(); |
---|
2886 | } |
---|
2887 | |
---|
2888 | |
---|
2889 | |
---|
2890 | mach_port_t |
---|
2891 | mach_exception_port_set() |
---|
2892 | { |
---|
2893 | static mach_port_t __exception_port_set = MACH_PORT_NULL; |
---|
2894 | kern_return_t kret; |
---|
2895 | if (__exception_port_set == MACH_PORT_NULL) { |
---|
2896 | kret = mach_port_allocate(mach_task_self(), |
---|
2897 | MACH_PORT_RIGHT_PORT_SET, |
---|
2898 | &__exception_port_set); |
---|
2899 | MACH_CHECK_ERROR("allocating thread exception_ports",kret); |
---|
2900 | create_system_thread(0, |
---|
2901 | NULL, |
---|
2902 | exception_handler_proc, |
---|
2903 | (void *)((natural)__exception_port_set)); |
---|
2904 | } |
---|
2905 | return __exception_port_set; |
---|
2906 | } |
---|
2907 | |
---|
2908 | /* |
---|
2909 | Setup a new thread to handle those exceptions specified by |
---|
2910 | the mask "which". This involves creating a special Mach |
---|
2911 | message port, telling the Mach kernel to send exception |
---|
2912 | messages for the calling thread to that port, and setting |
---|
2913 | up a handler thread which listens for and responds to |
---|
2914 | those messages. |
---|
2915 | |
---|
2916 | */ |
---|
2917 | |
---|
2918 | /* |
---|
2919 | Establish the lisp thread's TCR as its exception port, and determine |
---|
2920 | whether any other ports have been established by foreign code for |
---|
2921 | exceptions that lisp cares about. |
---|
2922 | |
---|
2923 | If this happens at all, it should happen on return from foreign |
---|
2924 | code and on entry to lisp code via a callback. |
---|
2925 | |
---|
2926 | This is a lot of trouble (and overhead) to support Java, or other |
---|
2927 | embeddable systems that clobber their caller's thread exception ports. |
---|
2928 | |
---|
2929 | */ |
---|
kern_return_t
tcr_establish_exception_port(TCR *tcr, mach_port_t thread)
{
  kern_return_t kret;
  MACH_foreign_exception_state *fxs = (MACH_foreign_exception_state *)tcr->native_thread_info;
  int i;
  unsigned n = NUM_LISP_EXCEPTIONS_HANDLED;
  mach_port_t lisp_port = TCR_TO_EXCEPTION_PORT(tcr), foreign_port;
  exception_mask_t mask = 0;

  /* Install the lisp port for every exception lisp handles, saving the
     previously-installed ports/behaviors/flavors into the TCR's
     MACH_foreign_exception_state. */
  kret = thread_swap_exception_ports(thread,
				     LISP_EXCEPTIONS_HANDLED_MASK,
				     lisp_port,
				     EXCEPTION_DEFAULT,
				     THREAD_STATE_NONE,
				     fxs->masks,
				     &n,
				     fxs->ports,
				     fxs->behaviors,
				     fxs->flavors);
  if (kret == KERN_SUCCESS) {
    fxs->foreign_exception_port_count = n;
    for (i = 0; i < n; i ++) {
      foreign_port = fxs->ports[i];

      /* Record which exceptions had a genuine foreign handler, so
         restore_foreign_exception_ports() can re-establish them on
         exit to foreign code. */
      if ((foreign_port != lisp_port) &&
	  (foreign_port != MACH_PORT_NULL)) {
	mask |= fxs->masks[i];
      }
    }
    tcr->foreign_exception_status = (int) mask;
  }
  return kret;
}
2964 | |
---|
/*
  Establish the lisp exception port on the calling lisp thread's own
  Mach thread (named by the TCR's native_thread_id).
*/
kern_return_t
tcr_establish_lisp_exception_port(TCR *tcr)
{
  return tcr_establish_exception_port(tcr, (mach_port_t)((natural)tcr->native_thread_id));
}
2970 | |
---|
2971 | /* |
---|
2972 | Do this when calling out to or returning from foreign code, if |
---|
2973 | any conflicting foreign exception ports were established when we |
---|
2974 | last entered lisp code. |
---|
2975 | */ |
---|
2976 | kern_return_t |
---|
2977 | restore_foreign_exception_ports(TCR *tcr) |
---|
2978 | { |
---|
2979 | exception_mask_t m = (exception_mask_t) tcr->foreign_exception_status; |
---|
2980 | |
---|
2981 | if (m) { |
---|
2982 | MACH_foreign_exception_state *fxs = |
---|
2983 | (MACH_foreign_exception_state *) tcr->native_thread_info; |
---|
2984 | int i, n = fxs->foreign_exception_port_count; |
---|
2985 | exception_mask_t tm; |
---|
2986 | |
---|
2987 | for (i = 0; i < n; i++) { |
---|
2988 | if ((tm = fxs->masks[i]) & m) { |
---|
2989 | thread_set_exception_ports((mach_port_t)((natural)tcr->native_thread_id), |
---|
2990 | tm, |
---|
2991 | fxs->ports[i], |
---|
2992 | fxs->behaviors[i], |
---|
2993 | fxs->flavors[i]); |
---|
2994 | } |
---|
2995 | } |
---|
2996 | } |
---|
2997 | } |
---|
2998 | |
---|
2999 | |
---|
3000 | /* |
---|
3001 | This assumes that a Mach port (to be used as the thread's exception port) whose |
---|
3002 | "name" matches the TCR's 32-bit address has already been allocated. |
---|
3003 | */ |
---|
3004 | |
---|
kern_return_t
setup_mach_exception_handling(TCR *tcr)
{
  mach_port_t
    thread_exception_port = TCR_TO_EXCEPTION_PORT(tcr),
    task_self = mach_task_self();
  kern_return_t kret;

  /* Give the (already-allocated) port a send right so the kernel can
     deliver exception messages to it. */
  kret = mach_port_insert_right(task_self,
				thread_exception_port,
				thread_exception_port,
				MACH_MSG_TYPE_MAKE_SEND);
  MACH_CHECK_ERROR("adding send right to exception_port",kret);

  /* Make it the thread's exception port for lisp-handled exceptions,
     then add it to the port set the handler thread listens on. */
  kret = tcr_establish_exception_port(tcr, (mach_port_t)((natural) tcr->native_thread_id));
  if (kret == KERN_SUCCESS) {
    mach_port_t exception_port_set = mach_exception_port_set();

    kret = mach_port_move_member(task_self,
				 thread_exception_port,
				 exception_port_set);
  }
  return kret;
}
3029 | |
---|
3030 | void |
---|
3031 | darwin_exception_init(TCR *tcr) |
---|
3032 | { |
---|
3033 | void tcr_monitor_exception_handling(TCR*, Boolean); |
---|
3034 | kern_return_t kret; |
---|
3035 | MACH_foreign_exception_state *fxs = |
---|
3036 | calloc(1, sizeof(MACH_foreign_exception_state)); |
---|
3037 | |
---|
3038 | tcr->native_thread_info = (void *) fxs; |
---|
3039 | |
---|
3040 | if ((kret = setup_mach_exception_handling(tcr)) |
---|
3041 | != KERN_SUCCESS) { |
---|
3042 | fprintf(stderr, "Couldn't setup exception handler - error = %d\n", kret); |
---|
3043 | terminate_lisp(); |
---|
3044 | } |
---|
3045 | lisp_global(LISP_EXIT_HOOK) = (LispObj) restore_foreign_exception_ports; |
---|
3046 | lisp_global(LISP_RETURN_HOOK) = (LispObj) tcr_establish_lisp_exception_port; |
---|
3047 | } |
---|
3048 | |
---|
3049 | /* |
---|
3050 | The tcr is the "name" of the corresponding thread's exception port. |
---|
3051 | Destroying the port should remove it from all port sets of which it's |
---|
3052 | a member (notably, the exception port set.) |
---|
3053 | */ |
---|
void
darwin_exception_cleanup(TCR *tcr)
{
  void *fxs = tcr->native_thread_info;
  extern Boolean use_mach_exception_handling;

  /* Release the saved foreign-exception-port state, if any. */
  if (fxs) {
    tcr->native_thread_info = NULL;
    free(fxs);
  }
  if (use_mach_exception_handling) {
    /* Drop the send right added in setup_mach_exception_handling(),
       then destroy the receive right; destroying the port also removes
       it from the exception port set. */
    mach_port_deallocate(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
    mach_port_destroy(mach_task_self(),TCR_TO_EXCEPTION_PORT(tcr));
  }
}
3069 | |
---|
3070 | |
---|
3071 | Boolean |
---|
3072 | suspend_mach_thread(mach_port_t mach_thread) |
---|
3073 | { |
---|
3074 | kern_return_t status; |
---|
3075 | Boolean aborted = false; |
---|
3076 | |
---|
3077 | do { |
---|
3078 | aborted = false; |
---|
3079 | status = thread_suspend(mach_thread); |
---|
3080 | if (status == KERN_SUCCESS) { |
---|
3081 | status = thread_abort_safely(mach_thread); |
---|
3082 | if (status == KERN_SUCCESS) { |
---|
3083 | aborted = true; |
---|
3084 | } else { |
---|
3085 | fprintf(stderr, "abort failed on thread = 0x%x\n",mach_thread); |
---|
3086 | thread_resume(mach_thread); |
---|
3087 | } |
---|
3088 | } else { |
---|
3089 | return false; |
---|
3090 | } |
---|
3091 | } while (! aborted); |
---|
3092 | return true; |
---|
3093 | } |
---|
3094 | |
---|
3095 | /* |
---|
3096 | Only do this if pthread_kill indicated that the pthread isn't |
---|
3097 | listening to signals anymore, as can happen as soon as pthread_exit() |
---|
3098 | is called on Darwin. The thread could still call out to lisp as it |
---|
3099 | is exiting, so we need another way to suspend it in this case. |
---|
3100 | */ |
---|
Boolean
mach_suspend_tcr(TCR *tcr)
{
  mach_port_t mach_thread = (mach_port_t)((natural)( tcr->native_thread_id));
  ExceptionInformation *pseudosigcontext;
  Boolean result = false;

  /* Suspend the underlying Mach thread, then save its register state
     in a context frame on its own stack so that resume (and anything
     examining the thread, e.g. the GC) can find it. */
  result = suspend_mach_thread(mach_thread);
  if (result) {
    pseudosigcontext = create_thread_context_frame(mach_thread, NULL);
    pseudosigcontext->uc_onstack = 0;
    pseudosigcontext->uc_sigmask = (sigset_t) 0;
    tcr->suspend_context = pseudosigcontext;
  }
  return result;
}
3117 | |
---|
/*
  Undo mach_suspend_tcr(): restore the register state saved in the
  TCR's suspend_context, then let the Mach thread run again.
*/
void
mach_resume_tcr(TCR *tcr)
{
  ExceptionInformation *xp;
  mach_port_t mach_thread = (mach_port_t)((natural)(tcr->native_thread_id));

  xp = tcr->suspend_context;
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "resuming TCR 0x%x, pending_exception_context = 0x%x\n",
	  tcr, tcr->pending_exception_context);
#endif
  tcr->suspend_context = NULL;
  restore_mach_thread_state(mach_thread, xp);
#ifdef DEBUG_MACH_EXCEPTIONS
  fprintf(stderr, "restored state in TCR 0x%x, pending_exception_context = 0x%x\n",
	  tcr, tcr->pending_exception_context);
#endif
  thread_resume(mach_thread);
}
3137 | |
---|
/*
  printf-style fatal-error reporting for Mach failures: format the
  message into a fixed-size local buffer (truncating if necessary) and
  hand it to Fatal().
*/
void
fatal_mach_error(char *format, ...)
{
  char buf[512];
  va_list ap;

  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);

  Fatal("Mach error", buf);
}
3151 | |
---|
/*
  Trampoline run on the interrupted thread (via setup_signal_frame):
  forward a process-interrupt to interrupt_handler() with a NULL
  siginfo.
*/
void
pseudo_interrupt_handler(int signum, ExceptionInformation *context)
{
  interrupt_handler(signum, NULL, context);
}
3157 | |
---|
/*
  Interrupt TARGET's thread via Mach primitives (used when
  pthread_kill can't be): suspend the thread and, if it is safely
  interruptible right now, push a pseudo-signal frame that runs
  pseudo_interrupt_handler on resume; otherwise just record a pending
  interrupt for the target to notice later.  Always returns 0.
*/
int
mach_raise_thread_interrupt(TCR *target)
{
  mach_port_t mach_thread = (mach_port_t)((natural)(target->native_thread_id));
  kern_return_t kret;
  Boolean result = false;
  TCR *current = get_tcr(false);
  thread_basic_info_data_t info;
  mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

  /* Hold the TCR area lock so the target can't exit underneath us. */
  LOCK(lisp_global(TCR_AREA_LOCK), current);

  if (suspend_mach_thread(mach_thread)) {
    if (thread_info(mach_thread,
		    THREAD_BASIC_INFO,
		    (thread_info_t)&info,
		    &info_count) == KERN_SUCCESS) {
      /* Deliver now only if ours is the sole suspension and the target
         is running lisp code, isn't unwinding, and has interrupts
         enabled. */
      if (info.suspend_count == 1) {
	if ((target->valence == TCR_STATE_LISP) &&
	    (!target->unwinding) &&
	    (TCR_INTERRUPT_LEVEL(target) >= 0)) {
	  kret = setup_signal_frame(mach_thread,
				    (void *)pseudo_interrupt_handler,
				    SIGNAL_FOR_PROCESS_INTERRUPT,
				    0,
				    target);
	  if (kret == KERN_SUCCESS) {
	    result = true;
	  }
	}
      }
    }
    if (! result) {
      /* Couldn't deliver now: leave a pending-interrupt note (a lisp
         fixnum 1) for the target to act on later. */
      target->interrupt_pending = 1 << fixnumshift;
    }
    thread_resume(mach_thread);

  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
  return 0;
}
3199 | |
---|
3200 | #endif |
---|