/*
  Copyright (C) 2009 Clozure Associates
  Copyright (C) 1994-2001 Digitool, Inc
  This file is part of Clozure CL.

  Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
  License, known as the LLGPL and distributed with Clozure CL as the
  file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
  which is distributed with Clozure CL as the file "LGPL".  Where these
  conflict, the preamble takes precedence.

  Clozure CL is referenced in the preamble as the "LIBRARY."

  The LLGPL is also available online at
  http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#endif
#ifdef DARWIN
#include <pthread.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif
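
/* When DEBUG_MEMORY is non-zero, the mapping and protection
   primitives below log their arguments to dbgout. */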
#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(": Kernel memory allocation failure. ", buf);
}

void *
lisp_malloc(size_t size)
{
  return malloc(size);
}

void
lisp_free(void *p)
{
  free(p);
}
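
/*
  Reserve (but don't commit) address space for the lisp heap.  The
  range is mapped PROT_NONE and committed piecemeal later.  We ask for
  an extra heap_segment_size bytes so that, if the kernel won't honor
  the suggested address, the address we do get can be rounded up to a
  heap_segment_size boundary and the heap mapped there instead.
*/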
LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  void raise_limit(void);
  LogicalAddress start;

  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize + heap_segment_size);
    start = (void *)((((natural)start) + heap_segment_size - 1) & ~(heap_segment_size - 1));
    if (mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}
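
/*
  Make LEN bytes of already-reserved address space at START accessible
  (readable, writable, and executable).  On Windows this commits the
  pages; elsewhere it remaps them MAP_FIXED with MEMPROTECT_RWX,
  retrying up to three times and unmapping any misplaced mapping
  between attempts.
*/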
int
CommitMemory(LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  LogicalAddress rc;

  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start + len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start, len, PAGE_EXECUTE_READWRITE, &as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}
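
/*
  Give the pages in the specified range back to the OS without
  unreserving the address space: MEM_DECOMMIT on Windows; elsewhere,
  advise the kernel that the contents aren't needed and remap the
  range at MEMPROTECT_NONE.
*/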
void
UnCommitMemory(LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("VirtualFree error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
  LogicalAddress p;
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  p = VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
  if (p == NULL) {
    wperror("MapMemory");
  }
  return p;
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr != NULL) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}
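/*
  Write-protect the specified range, leaving it readable and
  executable.  If mprotect() fails with ENOMEM (which can happen when
  pages in the range aren't mapped), fall back to mapping fresh
  read/execute pages at that address.
*/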
int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if (!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
      void *mapaddr = mmap(addr, nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED, -1, 0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}
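
/*
  Map NBYTES bytes at offset POS of the file open on FD into memory at
  ADDR.  The real file-mapping path for Windows is disabled below;
  instead, we just commit the range and read() the file's contents
  into it, preserving the caller's file offset.  Elsewhere this is a
  private, fixed mmap() of the file.
*/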
int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}
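
/*
  Allocate a protected_area describing [START,END) and push it onto
  AllProtectedAreas.  PROTSIZE is the number of bytes at START that
  protect_area() should write-protect; if NOW is true, protect them
  immediately.
*/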
protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}


protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area *p;

  for (p = AllProtectedAreas; p; p = p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start, end - start);
#else
  bzero(start, (size_t)(end - start));
#endif
}
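
/*
  Zero every word of REFBITS covered by a set bit in the sparse index
  vector REFIDX, clearing the index along the way.  Each bit of REFIDX
  covers 256 dnodes' worth of REFBITS (256/WORD_SIZE words), so only
  the regions known to contain set bits need to be touched.
*/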
void
zero_refbits(bitvector refidx, bitvector refbits, natural ndnodes)
{
  bitvector refbase = refbits, refword, limit = refbase + ((ndnodes + (WORD_SIZE-1)) >> node_shift), reflimit;
  natural i, n = (((ndnodes + 255) >> 8) + (WORD_SIZE-1)) >> bitmap_shift, bit, idx;

  for (i = 0; i < n; i++, refbase += WORD_SIZE * (256 / WORD_SIZE)) {
    idx = *refidx;

    if (idx != 0) {
      *refidx = 0;
      while (idx) {
        bit = count_leading_zeros(idx);
        idx &= ~(BIT0_MASK >> bit);
        refword = refbase + bit * (256 / WORD_SIZE);
        reflimit = refword + (256 / WORD_SIZE);
        if (limit < reflimit) {
          reflimit = limit;
        }
        while (refword < reflimit) {
          *refword++ = 0;
        }
      }
    }
    refidx++;
  }
#if 0
  /* Check, slowly */
  for (i = 0; i < ndnodes; i++) {
    if (ref_bit(refbits, i)) {
      Bug(NULL, "Bit 0x" LISP " set unexpectedly\n", i);
    }
  }
#endif
}


/*
  Grow or shrink the dynamic area.  Or maybe not.
  Whether or not the end of (mapped space in) the heap changes,
  ensure that everything between the freeptr and the heap end
  is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree - lowptr + free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit - a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high - newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && !p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
    VirtualAlloc(start + n - page_size, page_size, MEM_COMMIT, PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */

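/*
  Allocate and initialize an area struct describing [LOWADDR,HIGHADDR).
  Dynamic areas fill from the bottom up (active starts at the low end);
  other areas, notably stacks, fill from the top down.
*/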
area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = calloc(1, sizeof(area));

  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/

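/*
  all_areas is a circular, doubly-linked list with a sentinel header;
  keeping the areas sorted by descending area_code is what produces
  the ordering described above.
*/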
void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK), tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK), tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_refbits(global_refidx, managed_static_area->refbits, managed_static_area->ndnodes);
    zero_bits(refbits, new_tenured_dnodes);
    zero_bits(dynamic_refidx, (new_tenured_dnodes+255)>>8);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}


/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/
void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
    }
  }
}
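
/*
  Enable or disable the ephemeral GC.  Activating it chains the
  dynamic area onto the ephemeral generations and tenures everything
  currently allocated; deactivating it untenures everything.  If
  CURFREE is non-NULL, it becomes the dynamic area's active pointer
  first.  Returns the resulting EGC state.
*/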
Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/
Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}


/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  free(p);
}


/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/
void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  free(a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK), tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK), tcr);
}


/*
  Condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area.
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK), tcr);

  for (; a->younger; a = a->younger) ;
  for (; a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK), tcr);
}


void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

LogicalAddress
ReserveMemory(natural size)
{
  LogicalAddress p;
#ifdef WINDOWS
  p = VirtualAlloc(0,
                   size,
                   MEM_RESERVE,
                   PAGE_NOACCESS);
  return p;
#else
  p = mmap(NULL, size, PROT_NONE, MAP_PRIVATE|MAP_ANON|MAP_NORESERVE, -1, 0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  return p;
#endif
}



#ifdef DARWIN
/*
  On 64-bit Darwin, we try to make a TCR's address serve as a Mach port
  name, which means that it has to fit in 32 bits (and not conflict with
  an existing port name, but that's a separate issue.)  Darwin doesn't
  seem to offer a means of mapping/allocating memory that's guaranteed to
  return a 32-bit address on 64-bit systems, and trial-and-error doesn't
  scale well.

  Since it's a PITA to allocate 32-bit TCR pointers, we never free them
  once we've done so.  (We maintain a queue of "freed" TCRs but never
  unmap the memory.)  When we need to allocate TCR pointers, we try to
  allocate substantially more than we need.

  The bulk allocation works by scanning the task's mapped memory
  regions until a free region of appropriate size is found, then
  mapping that region (without the dangerous use of MAP_FIXED).  This
  will win if OSX's mmap() tries to honor the suggested address if it
  doesn't conflict with a mapped region (as it seems to in practice
  since at least 10.5 and as it's documented to in 10.6.)

  OSX 10.8 introduces new horrors that affect 32-bit CCL as well:

  mach_port_allocate_name(mach_task_self(),MACH_PORT_RIGHT_RECEIVE,n)

  returns KERN_NO_SPACE for n > ~#x09800000.  It's not known whether or
  not this is intentional; even if it's a bug, it suggests that we should
  probably stop trying to arrange that a TCR's address can be used as the
  corresponding thread's exception port and maintain some sort of
  efficient and thread-safe mapping from port to TCR.  Soon.

  News flash: mach_port_allocate_name() is not only worse than we
  imagine on 10.8, but it's worse than we can imagine.  Give up.
  (This means that there are no longer any constraints on TCR addresses
  and we could just use malloc here, but keep some of this code around
  for now.)
*/

pthread_mutex_t darwin_tcr_lock = PTHREAD_MUTEX_INITIALIZER;

TCR _free_tcr_queue, *darwin_tcr_freelist = &_free_tcr_queue;

#define TCR_CLUSTER_COUNT 1024 /* Enough that we allocate clusters rarely,
                                  but not so much that we waste lots of
                                  32-bit memory. */

/* force 16-byte alignment, just in case */
typedef struct {
  TCR tcr;
} __attribute__ ((aligned(16))) MTCR;
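
/*
  Link the N MTCRs in BUF onto the circular, doubly-linked freelist
  headed by HEAD.
*/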
void
link_tcr_list(TCR *head, MTCR *buf, int n)
{
  TCR *prev = head, *tcr;
  int i;

  for (i = 0; i < n; i++, buf++) {
    tcr = &(buf->tcr);
    prev->next = tcr;
    tcr->prev = prev;
    head->prev = tcr;
    tcr->next = head;
    prev = tcr;
  }
}


void
map_tcr_cluster(TCR *head)
{
  MTCR *work = NULL;
  size_t request_size = align_to_power_of_2((TCR_CLUSTER_COUNT * sizeof(MTCR)), log2_page_size);

  work = (MTCR *)mmap(NULL,
                      request_size,
                      PROT_READ|PROT_WRITE,
                      MAP_PRIVATE|MAP_ANON,
                      -1,
                      0);

  if (work == MAP_FAILED) {
    Fatal("Can't allocate memory for thread-local storage.", "");
  }
  link_tcr_list(head, work, TCR_CLUSTER_COUNT);
}

void
darwin_free_tcr(TCR *tcr)
{
  TCR *head = darwin_tcr_freelist, *tail;

  pthread_mutex_lock(&darwin_tcr_lock);
  tail = head->prev;
  tail->next = tcr;
  head->prev = tcr;
  tcr->prev = tail;
  tcr->next = head;
  pthread_mutex_unlock(&darwin_tcr_lock);
}
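
/*
  Pop a TCR off the front of the freelist, lazily initializing the
  list header on first use and mapping a fresh cluster whenever the
  list is empty.  The TCR is returned zeroed.
*/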
TCR *
darwin_allocate_tcr()
{
  TCR *head = darwin_tcr_freelist, *tail, *tcr;

  pthread_mutex_lock(&darwin_tcr_lock);
  if (head->next == NULL) { /* First time */
    head->next = head->prev = head;
  }

  if (head->next == head) {
    map_tcr_cluster(head);
  }
  tcr = head->next;
  tail = tcr->next;
  tail->prev = head;
  head->next = tail;
  pthread_mutex_unlock(&darwin_tcr_lock);
  memset(tcr, 0, sizeof(TCR));
  return tcr;
}

#endif