source: branches/arm/lisp-kernel/memory.c @ 14066

Last change on this file since 14066 was 14066, checked in by gb, 10 years ago

Lots of changes to support stack-overflow detection on ARM Linux.
(Write protect the control stack, handle SIGSEGV on an alternate
signal stack ...) The sigaltstack mechanism doesn't work if the
specified signal stack is within the allocated control stack region
(we generally use the top few pages of the control stack on x86;
here, we map a few pages and need to remember to free them when the
thread dies.)
Also: need some recovery mechanism, so that after the thread unwinds
out of the "yellow zone" the yellow zone is re-protected.
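A minimal, hypothetical sketch of the mechanism this describes (not the kernel's actual code): map a few dedicated pages outside the control stack, register them with sigaltstack(), deliver SIGSEGV on that stack, and remember the mapping so it can be freed at thread exit. The names setup_sigsegv_handling(), teardown_sigaltstack(), and ALTSTACK_BYTES are illustrative only; it assumes only POSIX mmap/sigaltstack/sigaction.

#include <signal.h>
#include <sys/mman.h>

#define ALTSTACK_BYTES (4 * 4096)   /* "a few pages"; size is illustrative */

static void *altstack_base;          /* saved so thread teardown can free it */

/* Per-thread setup: map a dedicated alternate signal stack and install
   a SIGSEGV handler that runs on it. */
static int
setup_sigsegv_handling(void (*handler)(int, siginfo_t *, void *))
{
  stack_t ss;
  struct sigaction sa;

  altstack_base = mmap(NULL, ALTSTACK_BYTES, PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANON, -1, 0);
  if (altstack_base == MAP_FAILED) return -1;

  ss.ss_sp = altstack_base;
  ss.ss_size = ALTSTACK_BYTES;
  ss.ss_flags = 0;
  if (sigaltstack(&ss, NULL) != 0) return -1;

  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO | SA_ONSTACK;  /* run the handler on the alt stack */
  sa.sa_sigaction = handler;
  return sigaction(SIGSEGV, &sa, NULL);
}

/* At thread death: the cleanup the message above says we must remember. */
static void
teardown_sigaltstack(void)
{
  munmap(altstack_base, ALTSTACK_BYTES);
}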

/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  vm_region_basic_info_data_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux, as sketched below.
  */
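
/* Untested sketch (hypothetical, hence kept under "#if 0") of the
   /proc/<pid>/maps fallback mentioned above: each line of that file
   begins with "start-end" in hex, and [addr, addr+len) is unmapped
   iff it intersects none of those ranges. */
#if 0
static Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *f = fopen("/proc/self/maps", "r");
  unsigned long start, end, lo = (unsigned long)addr, hi = lo + (unsigned long)len;
  char line[256];

  if (f == NULL) {
    return false;               /* be conservative if we can't tell */
  }
  while (fgets(line, sizeof(line), f) != NULL) {
    if ((sscanf(line, "%lx-%lx", &start, &end) == 2) &&
        (start < hi) && (end > lo)) {
      fclose(f);
      return false;             /* overlaps an existing mapping */
    }
  }
  fclose(f);
  return true;
}
#endif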

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if (mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
#ifdef WINDOWS
  LogicalAddress rc;
#else
  int i;
  void *addr;
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  /* Retry a few times if the kernel won't map the pages at the
     requested (fixed) address. */
  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;

      /* Report errno before calling Fatal(), which does not return. */
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr != NULL) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if (!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP, nbytes, addr);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* read() returns 0 at EOF and -1 on error; count must be signed
       for this check to catch either case. */
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}




protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for (p = AllProtectedAreas; p; p = p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree, 
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  /* Nothing needed to change: the mapped region already covers the
     requested free space. */
  return true;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);
    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  Condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area.
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}