source: trunk/source/lisp-kernel/memory.c @ 11636

Last change on this file since 11636 was 11636, checked in by gb, 11 years ago

Revert back to previous ReserveMemoryForHeap() behavior (return NULL
on failure.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.7 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */
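
  /*
    Editorial sketch (not compiled in): one plausible way to implement
    address_unmapped_p() for Linux by parsing /proc/self/maps, as the
    comment above suggests.  The "%lx-%lx" line format is an assumption
    about the /proc/<pid>/maps layout; treat this as an illustration,
    not a tested part of the kernel.
  */
#if 0
static Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *maps = fopen("/proc/self/maps", "r");
  char line[256];
  unsigned long start, end, lo = (unsigned long)addr, hi = lo + len;
  Boolean unmapped = true;

  if (maps == NULL) {
    return false;               /* can't tell, so claim "mapped" */
  }
  while (fgets(line, sizeof(line), maps) != NULL) {
    /* each line starts with "start-end", e.g. "7f3c0000-7f3d0000 r-xp ..." */
    if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
      if ((start < hi) && (end > lo)) { /* an existing mapping overlaps */
        unmapped = false;
        break;
      }
    }
  }
  fclose(maps);
  return unmapped;
}
#endif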

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
  LogicalAddress rc;
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i, err;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      /* log errno before Fatal(), which does not return */
      fprintf(dbgout, "errno = %d\n", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}

int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  /* Can't MEM_RELEASE here because we only want to free a chunk */
  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  signed_natural count;         /* read() can return -1 on error */
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* stop on EOF or error before adding a negative count to total */
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first delta bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding delta bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}
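
/*
  Editorial note: a worked example of the newlimit computation above,
  under the assumption that align_to_power_of_2(n, k) rounds n up to a
  multiple of 2^k and that heap_segment_size is 128KB
  (log2_heap_segment_size = 17; both values are illustrative).  If
  newfree - lowptr is 300KB and free_space_size is 100KB, the 400KB
  request rounds up to 512KB, so newlimit = lowptr + 512KB; whether the
  mapped heap then grows or shrinks depends on where a->high currently
  sits.
*/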

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/
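
/*
  Editorial note: as used by add_area_holding_area_lock() below and by
  find_area_forward()/find_area_backward(), all_areas appears to be the
  header node of a circular, doubly-linked list of areas (linked through
  succ/pred) kept sorted by descending area_code.  This picture is
  inferred from the surrounding code; the authoritative definitions are
  in the kernel's area declarations.
*/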

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.

*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}
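
/*
  Editorial note: a worked example of the new_markbits arithmetic above,
  assuming 64-bit words (nbits_in_word = 64, bitmap_shift = 6; both are
  illustrative assumptions).  If new_tenured_dnodes is 1,000,000, then
  new_markbits = refbits + ((1000000 + 63) >> 6) = refbits + 15625
  words: one bit per dnode, rounded up to a whole word.
*/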



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

void
release_readonly_area()
{
  area *a = readonly_area;
  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}