source: branches/win64/lisp-kernel/memory.c @ 8956

Last change on this file since 8956 was 8956, checked in by gb, 12 years ago

Define DEBUG_MEMORY, suppress debugging progress messages unless it's
non-zero.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.4 KB
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include "Threads.h"
21#include <ctype.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <stddef.h>
25#include <string.h>
26#include <stdarg.h>
27#include <errno.h>
28#include <stdio.h>
29#include <unistd.h>
30#ifdef LINUX
31#include <strings.h>
32#include <fpu_control.h>
33#include <linux/prctl.h>
34#endif
35
36#ifndef WINDOWS
37#include <sys/mman.h>
38#endif
39
40#define DEBUG_MEMORY 0
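/* When DEBUG_MEMORY is non-zero, the mapping primitives below
   (ReserveMemoryForHeap, CommitMemory, MapMemory, etc.) report their
   arguments on stderr from their "#if DEBUG_MEMORY" blocks. */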
41
42void
43allocation_failure(Boolean pointerp, natural size)
44{
45  char buf[64];
46  sprintf(buf, "Can't allocate %s of size %Id bytes.", pointerp ? "pointer" : "handle", size);
47  Fatal(":   Kernel memory allocation failure.  ", buf);
48}
49
50void
51fatal_oserr(StringPtr param, OSErr err)
52{
53  char buf[64];
54  sprintf(buf," - operating system error %d.", err);
55  Fatal(param, buf);
56}
57
58
59Ptr
60allocate(natural size)
61{
62  return (Ptr) malloc(size);
63}
64
65void
66deallocate(Ptr p)
67{
68  free((void *)p);
69}
70
71Ptr
72zalloc(natural size)
73{
74  Ptr p = allocate(size);
75  if (p != NULL) {
76    memset(p, 0, size);
77  }
78  return p;
79}
80
81#ifdef DARWIN
82#if WORD_SIZE == 64
83#define vm_region vm_region_64
84#endif
85
86/*
87  Check to see if the specified address is unmapped by trying to get
88  information about the mapped address at or beyond the target.  If
89  the difference between the target address and the next mapped address
90  is >= len, we can safely mmap len bytes at addr.
91*/
92Boolean
93address_unmapped_p(char *addr, natural len)
94{
95  vm_address_t vm_addr = (vm_address_t)addr;
96  vm_size_t vm_size;
97#if WORD_SIZE == 64
98  vm_region_basic_info_data_64_t vm_info;
99#else
100  vm_region_basic_info_data_t vm_info;
101#endif
102#if WORD_SIZE == 64
103  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
104#else
105  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
106#endif
107  mach_port_t vm_object_name = (mach_port_t) 0;
108  kern_return_t kret;
109
110  kret = vm_region(mach_task_self(),
111                   &vm_addr,
112                   &vm_size,
113#if WORD_SIZE == 64
114                   VM_REGION_BASIC_INFO_64,
115#else
116                   VM_REGION_BASIC_INFO,
117#endif
118                   (vm_region_info_t)&vm_info,
119                   &vm_info_size,
120                   &vm_object_name);
121  if (kret != KERN_SUCCESS) {
122    return false;
123  }
124
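  /* vm_region() sets vm_addr to the start of the region containing the
     queried address or, if that address is unmapped, to the start of the
     next region above it.  If that start is at or beyond addr+len, nothing
     is mapped anywhere in [addr, addr+len). */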
125  return vm_addr >= (vm_address_t)(addr+len);
126}
127#endif
128
129
130  /*
131    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
132    likely to reside near the beginning of an unmapped block of memory
133    that's at least 1GB in size.  We'd like to load the heap image's
134    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
135    that'd allow us to file-map those sections (and would enable us to
136    avoid having to relocate references in the data sections.)
137
138    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
139    by creating an anonymous mapping with mmap().
140
141    If we try to insist that mmap() map a 1GB block at
142    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
143    mmap() will gleefully clobber any mapped memory that's already
144    there.  (That region's empty at this writing, but some future
145    version of the OS might decide to put something there.)
146
147    If we don't specify MAP_FIXED, mmap() is free to treat the address
148    we give it as a hint; Linux seems to accept the hint if doing so
149    wouldn't cause a problem.  Naturally, that behavior's too useful
150    for Darwin (or perhaps too inconvenient for it): it'll often
151    return another address, even if the hint would have worked fine.
152
153    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
154    would conflict with anything.  Until we discover a need to do
155    otherwise, we'll assume that if Linux's mmap() fails to take the
156    hint, it's because of a legitimate conflict.
157
158    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
159    to implement an address_unmapped_p() for Linux.
160  */
161
162LogicalAddress
163ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
164{
165  LogicalAddress start;
166  Boolean fixed_map_ok = false;
167#ifdef DARWIN
168  fixed_map_ok = address_unmapped_p(want,totalsize);
169#endif
170#ifdef SOLARIS
171  fixed_map_ok = true;
172#endif
173  raise_limit();
174#ifdef WINDOWS
175  start = VirtualAlloc((void *)want,
176                       totalsize + heap_segment_size,
177                       MEM_RESERVE,
178                       PAGE_NOACCESS);
179  if (!start) {
180    fprintf(stderr, "Can't get desired heap address at 0x%Ix\n", want);
181    start = VirtualAlloc(0,
182                         totalsize + heap_segment_size,
183                         MEM_RESERVE,
184                         PAGE_NOACCESS);
185    if (!start) {
186      wperror("VirtualAlloc");
187      return NULL;
188    }
189  }
190#else
191  start = mmap((void *)want,
192               totalsize + heap_segment_size,
193               PROT_NONE,
194               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
195               -1,
196               0);
197  if (start == MAP_FAILED) {
198    perror("Initial mmap");
199    return NULL;
200  }
201
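  /* The kernel ignored the hint: release the mapping it chose, round its
     address up to the next heap_segment_size boundary, and retry there with
     MAP_FIXED.  The mask works because heap_segment_size is a power of two;
     for example, if the segment size were 0x10000, ((0x12345 + 0xffff) &
     ~0xffff) == 0x20000.  The extra heap_segment_size requested above is
     what leaves room for this realignment. */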
202  if (start != want) {
203    munmap(start, totalsize+heap_segment_size);
204    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
205    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
206      return NULL;
207    }
208  }
209  mprotect(start, totalsize, PROT_NONE);
210#endif
211#if DEBUG_MEMORY
212  fprintf(stderr, "Reserving heap at 0x%Ix, size 0x%Ix\n", start, totalsize);
213#endif
214  return start;
215}
216
217int
218CommitMemory (LogicalAddress start, natural len) {
219  LogicalAddress rc;
220#if DEBUG_MEMORY
221  fprintf(stderr, "Committing memory at 0x%Ix, size 0x%Ix\n", start, len);
222#endif
223#ifdef WINDOWS
224  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
225  if (!rc) {
226    wperror("CommitMemory VirtualAlloc");
227    return false;
228  }
229  return true;
230#else
231  int i, err;
232  void *addr;
233
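  /* Try up to three times to map [start, start+len) read/write/execute on
     top of the reserved PROT_NONE range.  With MAP_FIXED we expect either
     start or MAP_FAILED back; if we get anything else, map PROT_NONE pages
     over whatever was returned and retry. */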
234  for (i = 0; i < 3; i++) {
235    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
236    if (addr == start) {
237      return true;
238    } else {
239      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
240    }
241  }
242  return false;
243#endif
244}
245
246void
247UnCommitMemory (LogicalAddress start, natural len) {
248#if DEBUG_MEMORY
249  fprintf(stderr, "Uncommitting memory at 0x%Ix, size 0x%Ix\n", start, len);
250#endif
251#ifdef WINDOWS
252  int rc = VirtualFree(start, len, MEM_DECOMMIT);
253  if (!rc) {
254    wperror("UnCommitMemory VirtualFree");
255    Fatal("VirtualFree error", "");
256    return;
257  }
258#else
259  if (len) {
260    madvise(start, len, MADV_DONTNEED);
261    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
262        != start) {
263      int err = errno;
264      fprintf(stderr, "errno = %d\n", err);
265      Fatal("mmap error", "");
266    }
267  }
268#endif
269}
270
271
272LogicalAddress
273MapMemory(LogicalAddress addr, natural nbytes, int protection)
274{
275#if DEBUG_MEMORY
276  fprintf(stderr, "Mapping memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
277#endif
278#ifdef WINDOWS
279  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
280#else
281  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
282#endif
283}
284
285LogicalAddress
286MapMemoryForStack(natural nbytes)
287{
288#if DEBUG_MEMORY
289  fprintf(stderr, "Mapping stack of size 0x%Ix\n", nbytes);
290#endif
291#ifdef WINDOWS
292  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
293#else
294  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
295#endif
296}
297
298int
299UnMapMemory(LogicalAddress addr, natural nbytes)
300{
301#if DEBUG_MEMORY
302  fprintf(stderr, "Unmapping memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
303#endif
304#ifdef WINDOWS
305  /* Can't MEM_RELEASE here because we only want to free a chunk */
306  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
307#else
308  return munmap(addr, nbytes);
309#endif
310}
311
312int
313ProtectMemory(LogicalAddress addr, natural nbytes)
314{
315#if DEBUG_MEMORY
316  fprintf(stderr, "Protecting memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
317#endif
318#ifdef WINDOWS
319  DWORD oldProtect;
320  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);
321 
322  if(!status) {
323    wperror("ProtectMemory VirtualProtect");
324    Bug(NULL, "couldn't protect %Id bytes at %Ix, status = %d", nbytes, addr, status);
325  }
326  return status;
327#else
328  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);
329 
330  if (status) {
331    status = errno;
332    Bug(NULL, "couldn't protect %Id bytes at %Ix, errno = %d", nbytes, addr, status);
333  }
334  return status;
335#endif
336}
337
338int
339UnProtectMemory(LogicalAddress addr, natural nbytes)
340{
341#if DEBUG_MEMORY
342  fprintf(stderr, "Unprotecting memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
343#endif
344#ifdef WINDOWS
345  DWORD oldProtect;
346  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
347#else
348  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
349#endif
350}
351
352int
353MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) {
354#ifdef WINDOWS
355#if 0
356  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
357  HANDLE hFile, hFileMapping;
358  LPVOID rc;
359  DWORD desiredAccess;
360
361  if (permissions == MEMPROTECT_RWX) {
362    permissions |= PAGE_WRITECOPY;
363    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
364  } else {
365    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
366  }
367
368  hFile = _get_osfhandle(fd);
369  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
370                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);
371 
372  if (!hFileMapping) {
373    wperror("CreateFileMapping");
374    return false;
375  }
376
377  rc = MapViewOfFileEx(hFileMapping,
378                       desiredAccess,
379                       (pos >> 32),
380                       (pos & 0xffffffff),
381                       nbytes,
382                       addr);
383#else
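  /* Windows fallback: rather than file-mapping the image section, commit
     anonymous pages at addr and read the bytes in by hand, restoring the
     original file offset afterwards.  Slower, but it avoids the alignment
     and address-space-reservation issues mentioned above. */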
384  ssize_t count;
385  size_t total = 0, opos;
386
387  opos = lseek(fd, 0, SEEK_CUR);
388  CommitMemory(addr, nbytes);
389  lseek(fd, pos, SEEK_SET);
390
391  while (total < nbytes) {
392    count = read(fd, addr + total, nbytes - total);
393    total += count;
394    // fprintf(stderr, "read %Id bytes, for a total of %Id out of %Id so far\n", count, total, nbytes);
395    if (!(count > 0))
396      return false;
397  }
398
399  lseek(fd, opos, SEEK_SET);
400
401  return true;
402#endif
403#else
404  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
405#endif
406}
407
408void
409unprotect_area(protected_area_ptr p)
410{
411  BytePtr start = p->start;
412  natural nprot = p->nprot;
413 
414  if (nprot) {
415    UnProtectMemory(start, nprot);
416    p->nprot = 0;
417  }
418}
419
420protected_area_ptr
421new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
422{
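  /* protsize is the number of bytes protect_area() will protect starting
     at 'start'; nprot records how many bytes are protected right now
     (0 while the area is unprotected). */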
423  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));
424 
425  if (p == NULL) return NULL;
426  p->protsize = protsize;
427  p->nprot = 0;
428  p->start = start;
429  p->end = end;
430  p->why = reason;
431  p->next = AllProtectedAreas;
432
433  AllProtectedAreas = p;
434  if (now) {
435    protect_area(p);
436  }
437 
438  return p;
439}
440
441/*
442  Un-protect the first 'delta' bytes of the specified area.
443  Note that this may cause the area to be empty.
444*/
445void
446unprotect_area_prefix(protected_area_ptr area, size_t delta)
447{
448  unprotect_area(area);
449  area->start += delta;
450  if ((area->start + area->protsize) <= area->end) {
451    protect_area(area);
452  }
453}
454
455
456/*
457  Extend the protected area, causing the preceding 'delta' bytes
458  to be included and protected.
459*/
460void
461protect_area_prefix(protected_area_ptr area, size_t delta)
462{
463  unprotect_area(area);
464  area->start -= delta;
465  protect_area(area);
466}
467
468protected_area_ptr
469AllProtectedAreas = NULL;
470
471
472/*
473  This does a linear search.  Areas aren't created all that often;
474  if there get to be very many of them, some sort of tree search
475  might be justified.
476*/
477
478protected_area_ptr
479find_protected_area(BytePtr addr)
480{
481  protected_area* p;
482 
483  for(p = AllProtectedAreas; p; p=p->next) {
484    if ((p->start <= addr) && (p->end > addr)) {
485      return p;
486    }
487  }
488  return NULL;
489}
490
491
492void
493zero_memory_range(BytePtr start, BytePtr end)
494{
495#ifdef WINDOWS
496  ZeroMemory(start,end-start);
497#else
498  bzero(start,(size_t)(end-start));
499#endif
500}
501
502
503 
504
505/*
506   Grow or shrink the dynamic area.  Or maybe not.
507   Whether or not the end of (mapped space in) the heap changes,
508   ensure that everything between the freeptr and the heap end
509   is mapped and read/write.  (It'll incidentally be zeroed.)
510*/
511Boolean
512resize_dynamic_heap(BytePtr newfree, 
513                    natural free_space_size)
514{
515  extern int page_size;
516  area *a = active_dynamic_area;
517  BytePtr newlimit, protptr, zptr;
518  int psize = page_size;
519  if (free_space_size) {
520    BytePtr lowptr = a->active;
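    /* The mapped heap must extend at least free_space_size bytes past
       newfree; round that requirement up to a whole number of heap
       segments, measured from the current active pointer. */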
521    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
522                                            log2_heap_segment_size);
523    if (newlimit > a->high) {
524      return grow_dynamic_area(newlimit-a->high);
525    } else if ((lowptr + free_space_size) < a->high) {
526      shrink_dynamic_area(a->high-newlimit);
527      return true;
528    }
529  }
  /* Nothing needed to change. */
  return true;
530}
531
532void
533protect_area(protected_area_ptr p)
534{
535  BytePtr start = p->start;
536  natural n = p->protsize;
537
538  if (n && ! p->nprot) {
539    ProtectMemory(start, n);
540    p->nprot = n;
541  }
542}
543
544
545void
546zero_page(BytePtr start)
547{
548  extern int page_size;
549#ifdef PPC
550  extern void zero_cache_lines(BytePtr, size_t, size_t);
551  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
552#else
553  memset(start, 0, page_size);
554#endif
555}
556
557/* area management */
558
559
560area *
561new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
562{
563  area *a = (area *) (zalloc(sizeof(area)));
564  if (a) {
565    natural ndnodes = area_dnode(highaddr, lowaddr);
566    a->low = lowaddr;
567    a->high = highaddr;
568    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
569    a->code = code;
570    a->ndnodes = ndnodes;
571    /* Caller must allocate markbits when allocating heap ! */
572   
573  }
574  return a;
575}
576
577static area *
578add_area_before(area *new_area, area *before)
579{
580  area *before_before = before->pred;
581
582  new_area->pred = before_before;
583  new_area->succ = before;
584  before_before->succ = new_area;
585  before->pred = new_area;
586  return new_area;
587}
588
589/*
590  The active dynamic area comes first.
591  Static areas follow dynamic areas.
592  Readonly areas follow static areas.
593  Stack areas come last.
594*/
595
596/*
597  If we already own the area_lock (or during initialization), it's safe
598  to add an area.
599*/
600
601
602void
603add_area_holding_area_lock(area *new_area)
604{
605  area *that = all_areas;
606  int
607    thiscode = (int)(new_area->code),
608    thatcode;
609
610  /* Cdr down the linked list */
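  /* all_areas is a circular, doubly-linked list kept in descending order
     of area code; stop at the first area whose code is not greater than
     the new area's code and insert the new area in front of it. */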
611  do {
612    that = that->succ;
613    thatcode = (int)(that->code);
614  } while (thiscode < thatcode);
615  add_area_before(new_area, that);
616}
617
618/*
619  In general, we need to own the area lock before adding an area.
620*/
621void
622add_area(area *new_area, TCR *tcr)
623{
624  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
625  add_area_holding_area_lock(new_area);
626  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
627} 
628
629/*
630  Search areas "forward" from the header's successor, until
631  an area containing ADDR is found or an area with code < MINCODE
632  is encountered.
633  This walks the area list visiting heaps (dynamic, then static)
634  first, then stacks.
635
636*/
637static area *
638find_area_forward(BytePtr addr, area_code mincode)
639{
640  area *p, *header = all_areas;
641
642  for (p = header->succ; p != header; p = p->succ) {
643    area_code pcode = p->code;
644    if (pcode < mincode) {
645      return NULL;
646    }
647    if (pcode >= AREA_READONLY) {
648      if ((addr >= p->low) &&
649          (addr < p->active)) {
650        return p;
651      }
652    } else {
653      if ((addr >= p->active) &&
654          (addr < p->high)) {
655        return p;
656      }
657    }
658  }
659  return NULL;
660}
661
662static area *
663find_area_backward(BytePtr addr, area_code maxcode)
664{
665  area *p, *header = all_areas;
666
667  for (p = header->pred; p != header; p = p->pred) {
668    area_code pcode = p->code;
669
670    if (pcode > maxcode) {
671      return NULL;
672    }
673    if (pcode >= AREA_READONLY) {
674      if ((addr >= p->low) &&
675          (addr < p->active)) {
676        return p;
677      }
678    } else {
679      if ((addr >= p->active) &&
680          (addr < p->high)) {
681        return p;
682      }
683    }
684  }
685  return NULL;
686}
687
688area *
689area_containing(BytePtr addr)
690{
691  return find_area_forward(addr, AREA_VOID);
692}
693
694area *
695heap_area_containing(BytePtr addr)
696{
697  return find_area_forward(addr, AREA_READONLY);
698}
699
700area *
701stack_area_containing(BytePtr addr)
702{
703  return find_area_backward(addr, AREA_TSTACK);
704}
705
706/*
707  Make everything "younger" than the start of the target area
708  belong to that area; all younger areas will become empty, and
709  the dynamic area will have to lose some of its markbits (they
710  get zeroed and become part of the tenured area's refbits.)
711
712  The active dynamic area must have been "normalized" (e.g., its
713  active pointer must match the free pointer) before this is called.
714
715  If the target area is 'tenured_area' (the oldest ephemeral generation),
716  zero its refbits and update OLDEST_EPHEMERAL.
717
718*/
719
720void
721tenure_to_area(area *target)
722{
723  area *a = active_dynamic_area, *child;
724  BytePtr
725    curfree = a->active,
726    target_low = target->low,
727    tenured_low = tenured_area->low;
728  natural
729    dynamic_dnodes = area_dnode(curfree, a->low),
730    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
731  bitvector
732    refbits = tenured_area->refbits,
733    markbits = a->markbits,
734    new_markbits;
735
736  target->high = target->active = curfree;
737  target->ndnodes = area_dnode(curfree, target_low);
738
739  for (child = target->younger; child != a; child = child->younger) {
740    child->high = child->low = child->active = curfree;
741    child->ndnodes = 0;
742  }
743
744  a->low = curfree;
745  a->ndnodes = area_dnode(a->high, curfree);
746
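  /* One bit per dnode: round the tenured dnode count up to a whole number
     of bitmap words; the dynamic area's markbits begin just past the
     refbit words that now cover the tenured area. */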
747  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);
748 
749  if (target == tenured_area) {
750    zero_bits(refbits, new_tenured_dnodes);
751    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
752  } else {
753    /* Need more (zeroed) refbits & fewer markbits */
754    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
755  }
756   
757  a->markbits = new_markbits;
758  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
759}
760
761
762
763/*
764  Make everything younger than the oldest byte in 'from' belong to
765  the youngest generation.  If 'from' is 'tenured_area', this means
766  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
767  to 0 to indicate this.)
768 
769  Some tenured_area refbits become dynamic area markbits in the process;
770  it's not necessary to zero them, since the GC will do that.
771*/
772
773void
774untenure_from_area(area *from)
775{
776  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
777    area *a = active_dynamic_area, *child;
778    BytePtr curlow = from->low;
779    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);
780   
781    for (child = from; child != a; child = child->younger) {
782      child->low = child->active = child->high = curlow;
783      child->ndnodes = 0;
784    }
785   
786    a->low = curlow;
787    a->ndnodes = area_dnode(a->high, curlow);
788   
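    /* As in tenure_to_area(): the dynamic area's markbits begin just past
       the refbit words that still cover tenured dnodes. */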
789    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
790    if (from == tenured_area) {
791      /* Everything's in the dynamic area */
792      lisp_global(OLDEST_EPHEMERAL) = 0;
793      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
794
795    }
796  }
797}
798
799
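/*
  Turn the ephemeral GC on or off.  Activating links the dynamic area to
  the older generations and tenures everything currently allocated;
  deactivating untenures everything, so that nothing is ephemeral.
  Returns the resulting EGC state.
*/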
800Boolean
801egc_control(Boolean activate, BytePtr curfree)
802{
803  area *a = active_dynamic_area;
804  Boolean egc_is_active = (a->older != NULL);
805
806  if (activate != egc_is_active) {
807    if (curfree != NULL) {
808      a->active = curfree;
809    }
810    if (activate) {
811      LispObj *heap_start = ptr_from_lispobj(lisp_global(HEAP_START));
812
813      a->older = g1_area;
814      tenure_to_area(tenured_area);
815      egc_is_active = true;
816    } else {
817      untenure_from_area(tenured_area);
818      a->older = NULL;
819      egc_is_active = false;
820    }
821  }
822  return egc_is_active;
823}
824
825/*
826  Lisp ff-calls this; it needs to set the active area's active pointer
827  correctly.
828*/
829
830Boolean
831lisp_egc_control(Boolean activate)
832{
833  area *a = active_dynamic_area;
834  return egc_control(activate, (BytePtr) a->active);
835}
836
837
838
839 
840/* Splice the protected_area_ptr out of the list and dispose of it. */
841void
842delete_protected_area(protected_area_ptr p)
843{
844  BytePtr start = p->start;
845  int nbytes = p->nprot;
846  protected_area_ptr *prev = &AllProtectedAreas, q;
847
848  if (nbytes) {
849    UnProtectMemory((LogicalAddress)start, nbytes);
850  }
851 
852  while ((q = *prev) != NULL) {
853    if (p == q) {
854      *prev = p->next;
855      break;
856    } else {
857      prev = &(q->next);
858    }
859  }
860
861  deallocate((Ptr)p);
862}
863
864
865
866
867/*
868  Unlink the area from all_areas.
869  Unprotect and dispose of any hard/soft protected_areas.
870  If the area has a handle, dispose of that as well.
871  */
872
873void
874condemn_area_holding_area_lock(area *a)
875{
876  void free_stack(void *);
877  area *prev = a->pred, *next = a->succ;
878  Ptr h = a->h;
879  protected_area_ptr p;
880
881  prev->succ = next;
882  next->pred = prev;
883
884  p = a->softprot;
885  if (p) delete_protected_area(p);
886
887  p = a->hardprot;
888
889  if (p) delete_protected_area(p);
890
891  if (h) free_stack(h);
892  deallocate((Ptr)a);
893}
894
895
896
897void
898condemn_area(area *a, TCR *tcr)
899{
900  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
901  condemn_area_holding_area_lock(a);
902  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
903}
904
905
906
907
908/*
909  condemn an area and all the other areas that can be reached
910  via the area.older & area.younger links.
911  This is the function in the ppc::kernel-import-condemn-area slot,
912  called by free-stack-area
913  */
914void
915condemn_area_chain(area *a, TCR *tcr)
916{
917  area *older;
918
919  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
920
921  for (; a->younger; a = a->younger) ;
922  for (;a;) {
923    older = a->older;
924    condemn_area_holding_area_lock(a);
925    a = older;
926  }
927  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
928}
929
930void
931release_readonly_area()
932{
933  area *a = readonly_area;
934  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
935  a->active = a->low;
936  a->ndnodes = 0;
937  pure_space_active = pure_space_start;
938}