source: trunk/source/lisp-kernel/memory.c @ 11986

Last change on this file since 11986 was 11986, checked in by gb, 10 years ago

suppress debugging msg on Windows

/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0
void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  vm_region_basic_info_data_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */
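
/*
  Illustrative sketch only (not part of the kernel; the function name
  and details below are hypothetical): if Linux ever stops taking
  mmap() hints, an address_unmapped_p() analogue for Linux could parse
  /proc/self/maps, whose lines begin with "start-end" hex addresses,
  and report whether any existing mapping overlaps the requested range.
*/
#if 0
static Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *f = fopen("/proc/self/maps", "r");
  char line[256];
  unsigned long start, end;
  unsigned long lo = (unsigned long)addr, hi = lo + len;
  Boolean unmapped = true;

  if (f == NULL) {
    /* Can't tell; claim the range is in use so the caller avoids MAP_FIXED. */
    return false;
  }
  while (fgets(line, sizeof(line), f) != NULL) {
    if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
      if ((start < hi) && (end > lo)) {
        /* An existing mapping overlaps [lo, hi). */
        unmapped = false;
        break;
      }
    }
  }
  fclose(f);
  return unmapped;
}
#endif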

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
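    /*
      Worked example of the round-up below (the segment size used here
      is made up purely for illustration): rounding the address mmap()
      actually returned up to the next heap_segment_size boundary uses
      the usual power-of-two idiom (p + size - 1) & ~(size - 1).  With
      a hypothetical segment size of 0x20000,

        (0x10012345 + 0x1ffff) & ~0x1ffff  =>  0x10020000

      i.e., the first segment-aligned address at or above the value
      mmap() handed back.
    */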
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if (mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
  LogicalAddress rc;
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  /* Try up to three times to map the range read/write/execute at
     exactly 'start'; if the result isn't 'start', remap whatever came
     back as PROT_NONE and try again. */
  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      /* Report errno before dying; Fatal() doesn't return. */
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}

int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  /* Can't MEM_RELEASE here because we only want to free a chunk */
  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if (!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* Bail out on EOF or error before counting the (possibly negative)
       result toward the total. */
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first 'delta' bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding 'delta' bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for (p = AllProtectedAreas; p; p = p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}


/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  /* The mapped size didn't need to change. */
  return true;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}


/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}



/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}



/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}



/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (; a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

void
release_readonly_area()
{
  area *a = readonly_area;
  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}