source: branches/win64/lisp-kernel/memory.c @ 8696

Last change on this file since 8696 was 8696, checked in by andreas, 13 years ago

Separation of page management functions, implementation for Windows.
Still buggy.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.1 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License , known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %d bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, int totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      wperror("VirtualAlloc");
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    perror("Initial mmap");
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
  return start;
}
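
/* Illustrative sketch (not part of this revision): how the reservation
   and commit primitives defined here are typically used together.  The
   heap is reserved up front as one PROT_NONE / MEM_RESERVE mapping and
   pages are made usable later with CommitMemory().  The wrapper and its
   "first_segment_len" parameter are hypothetical; error handling beyond
   a NULL check is omitted. */
#if 0
static LogicalAddress
example_reserve_then_commit(LogicalAddress want, int totalsize,
                            int first_segment_len)
{
  LogicalAddress base = ReserveMemoryForHeap(want, totalsize);

  if (base == NULL) {
    return NULL;                /* reservation failed */
  }
  /* Commit only the first segment now; further segments are committed
     as the free pointer advances. */
  if (!CommitMemory(base, first_segment_len)) {
    return NULL;
  }
  return base;
}
#endif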

int
CommitMemory (LogicalAddress start, int len) {
  LogicalAddress rc;
#ifdef WINDOWS
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i, err;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, int len) {
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      Fatal("mmap error", "");
      fprintf(stderr, "errno = %d", err);
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, int nbytes, int protection)
{
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
#endif
}

LogicalAddress
MapMemoryForStack(int nbytes)
{
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}

int
UnMapMemory(LogicalAddress addr, int nbytes)
{
#ifdef WINDOWS
  /* Can't MEM_RELEASE here because we only want to free a chunk */
  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, int nbytes)
{
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect %d bytes at %x, errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %d bytes at %x, errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, int nbytes)
{
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}
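
/* Illustrative sketch (not part of this revision): installing a soft
   guard region on a stack area with new_protected_area().  The
   "softprot" field is the one consulted by condemn_area_holding_area_lock()
   below; the protection-kind constant kSPsoftguard is assumed to come
   from the kernel headers, and the wrapper itself is hypothetical.  A
   fault handler would later call unprotect_area() when the guard is hit. */
#if 0
static void
example_install_soft_guard(area *a, natural guard_size)
{
  /* Protect guard_size bytes at the low end of the area immediately
     (now == true) and remember the descriptor on the area. */
  a->softprot = new_protected_area(a->low, a->low + guard_size,
                                   kSPsoftguard, guard_size, true);
}
#endif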

/*
  Un-protect the first DELTA bytes in the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding DELTA bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree, 
                    natural free_space_size)
{
  extern int page_size;
  area *a = active_dynamic_area;
  BytePtr newlimit, protptr, zptr;
  int psize = page_size;
  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}
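
/* Illustrative sketch (not part of this revision): creating an area
   over freshly mapped stack memory and linking it into all_areas with
   the functions above.  AREA_TSTACK is one of the area codes used
   elsewhere in this file; the wrapper itself is hypothetical and error
   checking is omitted for brevity. */
#if 0
static area *
example_make_tstack_area(natural nbytes, TCR *tcr)
{
  BytePtr base = (BytePtr) MapMemoryForStack((int) nbytes);
  area *a = new_area(base, base + nbytes, AREA_TSTACK);

  if (a) {
    add_area(a, tcr);           /* takes and releases TCR_AREA_LOCK */
  }
  return a;
}
#endif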

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}
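
/* Illustrative sketch (not part of this revision): a fault handler
   might use the lookups above to classify a faulting address.  The
   predicate below is hypothetical. */
#if 0
static Boolean
example_addr_in_dynamic_heap_p(BytePtr addr)
{
  area *a = heap_area_containing(addr);

  return (a != NULL) && (a->code == AREA_DYNAMIC);
}
#endif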

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update YOUNGEST_EPHEMERAL.

*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      LispObj *heap_start = ptr_from_lispobj(lisp_global(HEAP_START));

      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

void
release_readonly_area()
{
  area *a = readonly_area;
  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}