source: branches/win64/lisp-kernel/memory.c @ 8707

Last change on this file since 8707 was 8707, checked in by andreas, 13 years ago
  • Change type of all memory size parameters from int to natural
  • API for memory mapping files, with implementation for UNIX and a cheat for Windows
  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.2 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License , known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %lu bytes.", pointerp ? "pointer" : "handle", (unsigned long)size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */

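#if 0
/*
  A sketch (not part of the original source) of the Linux
  address_unmapped_p() mentioned above: walk /proc/self/maps and report
  whether [addr, addr+len) overlaps any existing mapping.  This assumes
  the usual "start-end perms ..." line format with hexadecimal
  addresses; error handling is deliberately minimal.
*/
Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *f = fopen("/proc/self/maps", "r");
  char line[256];
  unsigned long start, end;
  unsigned long lo = (unsigned long)addr, hi = lo + len;

  if (f == NULL) {
    return false;               /* can't tell, so be conservative */
  }
  while (fgets(line, sizeof(line), f)) {
    if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
      if ((start < hi) && (end > lo)) {
        fclose(f);
        return false;           /* overlaps an existing mapping */
      }
    }
  }
  fclose(f);
  return true;
}
#endif
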
LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      wperror("VirtualAlloc");
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    perror("Initial mmap");
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len) {
  LogicalAddress rc;
#ifdef WINDOWS
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i, err;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

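#if 0
/*
  Usage sketch (not part of the original source): reserve address space
  for the lisp heap, then commit its first segment.  IMAGE_BASE_ADDRESS
  and heap_segment_size come from the surrounding kernel sources; the
  error handling here is illustrative only.
*/
static void
example_reserve_and_commit(natural totalsize)
{
  LogicalAddress base = ReserveMemoryForHeap((LogicalAddress)IMAGE_BASE_ADDRESS,
                                             totalsize);

  if (base == NULL) {
    Fatal("Couldn't reserve space for the heap.", "");
  }
  /* Reserved pages are inaccessible until committed read/write. */
  if (!CommitMemory(base, heap_segment_size)) {
    Fatal("Couldn't commit the initial heap segment.", "");
  }
}
#endif
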
void
UnCommitMemory (LogicalAddress start, natural len) {
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      /* Report errno before Fatal(), which doesn't return. */
      fprintf(stderr, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}

int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#ifdef WINDOWS
  /* Can't MEM_RELEASE here because we only want to free a chunk */
  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect %lu bytes at %p, errno = %d", (unsigned long)nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %lu bytes at %p, errno = %d", (unsigned long)nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) {
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  size_t count;

  CommitMemory(addr, nbytes);
  lseek(fd, pos, SEEK_SET);
  count = read(fd, addr, nbytes);

  return count == nbytes;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

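#if 0
/*
  Usage sketch (not part of the original source): map part of an
  already-open heap image at an address previously obtained from
  ReserveMemoryForHeap().  The parameter names are illustrative;
  MEMPROTECT_RX is the read/execute protection used elsewhere in this
  file.
*/
static Boolean
example_map_image_section(LogicalAddress section_start,
                          natural section_offset,
                          natural section_bytes,
                          int image_fd)
{
  if (!MapFile(section_start, section_offset, section_bytes,
               MEMPROTECT_RX, image_fd)) {
    Fatal("Couldn't map heap image section.", "");
    return false;
  }
  return true;
}
#endif
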
void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first delta bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding delta bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  extern int page_size;
  area *a = active_dynamic_area;
  BytePtr newlimit, protptr, zptr;
  int psize = page_size;
  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

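#if 0
/*
  Usage sketch (not part of the original source): create an area and
  link it into all_areas in the order described above.  AREA_TSTACK and
  the bounds are illustrative; real callers live elsewhere in the
  kernel.
*/
static void
example_register_area(BytePtr lowaddr, BytePtr highaddr, TCR *tcr)
{
  area *a = new_area(lowaddr, highaddr, AREA_TSTACK);

  if (a != NULL) {
    add_area(a, tcr);           /* takes and releases TCR_AREA_LOCK */
  }
}
#endif
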
/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.

*/

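/*
  (Worked example, not part of the original source: each dnode is
  covered by one bit, so on a 64-bit build, where nbits_in_word is 64
  and bitmap_shift is 6, tenuring new_tenured_dnodes dnodes consumes
  (new_tenured_dnodes + 63) >> 6 words of refbits; the dynamic area's
  markbits pointer below advances by exactly that many words.)
*/
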
void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      LispObj *heap_start = ptr_from_lispobj(lisp_global(HEAP_START));

      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

void
release_readonly_area()
{
  area *a = readonly_area;
  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}