source: branches/win64/lisp-kernel/memory.c @ 8790

Last change on this file since 8790 was 8790, checked in by gb, 12 years ago

Trust read() return value, now that image file is opened with O_BINARY
set.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.2 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License , known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %Id bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */
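  /*
    A minimal, illustrative sketch (not part of this kernel) of what a
    Linux address_unmapped_p() based on /proc/self/maps might look like,
    as the comment above suggests.  The function name, the 256-byte line
    buffer, and the use of stdio here are assumptions made purely for
    illustration; nothing builds or calls this.
  */
#if 0
static Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *maps = fopen("/proc/self/maps", "r");
  char line[256];
  unsigned long start, end, lo = (unsigned long)addr, hi = lo + len;
  Boolean result = true;

  if (maps == NULL) {
    return false;               /* Can't tell; assume the worst. */
  }
  while (fgets(line, sizeof(line), maps)) {
    /* Each line begins with "start-end", both in hex. */
    if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
      if ((start < hi) && (end > lo)) {
        /* An existing mapping overlaps [addr, addr+len). */
        result = false;
        break;
      }
    }
  }
  fclose(maps);
  return result;
}
#endif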

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
    fprintf(stderr, "Can't get desired heap address at 0x%Ix\n", want);
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      wperror("VirtualAlloc");
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    perror("Initial mmap");
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
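    /*
      The extra heap_segment_size we reserved above lets us round the
      address mmap actually chose up to the next heap_segment_size
      boundary and still have room for totalsize bytes there.
    */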
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
  fprintf(stderr, "Reserving heap at 0x%Ix, size 0x%Ix\n", start, totalsize);
  return start;
}

int
CommitMemory (LogicalAddress start, natural len) {
  LogicalAddress rc;
  fprintf(stderr, "Committing memory at 0x%Ix, size 0x%Ix\n", start, len);
#ifdef WINDOWS
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i, err;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, natural len) {
  fprintf(stderr, "Uncommitting memory at 0x%Ix, size 0x%Ix\n", start, len);
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("VirtualFree error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      fprintf(stderr, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
  fprintf(stderr, "Mapping memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
  fprintf(stderr, "Mapping stack of size 0x%Ix\n", nbytes);
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}

int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
  fprintf(stderr, "Unmapping memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
#ifdef WINDOWS
  /* Can't MEM_RELEASE here because we only want to free a chunk */
  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
  fprintf(stderr, "Protecting memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect %Id bytes at %Ix, errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %Id bytes at %Ix, errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
  fprintf(stderr, "Unprotecting memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) {
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  off_t opos;

  opos = lseek(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  lseek(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    total += count;
    // fprintf(stderr, "read %Id bytes, for a total of %Id out of %Id so far\n", count, total, nbytes);
    if (!(count > 0))
      return false;
  }

  lseek(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first delta bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding delta bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  extern int page_size;
  area *a = active_dynamic_area;
  BytePtr newlimit, protptr, zptr;
  int psize = page_size;
  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return true;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

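  /*
    Heap areas (code >= AREA_READONLY) keep their live data in
    [low, active); stack areas grow downward, so their live data
    lies in [active, high).
  */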
  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

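  /*
    The dynamic area's markbits live in the same bitvector as the
    tenured area's refbits: they start just past the words needed to
    cover new_tenured_dnodes dnodes (rounded up to a whole bitmap word).
  */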
  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      LispObj *heap_start = ptr_from_lispobj(lisp_global(HEAP_START));

      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}



/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}



/*
  Condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area.
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

void
release_readonly_area()
{
  area *a = readonly_area;
  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}