source: trunk/source/lisp-kernel/memory.c @ 14233

Last change on this file since 14233 was 14233, checked in by gb, 9 years ago

Don't use the (Linux-specific ?) MAP_GROWSDOWN mmap option when allocating
stacks; it doesn't do what we thought it did and using it seems to trigger
a bug in some 2.6.32 Linux kernels. See ticket:731, which this change might
fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.0 KB
1/*
2   Copyright (C) 2009 Clozure Associates
3   Copyright (C) 1994-2001 Digitool, Inc
4   This file is part of Clozure CL. 
5
6   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
7   License , known as the LLGPL and distributed with Clozure CL as the
8   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
9   which is distributed with Clozure CL as the file "LGPL".  Where these
10   conflict, the preamble takes precedence. 
11
12   Clozure CL is referenced in the preamble as the "LIBRARY."
13
14   The LLGPL is also available online at
15   http://opensource.franz.com/preamble.html
16*/
17
18#include "lisp.h"
19#include "lisp-exceptions.h"
20#include "lisp_globals.h"
21#include "threads.h"
22#include <ctype.h>
23#include <stdio.h>
24#include <stdlib.h>
25#include <stddef.h>
26#include <string.h>
27#include <stdarg.h>
28#include <errno.h>
29#include <stdio.h>
30#include <unistd.h>
31#ifdef LINUX
32#include <strings.h>
33#include <fpu_control.h>
34#include <linux/prctl.h>
35#endif
36
37#ifndef WINDOWS
38#include <sys/mman.h>
39#endif
40
41#define DEBUG_MEMORY 0
42
43void
44allocation_failure(Boolean pointerp, natural size)
45{
46  char buf[64];
47  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
48  Fatal(":   Kernel memory allocation failure.  ", buf);
49}
50
51void
52fatal_oserr(StringPtr param, OSErr err)
53{
54  char buf[64];
55  sprintf(buf," - operating system error %d.", err);
56  Fatal(param, buf);
57}
58
59void *
60lisp_malloc(size_t size)
61{
62  return malloc(size);
63}
64
65void
66lisp_free(void *p)
67{
68  free(p);
69}
70
71#ifdef DARWIN
72#if WORD_SIZE == 64
73#define vm_region vm_region_64
74#endif
75
76/*
77  Check to see if the specified address is unmapped by trying to get
78  information about the mapped address at or beyond the target.  If
79  the difference between the target address and the next mapped address
80  is >= len, we can safely mmap len bytes at addr.
81*/
82Boolean
83address_unmapped_p(char *addr, natural len)
84{
85  vm_address_t vm_addr = (vm_address_t)addr;
86  vm_size_t vm_size;
87#if WORD_SIZE == 64
88  vm_region_basic_info_data_64_t vm_info;
89#else
90  vm_region_basic_info_data_t vm_info;
91#endif
92#if WORD_SIZE == 64
93  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
94#else
95  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
96#endif
97  mach_port_t vm_object_name = (mach_port_t) 0;
98  kern_return_t kret;
99
100  kret = vm_region(mach_task_self(),
101                   &vm_addr,
102                   &vm_size,
103#if WORD_SIZE == 64
104                   VM_REGION_BASIC_INFO_64,
105#else
106                   VM_REGION_BASIC_INFO,
107#endif
108                   (vm_region_info_t)&vm_info,
109                   &vm_info_size,
110                   &vm_object_name);
111  if (kret != KERN_SUCCESS) {
112    return false;
113  }
114
115  return vm_addr >= (vm_address_t)(addr+len);
116}
117#endif
118
119
120  /*
121    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
122    likely to reside near the beginning of an unmapped block of memory
123    that's at least 1GB in size.  We'd like to load the heap image's
124    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
125    that'd allow us to file-map those sections (and would enable us to
126    avoid having to relocate references in the data sections.)
127
128    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
129    by creating an anonymous mapping with mmap().
130
131    If we try to insist that mmap() map a 1GB block at
132    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
133    mmap() will gleefully clobber any mapped memory that's already
134    there.  (That region's empty at this writing, but some future
135    version of the OS might decide to put something there.)
136
137    If we don't specify MAP_FIXED, mmap() is free to treat the address
138    we give it as a hint; Linux seems to accept the hint if doing so
139    wouldn't cause a problem.  Naturally, that behavior's too useful
140    for Darwin (or perhaps too inconvenient for it): it'll often
141    return another address, even if the hint would have worked fine.
142
143    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
144    would conflict with anything.  Until we discover a need to do
145    otherwise, we'll assume that if Linux's mmap() fails to take the
146    hint, it's because of a legitimate conflict.
147
148    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
149    to implement an address_unmapped_p() for Linux.
150  */
151
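/*
  A possible address_unmapped_p() for Linux, along the lines suggested
  above: parse /proc/self/maps and report whether [addr, addr+len)
  overlaps any existing mapping.  This is only an illustrative sketch
  (disabled here, not part of the kernel sources); it assumes the usual
  maps format of "start-end perms ..." with hexadecimal addresses, one
  region per line.
*/
#if 0
Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *f = fopen("/proc/self/maps", "r");
  char line[256];
  natural target_start = (natural)addr, target_end = target_start + len;
  Boolean result = true;

  if (f == NULL) {
    return false;               /* can't tell; be conservative */
  }
  while (fgets(line, sizeof(line), f) != NULL) {
    unsigned long long region_start, region_end;

    if (sscanf(line, "%llx-%llx", &region_start, &region_end) == 2) {
      if ((region_start < target_end) && (region_end > target_start)) {
        result = false;         /* overlaps an existing mapping */
        break;
      }
    }
  }
  fclose(f);
  return result;
}
#endif
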
152LogicalAddress
153ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
154{
155  LogicalAddress start;
156  Boolean fixed_map_ok = false;
157#ifdef DARWIN
158  fixed_map_ok = address_unmapped_p(want,totalsize);
159#endif
160#ifdef SOLARIS
161  fixed_map_ok = true;
162#endif
163  raise_limit();
164#ifdef WINDOWS
165  start = VirtualAlloc((void *)want,
166                       totalsize + heap_segment_size,
167                       MEM_RESERVE,
168                       PAGE_NOACCESS);
169  if (!start) {
170#if DEBUG_MEMORY   
171    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
172#endif
173    start = VirtualAlloc(0,
174                         totalsize + heap_segment_size,
175                         MEM_RESERVE,
176                         PAGE_NOACCESS);
177    if (!start) {
178      return NULL;
179    }
180  }
181#else
182  start = mmap((void *)want,
183               totalsize + heap_segment_size,
184               PROT_NONE,
185               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
186               -1,
187               0);
188  if (start == MAP_FAILED) {
189    return NULL;
190  }
191
192  if (start != want) {
193    munmap(start, totalsize+heap_segment_size);
194    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
195    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
196      return NULL;
197    }
198  }
199  mprotect(start, totalsize, PROT_NONE);
200#endif
201#if DEBUG_MEMORY
202  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
203#endif
204  return start;
205}
206
207int
208CommitMemory (LogicalAddress start, natural len) 
209{
210  LogicalAddress rc;
211#if DEBUG_MEMORY
212  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
213#endif
214#ifdef WINDOWS
215  if ((start < ((LogicalAddress)nil_value)) &&
216      (((LogicalAddress)nil_value) < (start+len))) {
217    /* nil area is in the executable on Windows; ensure range is
218       read-write */
219    DWORD as_if_i_care;
220    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
221      return false;
222    }
223    return true;
224  }
225  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
226  if (!rc) {
227    wperror("CommitMemory VirtualAlloc");
228    return false;
229  }
230  return true;
231#else
232  int i, err;
233  void *addr;
234
235  for (i = 0; i < 3; i++) {
236    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
237    if (addr == start) {
238      return true;
239    } else {
240      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
241    }
242  }
243  return false;
244#endif
245}
246
247void
248UnCommitMemory (LogicalAddress start, natural len) {
249#if DEBUG_MEMORY
250  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
251#endif
252#ifdef WINDOWS
253  int rc = VirtualFree(start, len, MEM_DECOMMIT);
254  if (!rc) {
255    wperror("UnCommitMemory VirtualFree");
256    Fatal("mmap error", "");
257    return;
258  }
259#else
260  if (len) {
261    madvise(start, len, MADV_DONTNEED);
262    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
263        != start) {
264      int err = errno;
265      fprintf(dbgout, "errno = %d", err);
266      Fatal("mmap error", "");
267    }
268  }
269#endif
270}
271
272
273LogicalAddress
274MapMemory(LogicalAddress addr, natural nbytes, int protection)
275{
276#if DEBUG_MEMORY
277  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
278#endif
279#ifdef WINDOWS
280  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
281#else
282  {
283    int flags = MAP_PRIVATE|MAP_ANON;
284
285    if (addr > 0) flags |= MAP_FIXED;
286    return mmap(addr, nbytes, protection, flags, -1, 0);
287  }
288#endif
289}
290
291LogicalAddress
292MapMemoryForStack(natural nbytes)
293{
294#if DEBUG_MEMORY
295  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
296#endif
297#ifdef WINDOWS
298  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
299#else
300  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
301#endif
302}
303
304
305/* Cause the mapped memory region at ADDR to become completely unmapped.
306   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
307   and NBYTES should be the size of the mapped region at that address. */
308int
309UnMapMemory(LogicalAddress addr, natural nbytes)
310{
311#if DEBUG_MEMORY
312  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
313#endif
314#ifdef WINDOWS
315  return !VirtualFree(addr, 0, MEM_RELEASE);
316#else
317  return munmap(addr, nbytes);
318#endif
319}
320
321int
322ProtectMemory(LogicalAddress addr, natural nbytes)
323{
324#if DEBUG_MEMORY
325  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
326#endif
327#ifdef WINDOWS
328  DWORD oldProtect;
329  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);
330 
331  if(!status) {
332    wperror("ProtectMemory VirtualProtect");
333    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
334  }
335  return status;
336#else
337  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);
338 
339  if (status) {
340    status = errno;
341   
342    if (status == ENOMEM) {
343      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
344      if (mapaddr != MAP_FAILED) {
345        return 0;
346      }
347    }
348    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
349  }
350  return status;
351#endif
352}
353
354int
355UnProtectMemory(LogicalAddress addr, natural nbytes)
356{
357#if DEBUG_MEMORY
358  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
359#endif
360#ifdef WINDOWS
361  DWORD oldProtect;
362  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
363#else
364  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
365#endif
366}
367
368int
369MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) 
370{
371#ifdef WINDOWS
372#if 0
373  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
374  HANDLE hFile, hFileMapping;
375  LPVOID rc;
376  DWORD desiredAccess;
377
378  if (permissions == MEMPROTECT_RWX) {
379    permissions |= PAGE_WRITECOPY;
380    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
381  } else {
382    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
383  }
384
385  hFile = _get_osfhandle(fd);
386  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
387                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);
388 
389  if (!hFileMapping) {
390    wperror("CreateFileMapping");
391    return false;
392  }
393
394  rc = MapViewOfFileEx(hFileMapping,
395                       desiredAccess,
396                       (pos >> 32),
397                       (pos & 0xffffffff),
398                       nbytes,
399                       addr);
400#else
401  size_t count, total = 0;
402  size_t opos;
403
404  opos = LSEEK(fd, 0, SEEK_CUR);
405  CommitMemory(addr, nbytes);
406  LSEEK(fd, pos, SEEK_SET);
407
408  while (total < nbytes) {
409    count = read(fd, addr + total, nbytes - total);
410    total += count;
411    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
412    if (!(count > 0))
413      return false;
414  }
415
416  LSEEK(fd, opos, SEEK_SET);
417
418  return true;
419#endif
420#else
421  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
422#endif
423}
424
425void
426unprotect_area(protected_area_ptr p)
427{
428  BytePtr start = p->start;
429  natural nprot = p->nprot;
430 
431  if (nprot) {
432    UnProtectMemory(start, nprot);
433    p->nprot = 0;
434  }
435}
436
437protected_area_ptr
438new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
439{
440  protected_area_ptr p = malloc(sizeof(protected_area));
441 
442  if (p == NULL) return NULL;
443  p->protsize = protsize;
444  p->nprot = 0;
445  p->start = start;
446  p->end = end;
447  p->why = reason;
448  p->next = AllProtectedAreas;
449
450  AllProtectedAreas = p;
451  if (now) {
452    protect_area(p);
453  }
454 
455  return p;
456}
457
458
459
460
461protected_area_ptr
462AllProtectedAreas = NULL;
463
464
465/*
466  This does a linear search.  Areas aren't created all that often;
467  if there get to be very many of them, some sort of tree search
468  might be justified.
469*/
470
471protected_area_ptr
472find_protected_area(BytePtr addr)
473{
474  protected_area* p;
475 
476  for(p = AllProtectedAreas; p; p=p->next) {
477    if ((p->start <= addr) && (p->end > addr)) {
478      return p;
479    }
480  }
481  return NULL;
482}
483
484
485void
486zero_memory_range(BytePtr start, BytePtr end)
487{
488#ifdef WINDOWS
489  ZeroMemory(start,end-start);
490#else
491  bzero(start,(size_t)(end-start));
492#endif
493}
494
495
496 
497
498/*
499   Grow or shrink the dynamic area.  Or maybe not.
500   Whether or not the end of (mapped space in) the heap changes,
501   ensure that everything between the freeptr and the heap end
502   is mapped and read/write.  (It'll incidentally be zeroed.)
503*/
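/*
   For illustration only (all values assumed, and assuming that
   align_to_power_of_2(n, k) rounds n up to a multiple of 2^k): with
   128KB heap segments (log2_heap_segment_size == 17), a->active ==
   0x10000000, newfree == 0x10012345, and free_space_size == 0x8000,
   newfree-lowptr+free_space_size == 0x1A345, which rounds up to
   0x20000, so newlimit == 0x10020000; that value is then compared
   against a->high to decide whether to grow or shrink.
*/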
504Boolean
505resize_dynamic_heap(BytePtr newfree, 
506                    natural free_space_size)
507{
508  extern int page_size;
509  area *a = active_dynamic_area;
510  BytePtr newlimit, protptr, zptr;
511  int psize = page_size;
512  if (free_space_size) {
513    BytePtr lowptr = a->active;
514    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
515                                            log2_heap_segment_size);
516    if (newlimit > a->high) {
517      return grow_dynamic_area(newlimit-a->high);
518    } else if ((lowptr + free_space_size) < a->high) {
519      shrink_dynamic_area(a->high-newlimit);
520      return true;
521    }
522  }
523  return true;
}
524
525void
526protect_area(protected_area_ptr p)
527{
528  BytePtr start = p->start;
529  natural n = p->protsize;
530
531  if (n && ! p->nprot) {
532    ProtectMemory(start, n);
533#ifdef WINDOWS
534    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
535#endif
536    p->nprot = n;
537  }
538}
539
540
541void
542zero_page(BytePtr start)
543{
544  extern int page_size;
545#ifdef PPC
546  extern void zero_cache_lines(BytePtr, size_t, size_t);
547  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
548#else
549  memset(start, 0, page_size);
550#endif
551}
552
553/* area management */
554
555
556area *
557new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
558{
559  area *a = calloc(1, sizeof(area));
560  if (a) {
561    natural ndnodes = area_dnode(highaddr, lowaddr);
562    a->low = lowaddr;
563    a->high = highaddr;
564    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
565    a->code = code;
566    a->ndnodes = ndnodes;
567    /* Caller must allocate markbits when allocating heap ! */
568   
569  }
570  return a;
571}
572
573static area *
574add_area_before(area *new_area, area *before)
575{
576  area *before_before = before->pred;
577
578  new_area->pred = before_before;
579  new_area->succ = before;
580  before_before->succ = new_area;
581  before->pred = new_area;
582  return new_area;
583}
584
585/*
586  The active dynamic area comes first.
587  Static areas follow dynamic areas.
588  Stack areas follow static areas.
589  Readonly areas come last.
590*/
591
592/*
593  If we already own the area_lock (or during initialization), it's safe
594  to add an area.
595*/
596
597
598void
599add_area_holding_area_lock(area *new_area)
600{
601  area *that = all_areas;
602  int
603    thiscode = (int)(new_area->code),
604    thatcode;
605
606  /* Cdr down the linked list */
607  do {
608    that = that->succ;
609    thatcode = (int)(that->code);
610  } while (thiscode < thatcode);
611  add_area_before(new_area, that);
612}
613
614/*
615  In general, we need to own the area lock before adding an area.
616*/
617void
618add_area(area *new_area, TCR *tcr)
619{
620  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
621  add_area_holding_area_lock(new_area);
622  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
623} 
624
625/*
626  Search areas "forward" from the header's successor, until
627  an area containing ADDR is found or an area with code < MINCODE
628  is encountered.
629  This walks the area list visiting heaps (dynamic, then static)
630  first, then stacks.
631
632*/
633static area *
634find_area_forward(BytePtr addr, area_code mincode)
635{
636  area *p, *header = all_areas;
637
638  for (p = header->succ; p != header; p = p->succ) {
639    area_code pcode = p->code;
640    if (pcode < mincode) {
641      return NULL;
642    }
643    if (pcode >= AREA_READONLY) {
644      if ((addr >= p->low) &&
645          (addr < p->active)) {
646        return p;
647      }
648    } else {
649      if ((addr >= p->active) &&
650          (addr < p->high)) {
651        return p;
652      }
653    }
654  }
655  return NULL;
656}
657
658static area *
659find_area_backward(BytePtr addr, area_code maxcode)
660{
661  area *p, *header = all_areas;
662
663  for (p = header->pred; p != header; p = p->pred) {
664    area_code pcode = p->code;
665
666    if (pcode > maxcode) {
667      return NULL;
668    }
669    if (pcode >= AREA_READONLY) {
670      if ((addr >= p->low) &&
671          (addr < p->active)) {
672        return p;
673      }
674    } else {
675      if ((addr >= p->active) &&
676          (addr < p->high)) {
677        return p;
678      }
679    }
680  }
681  return NULL;
682}
683
684area *
685area_containing(BytePtr addr)
686{
687  return find_area_forward(addr, AREA_VOID);
688}
689
690area *
691heap_area_containing(BytePtr addr)
692{
693  return find_area_forward(addr, AREA_READONLY);
694}
695
696area *
697stack_area_containing(BytePtr addr)
698{
699  return find_area_backward(addr, AREA_TSTACK);
700}
701
702/*
703  Make everything "younger" than the start of the target area
704  belong to that area; all younger areas will become empty, and
705  the dynamic area will have to lose some of its markbits (they
706  get zeroed and become part of the tenured area's refbits.)
707
708  The active dynamic area must have been "normalized" (e.g., its
709  active pointer must match the free pointer) before this is called.
710
711  If the target area is 'tenured_area' (the oldest ephemeral generation),
712  zero its refbits and update OLDEST_EPHEMERAL.
713
714*/
715
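/*
   A worked example of the markbit/refbit arithmetic below (sizes assumed
   for illustration): on a 64-bit target with 16-byte dnodes, nbits_in_word
   would be 64 and bitmap_shift 6, so tenuring 1MB of dynamic space gives
   new_tenured_dnodes == 65536 and leaves new_markbits
   (65536 + 63) >> 6 == 1024 words beyond tenured_area->refbits.
*/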
716void
717tenure_to_area(area *target)
718{
719  area *a = active_dynamic_area, *child;
720  BytePtr
721    curfree = a->active,
722    target_low = target->low,
723    tenured_low = tenured_area->low;
724  natural
725    dynamic_dnodes = area_dnode(curfree, a->low),
726    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
727  bitvector
728    refbits = tenured_area->refbits,
729    markbits = a->markbits,
730    new_markbits;
731
732  target->high = target->active = curfree;
733  target->ndnodes = area_dnode(curfree, target_low);
734
735  for (child = target->younger; child != a; child = child->younger) {
736    child->high = child->low = child->active = curfree;
737    child->ndnodes = 0;
738  }
739
740  a->low = curfree;
741  a->ndnodes = area_dnode(a->high, curfree);
742
743  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);
744 
745  if (target == tenured_area) {
746    zero_bits(refbits, new_tenured_dnodes);
747    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
748  } else {
749    /* Need more (zeroed) refbits & fewer markbits */
750    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
751  }
752   
753  a->markbits = new_markbits;
754  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
755}
756
757
758
759/*
760  Make everything younger than the oldest byte in 'from' belong to
761  the youngest generation.  If 'from' is 'tenured_area', this means
762  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
763  to 0 to indicate this.)
764 
765  Some tenured_area refbits become dynamic area markbits in the process;
766  it's not necessary to zero them, since the GC will do that.
767*/
768
769void
770untenure_from_area(area *from)
771{
772  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
773    area *a = active_dynamic_area, *child;
774    BytePtr curlow = from->low;
775    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);
776   
777    for (child = from; child != a; child = child->younger) {
778      child->low = child->active = child->high = curlow;
779      child->ndnodes = 0;
780    }
781   
782    a->low = curlow;
783    a->ndnodes = area_dnode(a->high, curlow);
784   
785    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
786    if (from == tenured_area) {
787      /* Everything's in the dynamic area */
788      lisp_global(OLDEST_EPHEMERAL) = 0;
789      lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);
790
791    }
792  }
793}
794
795
796Boolean
797egc_control(Boolean activate, BytePtr curfree)
798{
799  area *a = active_dynamic_area;
800  Boolean egc_is_active = (a->older != NULL);
801
802  if (activate != egc_is_active) {
803    if (curfree != NULL) {
804      a->active = curfree;
805    }
806    if (activate) {
807      a->older = g1_area;
808      tenure_to_area(tenured_area);
809      egc_is_active = true;
810    } else {
811      untenure_from_area(tenured_area);
812      a->older = NULL;
813      egc_is_active = false;
814    }
815  }
816  return egc_is_active;
817}
818
819/*
820  Lisp ff-calls this; it needs to set the active area's active pointer
821  correctly.
822*/
823
824Boolean
825lisp_egc_control(Boolean activate)
826{
827  area *a = active_dynamic_area;
828  return egc_control(activate, (BytePtr) a->active);
829}
830
831
832
833 
834/* Splice the protected_area_ptr out of the list and dispose of it. */
835void
836delete_protected_area(protected_area_ptr p)
837{
838  BytePtr start = p->start;
839  int nbytes = p->nprot;
840  protected_area_ptr *prev = &AllProtectedAreas, q;
841
842  if (nbytes) {
843    UnProtectMemory((LogicalAddress)start, nbytes);
844  }
845 
846  while ((q = *prev) != NULL) {
847    if (p == q) {
848      *prev = p->next;
849      break;
850    } else {
851      prev = &(q->next);
852    }
853  }
854
855  free(p);
856}
857
858
859
860
861/*
862  Unlink the area from all_areas.
863  Unprotect and dispose of any hard/soft protected_areas.
864  If the area has a handle, dispose of that as well.
865  */
866
867void
868condemn_area_holding_area_lock(area *a)
869{
870  void free_stack(void *);
871  area *prev = a->pred, *next = a->succ;
872  Ptr h = a->h;
873  protected_area_ptr p;
874
875  prev->succ = next;
876  next->pred = prev;
877
878  p = a->softprot;
879  if (p) delete_protected_area(p);
880
881  p = a->hardprot;
882
883  if (p) delete_protected_area(p);
884
885  if (h) free_stack(h);
886  free(a);
887}
888
889
890
891void
892condemn_area(area *a, TCR *tcr)
893{
894  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
895  condemn_area_holding_area_lock(a);
896  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
897}
898
899
900
901
902/*
903  condemn an area and all the other areas that can be reached
904  via the area.older & area.younger links.
905  This is the function in the ppc::kernel-import-condemn-area slot,
906  called by free-stack-area
907  */
908void
909condemn_area_chain(area *a, TCR *tcr)
910{
911  area *older;
912
913  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
914
915  for (; a->younger; a = a->younger) ;
916  for (;a;) {
917    older = a->older;
918    condemn_area_holding_area_lock(a);
919    a = older;
920  }
921  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
922}
923
924
925void
926protect_watched_areas()
927{
928  area *a = active_dynamic_area;
929  natural code = a->code;
930
931  while (code != AREA_VOID) {
932    if (code == AREA_WATCHED) {
933      natural size = a->high - a->low;
934     
935      ProtectMemory(a->low, size);
936    }
937    a = a->succ;
938    code = a->code;
939  }
940}
941
942void
943unprotect_watched_areas()
944{
945  area *a = active_dynamic_area;
946  natural code = a->code;
947
948  while (code != AREA_VOID) {
949    if (code == AREA_WATCHED) {
950      natural size = a->high - a->low;
951     
952      UnProtectMemory(a->low, size);
953    }
954    a = a->succ;
955    code = a->code;
956  }
957}