source: release/1.7/source/lisp-kernel/memory.c @ 15267

Last change on this file since 15267 was 14295, checked in by rme, 9 years ago

Eliminate some (but not all) warnings produced when building with
"-Wall -Wno-format". Also a couple of minor changes that enable
clang to build the lisp kernel (at least on x8632 and x8664).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.7 KB
/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void *
lisp_malloc(size_t size)
{
  return malloc(size);
}

void
lisp_free(void *p)
{
  free(p);
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif
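
/* Illustrative usage sketch, not part of the original file (and compiled
   out): a Darwin-only caller that wants a mapping at a specific address can
   ask Mach first and only then use MAP_FIXED.  The parameters are
   hypothetical; this is essentially what ReserveMemoryForHeap() does below. */
#if 0
static void *
example_map_at_fixed_address(char *addr, natural len)
{
  if (address_unmapped_p(addr, len)) {
    /* Nothing is mapped in [addr, addr+len), so MAP_FIXED can't clobber
       an existing mapping. */
    return mmap(addr, len, PROT_NONE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0);
  }
  return MAP_FAILED;
}
#endif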



  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}
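
/* Worked example, illustrative only (not part of the original file): when
   mmap() ignores the hint, the fallback above rounds the address it did get
   up to a heap_segment_size boundary before remapping with MAP_FIXED.  With
   a hypothetical 128KB (0x20000) segment size:

     start                         = 0x7f3a12345678
     (start + 0x1ffff) & ~0x1ffff  = 0x7f3a12360000

   which is why an extra heap_segment_size is reserved up front. */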

int
CommitMemory (LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  LogicalAddress rc;

  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}
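
/* Illustrative usage sketch, not part of the original file (and compiled
   out): reserve an address range, then commit only its first segment.  The
   parameters and error handling are hypothetical; the real callers live in
   the image-loading and GC code. */
#if 0
static LogicalAddress
example_reserve_then_commit(LogicalAddress want, natural heap_bytes)
{
  LogicalAddress base = ReserveMemoryForHeap(want, heap_bytes);

  if (base == NULL) {
    return NULL;                /* couldn't reserve the address space */
  }
  if (!CommitMemory(base, heap_segment_size)) {
    return NULL;                /* reserved, but couldn't make it usable */
  }
  return base;
}
#endif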

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      Fatal("mmap error", "");
      fprintf(dbgout, "errno = %d", err);
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr > 0) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
    if (!(count > 0))
      return false;
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}




protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}
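
/* Illustrative sketch, not part of the original file (and compiled out):
   the typical lifecycle of a protected area.  The `reason' argument and the
   use of page_size for the protected prefix are hypothetical. */
#if 0
static void
example_protected_area_lifecycle(BytePtr start, BytePtr end,
                                 lisp_protection_kind reason)
{
  /* Record the area and protect the first page of it right away. */
  protected_area_ptr p = new_protected_area(start, end, reason, page_size, true);

  /* A fault handler would map a faulting address back to its area... */
  protected_area_ptr hit = find_protected_area(start);

  /* ...and eventually unprotect and unlink it (see delete_protected_area,
     defined later in this file). */
  if (hit == p) {
    delete_protected_area(p);
  }
}
#endif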


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}
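
/* Worked example, illustrative only (not part of the original file): with a
   hypothetical log2_heap_segment_size of 17 (128KB segments),
   a->active = 0x10000000, newfree = 0x10012345 and free_space_size = 0x8000:

     newlimit = 0x10000000 + align_to_power_of_2(0x12345 + 0x8000, 17)
              = 0x10000000 + 0x20000
              = 0x10020000

   If that lies above a->high the heap grows by the difference; if the area
   already extends comfortably past it, the excess is given back. */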

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = calloc(1, sizeof(area));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}
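
/* Worked example, illustrative only (not part of the original file): a dnode
   is a pair of native words, so on a hypothetical 64-bit build
   area_dnode(high, low) is ((high - low) >> 4).  For a 1MB area:

     low  = 0x300000000000
     high = 0x300000100000
     area_dnode(high, low) = 0x100000 >> 4 = 0x10000 dnodes */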

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.

*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}
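
/* Worked example, illustrative only (not part of the original file): the
   markbits/refbits arithmetic above, assuming a 64-bit build where
   nbits_in_word is 64 and bitmap_shift is 6.  If tenuring covers
   new_tenured_dnodes = 1000 dnodes, then

     (1000 + 63) >> 6 = 16

   words of the tenured area's refbits now describe those dnodes, so the
   dynamic area's markbits begin 16 words further into the same bitvector. */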



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  free(p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  free(a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}