source: trunk/source/lisp-kernel/memory.c @ 15370

Last change on this file since 15370 was 15370, checked in by gb, 7 years ago

Define assembly lisp-globals in lisp_globals.s, not in *constants.s.
Provide _rnode and support, so that the lisp_globals structure can
have its elements defined in predecremented descending order.
Make managed_static_dnodes/_refbits new globals so that they can
be used on all platforms (even though the managed_static_area is only
used on x86 at the moment).
When disabling the EGC, set lisp_global(OLDSPACE_DNODE_COUNT) to 0
(not, effectively, to lisp_global(MANAGED_STATIC_DNODES)), since the
new write barrier code doesn't overload a single set of refbits.

Hopefully, that's it for these changes.
(We'll see if the code even compiles on other platforms.)


  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.1 KB
/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void *
lisp_malloc(size_t size)
{
  return malloc(size);
}

void
lisp_free(void *p)
{
  free(p);
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  vm_region_basic_info_data_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */
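
/* A hypothetical sketch (not part of this file) of the
   /proc/<pid>/maps idea mentioned above.  It assumes only that each
   line of /proc/self/maps begins with "start-end" in hex, which is
   the documented format. */
#if 0
static Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *f = fopen("/proc/self/maps", "r");
  unsigned long start, end;
  unsigned long target = (unsigned long)addr;
  char line[256];

  if (f == NULL) {
    return false;               /* can't tell, so report a conflict */
  }
  while (fgets(line, sizeof(line), f)) {
    if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
      /* An existing mapping conflicts if it overlaps [target, target+len) */
      if ((start < target + len) && (end > target)) {
        fclose(f);
        return false;
      }
    }
  }
  fclose(f);
  return true;
}
#endif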

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  LogicalAddress rc;

  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("VirtualFree error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      /* Report errno before dying; Fatal() doesn't return. */
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
  LogicalAddress p;
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  p = VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
  if (p == NULL) {
    wperror("MapMemory");
  }
  return p;
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr != NULL) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}
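
/* Hypothetical usage sketch (not part of this file): per the comment
   above, UnMapMemory() pairs with MapMemoryForStack()/MapMemory() and
   must be given the same size the region was created with.  Note that
   on the POSIX path a failed mmap() returns MAP_FAILED rather than
   NULL. */
#if 0
{
  natural stack_size = 1 << 20;     /* 1MB; size chosen arbitrarily */
  LogicalAddress stack = MapMemoryForStack(stack_size);

  if ((stack != NULL) && (stack != (LogicalAddress)-1)) {
    /* ... use the stack ... */
    UnMapMemory(stack, stack_size);
  }
}
#endif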

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", error = %d", nbytes, addr, GetLastError());
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* read() returns 0 at EOF and -1 on error; check before adding
       to the (unsigned) running total, so a failed read can't be
       mistaken for progress. */
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}


protected_area_ptr
AllProtectedAreas = NULL;

/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}


/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}
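
/* Worked example of the newlimit computation above (hypothetical
   numbers; assumes align_to_power_of_2(x, n) rounds x up to a
   multiple of 2^n).  With log2_heap_segment_size = 17 (128KB
   segments), lowptr = 0x100000, newfree = 0x123456, and
   free_space_size = 0x40000:

     newfree - lowptr + free_space_size = 0x63456
     rounded up to a multiple of 0x20000 = 0x80000
     newlimit = 0x100000 + 0x80000      = 0x180000

   If 0x180000 exceeds a->high, the heap grows by the difference;
   otherwise it may shrink back to newlimit. */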

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = calloc(1, sizeof(area));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}
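
/* Worked example (illustrative, based on the ordering comment above):
   the loop keeps all_areas sorted in descending area-code order.  To
   insert a static area into header -> dynamic -> readonly -> header,
   we cdr past the dynamic area (whose code is greater than
   AREA_STATIC), stop at the first area whose code is <= AREA_STATIC
   (here, the readonly area), and link the new area in just before
   it. */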

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}
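
/* Worked example of the markbit/refbit arithmetic above (hypothetical
   numbers; assumes a 64-bit build where nbits_in_word is 64 and
   bitmap_shift is 6).  Both bitmaps use one bit per dnode, so if
   new_tenured_dnodes is 1000:

     (1000 + 63) >> 6 = 16

   i.e. the tenured generation needs 16 whole words of refbits (1024
   bits >= 1000), and new_markbits points just past them.  The
   zero_bits(markbits, ...) call in the else branch clears exactly
   those (new_markbits - markbits) words, expressed as a bit count by
   shifting left by bitmap_shift. */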


/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}


/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  free(p);
}


/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  free(a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

LogicalAddress
ReserveMemory(natural size)
{
  LogicalAddress p;
#ifdef WINDOWS
  p = VirtualAlloc(0,
                   size,
                   MEM_RESERVE,
                   PAGE_NOACCESS);
  return p;
#else
  p = mmap(NULL,size,PROT_NONE,MAP_PRIVATE|MAP_ANON|MAP_NORESERVE,-1,0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  return p;
#endif
}
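
/* Hypothetical usage sketch (not part of this file) of the
   reserve/commit/uncommit lifecycle these primitives provide:
   reserve a large inaccessible range up front, commit pieces on
   demand, and decommit or unmap them when done.  Sizes here are
   arbitrary; assumes 4KB pages for illustration. */
#if 0
{
  natural reserved_size = 64UL << 20;          /* 64MB reservation */
  natural chunk = 4096;                        /* one page */
  LogicalAddress base = ReserveMemory(reserved_size);

  if (base != NULL) {
    if (CommitMemory(base, chunk)) {           /* make first page usable */
      zero_memory_range(base, base + chunk);
      UnCommitMemory(base, chunk);             /* give the page back */
    }
    UnMapMemory(base, reserved_size);          /* release the reservation */
  }
}
#endif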