source: branches/arm/lisp-kernel/memory.c @ 13923

Last change on this file since 13923 was 13431, checked in by gb, 10 years ago

Try to address ticket:649 by clearly distinguishing UnMapMemory()
(which completely deallocates an entire memory region created by MapMemory()
or MapMemoryForStack()) from UnCommitMemory() (which leaves a previously
mapped and accessible range of pages mapped but inaccessible/uncommitted).
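
The intended call pattern is roughly the following (a minimal sketch; the
region size n and page_size here are just illustrative):

  stack = MapMemoryForStack(n);       /* create an accessible region of n bytes */
  UnCommitMemory(stack, page_size);   /* keep it mapped, but drop access to a page */
  CommitMemory(stack, page_size);     /* make that page accessible again */
  UnMapMemory(stack, n);              /* deallocate the entire region */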

Follow Alexander Gavrilov's suggestions (and use his patch) to ensure
that thread handles are closed on Windows. Make create_system_thread()
return a Boolean, since no callers care about the exact value and the
value that was returned on Windows was a handle that needed to be closed.
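
Roughly, the Windows case now behaves like the sketch below (illustrative
only; the wrapper name and signature are made up for this example, and the
real code lives in the kernel's thread-creation path):

  Boolean
  create_system_thread_sketch(natural stack_size,
                              unsigned (__stdcall *fn)(void *), void *arg)
  {
    HANDLE h = (HANDLE)_beginthreadex(NULL, (unsigned)stack_size,
                                      fn, arg, 0, NULL);
    if (h == 0) {
      return false;           /* creation failed */
    }
    CloseHandle(h);           /* no caller needs the handle; don't leak it */
    return true;
  }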

Ensure that create_system_thread() observes its stack_size argument.
Make the size of the vstack soft protected area ("the yellow zone") larger.

Use a readable/writable PAGE_GUARD page at the bottom of the
cstack/tstack yellow zone and handle the Windows exception that's
raised when a PAGE_GUARD page is written to, so that stack overflow
detection has a prayer of working on Windows.
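
On Windows the mechanism is roughly the following (a sketch under these
assumptions: yellow_bottom marks the guard page's address, page_size is the
VM page size, and the check runs inside the kernel's existing exception
handler; none of these names are the kernel's own):

  /* arm the guard page when the yellow zone is protected */
  VirtualAlloc(yellow_bottom, page_size, MEM_COMMIT,
               PAGE_READWRITE | PAGE_GUARD);

  /* in the exception handler */
  if (exception_record->ExceptionCode == STATUS_GUARD_PAGE_VIOLATION) {
    /* a thread just wrote into the guard page: report stack overflow,
       then re-arm the page with PAGE_GUARD once that's been handled */
  }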

UNTESTED ANYWHERE BUT WIN32.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.0 KB
/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License , known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
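
/* Illustrative sketch (not code that exists in the kernel): a caller that
   wants LEN bytes at exactly WANT can combine the check above with a
   MAP_FIXED mapping, which is essentially what ReserveMemoryForHeap()
   does below:

     if (address_unmapped_p(want, len)) {
       mmap(want, len, PROT_NONE,
            MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
     }
*/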
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
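    /* The kernel put the reservation somewhere else: discard it, round
       the address it chose up to the next heap_segment_size boundary
       (heap_segment_size is a power of two), and retry there with
       MAP_FIXED. */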
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
  LogicalAddress rc;
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i, err;
  void *addr;

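  /* Try (up to three times) to commit the range by mapping it
     read/write/execute in place at START.  If mmap() hands back some
     other address, re-establish a PROT_NONE mapping there and retry. */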
  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
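  /* There is no direct "decommit" operation on POSIX systems: tell the
     kernel the contents are disposable with madvise(MADV_DONTNEED), then
     map a fresh PROT_NONE anonymous region over the range so the pages
     are inaccessible and no longer need backing store. */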
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr > 0) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* read() returns -1 on error and 0 at EOF; check before updating
       the running total so a failed read can't wrap it around. */
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}




protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree, 
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
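    /* Commit the highest-addressed page of the protected region as a
       readable/writable PAGE_GUARD page.  The first write to it raises a
       guard-page exception, which the Windows exception handler uses to
       detect stack overflow. */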
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.

*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

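  /* The tenured area's refbits and the dynamic area's markbits live in
     one shared bitvector: the words holding refbits for the (now larger)
     tenured area come first and the dynamic area's markbits start right
     after them, so step the markbits pointer past one bit per tenured
     dnode, rounded up to a whole word. */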
  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

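  /* Walk the list holding a pointer to the previous node's "next" field
     (initially &AllProtectedAreas), so unlinking P is a single store
     whether it's the head of the list or somewhere in the middle. */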
  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}