source: branches/win64/lisp-kernel/memory.c @ 9651

Last change on this file since 9651 was 9651, checked in by gb, 13 years ago

CommitMemory: on windows, the region that contains NIL is already mapped,
so just return true.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.6 KB
Line 
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17#include "lisp.h"
18#include "lisp-exceptions.h"
19#include "lisp_globals.h"
20#include "Threads.h"
21#include <ctype.h>
22#include <stdio.h>
23#include <stdlib.h>
24#include <stddef.h>
25#include <string.h>
26#include <stdarg.h>
27#include <errno.h>
28#include <stdio.h>
29#include <unistd.h>
30#ifdef LINUX
31#include <strings.h>
32#include <fpu_control.h>
33#include <linux/prctl.h>
34#endif
35
36#ifndef WINDOWS
37#include <sys/mman.h>
38#endif
39
40#define DEBUG_MEMORY 0
41
42void
43allocation_failure(Boolean pointerp, natural size)
44{
45  char buf[64];
46  sprintf(buf, "Can't allocate %s of size %Id bytes.", pointerp ? "pointer" : "handle", size);
47  Fatal(":   Kernel memory allocation failure.  ", buf);
48}
49
50void
51fatal_oserr(StringPtr param, OSErr err)
52{
53  char buf[64];
54  sprintf(buf," - operating system error %d.", err);
55  Fatal(param, buf);
56}
57
58
59Ptr
60allocate(natural size)
61{
62  return (Ptr) malloc(size);
63}
64
65void
66deallocate(Ptr p)
67{
68  free((void *)p);
69}
70
71Ptr
72zalloc(natural size)
73{
74  Ptr p = allocate(size);
75  if (p != NULL) {
76    memset(p, 0, size);
77  }
78  return p;
79}
80
81#ifdef DARWIN
82#if WORD_SIZE == 64
83#define vm_region vm_region_64
84#endif
85
86/*
87  Check to see if the specified address is unmapped by trying to get
88  information about the mapped address at or beyond the target.  If
89  the difference between the target address and the next mapped address
90  is >= len, we can safely mmap len bytes at addr.
91*/
92Boolean
93address_unmapped_p(char *addr, natural len)
94{
95  vm_address_t vm_addr = (vm_address_t)addr;
96  vm_size_t vm_size;
97#if WORD_SIZE == 64
98  vm_region_basic_info_data_64_t vm_info;
99#else
100  vm_region_basic_info_data_t vm_info;
101#endif
102#if WORD_SIZE == 64
103  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
104#else
105  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
106#endif
107  mach_port_t vm_object_name = (mach_port_t) 0;
108  kern_return_t kret;
109
110  kret = vm_region(mach_task_self(),
111                   &vm_addr,
112                   &vm_size,
113#if WORD_SIZE == 64
114                   VM_REGION_BASIC_INFO_64,
115#else
116                   VM_REGION_BASIC_INFO,
117#endif
118                   (vm_region_info_t)&vm_info,
119                   &vm_info_size,
120                   &vm_object_name);
121  if (kret != KERN_SUCCESS) {
122    return false;
123  }
124
125  return vm_addr >= (vm_address_t)(addr+len);
126}
127#endif
128
129
130  /*
131    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
132    likely to reside near the beginning of an unmapped block of memory
133    that's at least 1GB in size.  We'd like to load the heap image's
134    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
135    that'd allow us to file-map those sections (and would enable us to
136    avoid having to relocate references in the data sections.)
137
138    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
139    by creating an anonymous mapping with mmap().
140
141    If we try to insist that mmap() map a 1GB block at
142    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
143    mmap() will gleefully clobber any mapped memory that's already
144    there.  (That region's empty at this writing, but some future
145    version of the OS might decide to put something there.)
146
147    If we don't specify MAP_FIXED, mmap() is free to treat the address
148    we give it as a hint; Linux seems to accept the hint if doing so
149    wouldn't cause a problem.  Naturally, that behavior's too useful
150    for Darwin (or perhaps too inconvenient for it): it'll often
151    return another address, even if the hint would have worked fine.
152
153    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
154    would conflict with anything.  Until we discover a need to do
155    otherwise, we'll assume that if Linux's mmap() fails to take the
156    hint, it's because of a legitimate conflict.
157
158    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
159    to implement an address_unmapped_p() for Linux.
160  */
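/*
  A minimal sketch, following the suggestion above, of what an
  address_unmapped_p() for Linux might look like if it parsed
  /proc/<pid>/maps.  Each line of that file begins with "start-end" in
  hex; [addr, addr+len) is unmapped iff it overlaps none of those
  ranges.  The function name is hypothetical and nothing else in this
  file calls it, so it's left disabled.
*/
#if 0
Boolean
linux_address_unmapped_p(char *addr, natural len)
{
  FILE *f = fopen("/proc/self/maps", "r");
  char line[256];
  unsigned long start, end, lo = (unsigned long)addr, hi = lo + len;
  Boolean result = true;

  if (f == NULL) {
    return false;             /* can't tell, so don't claim it's unmapped */
  }
  while (fgets(line, sizeof(line), f)) {
    if (sscanf(line, "%lx-%lx", &start, &end) == 2) {
      if ((start < hi) && (end > lo)) {
        result = false;       /* some mapping overlaps the target range */
        break;
      }
    }
  }
  fclose(f);
  return result;
}
#endif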
161
162LogicalAddress
163ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
164{
165  LogicalAddress start;
166  Boolean fixed_map_ok = false;
167#ifdef DARWIN
168  fixed_map_ok = address_unmapped_p(want,totalsize);
169#endif
170#ifdef SOLARIS
171  fixed_map_ok = true;
172#endif
173  raise_limit();
174#ifdef WINDOWS
175  start = VirtualAlloc((void *)want,
176                       totalsize + heap_segment_size,
177                       MEM_RESERVE,
178                       PAGE_NOACCESS);
179  if (!start) {
180    fprintf(stderr, "Can't get desired heap address at 0x%Ix\n", want);
181    start = VirtualAlloc(0,
182                         totalsize + heap_segment_size,
183                         MEM_RESERVE,
184                         PAGE_NOACCESS);
185    if (!start) {
186      wperror("VirtualAlloc");
187      return NULL;
188    }
189  }
190#else
191  start = mmap((void *)want,
192               totalsize + heap_segment_size,
193               PROT_NONE,
194               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
195               -1,
196               0);
197  if (start == MAP_FAILED) {
198    perror("Initial mmap");
199    return NULL;
200  }
201
202  if (start != want) {
203    munmap(start, totalsize+heap_segment_size);
204    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
205    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
206      return NULL;
207    }
208  }
209  mprotect(start, totalsize, PROT_NONE);
210#endif
211#if DEBUG_MEMORY
212  fprintf(stderr, "Reserving heap at 0x%Ix, size 0x%Ix\n", start, totalsize);
213#endif
214  return start;
215}
216
217int
218CommitMemory (LogicalAddress start, natural len) {
219  LogicalAddress rc;
220#if DEBUG_MEMORY
221  fprintf(stderr, "Committing memory at 0x%Ix, size 0x%Ix\n", start, len);
222#endif
223#ifdef WINDOWS
224  if ((start < ((LogicalAddress)nil_value)) &&
225      (((LogicalAddress)nil_value) < (start+len))) {
226    /* nil area is in the executable on Windows, do nothing */
227    return true;
228  }
229  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
230  if (!rc) {
231    wperror("CommitMemory VirtualAlloc");
232    return false;
233  }
234  return true;
235#else
236  int i, err;
237  void *addr;
238
239  for (i = 0; i < 3; i++) {
240    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
241    if (addr == start) {
242      return true;
243    } else {
244      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
245    }
246  }
247  return false;
248#endif
249}
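/*
  A hedged usage sketch (hypothetical caller, not part of this revision):
  reserve the whole heap up front with ReserveMemoryForHeap(), then
  commit the first segment with CommitMemory() before touching it.
  IMAGE_BASE_ADDRESS is assumed to name the platform's image-base
  constant mentioned in the comment above ReserveMemoryForHeap();
  heap_segment_size is the kernel's existing segment size.
*/
#if 0
static Boolean
example_reserve_and_commit_first_segment(natural totalsize)
{
  LogicalAddress base;

  base = ReserveMemoryForHeap((LogicalAddress)IMAGE_BASE_ADDRESS, totalsize);
  if (base == NULL) {
    return false;             /* couldn't reserve address space anywhere */
  }
  /* Reserved pages are inaccessible until committed. */
  return CommitMemory(base, heap_segment_size) ? true : false;
}
#endif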
250
251void
252UnCommitMemory (LogicalAddress start, natural len) {
253#if DEBUG_MEMORY
254  fprintf(stderr, "Uncommitting memory at 0x%Ix, size 0x%Ix\n", start, len);
255#endif
256#ifdef WINDOWS
257  int rc = VirtualFree(start, len, MEM_DECOMMIT);
258  if (!rc) {
259    wperror("UnCommitMemory VirtualFree");
260    Fatal("mmap error", "");
261    return;
262  }
263#else
264  if (len) {
265    madvise(start, len, MADV_DONTNEED);
266    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
267        != start) {
268      int err = errno;
269      fprintf(stderr, "UnCommitMemory mmap failed: errno = %d\n", err);
270      Fatal("mmap error", "");
271    }
272  }
273#endif
274}
275
276
277LogicalAddress
278MapMemory(LogicalAddress addr, natural nbytes, int protection)
279{
280#if DEBUG_MEMORY
281  fprintf(stderr, "Mapping memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
282#endif
283#ifdef WINDOWS
284  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
285#else
286  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
287#endif
288}
289
290LogicalAddress
291MapMemoryForStack(natural nbytes)
292{
293#if DEBUG_MEMORY
294  fprintf(stderr, "Mapping stack of size 0x%Ix\n", nbytes);
295#endif
296#ifdef WINDOWS
297  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
298#else
299  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
300#endif
301}
302
303int
304UnMapMemory(LogicalAddress addr, natural nbytes)
305{
306#if DEBUG_MEMORY
307  fprintf(stderr, "Unmapping memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
308#endif
309#ifdef WINDOWS
310  /* Can't MEM_RELEASE here because we only want to free a chunk */
311  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
312#else
313  return munmap(addr, nbytes);
314#endif
315}
316
317int
318ProtectMemory(LogicalAddress addr, natural nbytes)
319{
320#if DEBUG_MEMORY
321  fprintf(stderr, "Protecting memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
322#endif
323#ifdef WINDOWS
324  DWORD oldProtect;
325  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);
326 
327  if(!status) {
328    wperror("ProtectMemory VirtualProtect");
329    Bug(NULL, "couldn't protect %Id bytes at %Ix, errno = %d", nbytes, addr, status);
330  }
331  return status;
332#else
333  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);
334 
335  if (status) {
336    status = errno;
337    Bug(NULL, "couldn't protect %Id bytes at %Ix, errno = %d", nbytes, addr, status);
338  }
339  return status;
340#endif
341}
342
343int
344UnProtectMemory(LogicalAddress addr, natural nbytes)
345{
346#if DEBUG_MEMORY
347  fprintf(stderr, "Unprotecting memory at 0x%Ix, size 0x%Ix\n", addr, nbytes);
348#endif
349#ifdef WINDOWS
350  DWORD oldProtect;
351  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
352#else
353  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
354#endif
355}
356
357int
358MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) 
359{
360#ifdef WINDOWS
361#if 0
362  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
363  HANDLE hFile, hFileMapping;
364  LPVOID rc;
365  DWORD desiredAccess;
366
367  if (permissions == MEMPROTECT_RWX) {
368    permissions |= PAGE_WRITECOPY;
369    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
370  } else {
371    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
372  }
373
374  hFile = _get_osfhandle(fd);
375  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
376                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);
377 
378  if (!hFileMapping) {
379    wperror("CreateFileMapping");
380    return false;
381  }
382
383  rc = MapViewOfFileEx(hFileMapping,
384                       desiredAccess,
385                       (pos >> 32),
386                       (pos & 0xffffffff),
387                       nbytes,
388                       addr);
389#else
390  ssize_t count;
391  size_t total = 0, opos;
392
393  opos = lseek(fd, 0, SEEK_CUR);
394  CommitMemory(addr, nbytes);
395  lseek(fd, pos, SEEK_SET);
396
397  while (total < nbytes) {
398    count = read(fd, addr + total, nbytes - total);
399    if (count <= 0)     /* error (-1) or EOF: bail out before accumulating */
400      return false;
401    total += count;
402    // fprintf(stderr, "read %Id bytes, for a total of %Id out of %Id so far\n", count, total, nbytes);
403  }
404
405  lseek(fd, opos, SEEK_SET);
406
407  return true;
408#endif
409#else
410  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
411#endif
412}
413
414void
415unprotect_area(protected_area_ptr p)
416{
417  BytePtr start = p->start;
418  natural nprot = p->nprot;
419 
420  if (nprot) {
421    UnProtectMemory(start, nprot);
422    p->nprot = 0;
423  }
424}
425
426protected_area_ptr
427new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
428{
429  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));
430 
431  if (p == NULL) return NULL;
432  p->protsize = protsize;
433  p->nprot = 0;
434  p->start = start;
435  p->end = end;
436  p->why = reason;
437  p->next = AllProtectedAreas;
438
439  AllProtectedAreas = p;
440  if (now) {
441    protect_area(p);
442  }
443 
444  return p;
445}
446
447/*
448  Un-protect the first delta bytes in the specified area.
449  Note that this may cause the area to be empty.
450*/
451void
452unprotect_area_prefix(protected_area_ptr area, size_t delta)
453{
454  unprotect_area(area);
455  area->start += delta;
456  if ((area->start + area->protsize) <= area->end) {
457    protect_area(area);
458  }
459}
460
461
462/*
463  Extend the protected area, causing the preceding delta bytes
464  to be included and protected.
465*/
466void
467protect_area_prefix(protected_area_ptr area, size_t delta)
468{
469  unprotect_area(area);
470  area->start -= delta;
471  protect_area(area);
472}
473
474protected_area_ptr
475AllProtectedAreas = NULL;
476
477
478/*
479  This does a linear search.  Areas aren't created all that often;
480  if there get to be very many of them, some sort of tree search
481  might be justified.
482*/
483
484protected_area_ptr
485find_protected_area(BytePtr addr)
486{
487  protected_area* p;
488 
489  for(p = AllProtectedAreas; p; p=p->next) {
490    if ((p->start <= addr) && (p->end > addr)) {
491      return p;
492    }
493  }
494  return NULL;
495}
496
497
498void
499zero_memory_range(BytePtr start, BytePtr end)
500{
501#ifdef WINDOWS
502  ZeroMemory(start,end-start);
503#else
504  bzero(start,(size_t)(end-start));
505#endif
506}
507
508
509 
510
511/*
512   Grow or shrink the dynamic area.  Or maybe not.
513   Whether or not the end of (mapped space in) the heap changes,
514   ensure that everything between the freeptr and the heap end
515   is mapped and read/write.  (It'll incidentally be zeroed.)
516*/
517Boolean
518resize_dynamic_heap(BytePtr newfree, 
519                    natural free_space_size)
520{
521  extern int page_size;
522  area *a = active_dynamic_area;
523  BytePtr newlimit, protptr, zptr;
524  int psize = page_size;
525  if (free_space_size) {
526    BytePtr lowptr = a->active;
527    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
528                                            log2_heap_segment_size);
529    if (newlimit > a->high) {
530      return grow_dynamic_area(newlimit-a->high);
531    } else if ((lowptr + free_space_size) < a->high) {
532      shrink_dynamic_area(a->high-newlimit);
533      return true;
534    }
535  }
  return true;          /* heap end unchanged; nothing needed to be done */
536}
537
538void
539protect_area(protected_area_ptr p)
540{
541  BytePtr start = p->start;
542  natural n = p->protsize;
543
544  if (n && ! p->nprot) {
545    ProtectMemory(start, n);
546    p->nprot = n;
547  }
548}
549
550
551void
552zero_page(BytePtr start)
553{
554  extern int page_size;
555#ifdef PPC
556  extern void zero_cache_lines(BytePtr, size_t, size_t);
557  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
558#else
559  memset(start, 0, page_size);
560#endif
561}
562
563/* area management */
564
565
566area *
567new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
568{
569  area *a = (area *) (zalloc(sizeof(area)));
570  if (a) {
571    natural ndnodes = area_dnode(highaddr, lowaddr);
572    a->low = lowaddr;
573    a->high = highaddr;
574    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
575    a->code = code;
576    a->ndnodes = ndnodes;
577    /* Caller must allocate markbits when allocating heap ! */
578   
579  }
580  return a;
581}
582
583static area *
584add_area_before(area *new_area, area *before)
585{
586  area *before_before = before->pred;
587
588  new_area->pred = before_before;
589  new_area->succ = before;
590  before_before->succ = new_area;
591  before->pred = new_area;
592  return new_area;
593}
594
595/*
596  The active dynamic area comes first.
597  Static areas follow dynamic areas.
598  Stack areas follow static areas.
599  Readonly areas come last.
600*/
601
602/*
603  If we already own the area_lock (or during initialization), it's safe
604  to add an area.
605*/
606
607
608void
609add_area_holding_area_lock(area *new_area)
610{
611  area *that = all_areas;
612  int
613    thiscode = (int)(new_area->code),
614    thatcode;
615
616  /* Cdr down the linked list */
617  do {
618    that = that->succ;
619    thatcode = (int)(that->code);
620  } while (thiscode < thatcode);
621  add_area_before(new_area, that);
622}
623
624/*
625  In general, we need to own the area lock before adding an area.
626*/
627void
628add_area(area *new_area, TCR *tcr)
629{
630  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
631  add_area_holding_area_lock(new_area);
632  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
633} 
634
635/*
636  Search areas "forward" from the header's successor, until
637  an area containing ADDR is found or an area with code < MINCODE
638  is encountered.
639  This walks the area list visiting heaps (dynamic, then static)
640  first, then stacks.
641
642*/
643static area *
644find_area_forward(BytePtr addr, area_code mincode)
645{
646  area *p, *header = all_areas;
647
648  for (p = header->succ; p != header; p = p->succ) {
649    area_code pcode = p->code;
650    if (pcode < mincode) {
651      return NULL;
652    }
653    if (pcode >= AREA_READONLY) {
654      if ((addr >= p->low) &&
655          (addr < p->active)) {
656        return p;
657      }
658    } else {
659      if ((addr >= p->active) &&
660          (addr < p->high)) {
661        return p;
662      }
663    }
664  }
665  return NULL;
666}
667
668static area *
669find_area_backward(BytePtr addr, area_code maxcode)
670{
671  area *p, *header = all_areas;
672
673  for (p = header->pred; p != header; p = p->pred) {
674    area_code pcode = p->code;
675
676    if (pcode > maxcode) {
677      return NULL;
678    }
679    if (pcode >= AREA_READONLY) {
680      if ((addr >= p->low) &&
681          (addr < p->active)) {
682        return p;
683      }
684    } else {
685      if ((addr >= p->active) &&
686          (addr < p->high)) {
687        return p;
688      }
689    }
690  }
691  return NULL;
692}
693
694area *
695area_containing(BytePtr addr)
696{
697  return find_area_forward(addr, AREA_VOID);
698}
699
700area *
701heap_area_containing(BytePtr addr)
702{
703  return find_area_forward(addr, AREA_READONLY);
704}
705
706area *
707stack_area_containing(BytePtr addr)
708{
709  return find_area_backward(addr, AREA_TSTACK);
710}
711
712/*
713  Make everything "younger" than the start of the target area
714  belong to that area; all younger areas will become empty, and
715  the dynamic area will have to lose some of its markbits (they
716  get zeroed and become part of the tenured area's refbits.)
717
718  The active dynamic area must have been "normalized" (e.g., its
719  active pointer must match the free pointer) before this is called.
720
721  If the target area is 'tenured_area' (the oldest ephemeral generation),
722  zero its refbits and update OLDEST_EPHEMERAL.
723
724*/
725
726void
727tenure_to_area(area *target)
728{
729  area *a = active_dynamic_area, *child;
730  BytePtr
731    curfree = a->active,
732    target_low = target->low,
733    tenured_low = tenured_area->low;
734  natural
735    dynamic_dnodes = area_dnode(curfree, a->low),
736    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
737  bitvector
738    refbits = tenured_area->refbits,
739    markbits = a->markbits,
740    new_markbits;
741
742  target->high = target->active = curfree;
743  target->ndnodes = area_dnode(curfree, target_low);
744
745  for (child = target->younger; child != a; child = child->younger) {
746    child->high = child->low = child->active = curfree;
747    child->ndnodes = 0;
748  }
749
750  a->low = curfree;
751  a->ndnodes = area_dnode(a->high, curfree);
752
753  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);
754 
755  if (target == tenured_area) {
756    zero_bits(refbits, new_tenured_dnodes);
757    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
758  } else {
759    /* Need more (zeroed) refbits & fewer markbits */
760    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
761  }
762   
763  a->markbits = new_markbits;
764  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
765}
766
767
768
769/*
770  Make everything younger than the oldest byte in 'from' belong to
771  the youngest generation.  If 'from' is 'tenured_area', this means
772  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
773  to 0 to indicate this.)
774 
775  Some tenured_area refbits become dynamic area markbits in the process;
776  it's not necessary to zero them, since the GC will do that.
777*/
778
779void
780untenure_from_area(area *from)
781{
782  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
783    area *a = active_dynamic_area, *child;
784    BytePtr curlow = from->low;
785    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);
786   
787    for (child = from; child != a; child = child->younger) {
788      child->low = child->active = child->high = curlow;
789      child->ndnodes = 0;
790    }
791   
792    a->low = curlow;
793    a->ndnodes = area_dnode(a->high, curlow);
794   
795    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
796    if (from == tenured_area) {
797      /* Everything's in the dynamic area */
798      lisp_global(OLDEST_EPHEMERAL) = 0;
799      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
800
801    }
802  }
803}
804
805
806Boolean
807egc_control(Boolean activate, BytePtr curfree)
808{
809  area *a = active_dynamic_area;
810  Boolean egc_is_active = (a->older != NULL);
811
812  if (activate != egc_is_active) {
813    if (curfree != NULL) {
814      a->active = curfree;
815    }
816    if (activate) {
817      LispObj *heap_start = ptr_from_lispobj(lisp_global(HEAP_START));
818
819      a->older = g1_area;
820      tenure_to_area(tenured_area);
821      egc_is_active = true;
822    } else {
823      untenure_from_area(tenured_area);
824      a->older = NULL;
825      egc_is_active = false;
826    }
827  }
828  return egc_is_active;
829}
830
831/*
832  Lisp ff-calls this; it needs to set the active area's active pointer
833  correctly.
834*/
835
836Boolean
837lisp_egc_control(Boolean activate)
838{
839  area *a = active_dynamic_area;
840  return egc_control(activate, (BytePtr) a->active);
841}
842
843
844
845 
846/* Splice the protected_area_ptr out of the list and dispose of it. */
847void
848delete_protected_area(protected_area_ptr p)
849{
850  BytePtr start = p->start;
851  int nbytes = p->nprot;
852  protected_area_ptr *prev = &AllProtectedAreas, q;
853
854  if (nbytes) {
855    UnProtectMemory((LogicalAddress)start, nbytes);
856  }
857 
858  while ((q = *prev) != NULL) {
859    if (p == q) {
860      *prev = p->next;
861      break;
862    } else {
863      prev = &(q->next);
864    }
865  }
866
867  deallocate((Ptr)p);
868}
869
870
871
872
873/*
874  Unlink the area from all_areas.
875  Unprotect and dispose of any hard/soft protected_areas.
876  If the area has a handle, dispose of that as well.
877  */
878
879void
880condemn_area_holding_area_lock(area *a)
881{
882  void free_stack(void *);
883  area *prev = a->pred, *next = a->succ;
884  Ptr h = a->h;
885  protected_area_ptr p;
886
887  prev->succ = next;
888  next->pred = prev;
889
890  p = a->softprot;
891  if (p) delete_protected_area(p);
892
893  p = a->hardprot;
894
895  if (p) delete_protected_area(p);
896
897  if (h) free_stack(h);
898  deallocate((Ptr)a);
899}
900
901
902
903void
904condemn_area(area *a, TCR *tcr)
905{
906  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
907  condemn_area_holding_area_lock(a);
908  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
909}
910
911
912
913
914/*
915  condemn an area and all the other areas that can be reached
916  via the area.older & area.younger links.
917  This is the function in the ppc::kernel-import-condemn-area slot,
918  called by free-stack-area
919  */
920void
921condemn_area_chain(area *a, TCR *tcr)
922{
923  area *older;
924
925  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
926
927  for (; a->younger; a = a->younger) ;
928  for (;a;) {
929    older = a->older;
930    condemn_area_holding_area_lock(a);
931    a = older;
932  }
933  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
934}
935
936void
937release_readonly_area()
938{
939  area *a = readonly_area;
940  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
941  a->active = a->low;
942  a->ndnodes = 0;
943  pure_space_active = pure_space_start;
944}