source: trunk/source/lisp-kernel/memory.c @ 15373

Last change on this file since 15373 was 15373, checked in by gb, 7 years ago

Suppress some warnings from llvm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.1 KB
/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0
void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void *
lisp_malloc(size_t size)
{
  return malloc(size);
}

void
lisp_free(void *p)
{
  free(p);
}
#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif


  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  void raise_limit(void);
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

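/*
  Ensure that the LEN bytes at START are mapped and read/write/execute.
  On Windows, a range that overlaps the nil area (which lives in the
  executable image) is handled with VirtualProtect; elsewhere, a
  MAP_FIXED anonymous mmap() is attempted a few times, backing out of
  any mapping the kernel makes at a different address.
*/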
int
CommitMemory (LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  LogicalAddress rc;

  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}

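/*
  Return the LEN bytes at START to an uncommitted (PROT_NONE) state,
  telling the OS that their contents no longer need to be preserved.
*/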
void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      /* Report errno before calling Fatal(), so the diagnostic isn't lost. */
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


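/*
  Map NBYTES of anonymous memory with the given protection, at ADDR if
  ADDR is non-null.  The Windows path commits the memory
  read/write/execute regardless of PROTECTION.
*/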
LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
  LogicalAddress p;
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  p = VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
  if (p == NULL) {
    wperror("MapMemory");
  }
  return p;
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr > 0) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

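/*
  Map NBYTES of anonymous read/write/execute memory for use as a stack,
  letting the OS choose the address.
*/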
LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}

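/*
  Make the NBYTES at ADDR read/execute only.  On the mprotect() path,
  an ENOMEM failure is retried by mapping fresh anonymous memory over
  the range with the desired protection.
*/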
int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

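/*
  Make the NBYTES at ADDR readable, writable, and executable again.
*/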
int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

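/*
  Map NBYTES of the file FD, starting at file offset POS, at address
  ADDR with the given permissions.  On Windows the CreateFileMapping
  path is currently disabled (#if 0); instead the range is committed
  and the data is simply read into it, with the file position restored
  afterwards.
*/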
int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  size_t count, total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* read() returns 0 at EOF and -1 (which wraps to (size_t)-1 here)
       on error; in either case, give up before updating the total. */
    if ((count == 0) || (count == (size_t)-1)) {
      return false;
    }
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

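/*
  Remove any protection currently applied to the area described by P,
  leaving the protected_area record in place so it can be protected
  again later.
*/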
void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

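/*
  Allocate a protected_area describing [START,END), push it onto
  AllProtectedAreas, and protect it immediately if NOW is true.
*/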
protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}




protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}


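/* Zero the bytes from START (inclusive) up to END (exclusive). */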
void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}



/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


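/*
  Zero a single page at START: via zero_cache_lines() on PPC,
  memset() elsewhere.
*/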
void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


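/*
  Allocate and initialize an area descriptor for [LOWADDR,HIGHADDR).
  The active pointer starts at the low end for dynamic areas and at
  the high end for everything else.
*/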
area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = calloc(1, sizeof(area));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.

*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;

    }
  }
}


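/*
  Enable or disable the ephemeral GC.  Enabling links the active
  dynamic area to g1_area and tenures everything currently allocated;
  disabling untenures everything.  Returns the resulting EGC state.
*/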
Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}



/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  free(p);
}



/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  free(a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}



/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

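/*
  Walk the area list from the active dynamic area and write-protect
  (or, in unprotect_watched_areas() below, unprotect) every
  AREA_WATCHED area.
*/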
void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

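/*
  Reserve (but don't commit) SIZE bytes of address space at an
  OS-chosen address.  Returns NULL on failure.
*/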
LogicalAddress
ReserveMemory(natural size)
{
  LogicalAddress p;
#ifdef WINDOWS
  p = VirtualAlloc(0,
                   size,
                   MEM_RESERVE,
                   PAGE_NOACCESS);
  return p;
#else
  p = mmap(NULL,size,PROT_NONE,MAP_PRIVATE|MAP_ANON|MAP_NORESERVE,-1,0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  return p;
#endif
}