source: branches/acl2-egc/lisp-kernel/memory.c @ 16371

/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#endif
#ifdef DARWIN
#include <pthread.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0
void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void *
lisp_malloc(size_t size)
{
  return malloc(size);
}

void
lisp_free(void *p)
{
  free(p);
}


LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  void raise_limit(void);
  LogicalAddress start;
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
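    /* We didn't get the address we asked for.  Round the address the
       kernel did give us up to the next heap_segment_size boundary
       (heap_segment_size is a power of two, so the mask arithmetic
       below does the rounding) and try to claim the heap there with
       MAP_FIXED. */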
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  LogicalAddress rc;

  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}
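
/* The two functions above implement the usual two-phase scheme:
   reserve a large stretch of address space up front (inaccessible, so
   it needs no backing store), then commit pieces of it as the heap
   grows.  A minimal usage sketch, with hypothetical sizes (not part
   of the kernel): */
#if 0
{
  natural totalsize = 512 << 20;    /* hypothetical: 512MB of address space */
  LogicalAddress base = ReserveMemoryForHeap(NULL, totalsize);

  if (base != NULL) {
    /* Make the first segment usable before allocating into it. */
    if (!CommitMemory(base, heap_segment_size)) {
      /* commit failed: out of memory or address-space conflict */
    }
  }
}
#endif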

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      /* Report errno before Fatal(), which does not return. */
      fprintf(dbgout, "errno = %d", err);
      Fatal("mmap error", "");
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
  LogicalAddress p;
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  p = VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
  if (p == NULL) {
    wperror("MapMemory");
  }
  return p;
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr != NULL) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}
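
/* A minimal sketch (hypothetical size) pairing the two calls above: */
#if 0
{
  natural stack_size = 1 << 20;          /* hypothetical 1MB stack */
  LogicalAddress stack = MapMemoryForStack(stack_size);

  if (stack != NULL) {
    /* ... use the stack ... */
    UnMapMemory(stack, stack_size);      /* pass the size that was mapped */
  }
}
#endif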

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
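      /* mprotect() can fail with ENOMEM when it would have to split a
         mapping and kernel resources are short.  Fall back to mapping
         fresh anonymous pages at the same address with the desired
         protection; note that this discards the previous contents of
         the range. */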
      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  ssize_t count;
  size_t total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    /* read() returns 0 at EOF and -1 on error; COUNT must be signed
       for this check to catch errors. */
    if (count <= 0)
      return false;
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}


protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}

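/* Clear refbits quickly via a two-level scheme: each bit in REFIDX
   covers one 256-dnode block of REFBITS, so blocks whose index bit is
   clear are skipped entirely.  (The arithmetic below assumes
   WORD_SIZE-bit bitmap words.) */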
void
zero_refbits(bitvector refidx, bitvector refbits, natural ndnodes)
{
  bitvector refbase = refbits, refword, limit = refbase + ((ndnodes + (WORD_SIZE-1)) >> node_shift), reflimit;
  natural i, n = (((ndnodes + 255) >> 8) + (WORD_SIZE-1)) >> bitmap_shift, bit, idx;

  for (i = 0; i < n; i++, refbase += WORD_SIZE * (256 / WORD_SIZE)) {
    idx = *refidx;

    if (idx != 0) {
      *refidx = 0;
      while (idx) {
        bit = count_leading_zeros(idx);
        idx &= ~(BIT0_MASK>>bit);
        refword = refbase + bit * (256/WORD_SIZE);
        reflimit = refword + (256/WORD_SIZE);
        if (limit < reflimit) {
          reflimit = limit;
        }
        while (refword < reflimit) {
          *refword++ = 0;
        }
      }
    }
    refidx++;
  }
#if 0
  /* Check, slowly */
  for (i=0;i<ndnodes;i++) {
    if (ref_bit(refbits,i)) {
      Bug(NULL, "Bit 0x" LISP " set unexpectedly\n", i);
    }
  }
#endif
}


/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
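    /* Commit the last page of the region as a guard page, so that the
       first touch raises a guard-page exception rather than a plain
       access violation. */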
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}

void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = calloc(1, sizeof(area));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/

void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

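  /* The tenured area's refbits and the dynamic area's markbits share
     one bitvector: the markbits begin immediately after the words
     needed to cover the (now larger) tenured area. */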
  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_refbits(global_refidx,managed_static_area->refbits, managed_static_area->ndnodes);
    zero_bits(refbits, new_tenured_dnodes);
    zero_bits(dynamic_refidx,(new_tenured_dnodes+255)>>8);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}


/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if ((lisp_global(OLDEST_EPHEMERAL) != 0) &&
      (from->static_dnodes == 0)) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
    }
  }
}

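/* Turn the EGC on or off.  Activating hands everything allocated so
   far to tenure_to_area(); deactivating untenures it all again.
   Returns the resulting EGC state. */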
Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}


/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

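  /* PREV points at whichever link currently points at Q, so *PREV can
     be updated to splice P out without special-casing the list head. */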
  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  free(p);
}


/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  free(a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


/*
  Condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area.
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


void
protect_watched_areas(void)
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas(void)
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

LogicalAddress
ReserveMemory(natural size)
{
  LogicalAddress p;
#ifdef WINDOWS
  p = VirtualAlloc(0,
                   size,
                   MEM_RESERVE,
                   PAGE_NOACCESS);
  return p;
#else
  p = mmap(NULL,size,PROT_NONE,MAP_PRIVATE|MAP_ANON|MAP_NORESERVE,-1,0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  return p;
#endif
}