source: release/1.8/source/lisp-kernel/memory.c @ 15449

Last change on this file since 15449 was 15449, checked in by gb, 8 years ago

Propagate r15437-r15439 to 1.8.

/*
   Copyright (C) 2009 Clozure Associates
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of Clozure CL.

   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
   License , known as the LLGPL and distributed with Clozure CL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with Clozure CL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   Clozure CL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#ifdef LINUX
#include <strings.h>
#endif
#ifdef DARWIN64
#include <pthread.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

#define DEBUG_MEMORY 0

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void *
lisp_malloc(size_t size)
{
  return malloc(size);
}

void
lisp_free(void *p)
{
  free(p);
}

#ifdef DARWIN
#if WORD_SIZE == 64
#define vm_region vm_region_64
#endif

/*
  Check to see if the specified address is unmapped by trying to get
  information about the mapped address at or beyond the target.  If
  the difference between the target address and the next mapped address
  is >= len, we can safely mmap len bytes at addr.
*/
Boolean
address_unmapped_p(char *addr, natural len)
{
  vm_address_t vm_addr = (vm_address_t)addr;
  vm_size_t vm_size;
#if WORD_SIZE == 64
  vm_region_basic_info_data_64_t vm_info;
#else
  vm_region_basic_info_data_t vm_info;
#endif
#if WORD_SIZE == 64
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
#else
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
#endif
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  kret = vm_region(mach_task_self(),
                   &vm_addr,
                   &vm_size,
#if WORD_SIZE == 64
                   VM_REGION_BASIC_INFO_64,
#else
                   VM_REGION_BASIC_INFO,
#endif
                   (vm_region_info_t)&vm_info,
                   &vm_info_size,
                   &vm_object_name);
  if (kret != KERN_SUCCESS) {
    return false;
  }

  return vm_addr >= (vm_address_t)(addr+len);
}
#endif



  /*
    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    likely to reside near the beginning of an unmapped block of memory
    that's at least 1GB in size.  We'd like to load the heap image's
    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    that'd allow us to file-map those sections (and would enable us to
    avoid having to relocate references in the data sections.)

    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    by creating an anonymous mapping with mmap().

    If we try to insist that mmap() map a 1GB block at
    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    mmap() will gleefully clobber any mapped memory that's already
    there.  (That region's empty at this writing, but some future
    version of the OS might decide to put something there.)

    If we don't specify MAP_FIXED, mmap() is free to treat the address
    we give it as a hint; Linux seems to accept the hint if doing so
    wouldn't cause a problem.  Naturally, that behavior's too useful
    for Darwin (or perhaps too inconvenient for it): it'll often
    return another address, even if the hint would have worked fine.

    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    would conflict with anything.  Until we discover a need to do
    otherwise, we'll assume that if Linux's mmap() fails to take the
    hint, it's because of a legitimate conflict.

    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    to implement an address_unmapped_p() for Linux.
  */

LogicalAddress
ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
{
  LogicalAddress start;
  Boolean fixed_map_ok = false;
#ifdef DARWIN
  fixed_map_ok = address_unmapped_p(want,totalsize);
#endif
#ifdef SOLARIS
  fixed_map_ok = true;
#endif
  raise_limit();
#ifdef WINDOWS
  start = VirtualAlloc((void *)want,
                       totalsize + heap_segment_size,
                       MEM_RESERVE,
                       PAGE_NOACCESS);
  if (!start) {
#if DEBUG_MEMORY
    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
#endif
    start = VirtualAlloc(0,
                         totalsize + heap_segment_size,
                         MEM_RESERVE,
                         PAGE_NOACCESS);
    if (!start) {
      return NULL;
    }
  }
#else
  start = mmap((void *)want,
               totalsize + heap_segment_size,
               PROT_NONE,
               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
               -1,
               0);
  if (start == MAP_FAILED) {
    return NULL;
  }

  if (start != want) {
    munmap(start, totalsize+heap_segment_size);
    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
      return NULL;
    }
  }
  mprotect(start, totalsize, PROT_NONE);
#endif
#if DEBUG_MEMORY
  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
#endif
  return start;
}

int
CommitMemory (LogicalAddress start, natural len)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  LogicalAddress rc;

  if ((start < ((LogicalAddress)nil_value)) &&
      (((LogicalAddress)nil_value) < (start+len))) {
    /* nil area is in the executable on Windows; ensure range is
       read-write */
    DWORD as_if_i_care;
    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
      return false;
    }
    return true;
  }
  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (!rc) {
    wperror("CommitMemory VirtualAlloc");
    return false;
  }
  return true;
#else
  int i;
  void *addr;

  for (i = 0; i < 3; i++) {
    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    if (addr == start) {
      return true;
    } else {
      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    }
  }
  return false;
#endif
}
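
/*
  Illustrative sketch (hypothetical code, not called by the kernel): the
  reserve/commit split above is meant to be used in two steps -- reserve
  the whole heap as inaccessible address space up front, then commit
  pieces of it (here, just the first segment) as they are needed.
*/
#if 0
static LogicalAddress
example_reserve_then_commit(LogicalAddress want, natural totalsize)
{
  LogicalAddress base = ReserveMemoryForHeap(want, totalsize);

  if (base != NULL) {
    /* Make the first segment readable/writable; the rest of the
       reservation stays PROT_NONE (or MEM_RESERVE on Windows). */
    if (!CommitMemory(base, heap_segment_size)) {
      return NULL;
    }
  }
  return base;
}
#endif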

void
UnCommitMemory (LogicalAddress start, natural len) {
#if DEBUG_MEMORY
  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
#endif
#ifdef WINDOWS
  int rc = VirtualFree(start, len, MEM_DECOMMIT);
  if (!rc) {
    wperror("UnCommitMemory VirtualFree");
    Fatal("mmap error", "");
    return;
  }
#else
  if (len) {
    madvise(start, len, MADV_DONTNEED);
    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
        != start) {
      int err = errno;
      Fatal("mmap error", "");
      fprintf(dbgout, "errno = %d", err);
    }
  }
#endif
}


LogicalAddress
MapMemory(LogicalAddress addr, natural nbytes, int protection)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  {
    int flags = MAP_PRIVATE|MAP_ANON;

    if (addr > 0) flags |= MAP_FIXED;
    return mmap(addr, nbytes, protection, flags, -1, 0);
  }
#endif
}

LogicalAddress
MapMemoryForStack(natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
#endif
#ifdef WINDOWS
  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
#else
  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
#endif
}


/* Cause the mapped memory region at ADDR to become completely unmapped.
   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
   and NBYTES should be the size of the mapped region at that address. */
int
UnMapMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  return !VirtualFree(addr, 0, MEM_RELEASE);
#else
  return munmap(addr, nbytes);
#endif
}
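
/*
  Illustrative sketch (hypothetical): MapMemoryForStack() and UnMapMemory()
  are intended to be used as a pair, with the caller remembering the size
  it originally requested.  On Windows a failed mapping is NULL; on the
  mmap() platforms it is MAP_FAILED.
*/
#if 0
static void
example_stack_mapping(natural stack_size)
{
  LogicalAddress stack = MapMemoryForStack(stack_size);

  /* ... use the region as a stack for the life of a thread ... */
  UnMapMemory(stack, stack_size);
}
#endif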

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);

  if(!status) {
    wperror("ProtectMemory VirtualProtect");
    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#else
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;

    if (status == ENOMEM) {
      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
      if (mapaddr != MAP_FAILED) {
        return 0;
      }
    }
    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
  }
  return status;
#endif
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
#if DEBUG_MEMORY
  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
#endif
#ifdef WINDOWS
  DWORD oldProtect;
  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
#else
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
#endif
}

int
MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd)
{
#ifdef WINDOWS
#if 0
  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
  HANDLE hFile, hFileMapping;
  LPVOID rc;
  DWORD desiredAccess;

  if (permissions == MEMPROTECT_RWX) {
    permissions |= PAGE_WRITECOPY;
    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  } else {
    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
  }

  hFile = _get_osfhandle(fd);
  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);

  if (!hFileMapping) {
    wperror("CreateFileMapping");
    return false;
  }

  rc = MapViewOfFileEx(hFileMapping,
                       desiredAccess,
                       (pos >> 32),
                       (pos & 0xffffffff),
                       nbytes,
                       addr);
#else
  size_t count, total = 0;
  size_t opos;

  opos = LSEEK(fd, 0, SEEK_CUR);
  CommitMemory(addr, nbytes);
  LSEEK(fd, pos, SEEK_SET);

  while (total < nbytes) {
    count = read(fd, addr + total, nbytes - total);
    total += count;
    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
    if (!(count > 0))
      return false;
  }

  LSEEK(fd, opos, SEEK_SET);

  return true;
#endif
#else
  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
#endif
}
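
/*
  Illustrative sketch (hypothetical, simplified): an image loader could
  combine the primitives above, reserving the heap region and then
  file-mapping one section of the image into it.  The "section_offset"
  and "section_length" parameters are placeholders, not names used
  elsewhere in the kernel.
*/
#if 0
static Boolean
example_map_image_section(LogicalAddress want, natural totalsize, int fd,
                          natural section_offset, natural section_length)
{
  LogicalAddress base = ReserveMemoryForHeap(want, totalsize);

  if (base == NULL) {
    return false;
  }
  /* Map the section copy-on-write (or copy it in, on Windows). */
  return MapFile(base, section_offset, section_length, MEMPROTECT_RWX, fd);
}
#endif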

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = malloc(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}




protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}
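
/*
  Illustrative sketch (hypothetical): the usual protected-area life cycle
  is to create the area already protected ("now" = true), look it up again
  from a fault handler with find_protected_area(), and unprotect it so the
  faulting access can proceed.
*/
#if 0
static void
example_guard_region(BytePtr start, BytePtr end, lisp_protection_kind reason)
{
  protected_area_ptr p = new_protected_area(start, end, reason,
                                            (natural)(end - start), true);

  if (p) {
    /* ... later, typically from an exception handler ... */
    protected_area_ptr hit = find_protected_area(start);

    if (hit) {
      unprotect_area(hit);
    }
  }
}
#endif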


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}




/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;

  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
#ifdef WINDOWS
    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
#endif
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = calloc(1, sizeof(area));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */

  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}
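
/*
  Illustrative sketch (hypothetical): creating a temp-stack-like area with
  new_area() and linking it into the ordered all_areas list; add_area()
  takes and releases TCR_AREA_LOCK around the insertion.
*/
#if 0
static area *
example_register_area(BytePtr low, BytePtr high, TCR *tcr)
{
  area *a = new_area(low, high, AREA_TSTACK);

  if (a) {
    add_area(a, tcr);
  }
  return a;
}
#endif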

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.

*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.

*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
}



/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(managed_static_area->active,managed_static_area->low);

    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}
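
/*
  Illustrative sketch (hypothetical): temporarily disabling the EGC around
  some operation and restoring the previous state afterwards.  The prior
  state is read from active_dynamic_area->older, the same test that
  egc_control() itself uses.
*/
#if 0
static void
example_without_egc(void (*thunk)(void))
{
  Boolean egc_was_active = (active_dynamic_area->older != NULL);

  lisp_egc_control(false);
  thunk();
  lisp_egc_control(egc_was_active);
}
#endif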




/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  free(p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
  */

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;

  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  free(a);
}


void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area
  */
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}


void
protect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      ProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

void
unprotect_watched_areas()
{
  area *a = active_dynamic_area;
  natural code = a->code;

  while (code != AREA_VOID) {
    if (code == AREA_WATCHED) {
      natural size = a->high - a->low;

      UnProtectMemory(a->low, size);
    }
    a = a->succ;
    code = a->code;
  }
}

LogicalAddress
ReserveMemory(natural size)
{
  LogicalAddress p;
#ifdef WINDOWS
  p = VirtualAlloc(0,
                   size,
                   MEM_RESERVE,
                   PAGE_NOACCESS);
  return p;
#else
  p = mmap(NULL,size,PROT_NONE,MAP_PRIVATE|MAP_ANON|MAP_NORESERVE,-1,0);
  if (p == MAP_FAILED) {
    return NULL;
  }
  return p;
#endif
}

#ifdef DARWIN64
/*
  On 64-bit Darwin, we try to make a TCR's address serve as a Mach port
  name, which means that it has to fit in 32 bits (and not conflict with
  an existing port name, but that's a separate issue.)  Darwin doesn't
  seem to offer means of mapping/allocating memory that's guaranteed to
  return a 32-bit address on 64-bit systems, and trial-and-error doesn't
  scale well.

  Since it's a PITA to allocate 32-bit TCR pointers, we never free them
  once we've done so.  (We maintain a queue of "freed" TCRs but never
  unmap the memory.)  When we need to allocate TCR pointers, we try to
  allocate substantially more than we need.

  The bulk allocation works by scanning the task's mapped memory
  regions until a free region of appropriate size is found, then
  mapping that region (without the dangerous use of MAP_FIXED).  This
  will win if OSX's mmap() tries to honor the suggested address if it
  doesn't conflict with a mapped region (as it seems to in practice
  since at least 10.5 and as it's documented to in 10.6.)
*/

pthread_mutex_t darwin_tcr_lock = PTHREAD_MUTEX_INITIALIZER;

TCR _free_tcr_queue, *darwin_tcr_freelist=&_free_tcr_queue;

#define TCR_CLUSTER_COUNT 1024   /* Enough that we allocate clusters rarely,
but not so much that we waste lots of 32-bit memory. */

#define LIMIT_32_BIT (natural)(1L<<32L)

void
map_tcr_cluster(TCR *head)
{
  TCR *work = NULL, *prev = head;
  int i;
  vm_address_t addr = (vm_address_t)0, nextaddr;
  void *p;
  vm_size_t request_size = align_to_power_of_2((TCR_CLUSTER_COUNT*sizeof(TCR)),log2_page_size), vm_size;
  vm_region_basic_info_data_64_t vm_info;
  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
  mach_port_t vm_object_name = (mach_port_t) 0;
  kern_return_t kret;

  while (1) {
    nextaddr = addr;
    vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
    kret = vm_region_64(mach_task_self(),
                        &nextaddr,
                        &vm_size,
                        VM_REGION_BASIC_INFO_64,
                        (vm_region_info_t)&vm_info,
                        &vm_info_size,
                        &vm_object_name);
    if (kret != KERN_SUCCESS) {
      break;
    }
    if (addr && ((nextaddr - addr) > request_size)) {
      if ((addr + request_size) > LIMIT_32_BIT) {
        break;
      }
      p = mmap((void *)addr,
               request_size,
               PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANON,
               -1,
               0);
      if (p == MAP_FAILED) {
        break;
      } else {
        if (((natural)p > LIMIT_32_BIT) ||
            ((((natural)p)+request_size) > LIMIT_32_BIT)) {
          munmap(p, request_size);
          nextaddr = 0;
          vm_size = 0;
        } else {
          work = (TCR *) p;
          break;
        }
      }
    }
    addr = nextaddr + vm_size;
  }
  if (!work) {
    Fatal("Can't allocate memory for thread-local storage.", "");
  }

  for (i=0; i < TCR_CLUSTER_COUNT; i++, work++) {
    prev->next = work;
    work->prev = prev;
    head->prev = work;
    work->next = head;
    prev = work;
  }
}

void
darwin_free_tcr(TCR *tcr)
{
  TCR  *head = darwin_tcr_freelist, *tail;

  pthread_mutex_lock(&darwin_tcr_lock);
  tail = head->prev;
  tail->next = tcr;
  head->prev = tcr;
  tcr->prev = tail;
  tcr->next = head;
  pthread_mutex_unlock(&darwin_tcr_lock);
}

TCR *
darwin_allocate_tcr()
{
  TCR  *head = darwin_tcr_freelist, *tail, *tcr;
  pthread_mutex_lock(&darwin_tcr_lock);
  if (head->next == NULL) { /* First time */
    head->next = head->prev = head;
  }

  if (head->next == head) {
    map_tcr_cluster(head);
  }
  tcr = head->next;
  tail = tcr->next;
  tail->prev = head;
  head->next = tail;
  pthread_mutex_unlock(&darwin_tcr_lock);
  return tcr;
}
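
/*
  Illustrative sketch (hypothetical): a TCR allocated here has an address
  below the 4GB line, so it can double as a Mach port name; when the thread
  exits it goes back on the freelist and its memory is never unmapped.
*/
#if 0
static void
example_tcr_lifetime(void)
{
  TCR *tcr = darwin_allocate_tcr();

  if ((natural)tcr < LIMIT_32_BIT) {
    /* ... use tcr for the lifetime of a thread ... */
    darwin_free_tcr(tcr);
  }
}
#endif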




#endif