source: release/1.9/source/lisp-kernel/memory.c @ 16083

Last change on this file since 16083 was 15470, checked in by gb, 7 years ago

Try to minimize Mach dependencies on Darwin; in particular, use POSIX
signal handling to handle exceptions (as is done on other *nix platforms.)

Use sigaltstack() on Darwin; it still seems to have problems, but at least
doesn't (usually) try to force all threads to use the same alt stack. (That
just never gets old somehow ...)

Lots of stuff removed; lots of (mostly small, mostly obvious) changes to
support the above.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.9 KB
1/*
2   Copyright (C) 2009 Clozure Associates
3   Copyright (C) 1994-2001 Digitool, Inc
4   This file is part of Clozure CL. 
5
6   Clozure CL is licensed under the terms of the Lisp Lesser GNU Public
7   License , known as the LLGPL and distributed with Clozure CL as the
8   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
9   which is distributed with Clozure CL as the file "LGPL".  Where these
10   conflict, the preamble takes precedence. 
11
12   Clozure CL is referenced in the preamble as the "LIBRARY."
13
14   The LLGPL is also available online at
15   http://opensource.franz.com/preamble.html
16*/
17
18#include "lisp.h"
19#include "lisp-exceptions.h"
20#include "lisp_globals.h"
21#include "threads.h"
22#include <ctype.h>
23#include <stdio.h>
24#include <stdlib.h>
25#include <stddef.h>
26#include <string.h>
27#include <stdarg.h>
28#include <errno.h>
29#include <stdio.h>
30#include <unistd.h>
31#ifdef LINUX
32#include <strings.h>
33#endif
34#ifdef DARWIN
35#include <pthread.h>
36#endif
37
38#ifndef WINDOWS
39#include <sys/mman.h>
40#endif
41
42#define DEBUG_MEMORY 0
43
44void
45allocation_failure(Boolean pointerp, natural size)
46{
47  char buf[64];
48  sprintf(buf, "Can't allocate %s of size " DECIMAL " bytes.", pointerp ? "pointer" : "handle", size);
49  Fatal(":   Kernel memory allocation failure.  ", buf);
50}
51
52void *
53lisp_malloc(size_t size)
54{
55  return malloc(size);
56}
57
58void
59lisp_free(void *p)
60{
61  free(p);
62}
63
64
65LogicalAddress
66ReserveMemoryForHeap(LogicalAddress want, natural totalsize)
67{
68  void raise_limit(void);
69  LogicalAddress start;
70  raise_limit();
71#ifdef WINDOWS
72  start = VirtualAlloc((void *)want,
73                       totalsize + heap_segment_size,
74                       MEM_RESERVE,
75                       PAGE_NOACCESS);
76  if (!start) {
77#if DEBUG_MEMORY   
78    fprintf(dbgout, "Can't get desired heap address at 0x" LISP "\n", want);
79#endif
80    start = VirtualAlloc(0,
81                         totalsize + heap_segment_size,
82                         MEM_RESERVE,
83                         PAGE_NOACCESS);
84    if (!start) {
85      return NULL;
86    }
87  }
88#else
89  start = mmap((void *)want,
90               totalsize + heap_segment_size,
91               PROT_NONE,
92               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
93               -1,
94               0);
95  if (start == MAP_FAILED) {
96    return NULL;
97  }
98
99  if (start != want) {
100    munmap(start, totalsize+heap_segment_size);
101    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
102    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
103      return NULL;
104    }
105  }
106  mprotect(start, totalsize, PROT_NONE);
107#endif
108#if DEBUG_MEMORY
109  fprintf(dbgout, "Reserving heap at 0x" LISP ", size 0x" LISP "\n", start, totalsize);
110#endif
111  return start;
112}
113
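/* Make the LEN bytes at START readable, writable, and executable.  On
   Windows this commits the pages (or just adjusts protection when the
   range overlaps the nil area in the executable); elsewhere it maps
   fresh anonymous pages over the range, retrying a few times if mmap()
   places them somewhere other than START.  Returns true on success. */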
114int
115CommitMemory (LogicalAddress start, natural len) 
116{
117#if DEBUG_MEMORY
118  fprintf(dbgout, "Committing memory at 0x" LISP ", size 0x" LISP "\n", start, len);
119#endif
120#ifdef WINDOWS
121  LogicalAddress rc;
122
123  if ((start < ((LogicalAddress)nil_value)) &&
124      (((LogicalAddress)nil_value) < (start+len))) {
125    /* nil area is in the executable on Windows; ensure range is
126       read-write */
127    DWORD as_if_i_care;
128    if (!VirtualProtect(start,len,PAGE_EXECUTE_READWRITE,&as_if_i_care)) {
129      return false;
130    }
131    return true;
132  }
133  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
134  if (!rc) {
135    wperror("CommitMemory VirtualAlloc");
136    return false;
137  }
138  return true;
139#else
140  int i;
141  void *addr;
142
143  for (i = 0; i < 3; i++) {
144    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
145    if (addr == start) {
146      return true;
147    } else {
148      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
149    }
150  }
151  return false;
152#endif
153}
154
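/* Illustrative sketch only (this helper is not part of the kernel): the
   usual pattern is to reserve one large, inaccessible region up front
   and then commit heap-segment-sized pieces of it as the heap grows.
   Committing exactly one segment here is an arbitrary choice. */
#if 0
static LogicalAddress
example_reserve_and_commit(LogicalAddress want, natural totalsize)
{
  LogicalAddress base = ReserveMemoryForHeap(want, totalsize);

  if (base == NULL) {
    return NULL;
  }
  /* Make the first segment usable; the rest stays PROT_NONE/MEM_RESERVE
     until some later CommitMemory() call. */
  if (!CommitMemory(base, heap_segment_size)) {
    return NULL;
  }
  return base;
}
#endif
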
155void
156UnCommitMemory (LogicalAddress start, natural len) {
157#if DEBUG_MEMORY
158  fprintf(dbgout, "Uncommitting memory at 0x" LISP ", size 0x" LISP "\n", start, len);
159#endif
160#ifdef WINDOWS
161  int rc = VirtualFree(start, len, MEM_DECOMMIT);
162  if (!rc) {
163    wperror("UnCommitMemory VirtualFree");
164    Fatal("mmap error", "");
165    return;
166  }
167#else
168  if (len) {
169    madvise(start, len, MADV_DONTNEED);
170    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
171        != start) {
172      int err = errno;
173      fprintf(dbgout, "errno = %d\n", err);
174      Fatal("mmap error", "");
175    }
176  }
177#endif
178}
179
180
181LogicalAddress
182MapMemory(LogicalAddress addr, natural nbytes, int protection)
183{
184  LogicalAddress p;
185#if DEBUG_MEMORY
186  fprintf(dbgout, "Mapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
187#endif
188#ifdef WINDOWS
189  p = VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
190  if (p == NULL) {
191    wperror("MapMemory");
192  }
193  return p;
194#else
195  {
196    int flags = MAP_PRIVATE|MAP_ANON;
197
198    if (addr != NULL) flags |= MAP_FIXED;
199    return mmap(addr, nbytes, protection, flags, -1, 0);
200  }
201#endif
202}
203
204LogicalAddress
205MapMemoryForStack(natural nbytes)
206{
207#if DEBUG_MEMORY
208  fprintf(dbgout, "Mapping stack of size 0x" LISP "\n", nbytes);
209#endif
210#ifdef WINDOWS
211  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
212#else
213  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON, -1, 0);
214#endif
215}
216
217
218/* Cause the mapped memory region at ADDR to become completely unmapped.
219   ADDR should be an address returned by MapMemoryForStack() or MapMemory(),
220   and NBYTES should be the size of the mapped region at that address. */
221int
222UnMapMemory(LogicalAddress addr, natural nbytes)
223{
224#if DEBUG_MEMORY
225  fprintf(dbgout, "Unmapping memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
226#endif
227#ifdef WINDOWS
228  return !VirtualFree(addr, 0, MEM_RELEASE);
229#else
230  return munmap(addr, nbytes);
231#endif
232}
233
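/* Make the NBYTES bytes at ADDR read/execute-only.  On the POSIX side an
   ENOMEM from mprotect() (e.g. when part of the range isn't mapped) is
   handled by mapping fresh read/execute pages over the range; any other
   failure is reported via Bug(). */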
234int
235ProtectMemory(LogicalAddress addr, natural nbytes)
236{
237#if DEBUG_MEMORY
238  fprintf(dbgout, "Protecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
239#endif
240#ifdef WINDOWS
241  DWORD oldProtect;
242  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);
243 
244  if(!status) {
245    wperror("ProtectMemory VirtualProtect");
246    Bug(NULL, "couldn't protect " DECIMAL " bytes at 0x" LISP ", errno = %d", nbytes, addr, status);
247  }
248  return status;
249#else
250  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);
251 
252  if (status) {
253    status = errno;
254   
255    if (status == ENOMEM) {
256      void *mapaddr = mmap(addr,nbytes, PROT_READ | PROT_EXEC, MAP_ANON|MAP_PRIVATE|MAP_FIXED,-1,0);
257      if (mapaddr != MAP_FAILED) {
258        return 0;
259      }
260    }
261    Bug(NULL, "couldn't protect " DECIMAL " bytes at " LISP ", errno = %d", nbytes, addr, status);
262  }
263  return status;
264#endif
265}
266
267int
268UnProtectMemory(LogicalAddress addr, natural nbytes)
269{
270#if DEBUG_MEMORY
271  fprintf(dbgout, "Unprotecting memory at 0x" LISP ", size 0x" LISP "\n", addr, nbytes);
272#endif
273#ifdef WINDOWS
274  DWORD oldProtect;
275  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
276#else
277  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
278#endif
279}
280
281int
282MapFile(LogicalAddress addr, natural pos, natural nbytes, int permissions, int fd) 
283{
284#ifdef WINDOWS
285#if 0
286  /* Lots of hair in here: mostly alignment issues, but also address space reservation */
287  HANDLE hFile, hFileMapping;
288  LPVOID rc;
289  DWORD desiredAccess;
290
291  if (permissions == MEMPROTECT_RWX) {
292    permissions |= PAGE_WRITECOPY;
293    desiredAccess = FILE_MAP_READ|FILE_MAP_WRITE|FILE_MAP_COPY|FILE_MAP_EXECUTE;
294  } else {
295    desiredAccess = FILE_MAP_READ|FILE_MAP_COPY|FILE_MAP_EXECUTE;
296  }
297
298  hFile = _get_osfhandle(fd);
299  hFileMapping = CreateFileMapping(hFile, NULL, permissions,
300                                   (nbytes >> 32), (nbytes & 0xffffffff), NULL);
301 
302  if (!hFileMapping) {
303    wperror("CreateFileMapping");
304    return false;
305  }
306
307  rc = MapViewOfFileEx(hFileMapping,
308                       desiredAccess,
309                       (pos >> 32),
310                       (pos & 0xffffffff),
311                       nbytes,
312                       addr);
313#else
314  ssize_t count;
315  size_t total = 0, opos;
316
317  opos = LSEEK(fd, 0, SEEK_CUR);
318  CommitMemory(addr, nbytes);
319  LSEEK(fd, pos, SEEK_SET);
320
321  while (total < nbytes) {
322    count = read(fd, addr + total, nbytes - total);
323    if (count <= 0)
324      return false;
325    // fprintf(dbgout, "read " DECIMAL " bytes, for a total of " DECIMAL " out of " DECIMAL " so far\n", count, total, nbytes);
326    total += count;
327  }
328
329  LSEEK(fd, opos, SEEK_SET);
330
331  return true;
332#endif
333#else
334  return mmap(addr, nbytes, permissions, MAP_PRIVATE|MAP_FIXED, fd, pos) != MAP_FAILED;
335#endif
336}
337
338void
339unprotect_area(protected_area_ptr p)
340{
341  BytePtr start = p->start;
342  natural nprot = p->nprot;
343 
344  if (nprot) {
345    UnProtectMemory(start, nprot);
346    p->nprot = 0;
347  }
348}
349
350protected_area_ptr
351new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
352{
353  protected_area_ptr p = malloc(sizeof(protected_area));
354 
355  if (p == NULL) return NULL;
356  p->protsize = protsize;
357  p->nprot = 0;
358  p->start = start;
359  p->end = end;
360  p->why = reason;
361  p->next = AllProtectedAreas;
362
363  AllProtectedAreas = p;
364  if (now) {
365    protect_area(p);
366  }
367 
368  return p;
369}
370
371
372
373
374protected_area_ptr
375AllProtectedAreas = NULL;
376
377
378/*
379  This does a linear search.  Areas aren't created all that often;
380  if there get to be very many of them, some sort of tree search
381  might be justified.
382*/
383
384protected_area_ptr
385find_protected_area(BytePtr addr)
386{
387  protected_area* p;
388 
389  for(p = AllProtectedAreas; p; p=p->next) {
390    if ((p->start <= addr) && (p->end > addr)) {
391      return p;
392    }
393  }
394  return NULL;
395}
396
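/* Illustrative sketch only (hypothetical caller): a fault handler might
   use these records by looking up the faulting address and, if it lies
   in a protected area, dropping the protection so the access can be
   retried.  The real exception handlers live elsewhere in the kernel. */
#if 0
static Boolean
example_handle_protection_fault(BytePtr fault_addr)
{
  protected_area_ptr p = find_protected_area(fault_addr);

  if (p == NULL) {
    return false;             /* not an address we protected */
  }
  unprotect_area(p);          /* restores read/write/execute access */
  return true;                /* caller can retry the faulting access */
}
#endif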
397
398void
399zero_memory_range(BytePtr start, BytePtr end)
400{
401#ifdef WINDOWS
402  ZeroMemory(start,end-start);
403#else
404  bzero(start,(size_t)(end-start));
405#endif
406}
407
408
409 
410
411/*
412   Grow or shrink the dynamic area.  Or maybe not.
413   Whether or not the end of (mapped space in) the heap changes,
414   ensure that everything between the freeptr and the heap end
415   is mapped and read/write.  (It'll incidentally be zeroed.)
416*/
417Boolean
418resize_dynamic_heap(BytePtr newfree, 
419                    natural free_space_size)
420{
421  area *a = active_dynamic_area;
422  BytePtr newlimit;
423
424  if (free_space_size) {
425    BytePtr lowptr = a->active;
426    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
427                                            log2_heap_segment_size);
428    if (newlimit > a->high) {
429      return grow_dynamic_area(newlimit-a->high);
430    } else if ((lowptr + free_space_size) < a->high) {
431      shrink_dynamic_area(a->high-newlimit);
432      return true;
433    }
434  }
435  return false;
436}
437
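/* Worked example, assuming (purely for illustration) 128KB heap segments
   (log2_heap_segment_size == 17): with newfree - lowptr == 0x9000 and
   free_space_size == 0x8000, the sum 0x11000 rounds up to 0x20000, so
   newlimit == lowptr + 0x20000.  If that lies beyond a->high, the area
   grows by the difference; if even lowptr + free_space_size is still
   below a->high, the area shrinks by a->high - newlimit. */
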
438void
439protect_area(protected_area_ptr p)
440{
441  BytePtr start = p->start;
442  natural n = p->protsize;
443
444  if (n && ! p->nprot) {
445    ProtectMemory(start, n);
446#ifdef WINDOWS
447    VirtualAlloc(start+n-page_size,page_size,MEM_COMMIT,PAGE_READWRITE|PAGE_GUARD);
448#endif
449    p->nprot = n;
450  }
451}
452
453
454void
455zero_page(BytePtr start)
456{
457  extern int page_size;
458#ifdef PPC
459  extern void zero_cache_lines(BytePtr, size_t, size_t);
460  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
461#else
462  memset(start, 0, page_size);
463#endif
464}
465
466/* area management */
467
468
469area *
470new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
471{
472  area *a = calloc(1, sizeof(area));
473  if (a) {
474    natural ndnodes = area_dnode(highaddr, lowaddr);
475    a->low = lowaddr;
476    a->high = highaddr;
477    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
478    a->code = code;
479    a->ndnodes = ndnodes;
480    /* Caller must allocate markbits when allocating heap ! */
481   
482  }
483  return a;
484}
485
486static area *
487add_area_before(area *new_area, area *before)
488{
489  area *before_before = before->pred;
490
491  new_area->pred = before_before;
492  new_area->succ = before;
493  before_before->succ = new_area;
494  before->pred = new_area;
495  return new_area;
496}
497
498/*
499  The active dynamic area comes first.
500  Static areas follow dynamic areas.
501  Stack areas follow static areas.
502  Readonly areas come last.
503*/
504
505/*
506  If we already own the area_lock (or during initialization), it's safe
507  to add an area.
508*/
509
510
511void
512add_area_holding_area_lock(area *new_area)
513{
514  area *that = all_areas;
515  int
516    thiscode = (int)(new_area->code),
517    thatcode;
518
519  /* Cdr down the linked list */
520  do {
521    that = that->succ;
522    thatcode = (int)(that->code);
523  } while (thiscode < thatcode);
524  add_area_before(new_area, that);
525}
526
527/*
528  In general, we need to own the area lock before adding an area.
529*/
530void
531add_area(area *new_area, TCR *tcr)
532{
533  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
534  add_area_holding_area_lock(new_area);
535  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
536} 
537
538/*
539  Search areas "forward" from the header's successor, until
540  an area containing ADDR is found or an area with code < MINCODE
541  is encountered.
542  This walks the area list visiting heaps (dynamic, then static)
543  first, then stacks.
544
545*/
546static area *
547find_area_forward(BytePtr addr, area_code mincode)
548{
549  area *p, *header = all_areas;
550
551  for (p = header->succ; p != header; p = p->succ) {
552    area_code pcode = p->code;
553    if (pcode < mincode) {
554      return NULL;
555    }
556    if (pcode >= AREA_READONLY) {
557      if ((addr >= p->low) &&
558          (addr < p->active)) {
559        return p;
560      }
561    } else {
562      if ((addr >= p->active) &&
563          (addr < p->high)) {
564        return p;
565      }
566    }
567  }
568  return NULL;
569}
570
571static area *
572find_area_backward(BytePtr addr, area_code maxcode)
573{
574  area *p, *header = all_areas;
575
576  for (p = header->pred; p != header; p = p->pred) {
577    area_code pcode = p->code;
578
579    if (pcode > maxcode) {
580      return NULL;
581    }
582    if (pcode >= AREA_READONLY) {
583      if ((addr >= p->low) &&
584          (addr < p->active)) {
585        return p;
586      }
587    } else {
588      if ((addr >= p->active) &&
589          (addr < p->high)) {
590        return p;
591      }
592    }
593  }
594  return NULL;
595}
596
597area *
598area_containing(BytePtr addr)
599{
600  return find_area_forward(addr, AREA_VOID);
601}
602
603area *
604heap_area_containing(BytePtr addr)
605{
606  return find_area_forward(addr, AREA_READONLY);
607}
608
609area *
610stack_area_containing(BytePtr addr)
611{
612  return find_area_backward(addr, AREA_TSTACK);
613}
614
615/*
616  Make everything "younger" than the start of the target area
617  belong to that area; all younger areas will become empty, and
618  the dynamic area will have to lose some of its markbits (they
619  get zeroed and become part of the tenured area's refbits.)
620
621  The active dynamic area must have been "normalized" (e.g., its
622  active pointer must match the free pointer) before this is called.
623
624  If the target area is 'tenured_area' (the oldest ephemeral generation),
625  zero its refbits and update OLDEST_EPHEMERAL.
626
627*/
628
629void
630tenure_to_area(area *target)
631{
632  area *a = active_dynamic_area, *child;
633  BytePtr
634    curfree = a->active,
635    target_low = target->low;
636  natural new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
637  bitvector
638    refbits = tenured_area->refbits,
639    markbits = a->markbits,
640    new_markbits;
641
642  target->high = target->active = curfree;
643  target->ndnodes = area_dnode(curfree, target_low);
644
645  for (child = target->younger; child != a; child = child->younger) {
646    child->high = child->low = child->active = curfree;
647    child->ndnodes = 0;
648  }
649
650  a->low = curfree;
651  a->ndnodes = area_dnode(a->high, curfree);
652
653  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);
654 
655  if (target == tenured_area) {
656    zero_bits(refbits, new_tenured_dnodes);
657    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
658  } else {
659    /* Need more (zeroed) refbits & fewer markbits */
660    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
661  }
662   
663  a->markbits = new_markbits;
664  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(REF_BASE));
665}
666
667
668
669/*
670  Make everything younger than the oldest byte in 'from' belong to
671  the youngest generation.  If 'from' is 'tenured_area', this means
672  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
673  to 0 to indicate this.)
674 
675  Some tenured_area refbits become dynamic area markbits in the process;
676  it's not necessary to zero them, since the GC will do that.
677*/
678
679void
680untenure_from_area(area *from)
681{
682  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
683    area *a = active_dynamic_area, *child;
684    BytePtr curlow = from->low;
685    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);
686   
687    for (child = from; child != a; child = child->younger) {
688      child->low = child->active = child->high = curlow;
689      child->ndnodes = 0;
690    }
691   
692    a->low = curlow;
693    a->ndnodes = area_dnode(a->high, curlow);
694   
695    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
696    if (from == tenured_area) {
697      /* Everything's in the dynamic area */
698      lisp_global(OLDEST_EPHEMERAL) = 0;
699      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
700
701    }
702  }
703}
704
705
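/* Turn the ephemeral GC on or off.  When ACTIVATE is true, link the
   dynamic area to g1_area and tenure everything below CURFREE; when
   false, untenure everything back into the dynamic area.  A non-NULL
   CURFREE becomes the dynamic area's active pointer first.  Returns
   whether the EGC is active after the call. */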
706Boolean
707egc_control(Boolean activate, BytePtr curfree)
708{
709  area *a = active_dynamic_area;
710  Boolean egc_is_active = (a->older != NULL);
711
712  if (activate != egc_is_active) {
713    if (curfree != NULL) {
714      a->active = curfree;
715    }
716    if (activate) {
717      a->older = g1_area;
718      tenure_to_area(tenured_area);
719      egc_is_active = true;
720    } else {
721      untenure_from_area(tenured_area);
722      a->older = NULL;
723      egc_is_active = false;
724    }
725  }
726  return egc_is_active;
727}
728
729/*
730  Lisp ff-calls this; it needs to set the active area's active pointer
731  correctly.
732*/
733
734Boolean
735lisp_egc_control(Boolean activate)
736{
737  area *a = active_dynamic_area;
738  return egc_control(activate, (BytePtr) a->active);
739}
740
741
742
743 
744/* Splice the protected_area_ptr out of the list and dispose of it. */
745void
746delete_protected_area(protected_area_ptr p)
747{
748  BytePtr start = p->start;
749  int nbytes = p->nprot;
750  protected_area_ptr *prev = &AllProtectedAreas, q;
751
752  if (nbytes) {
753    UnProtectMemory((LogicalAddress)start, nbytes);
754  }
755 
756  while ((q = *prev) != NULL) {
757    if (p == q) {
758      *prev = p->next;
759      break;
760    } else {
761      prev = &(q->next);
762    }
763  }
764
765  free(p);
766}
767
768
769
770
771/*
772  Unlink the area from all_areas.
773  Unprotect and dispose of any hard/soft protected_areas.
774  If the area has a handle, dispose of that as well.
775  */
776
777void
778condemn_area_holding_area_lock(area *a)
779{
780  void free_stack(void *);
781  area *prev = a->pred, *next = a->succ;
782  Ptr h = a->h;
783  protected_area_ptr p;
784
785  prev->succ = next;
786  next->pred = prev;
787
788  p = a->softprot;
789  if (p) delete_protected_area(p);
790
791  p = a->hardprot;
792
793  if (p) delete_protected_area(p);
794
795  if (h) free_stack(h);
796  free(a);
797}
798
799
800
801void
802condemn_area(area *a, TCR *tcr)
803{
804  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
805  condemn_area_holding_area_lock(a);
806  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
807}
808
809
810
811
812/*
813  condemn an area and all the other areas that can be reached
814  via the area.older & area.younger links.
815  This is the function in the ppc::kernel-import-condemn-area slot,
816  called by free-stack-area
817  */
818void
819condemn_area_chain(area *a, TCR *tcr)
820{
821  area *older;
822
823  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
824
825  for (; a->younger; a = a->younger) ;
826  for (;a;) {
827    older = a->older;
828    condemn_area_holding_area_lock(a);
829    a = older;
830  }
831  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
832}
833
834
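/* Write-protect (and, in the function below, unprotect) every
   AREA_WATCHED area, walking the area list from the active dynamic area
   until the list header (AREA_VOID) is reached.  Protecting the pages
   makes stores into watched objects fault so they can be trapped
   elsewhere in the kernel. */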
835void
836protect_watched_areas()
837{
838  area *a = active_dynamic_area;
839  natural code = a->code;
840
841  while (code != AREA_VOID) {
842    if (code == AREA_WATCHED) {
843      natural size = a->high - a->low;
844     
845      ProtectMemory(a->low, size);
846    }
847    a = a->succ;
848    code = a->code;
849  }
850}
851
852void
853unprotect_watched_areas()
854{
855  area *a = active_dynamic_area;
856  natural code = a->code;
857
858  while (code != AREA_VOID) {
859    if (code == AREA_WATCHED) {
860      natural size = a->high - a->low;
861     
862      UnProtectMemory(a->low, size);
863    }
864    a = a->succ;
865    code = a->code;
866  }
867}
868
869LogicalAddress
870ReserveMemory(natural size)
871{
872  LogicalAddress p;
873#ifdef WINDOWS
874  p = VirtualAlloc(0,
875                   size,
876                   MEM_RESERVE,
877                   PAGE_NOACCESS);
878  return p;
879#else
880  p = mmap(NULL,size,PROT_NONE,MAP_PRIVATE|MAP_ANON|MAP_NORESERVE,-1,0);
881  if (p == MAP_FAILED) {
882    return NULL;
883  }
884  return p;
885#endif
886}
887
888#ifdef DARWIN
889/*
890  On 64-bit Darwin, we try to make a TCR's address serve as a Mach port
891  name, which means that it has to fit in 32 bits (and not conflict with
892  an existing port name, but that's a separate issue.)  Darwin doesn't
893  seem to offer means of mapping/allocating memory that's guaranteed to
894  return a 32-bit address on 64-bit systems, and trial-and-error doesn't
895  scale well.
896 
897  Since it's a PITA to allocate 32-bit TCR pointers, we never free them
898  once we've done so.  (We maintain a queue of "freed" TCRs but never
899  unmap the memory.)  When we need to allocate TCR pointers, we try to
900  allocate substantially more than we need.
901
902  The bulk allocation works by scanning the task's mapped memory
903  regions until a free region of appropriate size is found, then
904  mapping that region (without the dangerous use of MAP_FIXED).  This
905  will win if OSX's mmap() tries to honor the suggested address if it
906  doesn't conflict with a mapped region (as it seems to in practice
907  since at least 10.5 and as it's documented to in 10.6.)
908
909  OSX 10.8 introduces new horrors that affect 32-bit CCL as well:
910
911  mach_port_allocate_name(mach_task_self(),MACH_PORT_RIGHT_RECEIVE,n)
912 
913  returns KERN_NO_SPACE for n > ~#x09800000.  It's not known whether or
914  not this is intentional; even if it's a bug, it suggests that we should
915  probably stop trying to arrange that a TCR's address can be used as the
916  corresponding thread's exception port and maintain some sort of
917  efficient and thread-safe mapping from port to TCR.  Soon.
918
919  News flash:  mach_port_allocate_name() is not only worse than we
920  imagine on 10.8, but it's worse than we can imagine.  Give up.
921  (This means that there are no longer any constraints on TCR addresses
922  and we could just use malloc here, but keep some of this code around
923  for now.)
924*/
925
926pthread_mutex_t darwin_tcr_lock = PTHREAD_MUTEX_INITIALIZER;
927
928TCR _free_tcr_queue, *darwin_tcr_freelist=&_free_tcr_queue;
929
930#define TCR_CLUSTER_COUNT 1024   /* Enough that we allocate clusters rarely,
931                                    but not so much that we waste lots of 32-bit memory. */
932
933
934
935
936/* force 16-byte alignment, just in case */
937typedef struct {
938  TCR tcr;
939}  __attribute__ ((aligned(16))) MTCR;
940
941
942
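/* Append the N MTCRs in BUF to the tail of the circular, doubly-linked
   freelist headed by HEAD. */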
943void
944link_tcr_list(TCR *head, MTCR *buf, int n)
945{
946  TCR *prev = head, *tcr;
947  int i;
948
949  for (i=0; i < n; i++, buf++) {
950    tcr = &(buf->tcr);
951    prev->next = tcr;
952    tcr->prev = prev;
953    head->prev = tcr;
954    tcr->next = head;
955    prev = tcr;
956  }
957}
958
959
960
961
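/* Map a fresh cluster of TCR_CLUSTER_COUNT MTCRs (rounded up to a whole
   number of pages) and put them all on the freelist headed by HEAD.
   Clusters are never unmapped; see the comment above. */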
962void
963map_tcr_cluster(TCR *head)
964{
965  MTCR *work = NULL;
966  TCR *prev = head;
967  int i;
968  size_t request_size = align_to_power_of_2((TCR_CLUSTER_COUNT*sizeof(MTCR)),log2_page_size);
969
970  work = (MTCR *)mmap(NULL,
971                      request_size,
972                      PROT_READ|PROT_WRITE,
973                      MAP_PRIVATE|MAP_ANON,
974                      -1,
975                      0);
976
977  if (work == MAP_FAILED) {
978    Fatal("Can't allocate memory for thread-local storage.", "");
979  }
980  link_tcr_list(head, work, TCR_CLUSTER_COUNT);
981}
982
983void
984darwin_free_tcr(TCR *tcr)
985{
986  TCR  *head = darwin_tcr_freelist, *tail;
987
988  pthread_mutex_lock(&darwin_tcr_lock);
989  tail = head->prev;
990  tail->next = tcr;
991  head->prev = tcr;
992  tcr->prev = tail;
993  tcr->next = head;
994  pthread_mutex_unlock(&darwin_tcr_lock);
995}
996
997
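/* Pop a TCR off the head of the Darwin freelist, mapping a fresh
   cluster first if the list is empty (or hasn't been initialized yet).
   The returned TCR is zeroed. */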
998TCR *
999darwin_allocate_tcr()
1000{
1001  TCR  *head = darwin_tcr_freelist, *tail, *tcr;
1002  pthread_mutex_lock(&darwin_tcr_lock);
1003  if (head->next == NULL) { /* First time */
1004    head->next = head->prev = head;
1005  }
1006
1007  if (head->next == head) {
1008    map_tcr_cluster(head);
1009  }
1010  tcr = head->next;
1011  tail = tcr->next;
1012  tail->prev = head;
1013  head->next = tail;
1014  pthread_mutex_unlock(&darwin_tcr_lock);
1015  memset(tcr,0,sizeof(TCR));
1016  return tcr;
1017}
1018 
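/* Illustrative sketch only (hypothetical callers): thread-creation code
   obtains a TCR from the freelist and gives it back when the thread
   exits; the real callers are in the thread-management code. */
#if 0
static TCR *
example_make_darwin_tcr(void)
{
  TCR *tcr = darwin_allocate_tcr();   /* zeroed; never unmapped */

  /* ... fill in stacks, register the owning thread, etc. ... */
  return tcr;
}

static void
example_retire_darwin_tcr(TCR *tcr)
{
  darwin_free_tcr(tcr);               /* back on the freelist for reuse */
}
#endif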
1019
1020
1021
1022#endif