source: trunk/source/lisp-kernel/memory.c @ 8701

Last change on this file since 8701 was 8701, checked in by gb, 12 years ago

ProtectMemory, UnprotectMemory: second arg should be "natural", not "int".

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 13.1 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %lu bytes.", pointerp ? "pointer" : "handle", (unsigned long)size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %lu bytes at %p, errno = %d", (unsigned long)nbytes, addr, status);
  }
  return status;
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first DELTA bytes of the specified area.
  Note that this may cause the area to become empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding DELTA bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}
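
/*
  Usage sketch (illustrative only; nothing in the kernel calls this
  sequence as written).  Assuming START and END delimit a region that is
  already mapped and REASON is an appropriate lisp_protection_kind value,
  the life cycle of a protected area looks roughly like:

    protected_area_ptr p = new_protected_area(start, end, reason, nbytes, true);
    ...
    if (find_protected_area(start) == p) {
      unprotect_area(p);
    }
    delete_protected_area(p);

  unprotect_area() drops the mprotect but keeps the record;
  delete_protected_area() (defined later in this file) unlinks the record
  from AllProtectedAreas and frees it.
*/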


void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}


/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree,
                    natural free_space_size)
{
  extern int page_size;
  area *a = active_dynamic_area;
  BytePtr newlimit, protptr, zptr;
  int psize = page_size;
  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return true;                  /* heap already large enough; nothing to do */
}
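
/*
  Worked example (illustrative only; the real segment size is
  platform-dependent).  Suppose log2_heap_segment_size corresponds to
  0x8000-byte segments, lowptr = a->active, newfree = lowptr + 0x13000,
  and free_space_size = 0x8000.  Then

    newfree - lowptr + free_space_size = 0x1b000,

  which rounds up to 0x20000, so newlimit = lowptr + 0x20000.  If that
  exceeds a->high, the heap grows by the difference; otherwise, if
  lowptr + free_space_size is still below a->high, the heap shrinks back
  to newlimit; and if neither holds, the mapping is left alone.
*/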

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}


void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */


area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Readonly areas follow static areas.
  Stack areas come last.
*/

/*
  If we already own the area_lock (or are in the middle of
  initialization), it's safe to add an area.
*/


void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}
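
/*
  Illustrative sketch of the invariant maintained above.  all_areas is a
  circular list (with a header node) kept sorted by descending area_code,
  so a forward walk from all_areas->succ visits something like:

    active dynamic area, static area(s), readonly area, stack areas

  (exactly which areas exist depends on the running image).  A new area
  is spliced in just ahead of the first existing area whose code is not
  greater than its own, which is what the do/while loop plus
  add_area_before() accomplish.
*/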

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}
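
/*
  Usage sketch (illustrative only).  A caller that wants to know whether
  an arbitrary address points into the lisp heap rather than a stack can
  write something like:

    area *a = heap_area_containing(addr);

    if (a != NULL) {
      ... addr lies between a->low and a->active of a dynamic, static,
          or readonly area ...
    } else if (stack_area_containing(addr) != NULL) {
      ... addr lies in the live portion (active..high) of some stack ...
    }

  area_containing() accepts any area code and therefore searches the
  entire list.
*/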

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low,
    tenured_low = tenured_area->low;
  natural
    dynamic_dnodes = area_dnode(curfree, a->low),
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}
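
/*
  Illustrative before/after sketch (the addresses are made up).  Suppose
  tenured_area starts at 0x100000, g1 and the active dynamic area are
  younger, and the dynamic area's free pointer (a->active) is at
  0x180000.  After tenure_to_area(tenured_area):

    tenured_area:                 low = 0x100000, active = high = 0x180000
    every younger area other
      than the dynamic area:      low = active = high = 0x180000 (empty)
    active dynamic area:          low = 0x180000, high unchanged

  That is, everything below the old free pointer is now "tenured", the
  dynamic area begins at that free pointer, and OLDSPACE_DNODE_COUNT
  covers everything from HEAP_START up to it.
*/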


/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
    }
  }
}


Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      LispObj *heap_start = ptr_from_lispobj(lisp_global(HEAP_START));

      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}
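
/*
  Usage sketch (illustrative only).  Note that egc_control() returns the
  resulting state, not the previous one, so a C caller that wants to
  restore the old state has to remember it itself, e.g.:

    area *a = active_dynamic_area;
    Boolean was_active = (a->older != NULL);

    egc_control(false, NULL);
    ...
    egc_control(was_active, NULL);

  When the state actually changes and curfree is non-NULL, the dynamic
  area's active pointer is updated first; lisp_egc_control() above does
  exactly that with the area's current active pointer when Lisp
  ff-calls it.
*/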


/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}


/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}



/*
  Condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area.
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}
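
/*
  Usage sketch (illustrative only).  When a thread's stack areas are torn
  down, Lisp reaches this through the kernel import mentioned above; the
  C-side equivalent is simply:

    condemn_area_chain(some_stack_area, current_tcr);

  where current_tcr is the calling thread's TCR, obtained however the
  caller normally obtains it.  The first loop above walks to the youngest
  area in the chain, and the second then condemns each area from youngest
  to oldest while TCR_AREA_LOCK is held.
*/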

#ifdef WINDOWS
void
release_readonly_area()
{
}
#else
void
release_readonly_area()
{
  area *a = readonly_area;
  munmap(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}
#endif