source: trunk/source/lisp-kernel/memory.c @ 10222

Last change on this file since 10222 was 10222, checked in by gb, 12 years ago

Don't conditionalize for Solaris in UnProtectMemory.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 12.9 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/

#include "lisp.h"
#include "lisp-exceptions.h"
#include "lisp_globals.h"
#include "Threads.h"
#include <ctype.h>
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#ifdef LINUX
#include <strings.h>
#include <fpu_control.h>
#include <linux/prctl.h>
#endif

#ifndef WINDOWS
#include <sys/mman.h>
#endif

void
allocation_failure(Boolean pointerp, natural size)
{
  char buf[64];
  sprintf(buf, "Can't allocate %s of size %ld bytes.", pointerp ? "pointer" : "handle", size);
  Fatal(":   Kernel memory allocation failure.  ", buf);
}

void
fatal_oserr(StringPtr param, OSErr err)
{
  char buf[64];
  sprintf(buf," - operating system error %d.", err);
  Fatal(param, buf);
}


Ptr
allocate(natural size)
{
  return (Ptr) malloc(size);
}

void
deallocate(Ptr p)
{
  free((void *)p);
}

Ptr
zalloc(natural size)
{
  Ptr p = allocate(size);
  if (p != NULL) {
    memset(p, 0, size);
  }
  return p;
}

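/*
  Make NBYTES starting at ADDR readable and executable, but not
  writable.  Returns 0 on success; on failure, reports via Bug() and
  returns the errno value from mprotect().
*/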
int
ProtectMemory(LogicalAddress addr, natural nbytes)
{
  int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

  if (status) {
    status = errno;
    Bug(NULL, "couldn't protect %lu bytes at %p, errno = %d", (unsigned long)nbytes, addr, status);
  }
  return status;
}

int
UnProtectMemory(LogicalAddress addr, natural nbytes)
{
  return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
}

void
unprotect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural nprot = p->nprot;

  if (nprot) {
    UnProtectMemory(start, nprot);
    p->nprot = 0;
  }
}

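/*
  Allocate a protected_area record describing [START, END) with
  protection size PROTSIZE and push it onto AllProtectedAreas.  If NOW
  is true, apply the protection immediately; otherwise it can be
  applied later via protect_area().  Returns NULL if allocation fails.
*/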
protected_area_ptr
new_protected_area(BytePtr start, BytePtr end, lisp_protection_kind reason, natural protsize, Boolean now)
{
  protected_area_ptr p = (protected_area_ptr) allocate(sizeof(protected_area));

  if (p == NULL) return NULL;
  p->protsize = protsize;
  p->nprot = 0;
  p->start = start;
  p->end = end;
  p->why = reason;
  p->next = AllProtectedAreas;

  AllProtectedAreas = p;
  if (now) {
    protect_area(p);
  }

  return p;
}

/*
  Un-protect the first DELTA bytes of the specified area.
  Note that this may cause the area to be empty.
*/
void
unprotect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start += delta;
  if ((area->start + area->protsize) <= area->end) {
    protect_area(area);
  }
}


/*
  Extend the protected area, causing the preceding DELTA bytes
  to be included and protected.
*/
void
protect_area_prefix(protected_area_ptr area, size_t delta)
{
  unprotect_area(area);
  area->start -= delta;
  protect_area(area);
}

protected_area_ptr
AllProtectedAreas = NULL;


/*
  This does a linear search.  Areas aren't created all that often;
  if there get to be very many of them, some sort of tree search
  might be justified.
*/

protected_area_ptr
find_protected_area(BytePtr addr)
{
  protected_area* p;

  for(p = AllProtectedAreas; p; p=p->next) {
    if ((p->start <= addr) && (p->end > addr)) {
      return p;
    }
  }
  return NULL;
}

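/*
  Hypothetical usage sketch (an assumption about the callers, which
  live in the exception-handling code rather than in this file): a
  handler for a write fault at FAULT_ADDR might do something like

    protected_area_ptr pa = find_protected_area((BytePtr)fault_addr);
    if (pa != NULL) {
      unprotect_area(pa);
    }

  before resuming the faulting instruction.
*/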

void
zero_memory_range(BytePtr start, BytePtr end)
{
#ifdef WINDOWS
  ZeroMemory(start,end-start);
#else
  bzero(start,(size_t)(end-start));
#endif
}



/*
   Grow or shrink the dynamic area.  Or maybe not.
   Whether or not the end of (mapped space in) the heap changes,
   ensure that everything between the freeptr and the heap end
   is mapped and read/write.  (It'll incidentally be zeroed.)
*/
Boolean
resize_dynamic_heap(BytePtr newfree, 
                    natural free_space_size)
{
  area *a = active_dynamic_area;
  BytePtr newlimit;
  if (free_space_size) {
    BytePtr lowptr = a->active;
    newlimit = lowptr + align_to_power_of_2(newfree-lowptr+free_space_size,
                                            log2_heap_segment_size);
    if (newlimit > a->high) {
      return grow_dynamic_area(newlimit-a->high);
    } else if ((lowptr + free_space_size) < a->high) {
      shrink_dynamic_area(a->high-newlimit);
      return true;
    }
  }
  return false;
}
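
/*
  Worked example of the newlimit computation in resize_dynamic_heap
  above.  The 64KB segment size is only an assumption for illustration;
  the real value comes from log2_heap_segment_size:

    lowptr          = 0x10000    (a->active)
    newfree         = 0x13000
    free_space_size = 0x08000
    newfree - lowptr + free_space_size = 0xB000
    rounded up to a 0x10000 (64KB) boundary -> 0x10000
    newlimit        = lowptr + 0x10000 = 0x20000

  If newlimit lies above a->high the heap grows; if even
  lowptr + free_space_size is below a->high the heap shrinks back to
  newlimit.
*/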

void
protect_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  natural n = p->protsize;

  if (n && ! p->nprot) {
    ProtectMemory(start, n);
    p->nprot = n;
  }
}

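
/*
  Zero one page of memory at START.  On PPC this is done a cache block
  at a time via zero_cache_lines (presumably using dcbz or similar);
  elsewhere, plain memset.
*/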
void
zero_page(BytePtr start)
{
  extern int page_size;
#ifdef PPC
  extern void zero_cache_lines(BytePtr, size_t, size_t);
  zero_cache_lines(start, (page_size/cache_block_size), cache_block_size);
#else
  memset(start, 0, page_size);
#endif
}

/* area management */

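/*
  Allocate (zeroed) and initialize an area descriptor covering
  [LOWADDR, HIGHADDR).  The active pointer starts at the low end for
  dynamic areas and at the high end for everything else (stacks, for
  instance, grow toward lower addresses).  As noted below, the caller
  must allocate markbits when the area is used as a heap.
*/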
area *
new_area(BytePtr lowaddr, BytePtr highaddr, area_code code)
{
  area *a = (area *) (zalloc(sizeof(area)));
  if (a) {
    natural ndnodes = area_dnode(highaddr, lowaddr);
    a->low = lowaddr;
    a->high = highaddr;
    a->active = (code == AREA_DYNAMIC) ? lowaddr : highaddr;
    a->code = code;
    a->ndnodes = ndnodes;
    /* Caller must allocate markbits when allocating heap ! */
  }
  return a;
}

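/*
  Splice NEW_AREA into the doubly-linked, circular all_areas list,
  immediately before BEFORE.  Returns NEW_AREA.
*/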
static area *
add_area_before(area *new_area, area *before)
{
  area *before_before = before->pred;

  new_area->pred = before_before;
  new_area->succ = before;
  before_before->succ = new_area;
  before->pred = new_area;
  return new_area;
}

/*
  The active dynamic area comes first.
  Static areas follow dynamic areas.
  Stack areas follow static areas.
  Readonly areas come last.
*/

/*
  If we already own the area_lock (or during initialization), it's safe
  to add an area.
*/

void
add_area_holding_area_lock(area *new_area)
{
  area *that = all_areas;
  int
    thiscode = (int)(new_area->code),
    thatcode;

  /* Cdr down the linked list */
  do {
    that = that->succ;
    thatcode = (int)(that->code);
  } while (thiscode < thatcode);
  add_area_before(new_area, that);
}

/*
  In general, we need to own the area lock before adding an area.
*/
void
add_area(area *new_area, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  add_area_holding_area_lock(new_area);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

/*
  Search areas "forward" from the header's successor, until
  an area containing ADDR is found or an area with code < MINCODE
  is encountered.
  This walks the area list visiting heaps (dynamic, then static)
  first, then stacks.
*/
static area *
find_area_forward(BytePtr addr, area_code mincode)
{
  area *p, *header = all_areas;

  for (p = header->succ; p != header; p = p->succ) {
    area_code pcode = p->code;
    if (pcode < mincode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

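/*
  Like find_area_forward, but search "backward" from the header's
  predecessor, until an area containing ADDR is found or an area with
  code > MAXCODE is encountered.  This walks the area list in the
  opposite order from find_area_forward.
*/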
static area *
find_area_backward(BytePtr addr, area_code maxcode)
{
  area *p, *header = all_areas;

  for (p = header->pred; p != header; p = p->pred) {
    area_code pcode = p->code;

    if (pcode > maxcode) {
      return NULL;
    }
    if (pcode >= AREA_READONLY) {
      if ((addr >= p->low) &&
          (addr < p->active)) {
        return p;
      }
    } else {
      if ((addr >= p->active) &&
          (addr < p->high)) {
        return p;
      }
    }
  }
  return NULL;
}

area *
area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_VOID);
}

area *
heap_area_containing(BytePtr addr)
{
  return find_area_forward(addr, AREA_READONLY);
}

area *
stack_area_containing(BytePtr addr)
{
  return find_area_backward(addr, AREA_TSTACK);
}

/*
  Make everything "younger" than the start of the target area
  belong to that area; all younger areas will become empty, and
  the dynamic area will have to lose some of its markbits (they
  get zeroed and become part of the tenured area's refbits.)

  The active dynamic area must have been "normalized" (e.g., its
  active pointer must match the free pointer) before this is called.

  If the target area is 'tenured_area' (the oldest ephemeral generation),
  zero its refbits and update OLDEST_EPHEMERAL.
*/

void
tenure_to_area(area *target)
{
  area *a = active_dynamic_area, *child;
  BytePtr
    curfree = a->active,
    target_low = target->low;
  natural
    new_tenured_dnodes = area_dnode(curfree, tenured_area->low);
  bitvector
    refbits = tenured_area->refbits,
    markbits = a->markbits,
    new_markbits;

  target->high = target->active = curfree;
  target->ndnodes = area_dnode(curfree, target_low);

  for (child = target->younger; child != a; child = child->younger) {
    child->high = child->low = child->active = curfree;
    child->ndnodes = 0;
  }

  a->low = curfree;
  a->ndnodes = area_dnode(a->high, curfree);

  new_markbits = refbits + ((new_tenured_dnodes + (nbits_in_word-1)) >> bitmap_shift);

  if (target == tenured_area) {
    zero_bits(refbits, new_tenured_dnodes);
    lisp_global(OLDEST_EPHEMERAL) = ptr_to_lispobj(curfree);
  } else {
    /* Need more (zeroed) refbits & fewer markbits */
    zero_bits(markbits, ((new_markbits-markbits)<<bitmap_shift));
  }

  a->markbits = new_markbits;
  lisp_global(OLDSPACE_DNODE_COUNT) = area_dnode(curfree, lisp_global(HEAP_START));
}
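
/*
  Note on the markbit arithmetic above: it relies on the tenured area's
  refbits and the dynamic area's markbits being adjacent pieces of one
  underlying bitvector, so that advancing past the words needed to
  cover NEW_TENURED_DNODES refbits yields the new start of the dynamic
  area's markbits.  (That layout is a reading of this code, not
  something this file states explicitly.)
*/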

/*
  Make everything younger than the oldest byte in 'from' belong to
  the youngest generation.  If 'from' is 'tenured_area', this means
  that nothing's ephemeral any more (and OLDEST_EPHEMERAL can be set
  to 0 to indicate this.)

  Some tenured_area refbits become dynamic area markbits in the process;
  it's not necessary to zero them, since the GC will do that.
*/

void
untenure_from_area(area *from)
{
  if (lisp_global(OLDEST_EPHEMERAL) != 0) {
    area *a = active_dynamic_area, *child;
    BytePtr curlow = from->low;
    natural new_tenured_dnodes = area_dnode(curlow, tenured_area->low);

    for (child = from; child != a; child = child->younger) {
      child->low = child->active = child->high = curlow;
      child->ndnodes = 0;
    }

    a->low = curlow;
    a->ndnodes = area_dnode(a->high, curlow);

    a->markbits = (tenured_area->refbits) + ((new_tenured_dnodes+(nbits_in_word-1))>>bitmap_shift);
    if (from == tenured_area) {
      /* Everything's in the dynamic area */
      lisp_global(OLDEST_EPHEMERAL) = 0;
      lisp_global(OLDSPACE_DNODE_COUNT) = 0;
    }
  }
}

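/*
  Turn the ephemeral GC on or off.  The EGC is considered active when
  the dynamic area has an older generation linked to it.  When
  activating, hook up g1_area and tenure everything below CURFREE into
  tenured_area; when deactivating, untenure everything back into the
  dynamic area.  If CURFREE is non-NULL it becomes the dynamic area's
  active pointer first.  Returns the resulting state.
*/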
Boolean
egc_control(Boolean activate, BytePtr curfree)
{
  area *a = active_dynamic_area;
  Boolean egc_is_active = (a->older != NULL);

  if (activate != egc_is_active) {
    if (curfree != NULL) {
      a->active = curfree;
    }
    if (activate) {
      a->older = g1_area;
      tenure_to_area(tenured_area);
      egc_is_active = true;
    } else {
      untenure_from_area(tenured_area);
      a->older = NULL;
      egc_is_active = false;
    }
  }
  return egc_is_active;
}

/*
  Lisp ff-calls this; it needs to set the active area's active pointer
  correctly.
*/

Boolean
lisp_egc_control(Boolean activate)
{
  area *a = active_dynamic_area;
  return egc_control(activate, (BytePtr) a->active);
}



/* Splice the protected_area_ptr out of the list and dispose of it. */
void
delete_protected_area(protected_area_ptr p)
{
  BytePtr start = p->start;
  int nbytes = p->nprot;
  protected_area_ptr *prev = &AllProtectedAreas, q;

  if (nbytes) {
    UnProtectMemory((LogicalAddress)start, nbytes);
  }

  while ((q = *prev) != NULL) {
    if (p == q) {
      *prev = p->next;
      break;
    } else {
      prev = &(q->next);
    }
  }

  deallocate((Ptr)p);
}




/*
  Unlink the area from all_areas.
  Unprotect and dispose of any hard/soft protected_areas.
  If the area has a handle, dispose of that as well.
*/

void
condemn_area_holding_area_lock(area *a)
{
  void free_stack(void *);
  area *prev = a->pred, *next = a->succ;
  Ptr h = a->h;
  protected_area_ptr p;

  prev->succ = next;
  next->pred = prev;

  p = a->softprot;
  if (p) delete_protected_area(p);

  p = a->hardprot;
  if (p) delete_protected_area(p);

  if (h) free_stack(h);
  deallocate((Ptr)a);
}



void
condemn_area(area *a, TCR *tcr)
{
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  condemn_area_holding_area_lock(a);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}




/*
  Condemn an area and all the other areas that can be reached
  via the area.older & area.younger links.
  This is the function in the ppc::kernel-import-condemn-area slot,
  called by free-stack-area.
*/
void
condemn_area_chain(area *a, TCR *tcr)
{
  area *older;

  LOCK(lisp_global(TCR_AREA_LOCK),tcr);

  for (; a->younger; a = a->younger) ;
  for (;a;) {
    older = a->older;
    condemn_area_holding_area_lock(a);
    a = older;
  }
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
}

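/*
  Give the mapped pages of the readonly (pure) area back to the OS and
  reset the area to empty.  A no-op on Windows (presumably because that
  platform manages this space differently; this file doesn't say).
*/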
#ifdef WINDOWS
void
release_readonly_area()
{
}
#else
void
release_readonly_area()
{
  area *a = readonly_area;
  munmap(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
  a->active = a->low;
  a->ndnodes = 0;
  pure_space_active = pure_space_start;
}
#endif