Changeset 8696


Timestamp: Mar 9, 2008, 12:25:09 AM
Author: andreas
Message:

Separation of page management functions; implementation for Windows.
Still buggy.
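
To make the shape of the change easier to follow, here is a rough sketch of how the new platform-neutral entry points are meant to be called. The caller below is hypothetical and only for orientation; the signatures are the ones added to memprotect.h and memory.c in this changeset.

    #include "memprotect.h"   /* declares ReserveMemoryForHeap(), CommitMemory(), etc. */

    /* Hypothetical caller: kernel code now goes through these wrappers instead
       of calling mmap()/VirtualAlloc() directly. */
    static LogicalAddress
    reserve_and_commit(LogicalAddress where, int nbytes)
    {
      LogicalAddress p = ReserveMemoryForHeap(where, nbytes);  /* reserve address space only */

      if (p == NULL) {
        return NULL;
      }
      if (!CommitMemory(p, nbytes)) {          /* back the range with usable pages */
        UnMapMemory(p, nbytes);
        return NULL;
      }
      return p;
    }

On POSIX systems the wrappers still bottom out in mmap()/mprotect()/munmap(); on Windows they map onto VirtualAlloc()/VirtualProtect()/VirtualFree(), which is the point of the separation.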

Location: branches/win64/lisp-kernel
Files: 6 edited

Legend: unchanged context lines are shown with a leading space, added lines with a leading +, and removed lines with a leading -.
  • branches/win64/lisp-kernel/linuxx8664/Makefile

    r7831 → r8696

     CDEFINES = -DLINUX -D_REENTRANT -DX86 -DX8664 -D_GNU_SOURCE -DHAVE_TLS -DUSE_FUTEX #-DDISABLE_EGC
     CDEBUG = -g
    -COPT = -O2
    +COPT = -O0

     # If the linker supports a "--hash-style=" option, use traditional
  • branches/win64/lisp-kernel/memory.c

    r8547 → r8696

     }

    +#ifdef DARWIN
    +#if WORD_SIZE == 64
    +#define vm_region vm_region_64
    +#endif
    +
    +/*
    +  Check to see if the specified address is unmapped by trying to get
    +  information about the mapped address at or beyond the target.  If
    +  the difference between the target address and the next mapped address
    +  is >= len, we can safely mmap len bytes at addr.
    +*/
    +Boolean
    +address_unmapped_p(char *addr, natural len)
    +{
    +  vm_address_t vm_addr = (vm_address_t)addr;
    +  vm_size_t vm_size;
    +#if WORD_SIZE == 64
    +  vm_region_basic_info_data_64_t vm_info;
    +#else
    +  vm_region_basic_info_data_t vm_info;
    +#endif
    +#if WORD_SIZE == 64
    +  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
    +#else
    +  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
    +#endif
    +  mach_port_t vm_object_name = (mach_port_t) 0;
    +  kern_return_t kret;
    +
    +  kret = vm_region(mach_task_self(),
    +                   &vm_addr,
    +                   &vm_size,
    +#if WORD_SIZE == 64
    +                   VM_REGION_BASIC_INFO_64,
    +#else
    +                   VM_REGION_BASIC_INFO,
    +#endif
    +                   (vm_region_info_t)&vm_info,
    +                   &vm_info_size,
    +                   &vm_object_name);
    +  if (kret != KERN_SUCCESS) {
    +    return false;
    +  }
    +
    +  return vm_addr >= (vm_address_t)(addr+len);
    +}
    +#endif
    +
    +
    +  /*
    +    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    +    likely to reside near the beginning of an unmapped block of memory
    +    that's at least 1GB in size.  We'd like to load the heap image's
    +    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    +    that'd allow us to file-map those sections (and would enable us to
    +    avoid having to relocate references in the data sections.)
    +
    +    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    +    by creating an anonymous mapping with mmap().
    +
    +    If we try to insist that mmap() map a 1GB block at
    +    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    +    mmap() will gleefully clobber any mapped memory that's already
    +    there.  (That region's empty at this writing, but some future
    +    version of the OS might decide to put something there.)
    +
    +    If we don't specify MAP_FIXED, mmap() is free to treat the address
    +    we give it as a hint; Linux seems to accept the hint if doing so
    +    wouldn't cause a problem.  Naturally, that behavior's too useful
    +    for Darwin (or perhaps too inconvenient for it): it'll often
    +    return another address, even if the hint would have worked fine.
    +
    +    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    +    would conflict with anything.  Until we discover a need to do
    +    otherwise, we'll assume that if Linux's mmap() fails to take the
    +    hint, it's because of a legitimate conflict.
    +
    +    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    +    to implement an address_unmapped_p() for Linux.
    +  */
    +
    +LogicalAddress
    +ReserveMemoryForHeap(LogicalAddress want, int totalsize)
    +{
    +  LogicalAddress start;
    +  Boolean fixed_map_ok = false;
    +#ifdef DARWIN
    +  fixed_map_ok = address_unmapped_p(want,totalsize);
    +#endif
    +#ifdef SOLARIS
    +  fixed_map_ok = true;
    +#endif
    +  raise_limit();
    +#ifdef WINDOWS
    +  start = VirtualAlloc((void *)want,
    +                       totalsize + heap_segment_size,
    +                       MEM_RESERVE,
    +                       PAGE_NOACCESS);
    +  if (!start) {
    +    start = VirtualAlloc(0,
    +                         totalsize + heap_segment_size,
    +                         MEM_RESERVE,
    +                         PAGE_NOACCESS);
    +    if (!start) {
    +      wperror("VirtualAlloc");
    +      return NULL;
    +    }
    +  }
    +#else
    +  start = mmap((void *)want,
    +               totalsize + heap_segment_size,
    +               PROT_NONE,
    +               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
    +               -1,
    +               0);
    +  if (start == MAP_FAILED) {
    +    perror("Initial mmap");
    +    return NULL;
    +  }
    +
    +  if (start != want) {
    +    munmap(start, totalsize+heap_segment_size);
    +    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    +    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
    +      return NULL;
    +    }
    +  }
    +  mprotect(start, totalsize, PROT_NONE);
    +#endif
    +  return start;
    +}
    +
    +int
    +CommitMemory (LogicalAddress start, int len) {
    +  LogicalAddress rc;
    +#ifdef WINDOWS
    +  rc = VirtualAlloc(start, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
    +  if (!rc) {
    +    wperror("CommitMemory VirtualAlloc");
    +    return false;
    +  }
    +  return true;
    +#else
    +  int i, err;
    +  void *addr;
    +
    +  for (i = 0; i < 3; i++) {
    +    addr = mmap(start, len, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    +    if (addr == start) {
    +      return true;
    +    } else {
    +      mmap(addr, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    +    }
    +  }
    +  return false;
    +#endif
    +}
    +
    +void
    +UnCommitMemory (LogicalAddress start, int len) {
    +#ifdef WINDOWS
    +  int rc = VirtualFree(start, len, MEM_DECOMMIT);
    +  if (!rc) {
    +    wperror("UnCommitMemory VirtualFree");
    +    Fatal("mmap error", "");
    +    return;
    +  }
    +#else
    +  if (len) {
    +    madvise(start, len, MADV_DONTNEED);
    +    if (mmap(start, len, MEMPROTECT_NONE, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0)
    +        != start) {
    +      int err = errno;
    +      Fatal("mmap error", "");
    +      fprintf(stderr, "errno = %d", err);
    +    }
    +  }
    +#endif
    +}
    +
    +
    +LogicalAddress
    +MapMemory(LogicalAddress addr, int nbytes, int protection)
    +{
    +#ifdef WINDOWS
    +  return VirtualAlloc(addr, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
    +#else
    +  return mmap(addr, nbytes, protection, MAP_PRIVATE|MAP_ANON|MAP_FIXED, -1, 0);
    +#endif
    +}
    +
    +LogicalAddress
    +MapMemoryForStack(int nbytes)
    +{
    +#ifdef WINDOWS
    +  return VirtualAlloc(0, nbytes, MEM_RESERVE|MEM_COMMIT, MEMPROTECT_RWX);
    +#else
    +  return mmap(NULL, nbytes, MEMPROTECT_RWX, MAP_PRIVATE|MAP_ANON|MAP_GROWSDOWN, -1, 0);
    +#endif
    +}
    +
    +int
    +UnMapMemory(LogicalAddress addr, int nbytes)
    +{
    +#ifdef WINDOWS
    +  /* Can't MEM_RELEASE here because we only want to free a chunk */
    +  return VirtualFree(addr, nbytes, MEM_DECOMMIT);
    +#else
    +  return munmap(addr, nbytes);
    +#endif
    +}
    +
     int
     ProtectMemory(LogicalAddress addr, int nbytes)
     {
    +#ifdef WINDOWS
    +  DWORD oldProtect;
    +  BOOL status = VirtualProtect(addr, nbytes, MEMPROTECT_RX, &oldProtect);
    +
    +  if(!status) {
    +    wperror("ProtectMemory VirtualProtect");
    +    Bug(NULL, "couldn't protect %d bytes at %x, errno = %d", nbytes, addr, status);
    +  }
    +  return status;
    +#else
       int status = mprotect(addr, nbytes, PROT_READ | PROT_EXEC);

    …
       }
       return status;
    +#endif
     }

    …
     UnProtectMemory(LogicalAddress addr, int nbytes)
     {
    +#ifdef WINDOWS
    +  DWORD oldProtect;
    +  return VirtualProtect(addr, nbytes, MEMPROTECT_RWX, &oldProtect);
    +#else
       return mprotect(addr, nbytes, PROT_READ|PROT_WRITE|PROT_EXEC);
    +#endif
     }

    …
     }

    -#ifdef WINDOWS
     void
     release_readonly_area()
     {
    -}
    -#else
    -void
    -release_readonly_area()
    -{
       area *a = readonly_area;
    -  munmap(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
    +  UnMapMemory(a->low,align_to_power_of_2(a->active-a->low, log2_page_size));
       a->active = a->low;
       a->ndnodes = 0;
       pure_space_active = pure_space_start;
     }
    -#endif
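
The Darwin-only helper above relies on a simple invariant, restated here as a tiny self-contained illustration (a hypothetical helper, not part of the changeset): the range [addr, addr+len) is free exactly when the first mapping Mach reports at or beyond addr starts at least len bytes past addr.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Mirrors the final test in address_unmapped_p(): vm_region() rounds
       vm_addr up to the start of the next existing mapping, so the target
       range is unmapped iff that mapping begins at or after addr + len. */
    static bool
    range_is_unmapped(uintptr_t next_mapped_start, uintptr_t addr, size_t len)
    {
      return next_mapped_start >= addr + len;
    }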
  • branches/win64/lisp-kernel/memprotect.h

    r8408 → r8696

     #ifdef WINDOWS
    -#define PROT_NONE (0)
    -#define PROT_READ (1)
    -#define PROT_WRITE (2)
    -#define PROT_EXEC (3)
    -
    -#define MAP_PRIVATE (1)
    -#define MAP_FIXED (2)
    -#define MAP_ANON (3)
    -
    -void *mmap(void *, size_t, int, int, int, off_t);
    -
     #define MAP_FAILED ((void *)(-1))

    +#define MEMPROTECT_NONE PAGE_NOACCESS
    +#define MEMPROTECT_RO   PAGE_READONLY
    +#define MEMPROTECT_RW   PAGE_READWRITE
    +#define MEMPROTECT_RX   PAGE_EXECUTE_READ
    +#define MEMPROTECT_RWX  PAGE_EXECUTE_READWRITE
    +
    +#else
    +
    +#define MEMPROTECT_NONE PROT_NONE
    +#define MEMPROTECT_RO   PROT_READ
    +#define MEMPROTECT_RW   (PROT_READ|PROT_WRITE)
    +#define MEMPROTECT_RX   (PROT_READ|PROT_EXEC)
    +#define MEMPROTECT_RWX  (PROT_READ|PROT_WRITE|PROT_EXEC)
    +
     #endif
    +
    +LogicalAddress
    +ReserveMemoryForHeap(LogicalAddress want, int totalsize);
    +
    +int
    +CommitMemory (LogicalAddress start, int len);
    +
    +void
    +UnCommitMemory (LogicalAddress start, int len);
    +
    +LogicalAddress
    +MapMemory(LogicalAddress addr, int nbytes, int protection);
    +
    +LogicalAddress
    +MapMemoryForStack(int nbytes);
    +
    +int
    +UnMapMemory(LogicalAddress addr, int nbytes);

     int
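
The MEMPROTECT_* names are what let the functions in memory.c be written once for both platforms: the same constant expands to a Win32 PAGE_* value or a POSIX PROT_* mask. A hypothetical caller (not part of the changeset) might look like the sketch below; note that, as committed here, the Windows branch of MapMemory() ignores its protection argument and always commits read/write/execute pages.

    #include "memprotect.h"

    /* Hypothetical helper: map a scratch area, then drop it to read+execute
       using the platform-neutral constants and wrappers declared above. */
    static LogicalAddress
    map_and_seal(LogicalAddress where, int nbytes)
    {
      LogicalAddress p = MapMemory(where, nbytes, MEMPROTECT_RW);

      if (p == NULL || p == MAP_FAILED) {
        return NULL;
      }
      /* ProtectMemory() applies MEMPROTECT_RX: PAGE_EXECUTE_READ on Windows,
         PROT_READ|PROT_EXEC elsewhere. */
      ProtectMemory(p, nbytes);
      return p;
    }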
  • branches/win64/lisp-kernel/pmcl-kernel.c

    r8626 → r8696

     {
     #ifdef WINDOWS
    +
    +  /* On Windows, the stack is allocated on thread creation.  For the
    +     initial thread, the loader does that, and we cannot change the
    +     stack size after the fact.  For threads we create, we can set the
    +     stack size.  A possible solution is putting the initial thread
    +     asleep and using only runtime-created threads.
    +
    +     For now, just refuse any attempt to set another stack size, and
    +     return the linker default. */
    +
    +  return 0x200000;
    +
     #else
       struct rlimit limits;
    …
     */

    -#ifdef WINDOWS
     void
     uncommit_pages(void *start, size_t len)
     {
    -}
    -#else
    -void
    -uncommit_pages(void *start, size_t len)
    -{
    -  if (len) {
    -    madvise(start, len, MADV_DONTNEED);
    -    if (mmap(start,
    -             len,
    -             PROT_NONE,
    -             MAP_PRIVATE | MAP_FIXED | MAP_ANON,
    -             -1,
    -             0) != start) {
    -      int err = errno;
    -      Fatal("mmap error", "");
    -      fprintf(stderr, "errno = %d", err);
    -    }
    -  }
    -}
    -#endif
    +  UnCommitMemory(start, len);
    +}

     #define TOUCH_PAGES_ON_COMMIT 0
    …
     }

    -#ifdef WINDOWS
     Boolean
     commit_pages(void *start, size_t len)
     {
    -}
    -#else
    -Boolean
    -commit_pages(void *start, size_t len)
    -{
       if (len != 0) {
    -    int i, err;
    -    void *addr;
    -
    -    for (i = 0; i < 3; i++) {
    -      addr = mmap(start,
    -                  len,
    -                  PROT_READ | PROT_WRITE | PROT_EXEC,
    -                  MAP_PRIVATE | MAP_FIXED | MAP_ANON,
    -                  -1,
    -                  0);
    -      if (addr == start) {
    -        if (touch_all_pages(start, len)) {
    -          return true;
    -        }
    -        else {
    -          mmap(start,
    -               len,
    -               PROT_NONE,
    -               MAP_PRIVATE | MAP_FIXED | MAP_ANON,
    -               -1,
    -               0);
    -        }
    +    if (CommitMemory(start, len)) {
    +      if (touch_all_pages(start, len)) {
    +        return true;
           }
         }
    -    return false;
    -  }
    -}
    -#endif
    +  }
    +  return true;
    +}

     area *
    …
     }

    -#ifdef WINDOWS
    -area *
    -extend_readonly_area(unsigned more)
    -{
    -}
    -#else
     area *
     extend_readonly_area(unsigned more)
    …
         new_start = (BytePtr)(align_to_power_of_2(a->active,log2_page_size));
         new_end = (BytePtr)(align_to_power_of_2(a->active+more,log2_page_size));
    -    if (mmap(new_start,
    -             new_end-new_start,
    -             PROT_READ | PROT_WRITE | PROT_EXEC,
    -             MAP_PRIVATE | MAP_ANON | MAP_FIXED,
    -             -1,
    -             0) != new_start) {
    +    if (!CommitMemory(new_start, new_end-new_start)) {
           return NULL;
         }
    …
       return NULL;
     }
    -#endif

     LispObj image_base=0;
     BytePtr pure_space_start, pure_space_active, pure_space_limit;
     BytePtr static_space_start, static_space_active, static_space_limit;
    -
    -#ifdef DARWIN
    -#if WORD_SIZE == 64
    -#define vm_region vm_region_64
    -#endif
    -
    -/*
    -  Check to see if the specified address is unmapped by trying to get
    -  information about the mapped address at or beyond the target.  If
    -  the difference between the target address and the next mapped address
    -  is >= len, we can safely mmap len bytes at addr.
    -*/
    -Boolean
    -address_unmapped_p(char *addr, natural len)
    -{
    -  vm_address_t vm_addr = (vm_address_t)addr;
    -  vm_size_t vm_size;
    -#if WORD_SIZE == 64
    -  vm_region_basic_info_data_64_t vm_info;
    -#else
    -  vm_region_basic_info_data_t vm_info;
    -#endif
    -#if WORD_SIZE == 64
    -  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT_64;
    -#else
    -  mach_msg_type_number_t vm_info_size = VM_REGION_BASIC_INFO_COUNT;
    -#endif
    -  mach_port_t vm_object_name = (mach_port_t) 0;
    -  kern_return_t kret;
    -
    -  kret = vm_region(mach_task_self(),
    -                   &vm_addr,
    -                   &vm_size,
    -#if WORD_SIZE == 64
    -                   VM_REGION_BASIC_INFO_64,
    -#else
    -                   VM_REGION_BASIC_INFO,
    -#endif
    -                   (vm_region_info_t)&vm_info,
    -                   &vm_info_size,
    -                   &vm_object_name);
    -  if (kret != KERN_SUCCESS) {
    -    return false;
    -  }
    -
    -  return vm_addr >= (vm_address_t)(addr+len);
    -}
    -#endif

     void
    …

    -#ifdef WINDOWS
    -area *
    -create_reserved_area(natural totalsize)
    -{
    -}
    -#else
     area *
     create_reserved_area(natural totalsize)
    …
         try2;
       area *reserved;
    -  Boolean fixed_map_ok = false;
    -
    -  /*
    -    Through trial and error, we've found that IMAGE_BASE_ADDRESS is
    -    likely to reside near the beginning of an unmapped block of memory
    -    that's at least 1GB in size.  We'd like to load the heap image's
    -    sections relative to IMAGE_BASE_ADDRESS; if we're able to do so,
    -    that'd allow us to file-map those sections (and would enable us to
    -    avoid having to relocate references in the data sections.)
    -
    -    In short, we'd like to reserve 1GB starting at IMAGE_BASE_ADDRESS
    -    by creating an anonymous mapping with mmap().
    -
    -    If we try to insist that mmap() map a 1GB block at
    -    IMAGE_BASE_ADDRESS exactly (by specifying the MAP_FIXED flag),
    -    mmap() will gleefully clobber any mapped memory that's already
    -    there.  (That region's empty at this writing, but some future
    -    version of the OS might decide to put something there.)
    -
    -    If we don't specify MAP_FIXED, mmap() is free to treat the address
    -    we give it as a hint; Linux seems to accept the hint if doing so
    -    wouldn't cause a problem.  Naturally, that behavior's too useful
    -    for Darwin (or perhaps too inconvenient for it): it'll often
    -    return another address, even if the hint would have worked fine.
    -
    -    We call address_unmapped_p() to ask Mach whether using MAP_FIXED
    -    would conflict with anything.  Until we discover a need to do
    -    otherwise, we'll assume that if Linux's mmap() fails to take the
    -    hint, it's because of a legitimate conflict.
    -
    -    If Linux starts ignoring hints, we can parse /proc/<pid>/maps
    -    to implement an address_unmapped_p() for Linux.
    -  */

       totalsize = align_to_power_of_2((void *)totalsize, log2_heap_segment_size);

    -#ifdef DARWIN
    -  fixed_map_ok = address_unmapped_p(want,totalsize);
    -#endif
    -#ifdef SOLARIS
    -  fixed_map_ok = true;
    -#endif
    -  raise_limit();
    -  start = mmap((void *)want,
    -               totalsize + heap_segment_size,
    -               PROT_NONE,
    -               MAP_PRIVATE | MAP_ANON | (fixed_map_ok ? MAP_FIXED : 0) | MAP_NORESERVE,
    -               -1,
    -               0);
    -  if (start == MAP_FAILED) {
    -    perror("Initial mmap");
    -    return NULL;
    -  }
    -
    -  if (start != want) {
    -    munmap(start, totalsize+heap_segment_size);
    -    start = (void *)((((natural)start)+heap_segment_size-1) & ~(heap_segment_size-1));
    -    if(mmap(start, totalsize, PROT_NONE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, -1, 0) != start) {
    -      return NULL;
    -    }
    -  }
    -  mprotect(start, totalsize, PROT_NONE);
    +  start = ReserveMemoryForHeap(want, totalsize);

       h = (Ptr) start;
    …
       return reserved;
     }
    -#endif

     void *
    …

       if (new_reloctab_limit > reloctab_limit) {
    +    CommitMemory(global_mark_ref_bits, markbits_size);
         UnProtectMemory(global_reloctab, reloctab_size);
         reloctab_limit = new_reloctab_limit;
    …

       if (new_markbits_limit > markbits_limit) {
    +    CommitMemory(global_mark_ref_bits, markbits_size);
         UnProtectMemory(global_mark_ref_bits, markbits_size);
         markbits_limit = new_markbits_limit;
    …
       a->markbits = reserved_area->markbits;
       reserved_area->markbits = NULL;
    -  UnProtectMemory(start, end-start);
    +  CommitMemory(start, end-start);
       a->h = start;
       a->softprot = NULL;
    …
         }
         xMakeDataExecutable(new, (void*)work-(void*)new);
    -    mprotect(new, 0x1000, PROT_READ | PROT_EXEC);
    +    ProtectMemory(new, 0x1000);
       }
     }
    …
     #ifdef X8664
     #ifdef WINDOWS
    +
    +/* By using linker tricks, we ensure there's memory between 0x11000
    +   and 0x21000, so we just need to fix permissions and copy the spjump
    +   table. */
    +
     void
     remap_spjump()
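
Regarding the new Windows branch of the stack-size code above: 0x200000 bytes is 2 MB, the value the comment refers to as the linker default. The comment also notes that only threads created by the runtime can be given a different stack size; in Win32 that is done at creation time, roughly as in this hypothetical sketch (not part of the changeset):

    #include <windows.h>

    /* Hypothetical: create a thread whose stack reservation is stack_bytes.
       The initial thread's stack, by contrast, is fixed by the loader from
       the size recorded in the executable's header. */
    static HANDLE
    spawn_with_stack(LPTHREAD_START_ROUTINE fn, void *arg, SIZE_T stack_bytes)
    {
      DWORD tid;
      return CreateThread(NULL, stack_bytes, fn, arg,
                          STACK_SIZE_PARAM_IS_A_RESERVATION, &tid);
    }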
  • branches/win64/lisp-kernel/thread_manager.c

    r8547 → r8696

     #endif
     #ifdef WINDOWS
    -          /* ThreadSelf() */ 23
    +          GetCurrentThread()
     #endif
               );
    …
     #endif

    -#ifdef WINDOWS
     Ptr
     create_stack(int size)
     {
    -}
    -#else
    -Ptr
    -create_stack(int size)
    -{
       Ptr p;
       size=align_to_power_of_2(size, log2_page_size);
    -  p = (Ptr) mmap(NULL,
    -                     (size_t)size,
    -                     PROT_READ | PROT_WRITE | PROT_EXEC,
    -                     MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
    -                     -1,        /* Darwin insists on this when not mmap()ing
    -                                 a real fd */
    -                     0);
    +  p = (Ptr) MapMemoryForStack((size_t)size);
       if (p != (Ptr)(-1)) {
         *((size_t *)p) = size;
    …

     }
    -#endif

     void *
    …
     }

    -#ifdef WINDOWS
     void
     free_stack(void *s)
     {
    -}
    -#else
    -void
    -free_stack(void *s)
    -{
       size_t size = *((size_t *)s);
    -  munmap(s, size);
    -}
    -#endif
    +  UnMapMemory(s, size);
    +}

     Boolean threads_initialized = false;
    …
     count_cpus()
     {
    +  SYSTEM_INFO si;
    +
    +  GetSystemInfo(&si);
    +  if (si.dwNumberOfProcessors > 1) {
    +    spin_lock_tries = 1024;
    +  }
     }
     #else
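
One caveat worth noting about the GetCurrentThread() substitution above (a side note, not something this changeset addresses): GetCurrentThread() returns a pseudo-handle that is only meaningful within the calling thread. If a handle ever needs to be stored and used from another thread, it has to be converted into a real handle first, for example via this hypothetical helper:

    #include <windows.h>

    /* Hypothetical: turn the calling thread's pseudo-handle into a real,
       shareable handle.  The caller owns the result and must CloseHandle() it. */
    static HANDLE
    real_current_thread_handle(void)
    {
      HANDLE h = NULL;
      DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                      GetCurrentProcess(), &h,
                      0, FALSE, DUPLICATE_SAME_ACCESS);
      return h;
    }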
  • branches/win64/lisp-kernel/x86-gc.c

    r8547 → r8696

       impurify_noderef(&(regs[Iarg_y]), low, high, delta);
       impurify_noderef(&(regs[Iarg_x]), low, high, delta);
    +#ifndef WINDOWS
       impurify_noderef(&(regs[Isave3]), low, high, delta);
    +#endif
       impurify_noderef(&(regs[Isave2]), low, high, delta);
       impurify_noderef(&(regs[Isave1]), low, high, delta);
    …
     }

    -#ifdef WINDOWS
    -int
    -impurify(TCR *tcr, signed_natural param)
    -{
    -}
    -#else
     int
     impurify(TCR *tcr, signed_natural param)
    …
           a->active += n;
           memmove(oldfree, ro_base, n);
    -      munmap((void *)ro_base, n);
    +      UnMapMemory((void *)ro_base, n);
           a->ndnodes = area_dnode(a, a->active);
           pure_space_active = r->active = r->low;
    …
       return -1;
     }
    -#endif