Changeset 7418


Timestamp: Oct 12, 2007, 9:53:41 AM
Author: gb
Message: Lots of changes to support new rwlocks, heap freeze, deferred GC.
Location: branches/working-0710/ccl/lisp-kernel
Files: 22 edited

  • branches/working-0710/ccl/lisp-kernel/Threads.h

    r6260 r7418  
    4949Boolean extern threads_initialized;
    5050
     51#define LOCK_SPINLOCK(x,tcr) get_spin_lock(&(x),tcr)
     52#define RELEASE_SPINLOCK(x) (x)=0
     53
    5154#define TCR_TO_TSD(tcr) ((void *)((natural)(tcr)+TCR_BIAS))
    5255#define TCR_FROM_TSD(tsd) ((TCR *)((natural)(tsd)-TCR_BIAS))
     
    5659#define SEM_WAIT(s) sem_wait((SEMAPHORE)s)
    5760#define SEM_RAISE(s) sem_post((SEMAPHORE)s)
     61#define SEM_BROADCAST(s, count) do {while(count) {SEM_RAISE(s);(count)--;}}while(0)
    5862#define SEM_TIMEDWAIT(s,t) sem_timedwait((SEMAPHORE)s,(struct timespec *)t)
    5963#endif
     
    6367#define SEM_WAIT(s) semaphore_wait((SEMAPHORE)(natural)s)
    6468#define SEM_RAISE(s) semaphore_signal((SEMAPHORE)(natural)s)
      69#define SEM_BROADCAST(s,count) semaphore_signal_all((SEMAPHORE)(natural)s)
    6570#define SEM_TIMEDWAIT(s,t) semaphore_timedwait((SEMAPHORE)(natural)s,t)
    6671#endif
     
    121126Boolean resume_tcr(TCR *);
    122127
    123 typedef struct _rwquentry
    124 {
    125   struct _rwquentry *prev;
    126   struct _rwquentry *next;
    127   TCR *tcr;
    128   int count;
    129 } rwquentry;
    130 
    131128typedef struct
    132129{
    133   rwquentry head;
    134   int state;                    /* sum of all counts on queue */
    135   pthread_mutex_t *lock;        /* lock access to this data structure */
    136   pthread_cond_t *reader_signal;
    137   pthread_cond_t *writer_signal;
    138   int write_wait_count;
    139   int read_wait_count;
    140   int dying;
    141   rwquentry freelist;
     130  signed_natural spin; /* need spin lock to change fields */
     131  signed_natural state; /* 0 = free, positive if writer, negative if readers; */
     132  natural blocked_writers;
     133  natural blocked_readers;
     134  TCR  *writer;
     135  void * reader_signal;
     136  void * writer_signal;
     137  void *malloced_ptr;
    142138} rwlock;
    143139
    144 #define RWLOCK_WRITER(rw) rw->head.tcr
    145 #define RWLOCK_WRITE_COUNT(rw) rw->head.count
    146140
    147141rwlock * rwlock_new(void);
    148 int rwlock_destroy(rwlock *);
     142void rwlock_destroy(rwlock *);
    149143int rwlock_rlock(rwlock *, TCR *, struct timespec *);
    150144int rwlock_wlock(rwlock *, TCR *, struct timespec *);
    151145int rwlock_try_wlock(rwlock *, TCR *);
     146int rwlock_try_rlock(rwlock *, TCR *);
    152147int rwlock_unlock(rwlock *, TCR *);
    153148
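
The new rwlock replaces the old queue-of-rwquentry design with a spinlock-protected state word: 0 means free, a positive value is the writer's recursion count, and a negative value is minus the number of readers. A minimal sketch of how the new LOCK_SPINLOCK/RELEASE_SPINLOCK macros guard that word, mirroring rwlock_try_rlock() as implemented in thread_manager.c below (sketch_try_rlock is a hypothetical name; everything else comes from this changeset):

    /* Illustrative only: acquire a read lock without blocking.
       LOCK_SPINLOCK spins (via get_spin_lock) until the calling TCR owns
       rw->spin; RELEASE_SPINLOCK simply stores 0. */
    static int
    sketch_try_rlock(rwlock *rw, TCR *tcr)
    {
      int ret = EBUSY;

      LOCK_SPINLOCK(rw->spin, tcr);
      if (rw->state <= 0) {       /* free, or already held by readers */
        --rw->state;              /* one more reader */
        ret = 0;
      }
      RELEASE_SPINLOCK(rw->spin);
      return ret;
    }
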
  • branches/working-0710/ccl/lisp-kernel/errors.s

    r6899 r7418  
    2525error_excised_function_call = 6
    2626error_too_many_values = 7
     27error_propagate_suspend = 10   
    2728error_cant_call = 17
    2829       
  • branches/working-0710/ccl/lisp-kernel/gc.h

    r7137 r7418  
    118118#define GC_TRAP_FUNCTION_EGC_CONTROL 32
    119119#define GC_TRAP_FUNCTION_CONFIGURE_EGC 64
    120 #define GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE 128
     120#define GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE 128 /* deprecated */
     121#define GC_TRAP_FUNCTION_FREEZE 129
     122#define GC_TRAP_FUNCTION_THAW 130
     123
    121124#endif                          /* __GC_H__ */
  • branches/working-0710/ccl/lisp-kernel/image.c

    r6215 r7418  
    211211
    212212    a->static_dnodes = sect->static_dnodes;
    213     if (a->static_dnodes) {
    214       natural pages_size = (align_to_power_of_2((align_to_power_of_2(a->static_dnodes,
    215                                                                      log2_nbits_in_word)>>3),
    216                                                 log2_page_size));
    217       lseek(fd,pos+mem_size, SEEK_SET);
    218       pos = seek_to_next_page(fd);
    219       addr = mmap(NULL,
    220                   pages_size,
    221                   PROT_READ | PROT_WRITE,
    222                   MAP_PRIVATE,
    223                   fd,
    224                   pos);
    225       if (addr == MAP_FAILED) {
    226         return;
    227       }
    228       a->static_used = addr;
    229       advance = pages_size;
    230     }
    231213    sect->area = a;
    232214    break;
     
    420402#endif
    421403
    422   areas[0] = readonly_area;
    423   areas[1] = nilreg_area;
    424   areas[2] = active_dynamic_area;
     404  areas[0] = nilreg_area;
     405  areas[1] = active_dynamic_area;
     406  areas[2] = readonly_area;
    425407  areas[3] = managed_static_area;
    426408  for (i = 0; i < NUM_IMAGE_SECTIONS; i++) {
     
    477459    case FWDNUM:
    478460    case GC_NUM:
    479     case DELETED_STATIC_PAIRS:
     461    case STATIC_CONSES:
    480462      break;
    481463    default:
     
    503485        return errno;
    504486      }
    505       if (nstatic) {
    506         /* Need to write the static_used bitmap */
    507         natural static_used_size_in_bytes =
    508           (align_to_power_of_2((align_to_power_of_2(nstatic, log2_nbits_in_word)>>3),
    509                                log2_page_size));
    510         seek_to_next_page(fd);
    511         if (write(fd, tenured_area->static_used, static_used_size_in_bytes)
    512             != static_used_size_in_bytes) {
    513           return errno;
    514         }
    515       }
    516487    }
    517488  }
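
The hunks removed here (and resize_used_bitvector(), added to x86-gc.c further down) size the static_used bitmap with the same expression: round the dnode count up to a whole number of words, convert bits to bytes, then round up to a whole page. A standalone sketch of that arithmetic, assuming align_to_power_of_2(x, n) rounds x up to the next multiple of 2^n (the names align_up and static_used_bitmap_bytes are hypothetical stand-ins):

    #include <stdio.h>

    /* Stand-in for the kernel's align_to_power_of_2(): round x up to a
       multiple of 2^n. */
    #define align_up(x, n) (((x) + (1UL << (n)) - 1) & ~((1UL << (n)) - 1))

    /* One bit per static dnode, rounded up to whole words, converted to
       bytes, rounded up to whole pages. */
    static unsigned long
    static_used_bitmap_bytes(unsigned long static_dnodes,
                             unsigned long log2_nbits_in_word, /* 6 on 64-bit */
                             unsigned long log2_page_size)     /* 12 for 4K pages */
    {
      unsigned long bits  = align_up(static_dnodes, log2_nbits_in_word);
      unsigned long bytes = bits >> 3;
      return align_up(bytes, log2_page_size);
    }

    int
    main(void)
    {
      /* 100000 dnodes -> 100032 bits -> 12504 bytes -> 16384 (four 4K pages) */
      printf("%lu\n", static_used_bitmap_bytes(100000, 6, 12));
      return 0;
    }
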
  • branches/working-0710/ccl/lisp-kernel/lisp-errors.h

    r5529 r7418  
    1818#define __ERRORS_X 1
    1919
    20 /*
    21 10/22/96 bill error_too_many_values
    22 --- 4.0 ---
    23 05/12/96  gb  conditionalize on __ERRORS_X to avoid conflict with <errors.h>
    24 --- 3.9 ---
    25 04/10/96  gb  error_memory_full
    26 04/09/96  gb  error_excised_function_call
    27 03/01/96  gb  FPU exceptions
    28 01/22/96  gb  add/rename error_alloc_failed, error_stack_overflow
    29 12/13/95  gb  add error_alloc_fail, error_throw_tag_missing.
    30 11/09/95  gb  in synch with %type-error-types%.
    31 */
    3220
    3321#define error_reg_regnum 0
     
    3927#define error_excised_function_call 6
    4028#define error_too_many_values 7
     29#define error_propagate_suspend 10
    4130#define error_cant_call 17
    4231
  • branches/working-0710/ccl/lisp-kernel/lisp_globals.h

    r6901 r7418  
    3333#define TCR_AREA_LOCK (-11)       /* all_areas/tcr queue lock */
    3434#define EXCEPTION_LOCK (-12)    /* serialize exception handling */
    35 #define DELETED_STATIC_PAIRS (-13) /* for hash-consing */
     35#define STATIC_CONSES (-13)
    3636#define DEFAULT_ALLOCATION_QUANTUM (-14)
    3737#define INTFLAG (-15)
  • branches/working-0710/ccl/lisp-kernel/ppc-constants.h

    r3493 r7418  
    6868#define TCR_FLAG_BIT_SUSPEND_ACK_PENDING (fixnumshift+4)
    6969#define TCR_FLAG_BIT_PENDING_EXCEPTION (fixnumshift+5)
     70#define TCR_FLAG_BIT_FOREIGN_EXCEPTION (fixnumshift+6)
     71#define TCR_FLAG_BIT_PENDING_SUSPEND (fixnumshift+7)
    7072
    7173#define TCR_STATE_FOREIGN (1)
  • branches/working-0710/ccl/lisp-kernel/ppc-constants32.s

    r5783 r7418  
    607607TCR_FLAG_BIT_FOREIGN = fixnum_shift
    608608TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)
     609TCR_FLAG_BIT_ALT_SUSPEND = (fixnumshift+2)
     610TCR_FLAG_BIT_PROPAGATE_EXCEPTION = (fixnumshift+3)
     611TCR_FLAG_BIT_SUSPEND_ACK_PENDING = (fixnumshift+4)
     612TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
     613TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
     614TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)       
    609615       
    610616r0 = 0
  • branches/working-0710/ccl/lisp-kernel/ppc-constants64.s

    r5783 r7418  
    577577
    578578TCR_FLAG_BIT_FOREIGN = fixnum_shift
    579 TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)
     579TCR_FLAG_BIT_AWAITING_PRESET = (fixnum_shift+1)
     580TCR_FLAG_BIT_ALT_SUSPEND = (fixnumshift+2)
     581TCR_FLAG_BIT_PROPAGATE_EXCEPTION = (fixnumshift+3)
     582TCR_FLAG_BIT_SUSPEND_ACK_PENDING = (fixnumshift+4)
     583TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
     584TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
     585TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)       
    580586
    581587
  • branches/working-0710/ccl/lisp-kernel/ppc-exceptions.c

    r7137 r7418  
    13271327
    13281328  case UUO_INTERR:
    1329     status = handle_error(xp, errnum, rb, 0,  where);
     1329    if (errnum == error_propagate_suspend) {
     1330      status = 0;
     1331    } else {
     1332      status = handle_error(xp, errnum, rb, 0,  where);
     1333    }
    13301334    break;
    13311335
     
    17591763    old_valence = prepare_to_wait_for_exception_lock(tcr, context);
    17601764  }
     1765
     1766  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
     1767    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
     1768    pthread_kill(pthread_self(), thread_suspend_signal);
     1769  }
     1770
    17611771 
    17621772  wait_for_exception_lock_in_handler(tcr, context, &xframe_link);
  • branches/working-0710/ccl/lisp-kernel/ppc-gc.c

    r7137 r7418  
    15971597
    15981598        case xmacptr_flag_rwlock:
     1599          rwlock_destroy((rwlock *)ptr_from_lispobj(ptr));
    15991600          break;
    16001601
  • branches/working-0710/ccl/lisp-kernel/ppc-macros.s

    r6515 r7418  
    826826define([aligned_bignum_size],[((~(dnode_size-1)&(node_size+(dnode_size-1)+(4*$1))))])
    827827
    828 
    829                
     828define([suspend_now],[
     829        uuo_interr(error_propagate_suspend,rzero)
     830])
  • branches/working-0710/ccl/lisp-kernel/ppc-spentry.s

    r6903 r7418  
    66826682/* any interrupt polling  */
    66836683       
    6684 _spentry(unbind_interrupt_level)       
    6685         __(ldr(imm2,tcr.tlb_pointer(rcontext)))   
     6684_spentry(unbind_interrupt_level)
     6685        __(ldr(imm0,tcr.flags(rcontext)))
     6686        __(ldr(imm2,tcr.tlb_pointer(rcontext)))
     6687        __(andi. imm0,imm0,1<<TCR_FLAG_BIT_PENDING_SUSPEND)
    66866688        __(ldr(imm1,tcr.db_link(rcontext)))
    66876689        __(ldr(temp1,INTERRUPT_LEVEL_BINDING_INDEX(imm2)))
    6688         __(cmpri(cr1,temp1,0))
     6690        __(bne 5f)
     66910:      __(cmpri(cr1,temp1,0))
    66896692        __(ldr(temp1,binding.val(imm1)))
    66906693        __(ldr(imm1,binding.link(imm1)))
     
    66986701        __(mr nargs,imm2)
    66996702        __(blr)
     67035:       /* Missed a suspend request; force suspend now if we're restoring
     6704          interrupt level to -1 or greater */
     6705        __(cmpri(temp1,-2<<fixnumshift))
     6706        __(bne 0b)
     6707        __(ldr(imm0,binding.val(imm1)))
     6708        __(cmpr(imm0,temp1))
     6709        __(beq 0b)
     6710        __(li imm0,1<<fixnumshift)
     6711        __(str(imm0,INTERRUPT_LEVEL_BINDING_INDEX(imm2)))
     6712        __(suspend_now())
     6713        __(b 0b)
     6714
    67006715
    67016716/* arg_x = array, arg_y = i, arg_z = j. Typecheck everything.
  • branches/working-0710/ccl/lisp-kernel/thread_manager.c

    r6904 r7418  
    117117  }
    118118  while (1) {
    119     get_spin_lock(&(m->spinlock),tcr);
     119    LOCK_SPINLOCK(m->spinlock,tcr);
    120120    ++m->avail;
    121121    if (m->avail == 1) {
    122122      m->owner = tcr;
    123123      m->count = 1;
    124       m->spinlock = 0;
     124      RELEASE_SPINLOCK(m->spinlock);
    125125      break;
    126126    }
    127     m->spinlock = 0;
     127    RELEASE_SPINLOCK(m->spinlock);
    128128    SEM_WAIT_FOREVER(m->signal);
    129129  }
     
    144144    --m->count;
    145145    if (m->count == 0) {
    146       get_spin_lock(&(m->spinlock),tcr);
     146      LOCK_SPINLOCK(m->spinlock,tcr);
    147147      m->owner = NULL;
    148148      pending = m->avail-1 + m->waiting;     /* Don't count us */
     
    154154        m->waiting = 0;
    155155      }
    156       m->spinlock = 0;
     156      RELEASE_SPINLOCK(m->spinlock);
    157157      if (pending >= 0) {
    158158        SEM_RAISE(m->signal);
     
    182182  TCR *owner = m->owner;
    183183
     184  LOCK_SPINLOCK(m->spinlock,tcr);
    184185  if (owner == tcr) {
    185186    m->count++;
    186187    if (was_free) {
    187188      *was_free = 0;
     189      RELEASE_SPINLOCK(m->spinlock);
    188190      return 0;
    189191    }
     
    195197      *was_free = 1;
    196198    }
     199    RELEASE_SPINLOCK(m->spinlock);
    197200    return 0;
    198201  }
    199202
     203  RELEASE_SPINLOCK(m->spinlock);
    200204  return EBUSY;
    201205}
     
    221225
    222226int
    223 wait_on_semaphore(SEMAPHORE s, int seconds, int millis)
     227wait_on_semaphore(void *s, int seconds, int millis)
    224228{
    225229  int nanos = (millis % 1000) * 1000000;
    226 #if defined(LINUX) || defined(FREEBSD)
     230#ifdef USE_POSIX_SEMAPHORES
    227231  int status;
    228232
    229233  struct timespec q;
    230234  gettimeofday((struct timeval *)&q, NULL);
    231   q.tv_nsec *= 1000L;
     235  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
    232236   
    233237  q.tv_nsec += nanos;
     
    259263
    260264
     265int
     266semaphore_maybe_timedwait(void *s, struct timespec *t)
     267{
     268  if (t) {
     269    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
     270  }
     271  SEM_WAIT_FOREVER(s);
     272  return 0;
     273}
     274
    261275void
    262276signal_semaphore(SEMAPHORE s)
     
    298312  TCR *tcr = get_interrupt_tcr(false);
    299313
    300   if (signo == thread_suspend_signal) {
     314  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
     315    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
     316  } else {
     317    if (signo == thread_suspend_signal) {
    301318#if 0
    302     sigset_t wait_for;
    303 #endif
    304 
    305     tcr->suspend_context = context;
     319      sigset_t wait_for;
     320#endif
     321
     322      tcr->suspend_context = context;
    306323#if 0
    307     sigfillset(&wait_for);
    308 #endif
    309     SEM_RAISE(tcr->suspend);
     324      sigfillset(&wait_for);
     325#endif
     326      SEM_RAISE(tcr->suspend);
    310327#if 0
    311     sigdelset(&wait_for, thread_resume_signal);
     328      sigdelset(&wait_for, thread_resume_signal);
    312329#endif
    313330#if 1
    314331#if RESUME_VIA_RESUME_SEMAPHORE
    315     SEM_WAIT_FOREVER(tcr->resume);
     332      SEM_WAIT_FOREVER(tcr->resume);
    316333#if SUSPEND_RESUME_VERBOSE
    317     fprintf(stderr, "got  resume in 0x%x\n",tcr);
    318 #endif
    319     tcr->suspend_context = NULL;
     334      fprintf(stderr, "got  resume in 0x%x\n",tcr);
     335#endif
     336      tcr->suspend_context = NULL;
    320337#else
    321     sigsuspend(&wait_for);
     338      sigsuspend(&wait_for);
    322339#endif
    323340#else
     
    326343    } while (tcr->suspend_context);
    327344#endif 
    328   } else {
    329     tcr->suspend_context = NULL;
     345    } else {
     346      tcr->suspend_context = NULL;
    330347#if SUSEPEND_RESUME_VERBOSE
    331     fprintf(stderr,"got  resume in in 0x%x\n",tcr);
    332 #endif
    333   }
     348      fprintf(stderr,"got  resume in in 0x%x\n",tcr);
     349#endif
     350    }
    334351#if WAIT_FOR_RESUME_ACK
    335   SEM_RAISE(tcr->suspend);
    336 #endif
     352    SEM_RAISE(tcr->suspend);
     353#endif
     354  }
    337355#ifdef DARWIN_GS_HACK
    338356  if (gs_was_tcr) {
     
    13351353
    13361354
    1337 /*
    1338   Try to take an rwquentry off of the rwlock's freelist; failing that,
    1339   malloc one.  The caller owns the lock on the rwlock itself, of course.
    1340 
    1341 */
    1342 rwquentry *
    1343 recover_rwquentry(rwlock *rw)
    1344 {
    1345   rwquentry *freelist = &(rw->freelist),
    1346     *p = freelist->next,
    1347     *follow = p->next;
    1348 
    1349   if (p == freelist) {
    1350     p = NULL;
    1351   } else {
    1352     follow->prev = freelist;
    1353     freelist->next = follow;
    1354     p->prev = p->next = NULL;
    1355     p->tcr = NULL;
    1356     p->count = 0;
    1357   }
    1358   return p;
    1359 }
    1360 
    1361 rwquentry *
    1362 new_rwquentry(rwlock *rw)
    1363 {
    1364   rwquentry *p = recover_rwquentry(rw);
    1365 
    1366   if (p == NULL) {
    1367     p = calloc(1, sizeof(rwquentry));
    1368   }
    1369   return p;
    1370 }
    1371 
    1372 
    1373 void
    1374 free_rwquentry(rwquentry *p, rwlock *rw)
    1375 {
    1376   rwquentry
    1377     *prev = p->prev,
    1378     *next = p->next,
    1379     *freelist = &(rw->freelist),
    1380     *follow = freelist->next;
    1381  
    1382   prev->next = next;
    1383   next->prev = prev;
    1384   p->prev = freelist;
    1385   freelist->next = p;
    1386   follow->prev = p;
    1387   p->next = follow;
    1388   p->prev = freelist;
    1389 }
    1390  
    1391 void
    1392 add_rwquentry(rwquentry *p, rwlock *rw)
    1393 {
    1394   rwquentry
    1395     *head = &(rw->head),
    1396     *follow = head->next;
    1397  
    1398   head->next = p;
    1399   follow->prev = p;
    1400   p->next = follow;
    1401   p->prev = head;
    1402 }
    1403 
    1404 rwquentry *
    1405 find_enqueued_tcr(TCR *target, rwlock *rw)
    1406 {
    1407   rwquentry
    1408     *head = &(rw->head),
    1409     *p = head->next;
    1410 
    1411   do {
    1412     if (p->tcr == target) {
    1413       return p;
    1414     }
    1415     p = p->next;
    1416   } while (p != head);
    1417   return NULL;
    1418 }
    1419    
     1355
    14201356rwlock *
    14211357rwlock_new()
    14221358{
    1423   rwlock *rw = calloc(1, sizeof(rwlock));
    1424  
    1425   if (rw) {
    1426     pthread_mutex_t *lock = calloc(1, sizeof(pthread_mutex_t));
    1427     if (lock == NULL) {
    1428       free (rw);
     1359  extern int cache_block_size;
     1360
     1361  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
     1362  rwlock *rw;
     1363 
     1364  if (p) {
     1365    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
     1366    rw->malloced_ptr = p;
     1367    rw->reader_signal = new_semaphore(0);
     1368    rw->writer_signal = new_semaphore(0);
     1369    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
     1370      if (rw->reader_signal) {
     1371        destroy_semaphore(&(rw->reader_signal));
     1372      } else {
     1373        destroy_semaphore(&(rw->writer_signal));
     1374      }
     1375      free(rw);
    14291376      rw = NULL;
    1430     } else {
    1431       pthread_cond_t *reader_signal = calloc(1, sizeof(pthread_cond_t));
    1432       pthread_cond_t *writer_signal = calloc(1, sizeof(pthread_cond_t));
    1433       if ((reader_signal == NULL) || (writer_signal == NULL)) {
    1434         if (reader_signal) {
    1435           free(reader_signal);
    1436         } else {
    1437           free(writer_signal);
    1438         }
    1439        
    1440         free(lock);
    1441         free(rw);
    1442         rw = NULL;
    1443       } else {
    1444         pthread_mutex_init(lock, NULL);
    1445         pthread_cond_init(reader_signal, NULL);
    1446         pthread_cond_init(writer_signal, NULL);
    1447         rw->lock = lock;
    1448         rw->reader_signal = reader_signal;
    1449         rw->writer_signal = writer_signal;
    1450         rw->head.prev = rw->head.next = &(rw->head);
    1451         rw->freelist.prev = rw->freelist.next = &(rw->freelist);
    1452       }
    14531377    }
    14541378  }
     
    14561380}
    14571381
    1458 /*
    1459   no thread should be waiting on the lock, and the caller has just
    1460   unlocked it.
    1461 */
    1462 static void
    1463 rwlock_delete(rwlock *rw)
    1464 {
    1465   pthread_mutex_t *lock = rw->lock;
    1466   pthread_cond_t *cond;
    1467   rwquentry *entry;
    1468 
    1469   rw->lock = NULL;
    1470   cond = rw->reader_signal;
    1471   rw->reader_signal = NULL;
    1472   pthread_cond_destroy(cond);
    1473   free(cond);
    1474   cond = rw->writer_signal;
    1475   rw->writer_signal = NULL;
    1476   pthread_cond_destroy(cond);
    1477   free(cond);
    1478   while (entry = recover_rwquentry(rw)) {
    1479     free(entry);
    1480   }
    1481   free(rw);
    1482   pthread_mutex_unlock(lock);
    1483   free(lock);
    1484 }
    1485 
    1486 void
    1487 rwlock_rlock_cleanup(void *arg)
    1488 {
    1489   pthread_mutex_unlock((pthread_mutex_t *)arg);
    1490 }
    14911382     
    14921383/*
     
    15011392rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
    15021393{
    1503   pthread_mutex_t *lock = rw->lock;
    1504   rwquentry *entry;
    15051394  int err = 0;
    1506 
    1507 
    1508   pthread_mutex_lock(lock);
    1509 
    1510   if (RWLOCK_WRITER(rw) == tcr) {
    1511     pthread_mutex_unlock(lock);
     1395 
     1396  LOCK_SPINLOCK(rw->spin, tcr);
     1397
     1398  if (rw->writer == tcr) {
     1399    RELEASE_SPINLOCK(rw->spin);
    15121400    return EDEADLK;
    15131401  }
    15141402
    1515   if (rw->state > 0) {
    1516     /* already some readers, we may be one of them */
    1517     entry = find_enqueued_tcr(tcr, rw);
    1518     if (entry) {
    1519       entry->count++;
    1520       rw->state++;
    1521       pthread_mutex_unlock(lock);
    1522       return 0;
    1523     }
    1524   }
    1525   entry = new_rwquentry(rw);
    1526   entry->tcr = tcr;
    1527   entry->count = 1;
    1528 
    1529   pthread_cleanup_push(rwlock_rlock_cleanup,lock);
    1530 
    1531   /* Wait for current and pending writers */
    1532   while ((err == 0) && ((rw->state < 0) || (rw->write_wait_count > 0))) {
    1533     if (waitfor) {
    1534       if (pthread_cond_timedwait(rw->reader_signal, lock, waitfor)) {
    1535         err = errno;
    1536       }
    1537     } else {
    1538       pthread_cond_wait(rw->reader_signal, lock);
    1539     }
    1540   }
    1541  
    1542   if (err == 0) {
    1543     add_rwquentry(entry, rw);
    1544     rw->state++;
    1545   }
    1546 
    1547   pthread_cleanup_pop(1);
     1403  while (rw->blocked_writers || (rw->state > 0)) {
     1404    rw->blocked_readers++;
     1405    RELEASE_SPINLOCK(rw->spin);
     1406    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
     1407    LOCK_SPINLOCK(rw->spin,tcr);
     1408    rw->blocked_readers--;
     1409    if (err == EINTR) {
     1410      err = 0;
     1411    }
     1412    if (err) {
     1413      RELEASE_SPINLOCK(rw->spin);
     1414      return err;
     1415    }
     1416  }
     1417  rw->state--;
     1418  RELEASE_SPINLOCK(rw->spin);
    15481419  return err;
    15491420}
    15501421
    15511422
    1552 /*
    1553    This is here to support cancelation.  Cancelation is evil.
    1554 */
    1555 
    1556 void
    1557 rwlock_wlock_cleanup(void *arg)
    1558 {
    1559   rwlock *rw = (rwlock *)arg;
    1560 
    1561   /* If this thread was the only queued writer and the lock
    1562      is now available for reading, tell any threads that're
    1563      waiting for read access.
    1564      This thread owns the lock on the rwlock itself.
    1565   */
    1566   if ((--(rw->write_wait_count) == 0) &&
    1567       (rw->state >= 0)) {
    1568     pthread_cond_broadcast(rw->reader_signal);
    1569   }
    1570  
    1571   pthread_mutex_unlock(rw->lock);
    1572 }
    15731423
    15741424/*
    15751425  Try to obtain write access to the lock.
    1576   If we already have read access, fail with EDEADLK.
     1426  It is an error if we already have read access, but it's hard to
     1427  detect that.
    15771428  If we already have write access, increment the count that indicates
    15781429  that.
     
    15841435rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
    15851436{
    1586   pthread_mutex_t *lock = rw->lock;
    1587   rwquentry *entry;
    15881437  int err = 0;
    15891438
    1590 
    1591   pthread_mutex_lock(lock);
    1592   if (RWLOCK_WRITER(rw) == tcr) {
    1593     --RWLOCK_WRITE_COUNT(rw);
    1594     --rw->state;
    1595     pthread_mutex_unlock(lock);
     1439  LOCK_SPINLOCK(rw->spin,tcr);
     1440  if (rw->writer == tcr) {
     1441    rw->state++;
     1442    RELEASE_SPINLOCK(rw->spin);
    15961443    return 0;
    15971444  }
    1598  
    1599   if (rw->state > 0) {
    1600     /* already some readers, we may be one of them */
    1601     entry = find_enqueued_tcr(tcr, rw);
    1602     if (entry) {
    1603       pthread_mutex_unlock(lock);
    1604       return EDEADLK;
    1605     }
    1606   }
    1607   rw->write_wait_count++;
    1608   pthread_cleanup_push(rwlock_wlock_cleanup,rw);
    1609 
    1610   while ((err == 0) && (rw->state) != 0) {
    1611     if (waitfor) {
    1612       if (pthread_cond_timedwait(rw->writer_signal, lock, waitfor)) {
    1613         err = errno;
    1614       }
    1615     } else {
    1616       pthread_cond_wait(rw->writer_signal, lock);
    1617     }
    1618   }
    1619   if (err == 0) {
    1620     RWLOCK_WRITER(rw) = tcr;
    1621     RWLOCK_WRITE_COUNT(rw) = -1;
    1622     rw->state = -1;
    1623   }
    1624   pthread_cleanup_pop(1);
     1445
     1446  while (rw->state != 0) {
     1447    rw->blocked_writers++;
     1448    RELEASE_SPINLOCK(rw->spin);
     1449    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
     1450    LOCK_SPINLOCK(rw->spin,tcr);
     1451    rw->blocked_writers--;
     1452    if (err = EINTR) {
     1453      err = 0;
     1454    }
     1455    if (err) {
     1456      RELEASE_SPINLOCK(rw->spin);
     1457      return err;
     1458    }
     1459  }
     1460  rw->state = 1;
     1461  rw->writer = tcr;
     1462  RELEASE_SPINLOCK(rw->spin);
    16251463  return err;
    16261464}
     
    16281466/*
    16291467  Sort of the same as above, only return EBUSY if we'd have to wait.
    1630   In partucular, distinguish between the cases of "some other readers
    1631   (EBUSY) another writer/queued writer(s)" (EWOULDBLOK) and "we hold a
    1632   read lock" (EDEADLK.)
    16331468*/
    16341469int
    16351470rwlock_try_wlock(rwlock *rw, TCR *tcr)
    16361471{
    1637   pthread_mutex_t *lock = rw->lock;
    1638   rwquentry *entry;
    16391472  int ret = EBUSY;
    16401473
    1641   pthread_mutex_lock(lock);
    1642   if ((RWLOCK_WRITER(rw) == tcr) ||
    1643       ((rw->state == 0) && (rw->write_wait_count == 0))) {
    1644     RWLOCK_WRITER(rw) = tcr;
    1645     --RWLOCK_WRITE_COUNT(rw);
     1474  LOCK_SPINLOCK(rw->spin,tcr);
     1475  if (rw->writer == tcr) {
     1476    rw->state++;
     1477    ret = 0;
     1478  } else {
     1479    if (rw->state == 0) {
     1480      rw->writer = tcr;
     1481      rw->state = 1;
     1482      ret = 0;
     1483    }
     1484  }
     1485  RELEASE_SPINLOCK(rw->spin);
     1486  return ret;
     1487}
     1488
     1489int
     1490rwlock_try_rlock(rwlock *rw, TCR *tcr)
     1491{
     1492  int ret = EBUSY;
     1493
     1494  LOCK_SPINLOCK(rw->spin,tcr);
     1495  if (rw->state <= 0) {
    16461496    --rw->state;
    1647     pthread_mutex_unlock(lock);
    1648     return 0;
    1649   }
    1650  
    1651   if (rw->state > 0) {
    1652     /* already some readers, we may be one of them */
    1653     entry = find_enqueued_tcr(tcr, rw);
    1654     if (entry) {
    1655       ret = EDEADLK;
    1656     }
    1657   } else {
    1658     /* another writer or queued writers */
    1659     ret = EWOULDBLOCK;
    1660   }
    1661   pthread_mutex_unlock(rw->lock);
     1497    ret = 0;
     1498  }
     1499  RELEASE_SPINLOCK(rw->spin);
    16621500  return ret;
    16631501}
    16641502
    1665 /*
    1666   "Upgrade" a lock held once or more for reading to one held the same
    1667   number of times for writing.
    1668   Upgraders have higher priority than writers do
    1669 */
    1670 
    1671 int
    1672 rwlock_read_to_write(rwlock *rw, TCR *tcr)
    1673 {
    1674 }
    16751503
    16761504
     
    16781506rwlock_unlock(rwlock *rw, TCR *tcr)
    16791507{
    1680   rwquentry *entry;
    1681 
    1682   pthread_mutex_lock(rw->lock);
    1683   if (rw->state < 0) {
    1684     /* Locked for writing.  By us ? */
    1685     if (RWLOCK_WRITER(rw) != tcr) {
    1686       pthread_mutex_unlock(rw->lock);
    1687       /* Can't unlock: locked for writing by another thread. */
    1688       return EPERM;
    1689     }
    1690     if (++RWLOCK_WRITE_COUNT(rw) == 0) {
    1691       rw->state = 0;
    1692       RWLOCK_WRITER(rw) = NULL;
    1693       if (rw->write_wait_count) {
    1694         pthread_cond_signal(rw->writer_signal);
    1695       } else {
    1696         pthread_cond_broadcast(rw->reader_signal);
     1508
     1509  int err = 0;
     1510  natural blocked_readers = 0;
     1511
     1512  LOCK_SPINLOCK(rw->spin,tcr);
     1513  if (rw->state > 0) {
     1514    if (rw->writer != tcr) {
     1515      err = EINVAL;
     1516    } else {
     1517      --rw->state;
     1518    }
     1519  } else {
     1520    if (rw->state < 0) {
     1521      ++rw->state;
     1522    } else {
     1523      err = EINVAL;
     1524    }
     1525  }
     1526  if (err) {
     1527    RELEASE_SPINLOCK(rw->spin);
     1528    return err;
     1529  }
     1530 
     1531  if (rw->state == 0) {
     1532    if (rw->blocked_writers) {
     1533      SEM_RAISE(rw->writer_signal);
     1534    } else {
     1535      blocked_readers = rw->blocked_readers;
     1536      if (blocked_readers) {
     1537        SEM_BROADCAST(rw->reader_signal, blocked_readers);
    16971538      }
    16981539    }
    1699     pthread_mutex_unlock(rw->lock);
    1700     return 0;
    1701   }
    1702   entry = find_enqueued_tcr(tcr, rw);
    1703   if (entry == NULL) {
    1704     /* Not locked for reading by us, so why are we unlocking it ? */
    1705     pthread_mutex_unlock(rw->lock);
    1706     return EPERM;
    1707   }
    1708   if (--entry->count == 0) {
    1709     free_rwquentry(entry, rw);
    1710   }
    1711   if (--rw->state == 0) {
    1712     pthread_cond_signal(rw->writer_signal);
    1713   }
    1714   pthread_mutex_unlock(rw->lock);
     1540  }
     1541  RELEASE_SPINLOCK(rw->spin);
    17151542  return 0;
    17161543}
    17171544
    17181545       
    1719 int
     1546void
    17201547rwlock_destroy(rwlock *rw)
    17211548{
    1722   return 0;                     /* for now. */
    1723 }
    1724 
    1725 
    1726 
     1549  destroy_semaphore((void **)&rw->reader_signal);
     1550  destroy_semaphore((void **)&rw->writer_signal);
     1551  postGCfree((void *)(rw->malloced_ptr));
     1552}
     1553
     1554
     1555
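
Taken together, the rewritten primitives implement a writer-preferring lock: rwlock_rlock() waits while any writers are blocked, and rwlock_unlock() wakes one blocked writer before it will broadcast to blocked readers. A hedged usage sketch against the API declared in Threads.h (reader/writer are illustrative names; the TCR is the caller's own thread record, and a NULL timespec means wait indefinitely):

    /* Illustrative use of the new rwlock API; error handling abbreviated. */
    void
    reader(rwlock *rw, TCR *tcr)
    {
      if (rwlock_rlock(rw, tcr, NULL) == 0) {   /* blocks while a writer holds it */
        /* ... read shared data ... */
        rwlock_unlock(rw, tcr);
      }
    }

    void
    writer(rwlock *rw, TCR *tcr)
    {
      if (rwlock_wlock(rw, tcr, NULL) == 0) {   /* recursive for the owning TCR */
        /* ... mutate shared data ... */
        rwlock_unlock(rw, tcr);
      }
    }
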
  • branches/working-0710/ccl/lisp-kernel/x86-asmutils64.s

    r6520 r7418  
    173173       
    174174        __ifdef([DARWIN_GS_HACK])
    175 /* Check (in and ugly, non-portale way) to see if %gs is addressing
     175/* Check (in and ugly, non-portable way) to see if %gs is addressing
    176176   pthreads data.  If it was, return 0; otherwise, assume that it's
    177177   addressing a lisp tcr and set %gs to point to the tcr's tcr.osid,
  • branches/working-0710/ccl/lisp-kernel/x86-constants.h

    r6905 r7418  
    2525#define TCR_FLAG_BIT_PENDING_EXCEPTION (fixnumshift+5)
    2626#define TCR_FLAG_BIT_FOREIGN_EXCEPTION (fixnumshift+6)
     27#define TCR_FLAG_BIT_PENDING_SUSPEND (fixnumshift+7)
    2728#define TCR_STATE_FOREIGN (1)
    2829#define TCR_STATE_LISP    (0)
  • branches/working-0710/ccl/lisp-kernel/x86-constants64.s

    r6907 r7418  
    753753TCR_FLAG_BIT_PENDING_EXCEPTION = (fixnumshift+5)
    754754TCR_FLAG_BIT_FOREIGN_EXCEPTION = (fixnumshift+6)
     755TCR_FLAG_BIT_PENDING_SUSPEND = (fixnumshift+7)       
    755756       
    756757target_most_positive_fixnum = 1152921504606846975
  • branches/working-0710/ccl/lisp-kernel/x86-exceptions.c

    r7284 r7418  
    233233      fatal_oserr(": save_application", err);
    234234    }
    235     if (selector == GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE) {
    236       LispObj aligned_arg = align_to_power_of_2(arg, log2_nbits_in_word);
    237       signed_natural
    238         delta_dnodes = ((signed_natural) aligned_arg) -
    239         ((signed_natural) tenured_area->static_dnodes);
    240       change_hons_area_size_from_xp(xp, delta_dnodes*dnode_size);
    241       xpGPR(xp, Iimm0) = tenured_area->static_dnodes;
     235    switch (selector) {
     236    case GC_TRAP_FUNCTION_SET_HONS_AREA_SIZE:
     237      xpGPR(xp, Iimm0) = 0;
     238      break;
     239    case GC_TRAP_FUNCTION_FREEZE:
     240      a->active = (BytePtr) align_to_power_of_2(a->active, log2_page_size);
     241      tenured_area->static_dnodes = area_dnode(a->active, a->low);
     242      xpGPR(xp, Iimm0) = tenured_area->static_dnodes << dnode_shift;
     243      break;
     244    default:
     245      break;
    242246    }
    243247    if (egc_was_enabled) {
     
    958962  old_valence = prepare_to_wait_for_exception_lock(tcr, context);
    959963#endif
     964  if (tcr->flags & (1<<TCR_FLAG_BIT_PENDING_SUSPEND)) {
     965    CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
     966    pthread_kill(pthread_self(), thread_suspend_signal);
     967  }
    960968  wait_for_exception_lock_in_handler(tcr,context, &xframe_link);
    961969
     
    19211929
    19221930int
    1923 change_hons_area_size_from_xp(ExceptionInformation *xp, signed_natural delta_in_bytes)
    1924 {
    1925   return gc_like_from_xp(xp, change_hons_area_size, delta_in_bytes);
    1926 }
    1927 
    1928 int
    19291931purify_from_xp(ExceptionInformation *xp, signed_natural param)
    19301932{
     
    21372139    raise_pending_interrupt(tcr);
    21382140  } else {
    2139     FBug(NULL, "no xp here!\n");
     2141    Bug(NULL, "no xp here!\n");
    21402142  }
    21412143#ifdef DEBUG_MACH_EXCEPTIONS
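
The pending-suspend changes in this file and in thread_manager.c are two halves of one deferred-suspension protocol. Condensed for illustration (all names come from this changeset):

    /* In the suspend signal handler (thread_manager.c): if the thread has
       interrupts disabled, just record that a suspend was requested. */
    if (TCR_INTERRUPT_LEVEL(tcr) <= (-2 << fixnumshift)) {
      SET_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
    } else {
      /* ... suspend immediately, as before ... */
    }

    /* Later, on the exception paths (ppc-exceptions.c, x86-exceptions.c),
       the deferred request is honored by re-raising the signal. */
    if (tcr->flags & (1 << TCR_FLAG_BIT_PENDING_SUSPEND)) {
      CLR_TCR_FLAG(tcr, TCR_FLAG_BIT_PENDING_SUSPEND);
      pthread_kill(pthread_self(), thread_suspend_signal);
    }
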
  • branches/working-0710/ccl/lisp-kernel/x86-exceptions.h

    r7282 r7418  
    9999#define XUUO_TLB_TOO_SMALL 1
    100100#define XUUO_INTERRUPT_NOW 2
     101#define XUUO_SUSPEND_NOW 3
    101102
    102103void
  • branches/working-0710/ccl/lisp-kernel/x86-gc.c

    r7137 r7418  
    16001600
    16011601        case xmacptr_flag_rwlock:
     1602          rwlock_destroy((rwlock *)ptr_from_lispobj(ptr));
    16021603          break;
    16031604
     
    21112112}
    21122113
    2113 void
    2114 forward_and_resolve_static_references(area *a)
    2115 {
    2116   natural
    2117     nstatic = static_dnodes_for_area(a),
    2118     nstatic_bitmap_words = nstatic >> bitmap_shift;
    2119   if (nstatic != 0) {
    2120     /* exploit the fact that a cons is the same size as a dnode. */
    2121     cons *pagelet_start = (cons *) a->low, *work;
    2122     bitvector markbits = GCmarkbits,
    2123       usedbits = tenured_area->static_used;
    2124     natural marked, used, used_but_not_marked, ndeleted = 0, i;
    2125 
    2126     while (nstatic_bitmap_words--) {
    2127       marked = *markbits++;
    2128       used = *usedbits;
    2129       if (marked != used) {
    2130         *usedbits = marked;
    2131       }
    2132       used |= marked;
    2133       used_but_not_marked = used & ~marked;
    2134 
    2135       while (marked) {
    2136         i = count_leading_zeros(marked);
    2137         marked &= ~(BIT0_MASK >> i);
    2138         work = pagelet_start+i;
    2139         update_noderef(&work->cdr);
    2140         update_noderef(&work->car);
    2141       }
    2142 
    2143       while (used_but_not_marked) {
    2144         i = count_leading_zeros(used_but_not_marked);
    2145         used_but_not_marked &= ~(BIT0_MASK >> i);
    2146         work = pagelet_start+i;
    2147         if ((work->cdr != undefined) &&
    2148             (work->cdr != slot_unbound)) {
    2149           work->car = slot_unbound;
    2150           work->cdr = slot_unbound;
    2151           ndeleted++;
    2152         }
    2153       }
    2154       usedbits++;
    2155       pagelet_start += nbits_in_word;
    2156     }
    2157     lisp_global(DELETED_STATIC_PAIRS) += box_fixnum(ndeleted);
    2158   }
    2159 }
     2114
    21602115
    21612116
     
    23232278}
    23242279
     2280void
     2281reclaim_static_dnodes()
     2282{
     2283  natural nstatic = tenured_area->static_dnodes, i, bits, mask, bitnum;
     2284  cons *c = (cons *)tenured_area->low, *d;
     2285  bitvector bitsp = GCmarkbits;
     2286  LispObj head = lisp_global(STATIC_CONSES);
     2287
     2288  if (nstatic) {
     2289    if (head) {
     2290      for (i = 0; i < nstatic; i+= nbits_in_word, c+= nbits_in_word) {
     2291        bits = *bitsp++;
     2292        if (bits != ALL_ONES) {
     2293          for (bitnum = 0; bitnum < nbits_in_word; bitnum++) {
     2294            if (! (bits & (BIT0_MASK>>bitnum))) {
     2295              d = c + bitnum;
     2296              d->car = 0;
     2297              d->cdr = head;
     2298              head = ((LispObj)d)+fulltag_cons;
     2299            }
     2300          }
     2301        }
     2302      }
     2303      lisp_global(STATIC_CONSES) = head;
     2304    } else {
     2305      for (i = 0; i < nstatic; i+= nbits_in_word, c+= nbits_in_word) {
     2306        bits = *bitsp++;
     2307        if (bits != ALL_ONES) {
     2308          for (bitnum = 0; bitnum < nbits_in_word; bitnum++) {
     2309            if (! (bits & (BIT0_MASK>>bitnum))) {
     2310              d = c + bitnum;
     2311              d->car = 0;
     2312              d->cdr = 0;
     2313            }
     2314          }
     2315        }
     2316      }
     2317    }
     2318  }
     2319}
    23252320
    23262321Boolean
     
    25972592    GCfirstunmarked = calculate_relocation();
    25982593
    2599     forward_range((LispObj *) ptr_from_lispobj(GCareadynamiclow), (LispObj *) ptr_from_lispobj(GCfirstunmarked));
     2594    if (!GCephemeral_low) {
     2595      reclaim_static_dnodes();
     2596    }
     2597
     2598    forward_range((LispObj *) ptr_from_lispobj(GCarealow), (LispObj *) ptr_from_lispobj(GCfirstunmarked));
    26002599
    26012600    other_tcr = tcr;
     
    26432642    if (GCephemeral_low) {
    26442643      forward_memoized_area(tenured_area, area_dnode(a->low, tenured_area->low));
    2645     } else {
    2646       /* Full GC, need to process static space */
    2647       forward_and_resolve_static_references(a);
    26482644    }
    26492645 
     
    34893485  return -1;
    34903486}
    3491 
    3492 
    3493 void
    3494 adjust_locref(LispObj *loc, LispObj base, LispObj limit, signed_natural delta)
    3495 {
    3496   LispObj p = *loc;
    3497  
    3498   if (area_dnode(p, base) < limit) {
    3499     *loc = p+delta;
    3500   }
    3501 }
    3502 
    3503 /* like adjust_locref() above, but only changes the contents of LOC if it's
    3504    a tagged lisp pointer */
    3505 void
    3506 adjust_noderef(LispObj *loc, LispObj base, LispObj limit, signed_natural delta)
    3507 {
    3508   LispObj p = *loc;
    3509   int tag_n = fulltag_of(p);
    3510 
    3511   if (is_node_fulltag(tag_n)) {
    3512     if (area_dnode(p, base) < limit) {
    3513       *loc = p+delta;
    3514     }
    3515   }
    3516 }
    3517 
    3518 /*
    3519    If *loc is a tagged pointer into the address range denoted by BASE and LIMIT,
    3520    nuke it (set it to NIL.)
    3521 */
    3522 void
    3523 nuke_noderef(LispObj *loc, LispObj base, LispObj limit)
    3524 {
    3525   LispObj p = *loc;
    3526   int tag_n = fulltag_of(p);
    3527 
    3528   if (is_node_fulltag(tag_n)) {
    3529     if (area_dnode(p, base) < limit) {
    3530       *loc = lisp_nil;
    3531     }
    3532   }
    3533 }
    3534 
    3535 
    3536 void
    3537 adjust_pointers_in_xp(ExceptionInformation *xp,
    3538                       LispObj base,
    3539                       LispObj limit,
    3540                       signed_natural delta)
    3541 {
    3542   natural *regs = (natural *) xpGPRvector(xp);
    3543 
    3544   adjust_noderef((LispObj *) (&(regs[Iarg_z])),base,limit,delta);
    3545   adjust_noderef((LispObj *) (&(regs[Iarg_y])),base,limit,delta);
    3546   adjust_noderef((LispObj *) (&(regs[Iarg_x])),base,limit,delta);
    3547   adjust_noderef((LispObj *) (&(regs[Isave3])),base,limit,delta);
    3548   adjust_noderef((LispObj *) (&(regs[Isave2])),base,limit,delta);
    3549   adjust_noderef((LispObj *) (&(regs[Isave1])),base,limit,delta);
    3550   adjust_noderef((LispObj *) (&(regs[Isave0])),base,limit,delta);
    3551   adjust_noderef((LispObj *) (&(regs[Ifn])),base,limit,delta);
    3552   adjust_noderef((LispObj *) (&(regs[Itemp0])),base,limit,delta);
    3553   adjust_noderef((LispObj *) (&(regs[Itemp1])),base,limit,delta);
    3554   adjust_noderef((LispObj *) (&(regs[Itemp2])),base,limit,delta);
    3555   adjust_locref((LispObj *) (&(xpPC(xp))),base,limit,delta);
    3556 }
    3557 
    3558 void
    3559 nuke_pointers_in_xp(ExceptionInformation *xp,
    3560                       LispObj base,
    3561                       LispObj limit)
    3562 {
    3563   natural *regs = (natural *) xpGPRvector(xp);
    3564 
    3565   nuke_noderef((LispObj *) (&(regs[Iarg_z])),base,limit);
    3566   nuke_noderef((LispObj *) (&(regs[Iarg_y])),base,limit);
    3567   nuke_noderef((LispObj *) (&(regs[Iarg_x])),base,limit);
    3568   nuke_noderef((LispObj *) (&(regs[Isave3])),base,limit);
    3569   nuke_noderef((LispObj *) (&(regs[Isave2])),base,limit);
    3570   nuke_noderef((LispObj *) (&(regs[Isave1])),base,limit);
    3571   nuke_noderef((LispObj *) (&(regs[Isave0])),base,limit);
    3572   nuke_noderef((LispObj *) (&(regs[Ifn])),base,limit);
    3573   nuke_noderef((LispObj *) (&(regs[Itemp0])),base,limit);
    3574   nuke_noderef((LispObj *) (&(regs[Itemp1])),base,limit);
    3575   nuke_noderef((LispObj *) (&(regs[Itemp2])),base,limit);
    3576 
    3577 }
    3578 
    3579 void
    3580 adjust_pointers_in_headerless_range(LispObj *range_start,
    3581                                     LispObj *range_end,
    3582                                     LispObj base,
    3583                                     LispObj limit,
    3584                                     signed_natural delta)
    3585 {
    3586   LispObj *p = range_start;
    3587 
    3588   while (p < range_end) {
    3589     adjust_noderef(p, base, limit, delta);
    3590     p++;
    3591   }
    3592 }
    3593 
    3594 
    3595 void
    3596 adjust_pointers_in_range(LispObj *range_start,
    3597                          LispObj *range_end,
    3598                          LispObj base,
    3599                          LispObj limit,
    3600                          signed_natural delta)
    3601 {
    3602   LispObj *p = range_start, node, new;
    3603   int tag_n;
    3604   natural nwords;
    3605   hash_table_vector_header *hashp;
    3606 
    3607   while (p < range_end) {
    3608     node = *p;
    3609     tag_n = fulltag_of(node);
    3610     if (immheader_tag_p(tag_n)) {
    3611       p = (LispObj *) skip_over_ivector((natural) p, node);
    3612     } else if (nodeheader_tag_p(tag_n)) {
    3613       nwords = header_element_count(node);
    3614       nwords += (1 - (nwords&1));
    3615       if ((header_subtag(node) == subtag_hash_vector) &&
    3616           ((((hash_table_vector_header *)p)->flags) & nhash_track_keys_mask)) {
    3617         hashp = (hash_table_vector_header *) p;
    3618         hashp->flags |= nhash_key_moved_mask;
    3619       } else if (header_subtag(node) == subtag_function) {
    3620         int skip = (int)(p[1]);
    3621         p += skip;
    3622         nwords -= skip;
    3623       }
    3624       p++;
    3625       while (nwords--) {
    3626         adjust_noderef(p, base, limit, delta);
    3627         p++;
    3628       }
    3629     } else {
    3630       /* just a cons */
    3631       adjust_noderef(p, base, limit, delta);
    3632       p++;
    3633       adjust_noderef(p, base, limit, delta);
    3634       p++;
    3635     }
    3636   }
    3637 }
    3638 
    3639 void
    3640 nuke_pointers_in_headerless_range(LispObj *range_start,
    3641                                   LispObj *range_end,
    3642                                   LispObj base,
    3643                                   LispObj limit)
    3644 {
    3645   LispObj *p = range_start;
    3646 
    3647   while (p < range_end) {
    3648     nuke_noderef(p, base, limit);
    3649     p++;
    3650   }
    3651 }
    3652 
    3653 
    3654 void
    3655 nuke_pointers_in_range(LispObj *range_start,
    3656                          LispObj *range_end,
    3657                          LispObj base,
    3658                          LispObj limit)
    3659 {
    3660   LispObj *p = range_start, node, new;
    3661   int tag_n;
    3662   natural nwords;
    3663 
    3664   while (p < range_end) {
    3665     node = *p;
    3666     tag_n = fulltag_of(node);
    3667     if (immheader_tag_p(tag_n)) {
    3668       p = (LispObj *) skip_over_ivector((natural) p, node);
    3669     } else if (nodeheader_tag_p(tag_n)) {
    3670       nwords = header_element_count(node);
    3671       nwords += (1 - (nwords&1));
    3672       if (header_subtag(node) == subtag_function) {
    3673         int skip = (int)(p[1]);
    3674         p += skip;
    3675         nwords -= skip;
    3676       }
    3677       p++;
    3678       while (nwords--) {
    3679         nuke_noderef(p, base, limit);
    3680         p++;
    3681       }
    3682     } else {
    3683       /* just a cons */
    3684       nuke_noderef(p, base, limit);
    3685       p++;
    3686       nuke_noderef(p, base, limit);
    3687       p++;
    3688     }
    3689   }
    3690 }
    3691 
    3692 void
    3693 adjust_pointers_in_tstack_area(area *a,
    3694                                LispObj base,
    3695                                LispObj limit,
    3696                                LispObj delta)
    3697 {
    3698   LispObj
    3699     *current,
    3700     *next,
    3701     *start = (LispObj *) a->active,
    3702     *end = start,
    3703     *area_limit = (LispObj *) (a->high);
    3704 
    3705   for (current = start;
    3706        end != area_limit;
    3707        current = next) {
    3708     next = ptr_from_lispobj(*current);
    3709     end = ((next >= start) && (next < area_limit)) ? next : area_limit;
    3710     adjust_pointers_in_range(current+2, end, base, limit, delta);
    3711   }
    3712 }
    3713 
    3714 void
    3715 nuke_pointers_in_tstack_area(area *a,
    3716                              LispObj base,
    3717                              LispObj limit)
    3718 {
    3719   LispObj
    3720     *current,
    3721     *next,
    3722     *start = (LispObj *) a->active,
    3723     *end = start,
    3724     *area_limit = (LispObj *) (a->high);
    3725 
    3726   for (current = start;
    3727        end != area_limit;
    3728        current = next) {
    3729     next = ptr_from_lispobj(*current);
    3730     end = ((next >= start) && (next < area_limit)) ? next : area_limit;
    3731     if (current[1] == 0) {
    3732       nuke_pointers_in_range(current+2, end, base, limit);
    3733     }
    3734   }
    3735 }
    3736 
    3737 void
    3738 adjust_pointers_in_vstack_area(area *a,
    3739                                LispObj base,
    3740                                LispObj limit,
    3741                                LispObj delta)
    3742 {
    3743   LispObj
    3744     *p = (LispObj *) a->active,
    3745     *q = (LispObj *) a->high;
    3746 
    3747   adjust_pointers_in_headerless_range(p, q, base, limit, delta);
    3748 }
    3749 
    3750 void
    3751 nuke_pointers_in_vstack_area(area *a,
    3752                              LispObj base,
    3753                              LispObj limit)
    3754 {
    3755   LispObj
    3756     *p = (LispObj *) a->active,
    3757     *q = (LispObj *) a->high;
    3758 
    3759   nuke_pointers_in_headerless_range(p, q, base, limit);
    3760 }
    3761 
    3762 #ifdef PPC
    3763 void
    3764 adjust_pointers_in_cstack_area(area *a,
    3765                                LispObj base,
    3766                                LispObj limit,
    3767                                LispObj delta)
    3768 {
    3769   BytePtr
    3770     current,
    3771     next,
    3772     area_limit = a->high,
    3773     low = a->low;
    3774 
    3775   for (current = a->active; (current >= low) && (current < area_limit); current = next) {
    3776     next = *((BytePtr *)current);
    3777     if (next == NULL) break;
    3778     if (((next - current) == sizeof(lisp_frame)) &&
    3779         (((((lisp_frame *)current)->savefn) == 0) ||
    3780          (fulltag_of(((lisp_frame *)current)->savefn) == fulltag_misc))) {
    3781       adjust_noderef(&((lisp_frame *) current)->savefn, base, limit, delta);
    3782       adjust_locref(&((lisp_frame *) current)->savelr, base, limit, delta);
    3783     }
    3784   }
    3785 }
    3786 #endif
    3787 
    3788 
    3789 
    3790 void
    3791 adjust_pointers_in_tcrs(TCR *current, LispObj base, LispObj limit, signed_natural delta)
    3792 {
    3793   TCR *tcr = current;
    3794   xframe_list *xframes;
    3795   LispObj *tlb_start, *tlb_end;
    3796   ExceptionInformation *xp;
    3797 
    3798   do {
    3799     xp = tcr->gc_context;
    3800     if (xp) {
    3801       adjust_pointers_in_xp(xp, base, limit, delta);
    3802     }
    3803     for (xframes = (xframe_list *) tcr->xframe;
    3804          xframes;
    3805          xframes = xframes->prev) {
    3806       adjust_pointers_in_xp(xframes->curr, base, limit, delta);
    3807     }
    3808     adjust_pointers_in_range(tcr->tlb_pointer,
    3809                              (LispObj *) ((BytePtr)tcr->tlb_pointer+tcr->tlb_limit),
    3810                              base,
    3811                              limit,
    3812                              delta);
    3813     tcr = tcr->next;
    3814   } while (tcr != current);
    3815 }
    3816 
    3817 void
    3818 nuke_pointers_in_tcrs(TCR *current, LispObj base, LispObj limit)
    3819 {
    3820   TCR *tcr = current;
    3821   xframe_list *xframes;
    3822   LispObj *tlb_start, *tlb_end;
    3823   ExceptionInformation *xp;
    3824 
    3825   do {
    3826     xp = tcr->gc_context;
    3827     if (xp) {
    3828       nuke_pointers_in_xp(xp, base, limit);
    3829     }
    3830     for (xframes = (xframe_list *) tcr->xframe;
    3831          xframes;
    3832          xframes = xframes->prev) {
    3833       nuke_pointers_in_xp(xframes->curr, base, limit);
    3834     }
    3835     nuke_pointers_in_range(tcr->tlb_pointer,
    3836                            (LispObj *) ((BytePtr)tcr->tlb_pointer+tcr->tlb_limit),
    3837                            base,
    3838                            limit);
    3839     tcr = tcr->next;
    3840   } while (tcr != current);
    3841 }
    3842 
    3843 void
    3844 adjust_gcable_ptrs(LispObj base, LispObj limit, signed_natural delta)
    3845 {
    3846   /* These need to be special-cased, because xmacptrs are immediate
    3847      objects that contain (in their "link" fields") tagged pointers
    3848      to other xmacptrs */
    3849   LispObj *prev = &(lisp_global(GCABLE_POINTERS)), next;
    3850 
    3851   while ((next = *prev) != (LispObj)NULL) {
    3852     adjust_noderef(prev, base, limit, delta);
    3853     if (delta < 0) {
    3854       /* Assume that we've already moved things */
    3855       next = *prev;
    3856     }
    3857     prev = &(((xmacptr *)ptr_from_lispobj(untag(next)))->link);
    3858   }
    3859 }
    3860    
    3861 
    3862 void
    3863 adjust_pointers_in_dynamic_area(area *a,
    3864                                 LispObj base,
    3865                                 LispObj limit,
    3866                                 signed_natural delta)
    3867 {
    3868   natural
    3869     nstatic = static_dnodes_for_area(a),
    3870     nstatic_bitmap_words = nstatic >> bitmap_shift;
    3871   LispObj
    3872     *low = (LispObj *) (a->low),
    3873     *active = (LispObj *) (a->active),
    3874     *dynamic_low = low + (2 * nstatic);
    3875 
    3876   adjust_pointers_in_range(dynamic_low, active, base, limit, delta);
    3877 
    3878   if (nstatic && (nstatic <= a->ndnodes)) {
    3879     cons *pagelet_start = (cons *) a->low, *work;
    3880     bitvector usedbits = tenured_area->static_used;
    3881     natural used, i;
    3882    
    3883     while (nstatic_bitmap_words--) {
    3884       used = *usedbits++;
    3885 
    3886       while (used) {
    3887         i = count_leading_zeros(used);
    3888         used &= ~(BIT0_MASK >> i);
    3889         work = pagelet_start+i;
    3890         adjust_noderef(&(work->cdr), base, limit, delta);
    3891         adjust_noderef(&(work->car), base, limit, delta);
    3892       }
    3893       pagelet_start += nbits_in_word;
    3894     }
    3895   }
    3896 }
    3897 
    3898 void
    3899 nuke_pointers_in_dynamic_area(area *a,
    3900                               LispObj base,
    3901                               LispObj limit)
    3902 {
    3903   natural
    3904     nstatic = static_dnodes_for_area(a),
    3905     nstatic_bitmap_words = nstatic >> bitmap_shift;
    3906   LispObj
    3907     *low = (LispObj *) (a->low),
    3908     *active = (LispObj *) (a->active),
    3909     *dynamic_low = low + (2 * nstatic);
    3910 
    3911   nuke_pointers_in_range(dynamic_low, active, base, limit);
    3912 
    3913   if (nstatic && (nstatic <= a->ndnodes)) {
    3914     cons *pagelet_start = (cons *) a->low, *work;
    3915     bitvector usedbits = tenured_area->static_used;
    3916     natural used, i;
    3917    
    3918     while (nstatic_bitmap_words--) {
    3919       used = *usedbits++;
    3920 
    3921       while (used) {
    3922         i = count_leading_zeros(used);
    3923         used &= ~(BIT0_MASK >> i);
    3924         work = pagelet_start+i;
    3925         nuke_noderef(&(work->cdr), base, limit);
    3926         nuke_noderef(&(work->car), base, limit);
    3927       }
    3928       pagelet_start += nbits_in_word;
    3929     }
    3930   }
    3931 }
    3932 
    3933    
    3934 void
    3935 adjust_all_pointers(LispObj base, LispObj limit, signed_natural delta)
    3936 {
    3937   area *next_area;
    3938   area_code code;
    3939 
    3940   for (next_area = active_dynamic_area;
    3941        (code = next_area->code) != AREA_VOID;
    3942        next_area = next_area->succ) {
    3943     switch (code) {
    3944     case AREA_TSTACK:
    3945       adjust_pointers_in_tstack_area(next_area, base, limit, delta);
    3946       break;
    3947      
    3948     case AREA_VSTACK:
    3949       adjust_pointers_in_vstack_area(next_area, base, limit, delta);
    3950       break;
    3951 
    3952     case AREA_CSTACK:
    3953 #ifndef X86
    3954       adjust_pointers_in_cstack_area(next_area, base, limit, delta);
    3955 #endif
    3956       break;
    3957 
    3958     case AREA_STATIC:
    3959     case AREA_MANAGED_STATIC:
    3960       adjust_pointers_in_range((LispObj *) (next_area->low),
    3961                                (LispObj *) (next_area->active),
    3962                                base,
    3963                                limit,
    3964                                delta);
    3965       break;
    3966 
    3967     case AREA_DYNAMIC:
    3968       adjust_pointers_in_dynamic_area(next_area, base, limit, delta);
    3969       break;
    3970     }
    3971   }
    3972   adjust_pointers_in_tcrs(get_tcr(false), base, limit, delta);
    3973   adjust_gcable_ptrs(base, limit, delta);
    3974 }
    3975 
    3976 void
    3977 nuke_all_pointers(LispObj base, LispObj limit)
    3978 {
    3979   area *next_area;
    3980   area_code code;
    3981 
    3982   for (next_area = active_dynamic_area;
    3983        (code = next_area->code) != AREA_VOID;
    3984        next_area = next_area->succ) {
    3985     switch (code) {
    3986     case AREA_TSTACK:
    3987       nuke_pointers_in_tstack_area(next_area, base, limit);
    3988       break;
    3989      
    3990     case AREA_VSTACK:
    3991       nuke_pointers_in_vstack_area(next_area, base, limit);
    3992       break;
    3993 
    3994     case AREA_CSTACK:
    3995       /* There aren't any "nukable" pointers in a cstack area */
    3996       break;
    3997 
    3998     case AREA_STATIC:
    3999     case AREA_MANAGED_STATIC:
    4000       nuke_pointers_in_range((LispObj *) (next_area->low),
    4001                                (LispObj *) (next_area->active),
    4002                                base,
    4003                                limit);
    4004       break;
    4005 
    4006     case AREA_DYNAMIC:
    4007       nuke_pointers_in_dynamic_area(next_area, base, limit);
    4008       break;
    4009     }
    4010   }
    4011   nuke_pointers_in_tcrs(get_tcr(false), base, limit);
    4012 }
    4013 
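Both dynamic-area walkers above delegate the per-slot work to adjust_noderef and nuke_noderef, which are defined elsewhere in the kernel. The following is a minimal sketch, assuming the helpers test whether the slot refers into the dnode range [base, base + limit) and either slide or clear it; only LispObj, signed_natural, area_dnode, and undefined are taken from the surrounding code, the _sketch names are illustrative, and the real helpers also verify that the slot holds a node-tagged pointer.

/* Sketch only, not the kernel's definitions. */
static void
adjust_noderef_sketch(LispObj *p, LispObj base, LispObj limit,
                      signed_natural delta)
{
  LispObj q = *p;

  if (area_dnode(q, base) < limit) {  /* slot refers into the moved range */
    *p = q + delta;                   /* slide the reference by delta bytes */
  }
}

static void
nuke_noderef_sketch(LispObj *p, LispObj base, LispObj limit)
{
  LispObj q = *p;

  if (area_dnode(q, base) < limit) {  /* slot refers into the removed range */
    *p = undefined;                   /* replace with a harmless immediate */
  }
}
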
    4014 #ifndef MREMAP_MAYMOVE
    4015 #define MREMAP_MAYMOVE 1
    4016 #endif
    4017 
    4018 #if defined(FREEBSD) || defined(SOLARIS)
    4019 void *
    4020 freebsd_mremap(void *old_address,
    4021                size_t old_size,
    4022                size_t new_size,
    4023                unsigned long flags)
    4024 {
    4025   return old_address;
    4026 }
    4027 #define mremap freebsd_mremap
    4028 
    4029 #endif
    4030 
    4031 #ifdef DARWIN
    4032 void *
    4033 darwin_mremap(void *old_address,
    4034               size_t old_size,
    4035               size_t new_size,
    4036               unsigned long flags)
    4037 {
    4038   void *end = (void *) ((char *)old_address+old_size);
    4039 
    4040   if (old_size == new_size) {
    4041     return old_address;
    4042   }
    4043   if (new_size < old_size) {
    4044     munmap(end, old_size-new_size);
    4045     return old_address;
    4046   }
    4047   {
    4048     void * new_address = mmap(NULL,
    4049                               new_size,
    4050                               PROT_READ|PROT_WRITE,
    4051                               MAP_PRIVATE | MAP_ANON,
    4052                               -1,
    4053                               0);
    4054     if (new_address !=  MAP_FAILED) {
    4055       vm_copy(mach_task_self(),
    4056               (vm_address_t)old_address,
    4057               old_size,
    4058               (vm_address_t)new_address);
    4059       munmap(old_address, old_size);
    4060     }
    4061     return new_address;
    4062   }
    4063 }
    4064 
    4065 #define mremap darwin_mremap
    4066 #endif
    4067 
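With the Darwin and FreeBSD/Solaris emulations above, mremap can be called uniformly, but the result has realloc semantics: the mapping may move when it grows, and MAP_FAILED means the old mapping is still intact. A hypothetical wrapper (not part of the changeset) showing the expected calling convention:

#include <stddef.h>
#include <sys/mman.h>

/* Hypothetical helper: resize an anonymous, page-aligned mapping,
   tolerating relocation.  Returns NULL on failure, in which case the
   old mapping remains valid and must still be used (or unmapped). */
static void *
resize_mapping(void *old, size_t old_size, size_t new_size)
{
  void *p = mremap(old, old_size, new_size, MREMAP_MAYMOVE);

  return (p == MAP_FAILED) ? NULL : p;
}
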
    4068 Boolean
    4069 resize_used_bitvector(natural new_dnodes, bitvector *newbits)
    4070 {
    4071   natural
    4072     old_dnodes = tenured_area->static_dnodes,
    4073     old_page_aligned_size =
    4074     (align_to_power_of_2((align_to_power_of_2(old_dnodes, log2_nbits_in_word)>>3),
    4075                          log2_page_size)),
    4076     new_page_aligned_size =
    4077     (align_to_power_of_2((align_to_power_of_2(new_dnodes, log2_nbits_in_word)>>3),
    4078                          log2_page_size));
    4079   bitvector old_used = tenured_area->static_used, new_used = NULL;
    4080 
    4081   if (old_page_aligned_size == new_page_aligned_size) {
    4082     *newbits = old_used;
    4083     return true;
    4084   }
    4085 
    4086   if (old_used == NULL) {
    4087     new_used = (bitvector)mmap(NULL,
    4088                                new_page_aligned_size,
    4089                                PROT_READ|PROT_WRITE,
    4090                                MAP_PRIVATE | MAP_ANON,
    4091                                -1,
    4092                                0);
    4093     if (new_used == MAP_FAILED) {
    4094       *newbits = NULL;
    4095       return false;
    4096     } else {
    4097       *newbits = new_used;
    4098       return true;
    4099     }
    4100   }
    4101   if (new_page_aligned_size == 0) {
    4102     munmap((void *)old_used, old_page_aligned_size);
    4103     *newbits = NULL;
    4104     return true;
    4105   }
    4106    
    4107   /* Have to try to remap the old bitmap.  That's implementation-dependent;
    4108      Darwin's Mach VM has no native mremap, so the emulation above is used there.
    4109   */
    4110   new_used = mremap(old_used,
    4111                     old_page_aligned_size,
    4112                     new_page_aligned_size,
    4113                     MREMAP_MAYMOVE);
    4114   if (new_used == MAP_FAILED) {
    4115     *newbits = NULL;
    4116     return false;
    4117   }
    4118   *newbits = new_used;
    4119   return true;
    4120 }
    4121 
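resize_used_bitvector sizes the bitmap at one bit per static dnode: the dnode count is rounded up to a whole word of bits, converted to bytes, and then rounded up to whole pages. A self-contained worked example, assuming 64-bit words and 4 KB pages (the helper name is illustrative):

#include <stdio.h>

/* One bit per static dnode, rounded up to a whole 64-bit word of bits,
   then to whole 4 KB pages. */
static size_t
bitmap_bytes(size_t static_dnodes)
{
  size_t bits  = (static_dnodes + 63) & ~(size_t)63;  /* align_to_power_of_2(n, 6)  */
  size_t bytes = bits >> 3;
  return (bytes + 4095) & ~(size_t)4095;              /* align_to_power_of_2(n, 12) */
}

int
main(void)
{
  /* 100000 dnodes -> 100032 bits -> 12504 bytes -> 16384 bytes (4 pages). */
  printf("%zu\n", bitmap_bytes(100000));
  return 0;
}
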
    4122  
    4123 int
    4124 grow_hons_area(signed_natural delta_in_bytes)
    4125 {
    4126   bitvector new_used;
    4127   area *ada = active_dynamic_area;
    4128   natural
    4129     delta_in_dnodes = delta_in_bytes >> dnode_shift,
    4130     current_static_dnodes = tenured_area->static_dnodes,
    4131     new_static_dnodes;
    4132    
    4133   delta_in_dnodes = align_to_power_of_2(delta_in_dnodes,log2_nbits_in_word);
    4134   new_static_dnodes = current_static_dnodes+delta_in_dnodes;
    4135   delta_in_bytes = delta_in_dnodes << dnode_shift;
    4136   if (grow_dynamic_area((natural) delta_in_bytes)) {
    4137     LispObj
    4138       base = (LispObj) (ada->low + (current_static_dnodes*dnode_size)),
    4139       oldactive = (LispObj) ada->active,
    4140       limit = area_dnode(oldactive, base);
    4141     if (!resize_used_bitvector(new_static_dnodes, &new_used)) {
    4142       shrink_dynamic_area(delta_in_bytes);
    4143       return -1;
    4144     }
    4145     tenured_area->static_used = new_used;
    4146     adjust_all_pointers(base, limit, delta_in_bytes);
    4147     memmove((void *)(base+delta_in_bytes),(void *)base,oldactive-base);
    4148     ada->ndnodes = area_dnode(ada->high, ada->low);
    4149     ada->active += delta_in_bytes;
    4150     {
    4151       LispObj *p;
    4152       natural i;
    4153       for (p = (LispObj *)(tenured_area->low + (current_static_dnodes << dnode_shift)), i = 0;
    4154            i< delta_in_dnodes;
    4155            i++ ) {
    4156         *p++ = undefined;
    4157         *p++ = undefined;
    4158       }
    4159       tenured_area->static_dnodes += delta_in_dnodes;
    4160          
    4161     }
    4162     return 0;
    4163   }
    4164   return -1;
    4165 }
    4166 
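Note that grow_hons_area first rounds the request up to a whole word's worth of dnodes, so the area can grow by slightly more than was asked for. A worked example, assuming a 64-bit lisp where dnode_shift is 4 (16-byte dnodes) and nbits_in_word is 64:

#include <stdio.h>

int
main(void)
{
  long delta_in_bytes  = 1000;
  long delta_in_dnodes = delta_in_bytes >> 4;           /* 62 dnodes           */

  delta_in_dnodes = (delta_in_dnodes + 63) & ~63L;      /* round up to 64      */
  delta_in_bytes  = delta_in_dnodes << 4;               /* 1024 bytes actually */
  printf("%ld dnodes, %ld bytes\n", delta_in_dnodes, delta_in_bytes);
  return 0;
}
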
    4167 int
    4168 shrink_hons_area(signed_natural delta_in_bytes)
    4169 {
    4170   area *ada = active_dynamic_area;
    4171   signed_natural
    4172     delta_in_dnodes = delta_in_bytes >> dnode_shift;
    4173   natural
    4174     current_static_dnodes = tenured_area->static_dnodes,
    4175     new_static_dnodes;
    4176   LispObj base, limit, oldactive;
    4177   bitvector newbits;
    4178 
    4179    
    4180   delta_in_dnodes = -align_to_power_of_2(-delta_in_dnodes,log2_nbits_in_word);
    4181   new_static_dnodes = current_static_dnodes+delta_in_dnodes;
    4182   delta_in_bytes = delta_in_dnodes << dnode_shift;
    4183   oldactive = (LispObj) (ada->active);
    4184 
    4185   resize_used_bitvector(new_static_dnodes, &newbits);
    4186   tenured_area->static_used = newbits; /* redundant */
    4187 
    4188   memmove(ada->low+(new_static_dnodes << dnode_shift),
    4189           ada->low+(current_static_dnodes << dnode_shift),
    4190           oldactive-(natural)(ada->low+(current_static_dnodes << dnode_shift)));
    4191   tenured_area->static_dnodes = new_static_dnodes;
    4192   ada->active -= -delta_in_bytes; /* delta_in_bytes is negative */
    4193   shrink_dynamic_area(-delta_in_bytes);
    4194 
    4195   base = (LispObj) (tenured_area->low +
    4196                     (new_static_dnodes << dnode_shift));
    4197   limit = area_dnode(tenured_area->low +
    4198                      (current_static_dnodes << dnode_shift), base);
    4199   nuke_all_pointers(base, limit);
    4200 
    4201   base = (LispObj) (tenured_area->low +
    4202                     (current_static_dnodes << dnode_shift));
    4203   limit = area_dnode(oldactive, base);
    4204   adjust_all_pointers(base, limit, delta_in_bytes);
    4205 
    4206   xMakeDataExecutable(tenured_area->low+(tenured_area->static_dnodes<<dnode_shift),
    4207                       ada->active-(tenured_area->low+(tenured_area->static_dnodes<<dnode_shift)));
    4208   return 0;
    4209 }
    4210 
    4211 int
    4212 change_hons_area_size(TCR *tcr, signed_natural delta_in_bytes)
    4213 {
    4214   if (delta_in_bytes > 0) {
    4215     return grow_hons_area(delta_in_bytes);
    4216   }
    4217   if (delta_in_bytes < 0) {
    4218     return shrink_hons_area(delta_in_bytes);
    4219   }
    4220   return 0;
    4221 }
    4222 
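change_hons_area_size simply dispatches on the sign of the request. A hypothetical call site (the 1 MB figure is illustrative, and error handling is left to the caller):

/* Hypothetical, not from the changeset: grow the hons (static cons)
   region by 1 MB, then shrink it back.  0 means success, -1 means the
   resize was refused. */
static void
resize_hons_example(void)
{
  TCR *tcr = get_tcr(false);

  if (change_hons_area_size(tcr, 1<<20) == 0) {
    change_hons_area_size(tcr, -(1<<20));
  }
}
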
  • branches/working-0710/ccl/lisp-kernel/x86-spentry64.s

    r7289 r7418  
    24672467        __(jmp *%ra0)
    24682468/* Discard everything that's been pushed already, complain   */
     24698:      __(lea (%rsp,%imm0),%rsp)
     2470        __(movq %arg_y,%arg_z)  /* recover original   */
     2471        __(movq $XTMINPS,%arg_y)
     2472        __(set_nargs(2))
     2473        __(push %ra0)
     2474        __(jmp _SPksignalerr)
     2475/* Discard everything that's been pushed already, complain   */
    246924769:      __(lea (%rsp,%imm0),%rsp)
    24702477        __(movq %arg_y,%arg_z)  /* recover original   */
     
    34633470       
    34643471_spentry(unbind_interrupt_level)
     3472        __(btq $TCR_FLAG_BIT_PENDING_SUSPEND,%rcontext:tcr.flags)
    34653473        __(movq %rcontext:tcr.db_link,%imm1)
    34663474        __(movq %rcontext:tcr.tlb_pointer,%arg_x)
    34673475        __(movq INTERRUPT_LEVEL_BINDING_INDEX(%arg_x),%imm0)
    3468         __(testq %imm0,%imm0)
     3476        __(jc 5f)
     34770:      __(testq %imm0,%imm0)
    34693478        __(movq binding.val(%imm1),%temp0)
    34703479        __(movq binding.link(%imm1),%imm1)
    34713480        __(movq %temp0,INTERRUPT_LEVEL_BINDING_INDEX(%arg_x))
    34723481        __(movq %imm1,%rcontext:tcr.db_link)
    3473         __(js,pn 1f)
    3474 0:      __(repret)
    3475 1:      __(testq %temp0,%temp0)
    3476         __(js 0b)
    3477         __(check_pending_enabled_interrupt(2f))
    3478 2:      __(repret)     
     3482        __(js,pn 3f)
     34832:      __(repret)
     34843:      __(testq %temp0,%temp0)
     3485        __(js 2b)
     3486        __(check_pending_enabled_interrupt(4f))
     34874:      __(repret)
     34885:       /* Missed a suspend request; force suspend now if we're restoring
     3489          interrupt level to -1 or greater */
     3490        __(cmpq $-2<<fixnumshift,%imm0)
     3491        __(jne 0b)
     3492        __(movq binding.val(%imm1),%temp0)
     3493        __(cmpq %imm0,%temp0)
     3494        __(je 0b)
     3495        __(movq $-1<<fixnumshift,INTERRUPT_LEVEL_BINDING_INDEX(%arg_x))
     3496        __(suspend_now())
     3497        __(jmp 0b)
    34793498_endsubp(unbind_interrupt_level)
    34803499
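The interesting new case is label 5: a suspend request that arrived while the interrupt level was bound to -2 (suspension deferred) is honored as soon as the unbind restores a level of -1 or greater. A self-contained C model of that branch logic (plain longs stand in for fixnums; the struct and function names are illustrative, not kernel identifiers):

#include <stdbool.h>

/* Illustrative model of the decisions made above, not kernel code. */
typedef struct {
  bool suspend_before_unbind;        /* honor the missed suspend request */
  bool poll_interrupts_after_unbind; /* check_pending_enabled_interrupt  */
} unbind_actions;

static unbind_actions
unbind_interrupt_level_model(long current_level, long restored_level,
                             bool pending_suspend)
{
  unbind_actions a = {false, false};

  /* New in this changeset: -2 means "interrupts off, suspension deferred";
     leaving that state with a suspend request pending forces a suspend. */
  if (pending_suspend && (current_level == -2) && (restored_level != -2)) {
    a.suspend_before_unbind = true;
  }
  /* Pre-existing behavior: if interrupts were disabled and the unbind
     re-enables them, deliver any pending enabled interrupt. */
  if ((current_level < 0) && (restored_level >= 0)) {
    a.poll_interrupts_after_unbind = true;
  }
  return a;
}
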
  • branches/working-0710/ccl/lisp-kernel/x86-uuo.s

    r5458 r7418  
    6363])             
    6464
     65define([suspend_now],[
     66        xuuo(3)
     67])             
     68
    6569define([uuo_error_reg_not_fixnum],[
    6670        int [$]0xf0|$1