Changeset 7203


Ignore:
Timestamp:
Sep 11, 2007, 12:03:23 PM (12 years ago)
Author:
gb
Message:

New rwlock stuff. Note: this probably isn't needed from C, and there
may be problems with most of the C implementation here. Nuke all but
creation/deletion of rwlocks when things are bootstrapped.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • branches/working-0709/ccl/lisp-kernel/thread_manager.c

    r7025 r7203  
    117117  }
    118118  while (1) {
    119     get_spin_lock(&(m->spinlock),tcr);
     119    LOCK_SPINLOCK(m->spinlock,tcr);
    120120    ++m->avail;
    121121    if (m->avail == 1) {
    122122      m->owner = tcr;
    123123      m->count = 1;
    124       m->spinlock = 0;
     124      RELEASE_SPINLOCK(m->spinlock);
    125125      break;
    126126    }
    127     m->spinlock = 0;
     127    RELEASE_SPINLOCK(m->spinlock);
    128128    SEM_WAIT_FOREVER(m->signal);
    129129  }
     
    144144    --m->count;
    145145    if (m->count == 0) {
    146       get_spin_lock(&(m->spinlock),tcr);
     146      LOCK_SPINLOCK(m->spinlock,tcr);
    147147      m->owner = NULL;
    148148      pending = m->avail-1 + m->waiting;     /* Don't count us */
     
    154154        m->waiting = 0;
    155155      }
    156       m->spinlock = 0;
     156      RELEASE_SPINLOCK(m->spinlock);
    157157      if (pending >= 0) {
    158158        SEM_RAISE(m->signal);
     
    182182  TCR *owner = m->owner;
    183183
     184  LOCK_SPINLOCK(m->spinlock,tcr);
    184185  if (owner == tcr) {
    185186    m->count++;
    186187    if (was_free) {
    187188      *was_free = 0;
     189      RELEASE_SPINLOCK(m->spinlock);
    188190      return 0;
    189191    }
     
    195197      *was_free = 1;
    196198    }
     199    RELEASE_SPINLOCK(m->spinlock);
    197200    return 0;
    198201  }
    199202
     203  RELEASE_SPINLOCK(m->spinlock);
    200204  return EBUSY;
    201205}
     
    221225
    222226int
    223 wait_on_semaphore(SEMAPHORE s, int seconds, int millis)
     227wait_on_semaphore(void *s, int seconds, int millis)
    224228{
    225229  int nanos = (millis % 1000) * 1000000;
    226 #if defined(LINUX) || defined(FREEBSD)
     230#ifdef USE_POSIX_SEMAPHORES
    227231  int status;
    228232
    229233  struct timespec q;
    230234  gettimeofday((struct timeval *)&q, NULL);
    231   q.tv_nsec *= 1000L;
     235  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
    232236   
    233237  q.tv_nsec += nanos;
     
    258262}
    259263
     264
     265int
     266semaphore_maybe_timedwait(void *s, struct timespec *t)
     267{
     268  if (t) {
     269    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
     270  }
     271  SEM_WAIT_FOREVER(s);
     272  return 0;
     273}
    260274
    261275void
     
    13391353
    13401354
    1341 /*
    1342   Try to take an rwquentry off of the rwlock's freelist; failing that,
    1343   malloc one.  The caller owns the lock on the rwlock itself, of course.
    1344 
    1345 */
    1346 rwquentry *
    1347 recover_rwquentry(rwlock *rw)
    1348 {
    1349   rwquentry *freelist = &(rw->freelist),
    1350     *p = freelist->next,
    1351     *follow = p->next;
    1352 
    1353   if (p == freelist) {
    1354     p = NULL;
    1355   } else {
    1356     follow->prev = freelist;
    1357     freelist->next = follow;
    1358     p->prev = p->next = NULL;
    1359     p->tcr = NULL;
    1360     p->count = 0;
    1361   }
    1362   return p;
    1363 }
    1364 
    1365 rwquentry *
    1366 new_rwquentry(rwlock *rw)
    1367 {
    1368   rwquentry *p = recover_rwquentry(rw);
    1369 
    1370   if (p == NULL) {
    1371     p = calloc(1, sizeof(rwquentry));
    1372   }
    1373   return p;
    1374 }
    1375 
    1376 
    1377 void
    1378 free_rwquentry(rwquentry *p, rwlock *rw)
    1379 {
    1380   rwquentry
    1381     *prev = p->prev,
    1382     *next = p->next,
    1383     *freelist = &(rw->freelist),
    1384     *follow = freelist->next;
    1385  
    1386   prev->next = next;
    1387   next->prev = prev;
    1388   p->prev = freelist;
    1389   freelist->next = p;
    1390   follow->prev = p;
    1391   p->next = follow;
    1392   p->prev = freelist;
    1393 }
    1394  
    1395 void
    1396 add_rwquentry(rwquentry *p, rwlock *rw)
    1397 {
    1398   rwquentry
    1399     *head = &(rw->head),
    1400     *follow = head->next;
    1401  
    1402   head->next = p;
    1403   follow->prev = p;
    1404   p->next = follow;
    1405   p->prev = head;
    1406 }
    1407 
    1408 rwquentry *
    1409 find_enqueued_tcr(TCR *target, rwlock *rw)
    1410 {
    1411   rwquentry
    1412     *head = &(rw->head),
    1413     *p = head->next;
    1414 
    1415   do {
    1416     if (p->tcr == target) {
    1417       return p;
    1418     }
    1419     p = p->next;
    1420   } while (p != head);
    1421   return NULL;
    1422 }
    1423    
     1355
    14241356rwlock *
    14251357rwlock_new()
    14261358{
    1427   rwlock *rw = calloc(1, sizeof(rwlock));
    1428  
    1429   if (rw) {
    1430     pthread_mutex_t *lock = calloc(1, sizeof(pthread_mutex_t));
    1431     if (lock == NULL) {
    1432       free (rw);
     1359  extern int cache_block_size;
     1360
     1361  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
     1362  rwlock *rw;
     1363 
     1364  if (p) {
     1365    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
     1366    rw->malloced_ptr = p;
     1367    rw->reader_signal = new_semaphore(0);
     1368    rw->writer_signal = new_semaphore(0);
     1369    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
     1370      if (rw->reader_signal) {
     1371        destroy_semaphore(&(rw->reader_signal));
     1372      } else {
     1373        destroy_semaphore(&(rw->writer_signal));
     1374      }
     1375      free(rw);
    14331376      rw = NULL;
    1434     } else {
    1435       pthread_cond_t *reader_signal = calloc(1, sizeof(pthread_cond_t));
    1436       pthread_cond_t *writer_signal = calloc(1, sizeof(pthread_cond_t));
    1437       if ((reader_signal == NULL) || (writer_signal == NULL)) {
    1438         if (reader_signal) {
    1439           free(reader_signal);
    1440         } else {
    1441           free(writer_signal);
    1442         }
    1443        
    1444         free(lock);
    1445         free(rw);
    1446         rw = NULL;
    1447       } else {
    1448         pthread_mutex_init(lock, NULL);
    1449         pthread_cond_init(reader_signal, NULL);
    1450         pthread_cond_init(writer_signal, NULL);
    1451         rw->lock = lock;
    1452         rw->reader_signal = reader_signal;
    1453         rw->writer_signal = writer_signal;
    1454         rw->head.prev = rw->head.next = &(rw->head);
    1455         rw->freelist.prev = rw->freelist.next = &(rw->freelist);
    1456       }
    14571377    }
    14581378  }
     
    14601380}
    14611381
    1462 /*
    1463   no thread should be waiting on the lock, and the caller has just
    1464   unlocked it.
    1465 */
    1466 static void
    1467 rwlock_delete(rwlock *rw)
    1468 {
    1469   pthread_mutex_t *lock = rw->lock;
    1470   pthread_cond_t *cond;
    1471   rwquentry *entry;
    1472 
    1473   rw->lock = NULL;
    1474   cond = rw->reader_signal;
    1475   rw->reader_signal = NULL;
    1476   pthread_cond_destroy(cond);
    1477   free(cond);
    1478   cond = rw->writer_signal;
    1479   rw->writer_signal = NULL;
    1480   pthread_cond_destroy(cond);
    1481   free(cond);
    1482   while (entry = recover_rwquentry(rw)) {
    1483     free(entry);
    1484   }
    1485   free(rw);
    1486   pthread_mutex_unlock(lock);
    1487   free(lock);
    1488 }
    1489 
    1490 void
    1491 rwlock_rlock_cleanup(void *arg)
    1492 {
    1493   pthread_mutex_unlock((pthread_mutex_t *)arg);
    1494 }
    14951382     
    14961383/*
     
    15051392rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
    15061393{
    1507   pthread_mutex_t *lock = rw->lock;
    1508   rwquentry *entry;
    15091394  int err = 0;
    1510 
    1511 
    1512   pthread_mutex_lock(lock);
    1513 
    1514   if (RWLOCK_WRITER(rw) == tcr) {
    1515     pthread_mutex_unlock(lock);
     1395 
     1396  LOCK_SPINLOCK(rw->spin, tcr);
     1397
     1398  if (rw->writer == tcr) {
     1399    RELEASE_SPINLOCK(rw->spin);
    15161400    return EDEADLK;
    15171401  }
    15181402
    1519   if (rw->state > 0) {
    1520     /* already some readers, we may be one of them */
    1521     entry = find_enqueued_tcr(tcr, rw);
    1522     if (entry) {
    1523       entry->count++;
    1524       rw->state++;
    1525       pthread_mutex_unlock(lock);
    1526       return 0;
    1527     }
    1528   }
    1529   entry = new_rwquentry(rw);
    1530   entry->tcr = tcr;
    1531   entry->count = 1;
    1532 
    1533   pthread_cleanup_push(rwlock_rlock_cleanup,lock);
    1534 
    1535   /* Wait for current and pending writers */
    1536   while ((err == 0) && ((rw->state < 0) || (rw->write_wait_count > 0))) {
    1537     if (waitfor) {
    1538       if (pthread_cond_timedwait(rw->reader_signal, lock, waitfor)) {
    1539         err = errno;
    1540       }
    1541     } else {
    1542       pthread_cond_wait(rw->reader_signal, lock);
    1543     }
    1544   }
    1545  
    1546   if (err == 0) {
    1547     add_rwquentry(entry, rw);
    1548     rw->state++;
    1549   }
    1550 
    1551   pthread_cleanup_pop(1);
     1403  while (rw->blocked_writers || (rw->state > 0)) {
     1404    rw->blocked_readers++;
     1405    RELEASE_SPINLOCK(rw->spin);
     1406    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
     1407    LOCK_SPINLOCK(rw->spin,tcr);
     1408    rw->blocked_readers--;
     1409    if (err == EINTR) {
     1410      err = 0;
     1411    }
     1412    if (err) {
     1413      RELEASE_SPINLOCK(rw->spin);
     1414      return err;
     1415    }
     1416  }
     1417  rw->state--;
     1418  RELEASE_SPINLOCK(rw->spin);
    15521419  return err;
    15531420}
    15541421
    15551422
    1556 /*
    1557    This is here to support cancelation.  Cancelation is evil.
    1558 */
    1559 
    1560 void
    1561 rwlock_wlock_cleanup(void *arg)
    1562 {
    1563   rwlock *rw = (rwlock *)arg;
    1564 
    1565   /* If this thread was the only queued writer and the lock
    1566      is now available for reading, tell any threads that're
    1567      waiting for read access.
    1568      This thread owns the lock on the rwlock itself.
    1569   */
    1570   if ((--(rw->write_wait_count) == 0) &&
    1571       (rw->state >= 0)) {
    1572     pthread_cond_broadcast(rw->reader_signal);
    1573   }
    1574  
    1575   pthread_mutex_unlock(rw->lock);
    1576 }
    15771423
    15781424/*
    15791425  Try to obtain write access to the lock.
    1580   If we already have read access, fail with EDEADLK.
     1426  It is an error if we already have read access, but it's hard to
     1427  detect that.
    15811428  If we already have write access, increment the count that indicates
    15821429  that.
     
    15881435rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
    15891436{
    1590   pthread_mutex_t *lock = rw->lock;
    1591   rwquentry *entry;
    15921437  int err = 0;
    15931438
    1594 
    1595   pthread_mutex_lock(lock);
    1596   if (RWLOCK_WRITER(rw) == tcr) {
    1597     --RWLOCK_WRITE_COUNT(rw);
    1598     --rw->state;
    1599     pthread_mutex_unlock(lock);
     1439  LOCK_SPINLOCK(rw->spin,tcr);
     1440  if (rw->writer == tcr) {
     1441    rw->state++;
     1442    RELEASE_SPINLOCK(rw->spin);
    16001443    return 0;
    16011444  }
    1602  
    1603   if (rw->state > 0) {
    1604     /* already some readers, we may be one of them */
    1605     entry = find_enqueued_tcr(tcr, rw);
    1606     if (entry) {
    1607       pthread_mutex_unlock(lock);
    1608       return EDEADLK;
    1609     }
    1610   }
    1611   rw->write_wait_count++;
    1612   pthread_cleanup_push(rwlock_wlock_cleanup,rw);
    1613 
    1614   while ((err == 0) && (rw->state) != 0) {
    1615     if (waitfor) {
    1616       if (pthread_cond_timedwait(rw->writer_signal, lock, waitfor)) {
    1617         err = errno;
    1618       }
    1619     } else {
    1620       pthread_cond_wait(rw->writer_signal, lock);
    1621     }
    1622   }
    1623   if (err == 0) {
    1624     RWLOCK_WRITER(rw) = tcr;
    1625     RWLOCK_WRITE_COUNT(rw) = -1;
    1626     rw->state = -1;
    1627   }
    1628   pthread_cleanup_pop(1);
     1445
     1446  while (rw->state != 0) {
     1447    rw->blocked_writers++;
     1448    RELEASE_SPINLOCK(rw->spin);
     1449    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
     1450    LOCK_SPINLOCK(rw->spin,tcr);
     1451    rw->blocked_writers--;
     1452    if (err == EINTR) {
     1453      err = 0;
     1454    }
     1455    if (err) {
     1456      RELEASE_SPINLOCK(rw->spin);
     1457      return err;
     1458    }
     1459  }
     1460  rw->state = 1;
     1461  rw->writer = tcr;
     1462  RELEASE_SPINLOCK(rw->spin);
    16291463  return err;
    16301464}
     
    16321466/*
    16331467  Sort of the same as above, only return EBUSY if we'd have to wait.
    1634   In particular, distinguish between the cases of "some other readers
    1635   (EBUSY) another writer/queued writer(s)" (EWOULDBLOCK) and "we hold a
    1636   read lock" (EDEADLK.)
    16371468*/
    16381469int
    16391470rwlock_try_wlock(rwlock *rw, TCR *tcr)
    16401471{
    1641   pthread_mutex_t *lock = rw->lock;
    1642   rwquentry *entry;
    16431472  int ret = EBUSY;
    16441473
    1645   pthread_mutex_lock(lock);
    1646   if ((RWLOCK_WRITER(rw) == tcr) ||
    1647       ((rw->state == 0) && (rw->write_wait_count == 0))) {
    1648     RWLOCK_WRITER(rw) = tcr;
    1649     --RWLOCK_WRITE_COUNT(rw);
     1474  LOCK_SPINLOCK(rw->spin,tcr);
     1475  if (rw->writer == tcr) {
     1476    rw->state++;
     1477    ret = 0;
     1478  } else {
     1479    if (rw->state == 0) {
     1480      rw->writer = tcr;
     1481      rw->state = 1;
     1482      ret = 0;
     1483    }
     1484  }
     1485  RELEASE_SPINLOCK(rw->spin);
     1486  return ret;
     1487}
     1488
     1489int
     1490rwlock_try_rlock(rwlock *rw, TCR *tcr)
     1491{
     1492  int ret = EBUSY;
     1493
     1494  LOCK_SPINLOCK(rw->spin,tcr);
     1495  if (rw->state <= 0) {
    16501496    --rw->state;
    1651     pthread_mutex_unlock(lock);
    1652     return 0;
    1653   }
    1654  
    1655   if (rw->state > 0) {
    1656     /* already some readers, we may be one of them */
    1657     entry = find_enqueued_tcr(tcr, rw);
    1658     if (entry) {
    1659       ret = EDEADLK;
    1660     }
    1661   } else {
    1662     /* another writer or queued writers */
    1663     ret = EWOULDBLOCK;
    1664   }
    1665   pthread_mutex_unlock(rw->lock);
     1497    ret = 0;
     1498  }
     1499  RELEASE_SPINLOCK(rw->spin);
    16661500  return ret;
    16671501}
    16681502
    1669 /*
    1670   "Upgrade" a lock held once or more for reading to one held the same
    1671   number of times for writing.
    1672   Upgraders have higher priority than writers do
    1673 */
    1674 
    1675 int
    1676 rwlock_read_to_write(rwlock *rw, TCR *tcr)
    1677 {
    1678 }
    16791503
    16801504
     
    16821506rwlock_unlock(rwlock *rw, TCR *tcr)
    16831507{
    1684   rwquentry *entry;
    1685 
    1686   pthread_mutex_lock(rw->lock);
    1687   if (rw->state < 0) {
    1688     /* Locked for writing.  By us ? */
    1689     if (RWLOCK_WRITER(rw) != tcr) {
    1690       pthread_mutex_unlock(rw->lock);
    1691       /* Can't unlock: locked for writing by another thread. */
    1692       return EPERM;
    1693     }
    1694     if (++RWLOCK_WRITE_COUNT(rw) == 0) {
    1695       rw->state = 0;
    1696       RWLOCK_WRITER(rw) = NULL;
    1697       if (rw->write_wait_count) {
    1698         pthread_cond_signal(rw->writer_signal);
    1699       } else {
    1700         pthread_cond_broadcast(rw->reader_signal);
     1508
     1509  int err = 0;
     1510  natural blocked_readers = 0;
     1511
     1512  LOCK_SPINLOCK(rw->spin,tcr);
     1513  if (rw->state > 0) {
     1514    if (rw->writer != tcr) {
     1515      err = EINVAL;
     1516    } else {
     1517      --rw->state;
     1518    }
     1519  } else {
     1520    if (rw->state < 0) {
     1521      ++rw->state;
     1522    } else {
     1523      err = EINVAL;
     1524    }
     1525  }
     1526  if (err) {
     1527    RELEASE_SPINLOCK(rw->spin);
     1528    return err;
     1529  }
     1530 
     1531  if (rw->state == 0) {
     1532    if (rw->blocked_writers) {
     1533      SEM_RAISE(rw->writer_signal);
     1534    } else {
     1535      blocked_readers = rw->blocked_readers;
     1536      if (blocked_readers) {
     1537        SEM_BROADCAST(rw->reader_signal, blocked_readers);
    17011538      }
    17021539    }
    1703     pthread_mutex_unlock(rw->lock);
    1704     return 0;
    1705   }
    1706   entry = find_enqueued_tcr(tcr, rw);
    1707   if (entry == NULL) {
    1708     /* Not locked for reading by us, so why are we unlocking it ? */
    1709     pthread_mutex_unlock(rw->lock);
    1710     return EPERM;
    1711   }
    1712   if (--entry->count == 0) {
    1713     free_rwquentry(entry, rw);
    1714   }
    1715   if (--rw->state == 0) {
    1716     pthread_cond_signal(rw->writer_signal);
    1717   }
    1718   pthread_mutex_unlock(rw->lock);
     1540  }
     1541  RELEASE_SPINLOCK(rw->spin);
    17191542  return 0;
    17201543}
    17211544
    17221545       
    1723 int
     1546void
    17241547rwlock_destroy(rwlock *rw)
    17251548{
    1726   return 0;                     /* for now. */
    1727 }
    1728 
    1729 
    1730 
     1549  destroy_semaphore((void **)&rw->reader_signal);
     1550  destroy_semaphore((void **)&rw->writer_signal);
     1551  postGCfree((void *)(rw->malloced_ptr));
     1552}
     1553
     1554
     1555
Note: See TracChangeset for help on using the changeset viewer.