source: trunk/source/lisp-kernel/thread_manager.c @ 9901

Last change on this file since 9901 was 9901, checked in by gb, 11 years ago

Remove unused variables. (May need to compile with -Wall to find
more unused vars on PPC, too.)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 37.2 KB
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
20/*
21   If we suspend via signals - and if the "suspend" signal is masked
22   in the handler for that signal - then it's not possible to suspend
23   a thread that's still waiting to be resumed (which is what
24   WAIT_FOR_RESUME_ACK is all about).
25*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
30typedef struct {
31  TCR *tcr;
32  natural vsize, tsize;
33  void *created;
34} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46#ifdef USE_FUTEX
47#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
48#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
49#define FUTEX_AVAIL (0)
50#define FUTEX_LOCKED (1)
51#define FUTEX_CONTENDED (2)
52#endif
53
54#ifdef WINDOWS
55int
56raise_thread_interrupt(TCR *target)
57{
58}
59#else
60int
61raise_thread_interrupt(TCR *target)
62{
63#ifdef DARWIN_not_yet
64  if (use_mach_exception_handling) {
65    return mach_raise_thread_interrupt(target);
66  }
67#endif
68 return pthread_kill((pthread_t)target->osid, SIGNAL_FOR_PROCESS_INTERRUPT);
69}
70#endif
71
72signed_natural
73atomic_incf_by(signed_natural *ptr, signed_natural by)
74{
75  signed_natural old, new;
76  do {
77    old = *ptr;
78    new = old+by;
79  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
80           (natural) old);
81  return new;
82}
83
84signed_natural
85atomic_incf(signed_natural *ptr)
86{
87  return atomic_incf_by(ptr, 1);
88}
89
90signed_natural
91atomic_decf(signed_natural *ptr)
92{
93  signed_natural old, new;
94  do {
95    old = *ptr;
96    new = old == 0 ? old : old-1;
97  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
98           (natural) old);
99  return old-1;
100}
101
102
103#ifndef USE_FUTEX
104int spin_lock_tries = 1;
105
106void
107get_spin_lock(signed_natural *p, TCR *tcr)
108{
109  int i, n = spin_lock_tries;
110 
111  while (1) {
112    for (i = 0; i < n; i++) {
113      if (atomic_swap(p,(signed_natural)tcr) == 0) {
114        return;
115      }
116    }
117#ifndef WINDOWS
118    sched_yield();
119#endif
120  }
121}
122#endif
123
124#ifndef USE_FUTEX
125int
126lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
127{
128
129  if (tcr == NULL) {
130    tcr = get_tcr(true);
131  }
132  if (m->owner == tcr) {
133    m->count++;
134    return 0;
135  }
136  while (1) {
137    LOCK_SPINLOCK(m->spinlock,tcr);
138    ++m->avail;
139    if (m->avail == 1) {
140      m->owner = tcr;
141      m->count = 1;
142      RELEASE_SPINLOCK(m->spinlock);
143      break;
144    }
145    RELEASE_SPINLOCK(m->spinlock);
146    SEM_WAIT_FOREVER(m->signal);
147  }
148  return 0;
149}
150
151#else /* USE_FUTEX */
152
153static void inline
154lock_futex(natural *p)
155{
156 
157  while (1) {
158    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
159      return;
160    }
161    while (1) {
162      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
163        return;
164      }
165      futex_wait(p,FUTEX_CONTENDED);
166    }
167  }
168}
169
170static void inline
171unlock_futex(natural *p)
172{
173  if (atomic_decf(p) != FUTEX_AVAIL) {
174    *p = FUTEX_AVAIL;
175    futex_wake(p,INT_MAX);
176  }
177}
178   
179int
180lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
181{
182  natural val;
183  if (tcr == NULL) {
184    tcr = get_tcr(true);
185  }
186  if (m->owner == tcr) {
187    m->count++;
188    return 0;
189  }
190  lock_futex(&m->avail);
191  m->owner = tcr;
192  m->count = 1;
193  return 0;
194}
195#endif /* USE_FUTEX */
196
197
198#ifndef USE_FUTEX 
199int
200unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
201{
202  int ret = EPERM, pending;
203
204  if (tcr == NULL) {
205    tcr = get_tcr(true);
206  }
207
208  if (m->owner == tcr) {
209    --m->count;
210    if (m->count == 0) {
211      LOCK_SPINLOCK(m->spinlock,tcr);
212      m->owner = NULL;
213      pending = m->avail-1 + m->waiting;     /* Don't count us */
214      m->avail = 0;
215      --pending;
216      if (pending > 0) {
217        m->waiting = pending;
218      } else {
219        m->waiting = 0;
220      }
221      RELEASE_SPINLOCK(m->spinlock);
222      if (pending >= 0) {
223        SEM_RAISE(m->signal);
224      }
225    }
226    ret = 0;
227  }
228  return ret;
229}
230#else /* USE_FUTEX */
231int
232unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
233{
234  int ret = EPERM;
235
236  if (tcr == NULL) {
237    tcr = get_tcr(true);
238  }
239
240  if (m->owner == tcr) {
241    --m->count;
242    if (m->count == 0) {
243      m->owner = NULL;
244      unlock_futex(&m->avail);
245    }
246    ret = 0;
247  }
248  return ret;
249}
250#endif /* USE_FUTEX */
251
252void
253destroy_recursive_lock(RECURSIVE_LOCK m)
254{
255#ifndef USE_FUTEX
256  destroy_semaphore((void **)&m->signal);
257#endif
258  postGCfree((void *)(m->malloced_ptr));
259}
260
261/*
262  If we're already the owner (or if the lock is free), lock it
263  and increment the lock count; otherwise, return EBUSY without
264  waiting.  (A usage sketch follows the definitions below.)
265*/
266
267#ifndef USE_FUTEX
268int
269recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
270{
271  TCR *owner = m->owner;
272
273  LOCK_SPINLOCK(m->spinlock,tcr);
274  if (owner == tcr) {
275    m->count++;
276    if (was_free) {
277      *was_free = 0;
278    }
279    RELEASE_SPINLOCK(m->spinlock);
280    return 0;
281  }
282  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
283    m->owner = tcr;
284    m->count = 1;
285    if (was_free) {
286      *was_free = 1;
287    }
288    RELEASE_SPINLOCK(m->spinlock);
289    return 0;
290  }
291
292  RELEASE_SPINLOCK(m->spinlock);
293  return EBUSY;
294}
295#else
296int
297recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
298{
299  TCR *owner = m->owner;
300
301  if (owner == tcr) {
302    m->count++;
303    if (was_free) {
304      *was_free = 0;
305    }
306    return 0;
307  }
308  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
309    m->owner = tcr;
310    m->count = 1;
311    if (was_free) {
312      *was_free = 1;
313    }
314    return 0;
315  }
316
317  return EBUSY;
318}
319#endif
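
/*
  Illustrative usage of recursive_lock_trylock(), per the comment above.
  This is a hedged sketch, not part of the original file: the function
  name try_lock_example and the do_work callback are hypothetical, and
  the block is left disabled.
*/
#if 0
static int
try_lock_example(RECURSIVE_LOCK m, void (*do_work)(void))
{
  TCR *tcr = get_tcr(true);
  int was_free = 0;

  if (recursive_lock_trylock(m, tcr, &was_free) == EBUSY) {
    return EBUSY;               /* lock held elsewhere; don't wait */
  }
  /* was_free == 1: we just acquired the lock; 0: we already owned it */
  do_work();
  return unlock_recursive_lock(m, tcr);
}
#endif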
320
321void
322sem_wait_forever(SEMAPHORE s)
323{
324  int status;
325
326  do {
327#ifdef USE_MACH_SEMAPHORES
328    mach_timespec_t q = {1,0};
329    status = SEM_TIMEDWAIT(s,q);
330#endif
331#ifdef USE_POSIX_SEMAPHORES
332    struct timespec q;
333    gettimeofday((struct timeval *)&q, NULL); q.tv_nsec *= 1000L; /* usec -> nsec */
334    q.tv_sec += 1;
335    status = SEM_TIMEDWAIT(s,&q);
336#endif
337  } while (status != 0);
338}
339
340int
341wait_on_semaphore(void *s, int seconds, int millis)
342{
343  int nanos = (millis % 1000) * 1000000;
344#ifdef USE_POSIX_SEMAPHORES
345  int status;
346
347  struct timespec q;
348  gettimeofday((struct timeval *)&q, NULL);
349  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
350   
351  q.tv_nsec += nanos;
352  if (q.tv_nsec >= 1000000000L) {
353    q.tv_nsec -= 1000000000L;
354    seconds += 1;
355  }
356  q.tv_sec += seconds;
357  status = SEM_TIMEDWAIT(s, &q);
358  if (status < 0) {
359    return errno;
360  }
361  return status;
362#endif
363#ifdef USE_MACH_SEMAPHORES
364  mach_timespec_t q = {seconds, nanos};
365  int status = SEM_TIMEDWAIT(s, q);
366
367 
368  switch (status) {
369  case 0: return 0;
370  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
371  case KERN_ABORTED: return EINTR;
372  default: return EINVAL;
373  }
374
375#endif
376}
377
378
379int
380semaphore_maybe_timedwait(void *s, struct timespec *t)
381{
382  if (t) {
383    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
384  }
385  SEM_WAIT_FOREVER(s);
386  return 0;
387}
388
389void
390signal_semaphore(SEMAPHORE s)
391{
392  SEM_RAISE(s);
393}
394
395 
396#ifdef WINDOWS
397LispObj
398current_thread_osid()
399{
400}
401#else
402LispObj
403current_thread_osid()
404{
405  return (LispObj)ptr_to_lispobj(pthread_self());
406}
407#endif
408
409
410int thread_suspend_signal = 0, thread_resume_signal = 0;
411
412
413
414void
415linux_exception_init(TCR *tcr)
416{
417}
418
419
420TCR *
421get_interrupt_tcr(Boolean create)
422{
423  return get_tcr(create);
424}
425 
426void
427suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
428{
429#ifdef DARWIN_GS_HACK
430  Boolean gs_was_tcr = ensure_gs_pthread();
431#endif
432  TCR *tcr = get_interrupt_tcr(false);
433
434  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
435    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
436  } else {
437    if (signo == thread_suspend_signal) {
438#if 0
439      sigset_t wait_for;
440#endif
441
442      tcr->suspend_context = context;
443#if 0
444      sigfillset(&wait_for);
445#endif
446      SEM_RAISE(tcr->suspend);
447#if 0
448      sigdelset(&wait_for, thread_resume_signal);
449#endif
450#if 1
451#if RESUME_VIA_RESUME_SEMAPHORE
452      SEM_WAIT_FOREVER(tcr->resume);
453#if SUSPEND_RESUME_VERBOSE
454      fprintf(stderr, "got  resume in 0x%x\n",tcr);
455#endif
456      tcr->suspend_context = NULL;
457#else
458      sigsuspend(&wait_for);
459#endif
460#else
461    do {
462      sigsuspend(&wait_for);
463    } while (tcr->suspend_context);
464#endif 
465    } else {
466      tcr->suspend_context = NULL;
467#if SUSPEND_RESUME_VERBOSE
468      fprintf(stderr,"got resume in 0x%x\n",tcr);
469#endif
470    }
471#if WAIT_FOR_RESUME_ACK
472    SEM_RAISE(tcr->suspend);
473#endif
474  }
475#ifdef DARWIN_GS_HACK
476  if (gs_was_tcr) {
477    set_gs_address(tcr);
478  }
479#endif
480#ifdef DARWIN
481  DarwinSigReturn(context);
482#endif
483#ifdef FREEBSD
484  freebsd_sigreturn(context);
485#endif
486}
487
488 
489
490/*
491  'base' should be set to the bottom (origin) of the stack, i.e., the
492  end from which it grows.  (An illustrative sketch follows the function below.)
493*/
494 
495#ifdef WINDOWS
496void
497os_get_stack_bounds(LispObj q,void **base, natural *size)
498{
499}
500#else
501void
502os_get_stack_bounds(LispObj q,void **base, natural *size)
503{
504  pthread_t p = (pthread_t)(q);
505#ifdef DARWIN
506  *base = pthread_get_stackaddr_np(p);
507  *size = pthread_get_stacksize_np(p);
508#endif
509#ifdef LINUX
510  pthread_attr_t attr;
511
512  pthread_getattr_np(p,&attr);
513  pthread_attr_getstack(&attr, base, size);
514  *(natural *)base += *size;
515#endif
516#ifdef FREEBSD
517  pthread_attr_t attr;
518  void * temp_base;
519  size_t temp_size;
520 
521
522  pthread_attr_init(&attr); 
523  pthread_attr_get_np(p, &attr);
524  pthread_attr_getstackaddr(&attr,&temp_base);
525  pthread_attr_getstacksize(&attr,&temp_size);
526  *base = (void *)((natural)temp_base + temp_size);
527  *size = temp_size;
528#endif
529
530}
531#endif
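
/*
  Illustrative sketch (not part of the original file): querying the
  current thread's stack bounds.  The function name show_stack_bounds
  is hypothetical; per the comment above, 'base' is the stack's origin
  (its high end on these platforms), so base - size is the low end.
  Left disabled.
*/
#if 0
static void
show_stack_bounds()
{
  void *base = NULL;
  natural size = 0;

  os_get_stack_bounds(current_thread_osid(), &base, &size);
  fprintf(stderr, "stack origin %p, size 0x%lx, low end %p\n",
          base, (unsigned long)size, (void *)((natural)base - size));
}
#endif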
532
533void *
534new_semaphore(int count)
535{
536#ifdef USE_POSIX_SEMAPHORES
537  sem_t *s = malloc(sizeof(sem_t));
538  sem_init(s, 0, count);
539  return s;
540#endif
541#ifdef USE_MACH_SEMAPHORES
542  semaphore_t s = (semaphore_t)0;
543  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
544  return (void *)(natural)s;
545#endif
546}
547
548RECURSIVE_LOCK
549new_recursive_lock()
550{
551  extern int cache_block_size;
552  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
553  RECURSIVE_LOCK m = NULL;
554#ifndef USE_FUTEX
555  void *signal = new_semaphore(0);
556#endif
557
558  if (p) {
559    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
560    m->malloced_ptr = p;
561  }
562
563#ifdef USE_FUTEX
564  if (m) {
565    return m;
566  }
567#else
568  if (m && signal) {
569    m->signal = signal;
570    return m;
571  }
572  if (m) {
573    free(p);
574  }
575  if (signal) {
576    destroy_semaphore(&signal);
577  }
578#endif
579  return NULL;
580}
581
582void
583destroy_semaphore(void **s)
584{
585  if (*s) {
586#ifdef USE_POSIX_SEMAPHORES
587    sem_destroy((sem_t *)*s);
588#endif
589#ifdef USE_MACH_SEMAPHORES
590    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
591#endif
592    *s=NULL;
593  }
594}
595
596#ifdef WINDOWS
597void
598tsd_set(LispObj key, void *datum)
599{
600}
601
602void *
603tsd_get(LispObj key)
604{
605}
606#else
607void
608tsd_set(LispObj key, void *datum)
609{
610  pthread_setspecific((pthread_key_t)key, datum);
611}
612
613void *
614tsd_get(LispObj key)
615{
616  return pthread_getspecific((pthread_key_t)key);
617}
618#endif
619
620void
621dequeue_tcr(TCR *tcr)
622{
623  TCR *next, *prev;
624
625  next = tcr->next;
626  prev = tcr->prev;
627
628  prev->next = next;
629  next->prev = prev;
630  tcr->prev = tcr->next = NULL;
631#ifdef X8664
632  tcr->linear = NULL;
633#endif
634}
635 
636void
637enqueue_tcr(TCR *new)
638{
639  TCR *head, *tail;
640 
641  LOCK(lisp_global(TCR_AREA_LOCK),new);
642  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
643  tail = head->prev;
644  tail->next = new;
645  head->prev = new;
646  new->prev = tail;
647  new->next = head;
648  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
649}
650
651TCR *
652allocate_tcr()
653{
654  TCR *tcr, *chain = NULL, *next;
655#ifdef DARWIN
656  extern Boolean use_mach_exception_handling;
657  kern_return_t kret;
658  mach_port_t
659    thread_exception_port,
660    task_self = mach_task_self();
661#endif
662  for (;;) {
663    tcr = calloc(1, sizeof(TCR));
664#ifdef DARWIN
665#if WORD_SIZE == 64
666    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
667      tcr->next = chain;
668      chain = tcr;
669      continue;
670    }
671#endif
672    if (use_mach_exception_handling) {
673      thread_exception_port = (mach_port_t)((natural)tcr);
674      kret = mach_port_allocate_name(task_self,
675                                     MACH_PORT_RIGHT_RECEIVE,
676                                     thread_exception_port);
677    } else {
678      kret = KERN_SUCCESS;
679    }
680
681    if (kret != KERN_SUCCESS) {
682      tcr->next = chain;
683      chain = tcr;
684      continue;
685    }
686#endif
687    for (next = chain; next; chain = next) { /* free any rejected tcrs */
688      next = chain->next;
689      free(chain);
690    }
691    return tcr;
692  }
693}
694
695#ifdef X8664
696#ifdef LINUX
697#include <asm/prctl.h>
698#include <sys/prctl.h>
699#endif
700#ifdef FREEBSD
701#include <machine/sysarch.h>
702#endif
703
704void
705setup_tcr_extra_segment(TCR *tcr)
706{
707#ifdef FREEBSD
708  amd64_set_gsbase(tcr);
709#endif
710#ifdef LINUX
711  arch_prctl(ARCH_SET_GS, (natural)tcr);
712#endif
713#ifdef DARWIN
714  /* There's no way to do this yet.  See DARWIN_GS_HACK */
715  /* darwin_set_x8664_fs_reg(tcr); */
716#endif
717}
718
719#endif
720
721
722
723/*
724  Caller must hold the area_lock.
725*/
726#ifdef WINDOWS
727TCR *
728new_tcr(natural vstack_size, natural tstack_size)
729{
730}
731#else
732TCR *
733new_tcr(natural vstack_size, natural tstack_size)
734{
735  extern area
736    *allocate_vstack_holding_area_lock(natural),
737    *allocate_tstack_holding_area_lock(natural);
738  area *a;
739  int i;
740  sigset_t sigmask;
741
742  sigemptyset(&sigmask);
743  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
744#ifdef HAVE_TLS
745  TCR *tcr = &current_tcr;
746#else
747  TCR *tcr = allocate_tcr();
748#endif
749
750#ifdef X8664
751  setup_tcr_extra_segment(tcr);
752  tcr->linear = tcr;
753#endif
754
755#if (WORD_SIZE == 64)
756  tcr->single_float_convert.tag = subtag_single_float;
757#endif
758  lisp_global(TCR_COUNT) += (1<<fixnumshift);
759  tcr->suspend = new_semaphore(0);
760  tcr->resume = new_semaphore(0);
761  tcr->reset_completion = new_semaphore(0);
762  tcr->activate = new_semaphore(0);
763  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
764  a = allocate_vstack_holding_area_lock(vstack_size);
765  tcr->vs_area = a;
766  a->owner = tcr;
767  tcr->save_vsp = (LispObj *) a->active; 
768  a = allocate_tstack_holding_area_lock(tstack_size);
769  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
770  tcr->ts_area = a;
771  a->owner = tcr;
772  tcr->save_tsp = (LispObj *) a->active;
773#ifdef X86
774  tcr->next_tsp = tcr->save_tsp;
775#endif
776
777  tcr->valence = TCR_STATE_FOREIGN;
778#ifdef PPC
779  tcr->lisp_fpscr.words.l = 0xd0;
780#endif
781#ifdef X86
782  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
783#if 1                           /* Mask underflow; too hard to
784                                   deal with denorms if underflow is
785                                   enabled */
786    (1 << MXCSR_UM_BIT) | 
787#endif
788    (1 << MXCSR_PM_BIT);
789#endif
790  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
791  tcr->tlb_limit = 2048<<fixnumshift;
792  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
793  for (i = 0; i < 2048; i++) {
794    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
795  }
796  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
797  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
798  return tcr;
799}
800#endif
801
802void
803shutdown_thread_tcr(void *arg)
804{
805  TCR *tcr = TCR_FROM_TSD(arg);
806
807  area *vs, *ts, *cs;
808  void *termination_semaphore;
809 
810  if (--(tcr->shutdown_count) == 0) {
811    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
812      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
813        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
814   
815      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
816      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
817      tsd_set(lisp_global(TCR_KEY), NULL);
818    }
819#ifdef DARWIN
820    darwin_exception_cleanup(tcr);
821#endif
822    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
823    vs = tcr->vs_area;
824    tcr->vs_area = NULL;
825    ts = tcr->ts_area;
826    tcr->ts_area = NULL;
827    cs = tcr->cs_area;
828    tcr->cs_area = NULL;
829    if (vs) {
830      condemn_area_holding_area_lock(vs);
831    }
832    if (ts) {
833      condemn_area_holding_area_lock(ts);
834    }
835    if (cs) {
836      condemn_area_holding_area_lock(cs);
837    }
838    destroy_semaphore(&tcr->suspend);
839    destroy_semaphore(&tcr->resume);
840    destroy_semaphore(&tcr->reset_completion);
841    destroy_semaphore(&tcr->activate);
842    free(tcr->tlb_pointer);
843    tcr->tlb_pointer = NULL;
844    tcr->tlb_limit = 0;
845    tcr->osid = 0;
846    tcr->interrupt_pending = 0;
847    termination_semaphore = tcr->termination_semaphore;
848    tcr->termination_semaphore = NULL;
849#ifdef HAVE_TLS
850    dequeue_tcr(tcr);
851#endif
852    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
853    if (termination_semaphore) {
854      SEM_RAISE(termination_semaphore);
855    }
856  } else {
857    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
858  }
859}
860
861void
862tcr_cleanup(void *arg)
863{
864  TCR *tcr = (TCR *)arg;
865  area *a;
866
867  a = tcr->vs_area;
868  if (a) {
869    a->active = a->high;
870  }
871  a = tcr->ts_area;
872  if (a) {
873    a->active = a->high;
874  }
875  a = tcr->cs_area;
876  if (a) {
877    a->active = a->high;
878  }
879  tcr->valence = TCR_STATE_FOREIGN;
880  tcr->shutdown_count = 1;
881  shutdown_thread_tcr(tcr);
882  tsd_set(lisp_global(TCR_KEY), NULL);
883}
884
885void *
886current_native_thread_id()
887{
888  return ((void *) (natural)
889#ifdef LINUX
890          getpid()
891#endif
892#ifdef DARWIN
893          mach_thread_self()
894#endif
895#ifdef FREEBSD
896          pthread_self()
897#endif
898#ifdef SOLARIS
899          pthread_self()
900#endif
901#ifdef WINDOWS
902          /* ThreadSelf() */ 23
903#endif
904          );
905}
906
907
908void
909thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
910{
911  area *a, *register_cstack_holding_area_lock(BytePtr, natural);
912
913  tcr->osid = current_thread_osid();
914  tcr->native_thread_id = current_native_thread_id();
915  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
916  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
917  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
918  tcr->cs_area = a;
919  a->owner = tcr;
920  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
921    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
922  }
923#ifdef LINUX
924#ifdef PPC
925#ifndef PPC64
926  tcr->native_thread_info = current_r2;
927#endif
928#endif
929#endif
930  tcr->errno_loc = &errno;
931  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
932#ifdef DARWIN
933  extern Boolean use_mach_exception_handling;
934  if (use_mach_exception_handling) {
935    darwin_exception_init(tcr);
936  }
937#endif
938#ifdef LINUX
939  linux_exception_init(tcr);
940#endif
941  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
942}
943
944/*
945  Register the specified tcr as "belonging to" the current thread.
946  Under Darwin, set up Mach exception handling for the thread.
947  Install cleanup handlers for thread termination.  (A sketch of adopting a foreign thread follows the function below.)
948*/
949void
950register_thread_tcr(TCR *tcr)
951{
952  void *stack_base = NULL;
953  natural stack_size = 0;
954
955  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
956  thread_init_tcr(tcr, stack_base, stack_size);
957  enqueue_tcr(tcr);
958}
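
/*
  Illustrative sketch (not part of the original file) of the sequence a
  foreign thread might use to obtain and register a TCR, along the lines
  described above; get_tcr(true), further below, is what actually does
  this (plus vstack setup).  The function name adopt_foreign_thread and
  its stack-size arguments are hypothetical.  Left disabled.
*/
#if 0
static TCR *
adopt_foreign_thread(natural vstack_size, natural tstack_size)
{
  TCR *tcr = new_tcr(vstack_size, tstack_size);

  SET_TCR_FLAG(tcr, TCR_FLAG_BIT_FOREIGN);
  register_thread_tcr(tcr);     /* stack bounds, exception setup, enqueue */
  return tcr;
}
#endif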
959
960
961 
962 
963#ifndef MAP_GROWSDOWN
964#define MAP_GROWSDOWN 0
965#endif
966
967#ifdef WINDOWS
968Ptr
969create_stack(int size)
970{
971}
972#else
973Ptr
974create_stack(natural size)
975{
976  Ptr p;
977  size=align_to_power_of_2(size, log2_page_size);
978  p = (Ptr) mmap(NULL,
979                 (size_t)size,
980                 PROT_READ | PROT_WRITE | PROT_EXEC,
981                 MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
982                 -1,    /* Darwin insists on this when not mmap()ing
983                           a real fd */
984                 0);
985  if (p != (Ptr)(-1)) {
986    *((size_t *)p) = size;
987    return p;
988  }
989  allocation_failure(true, size);
990
991}
992#endif
993
994void *
995allocate_stack(natural size)
996{
997  return create_stack(size);
998}
999
1000#ifdef WINDOWS
1001void
1002free_stack(void *s)
1003{
1004}
1005#else
1006void
1007free_stack(void *s)
1008{
1009  size_t size = *((size_t *)s);
1010  munmap(s, size);
1011}
1012#endif
1013
1014Boolean threads_initialized = false;
1015
1016#ifndef USE_FUTEX
1017#ifdef WINDOWS
1018void
1019count_cpus()
1020{
1021}
1022#else
1023void
1024count_cpus()
1025{
1026#ifdef DARWIN
1027  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
1028#include <mach/host_info.h>
1029
1030  struct host_basic_info info;
1031  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1032 
1033  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
1034    if (info.max_cpus > 1) {
1035      spin_lock_tries = 1024;
1036    }
1037  }
1038#else
1039  int n = sysconf(_SC_NPROCESSORS_ONLN);
1040 
1041  if (n > 1) {
1042    spin_lock_tries = 1024;
1043  }
1044#endif
1045}
1046#endif
1047#endif
1048
1049#ifdef WINDOWS
1050void
1051init_threads(void * stack_base, TCR *tcr)
1052{
1053}
1054void *
1055lisp_thread_entry(void *param)
1056{
1057}
1058#else
1059void
1060init_threads(void * stack_base, TCR *tcr)
1061{
1062  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
1063  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
1064  thread_signal_setup();
1065
1066#ifndef USE_FUTEX
1067  count_cpus();
1068#endif
1069  threads_initialized = true;
1070}
1071
1072
1073void *
1074lisp_thread_entry(void *param)
1075{
1076  thread_activation *activation = (thread_activation *)param;
1077  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
1078  sigset_t mask, old_mask;
1079
1080  sigemptyset(&mask);
1081  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
1082
1083  register_thread_tcr(tcr);
1084
1085  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
1086  tcr->vs_area->active -= node_size;
1087  *(--tcr->save_vsp) = lisp_nil;
1088  enable_fp_exceptions();
1089  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
1090  activation->tcr = tcr;
1091  SEM_RAISE(activation->created);
1092  do {
1093    SEM_RAISE(tcr->reset_completion);
1094    SEM_WAIT_FOREVER(tcr->activate);
1095    /* Now go run some lisp code */
1096    start_lisp(TCR_TO_TSD(tcr),0);
1097  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
1098  pthread_cleanup_pop(true);
1099  return NULL;
1100}
1101#endif
1102
1103void *
1104xNewThread(natural control_stack_size,
1105           natural value_stack_size,
1106           natural temp_stack_size)
1107
1108{
1109  thread_activation activation;
1110
1111
1112  activation.tsize = temp_stack_size;
1113  activation.vsize = value_stack_size;
1114  activation.tcr = 0;
1115  activation.created = new_semaphore(0);
1116  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
1117                           NULL, 
1118                           lisp_thread_entry,
1119                           (void *) &activation)) {
1120   
1121    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
1122  }
1123  destroy_semaphore(&activation.created); 
1124  return TCR_TO_TSD(activation.tcr);
1125}
1126
1127Boolean
1128active_tcr_p(TCR *q)
1129{
1130  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1131 
1132  do {
1133    if (p == q) {
1134      return true;
1135    }
1136    p = p->next;
1137  } while (p != head);
1138  return false;
1139}
1140
1141#ifdef WINDOWS
1142OSErr
1143xDisposeThread(TCR *tcr)
1144{
1145}
1146#else
1147OSErr
1148xDisposeThread(TCR *tcr)
1149{
1150  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
1151    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
1152      pthread_cancel((pthread_t)(tcr->osid));
1153      return 0;
1154    }
1155  }
1156  return -50;
1157}
1158#endif
1159
1160OSErr
1161xYieldToThread(TCR *target)
1162{
1163  Bug(NULL, "xYieldToThread ?");
1164  return 0;
1165}
1166 
1167OSErr
1168xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
1169{
1170  Bug(NULL, "xThreadCurrentStackSpace ?");
1171  return 0;
1172}
1173
1174
1175#ifdef WINDOWS
1176LispObj
1177create_system_thread(size_t stack_size,
1178                     void* stackaddr,
1179                     void* (*start_routine)(void *),
1180                     void* param)
1181{
1182}
1183#else
1184LispObj
1185create_system_thread(size_t stack_size,
1186                     void* stackaddr,
1187                     void* (*start_routine)(void *),
1188                     void* param)
1189{
1190  pthread_attr_t attr;
1191  pthread_t returned_thread = (pthread_t) 0;
1192
1193  pthread_attr_init(&attr);
1194  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
1195
1196  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
1197    stack_size = PTHREAD_STACK_MIN;
1198  }
1199
1200  stack_size = ensure_stack_limit(stack_size);
1201  if (stackaddr != NULL) {
1202    /* Size must have been specified.  Sort of makes sense ... */
1203#ifdef DARWIN
1204    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
1205#else
1206    pthread_attr_setstack(&attr, stackaddr, stack_size);
1207#endif
1208  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
1209    pthread_attr_setstacksize(&attr,stack_size);
1210  }
1211
1212  /*
1213     I think that's just about enough ... create the thread.
1214  */
1215  pthread_create(&returned_thread, &attr, start_routine, param);
1216  return (LispObj) ptr_to_lispobj(returned_thread);
1217}
1218#endif
1219
1220TCR *
1221get_tcr(Boolean create)
1222{
1223#ifdef HAVE_TLS
1224  TCR *current = current_tcr.linear;
1225#else
1226  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
1227  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
1228#endif
1229
1230  if ((current == NULL) && create) {
1231    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
1232      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1233    int i, nbindwords = 0;
1234    extern unsigned initial_stack_size;
1235   
1236    /* Make one. */
1237    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
1238    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
1239    register_thread_tcr(current);
1240#ifdef DEBUG_TCR_CREATION
1241#ifndef WINDOWS
1242    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
1243#endif
1244#endif
1245    current->vs_area->active -= node_size;
1246    *(--current->save_vsp) = lisp_nil;
1247#ifdef PPC
1248#define NSAVEREGS 8
1249#endif
1250#ifdef X8664
1251#define NSAVEREGS 4
1252#endif
1253    for (i = 0; i < NSAVEREGS; i++) {
1254      *(--current->save_vsp) = 0;
1255      current->vs_area->active -= node_size;
1256    }
1257    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
1258    for (i = 0; i < nbindwords; i++) {
1259      *(--current->save_vsp) = 0;
1260      current->vs_area->active -= node_size;
1261    }
1262    current->shutdown_count = 1;
1263    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
1264
1265  }
1266 
1267  return current;
1268}
1269
1270#ifdef WINDOWS
1271Boolean
1272suspend_tcr(TCR *tcr)
1273{
1274}
1275#else
1276Boolean
1277suspend_tcr(TCR *tcr)
1278{
1279  int suspend_count = atomic_incf(&(tcr->suspend_count));
1280  if (suspend_count == 1) {
1281#if SUSPEND_RESUME_VERBOSE
1282    fprintf(stderr,"Suspending 0x%x\n", tcr);
1283#endif
1284#ifdef DARWIN_nope
1285    if (mach_suspend_tcr(tcr)) {
1286      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
1287      return true;
1288    }
1289#endif
1290    if (pthread_kill((pthread_t)(tcr->osid), thread_suspend_signal) == 0) {
1291      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1292    } else {
1293      /* A problem using pthread_kill.  On Darwin, this can happen
1294         if the thread has had its signal mask surgically removed
1295         by pthread_exit.  If the native (Mach) thread can be suspended,
1296         do that and return true; otherwise, flag the tcr as belonging
1297         to a dead thread by setting tcr->osid to 0.
1298      */
1299      tcr->osid = 0;
1300      return false;
1301    }
1302    return true;
1303  }
1304  return false;
1305}
1306#endif
1307
1308Boolean
1309tcr_suspend_ack(TCR *tcr)
1310{
1311  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
1312    SEM_WAIT_FOREVER(tcr->suspend);
1313    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1314#if SUSPEND_RESUME_VERBOSE
1315    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
1316#endif
1317
1318  }
1319  return true;
1320}
1321
1322     
1323
1324
1325Boolean
1326lisp_suspend_tcr(TCR *tcr)
1327{
1328  Boolean suspended;
1329  TCR *current = get_tcr(true);
1330 
1331  LOCK(lisp_global(TCR_AREA_LOCK),current);
1332#ifdef DARWIN
1333#if USE_MACH_EXCEPTION_LOCK
1334  if (use_mach_exception_handling) {
1335    pthread_mutex_lock(mach_exception_lock);
1336  }
1337#endif
1338#endif
1339  suspended = suspend_tcr(tcr);
1340  if (suspended) {
1341    while (!tcr_suspend_ack(tcr));
1342  }
1343#ifdef DARWIN
1344#if USE_MACH_EXCEPTION_LOCK
1345  if (use_mach_exception_handling) {
1346    pthread_mutex_unlock(mach_exception_lock);
1347  }
1348#endif
1349#endif
1350  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1351  return suspended;
1352}
1353         
1354
1355Boolean
1356resume_tcr(TCR *tcr)
1357{
1358  int suspend_count = atomic_decf(&(tcr->suspend_count));
1359  if (suspend_count == 0) {
1360#ifdef DARWIN
1361    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1362#if SUSPEND_RESUME_VERBOSE
1363    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
1364#endif
1365      mach_resume_tcr(tcr);
1366      return true;
1367    }
1368#endif
1369#if RESUME_VIA_RESUME_SEMAPHORE
1370    SEM_RAISE(tcr->resume);
1371#else
1372    { int err = pthread_kill((pthread_t)(tcr->osid), thread_resume_signal);
1373      if (err != 0) Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
1374    }
1375#endif
1376#if SUSPEND_RESUME_VERBOSE
1377    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
1378#endif
1379    return true;
1380  }
1381  return false;
1382}
1383
1384void
1385wait_for_resumption(TCR *tcr)
1386{
1387  if (tcr->suspend_count == 0) {
1388#ifdef DARWIN
1389    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1390      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
1391      return;
1392    }
1393#endif
1394#if WAIT_FOR_RESUME_ACK
1395#if SUSPEND_RESUME_VERBOSE
1396    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
1397#endif
1398    SEM_WAIT_FOREVER(tcr->suspend);
1399#endif
1400  }
1401}
1402   
1403
1404
1405Boolean
1406lisp_resume_tcr(TCR *tcr)
1407{
1408  Boolean resumed;
1409  TCR *current = get_tcr(true);
1410 
1411  LOCK(lisp_global(TCR_AREA_LOCK),current);
1412  resumed = resume_tcr(tcr);
1413  wait_for_resumption(tcr);
1414  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1415  return resumed;
1416}
1417
1418
1419TCR *freed_tcrs = NULL;
1420
1421void
1422enqueue_freed_tcr (TCR *tcr)
1423{
1424#ifndef HAVE_TLS
1425  tcr->next = freed_tcrs;
1426  freed_tcrs = tcr;
1427#endif
1428}
1429
1430/* It's not clear that we can safely condemn a dead tcr's areas, since
1431   we may not be able to call free() if a suspended thread owns a
1432   malloc lock. At least make the areas appear to be empty.
1433*/
1434   
1435
1436void
1437normalize_dead_tcr_areas(TCR *tcr)
1438{
1439  area *a;
1440
1441  a = tcr->vs_area;
1442  if (a) {
1443    a->active = a->high;
1444  }
1445
1446  a = tcr->ts_area;
1447  if (a) {
1448    a->active = a->high;
1449  }
1450
1451  a = tcr->cs_area;
1452  if (a) {
1453    a->active = a->high;
1454  }
1455}
1456   
1457void
1458free_freed_tcrs ()
1459{
1460  TCR *current, *next;
1461
1462  for (current = freed_tcrs; current; current = next) {
1463    next = current->next;
1464#ifndef HAVE_TLS
1465    free(current);
1466#endif
1467  }
1468  freed_tcrs = NULL;
1469}
1470
1471void
1472suspend_other_threads(Boolean for_gc)
1473{
1474  TCR *current = get_tcr(true), *other, *next;
1475  int dead_tcr_count = 0;
1476  Boolean all_acked;
1477
1478  LOCK(lisp_global(TCR_AREA_LOCK), current);
1479#ifdef DARWIN
1480#if USE_MACH_EXCEPTION_LOCK
1481  if (for_gc && use_mach_exception_handling) {
1482#if SUSPEND_RESUME_VERBOSE
1483    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
1484#endif
1485    pthread_mutex_lock(mach_exception_lock);
1486  }
1487#endif
1488#endif
1489  for (other = current->next; other != current; other = other->next) {
1490    if ((other->osid != 0)) {
1491      suspend_tcr(other);
1492      if (other->osid == 0) {
1493        dead_tcr_count++;
1494      }
1495    } else {
1496      dead_tcr_count++;
1497    }
1498  }
1499
1500  do {
1501    all_acked = true;
1502    for (other = current->next; other != current; other = other->next) {
1503      if ((other->osid != 0)) {
1504        if (!tcr_suspend_ack(other)) {
1505          all_acked = false;
1506        }
1507      }
1508    }
1509  } while(! all_acked);
1510
1511     
1512
1513  /* All other threads are suspended; can safely delete dead tcrs now */
1514  if (dead_tcr_count) {
1515    for (other = current->next; other != current; other = next) {
1516      next = other->next;
1517      if ((other->osid == 0))  {
1518        normalize_dead_tcr_areas(other);
1519        dequeue_tcr(other);
1520        enqueue_freed_tcr(other);
1521      }
1522    }
1523  }
1524}
1525
1526void
1527lisp_suspend_other_threads()
1528{
1529  suspend_other_threads(false);
1530}
1531
1532void
1533resume_other_threads(Boolean for_gc)
1534{
1535  TCR *current = get_tcr(true), *other;
1536  for (other = current->next; other != current; other = other->next) {
1537    if ((other->osid != 0)) {
1538      resume_tcr(other);
1539    }
1540  }
1541  for (other = current->next; other != current; other = other->next) {
1542    if ((other->osid != 0)) {
1543      wait_for_resumption(other);
1544    }
1545  }
1546  free_freed_tcrs();
1547#ifdef DARWIN
1548#if USE_MACH_EXCEPTION_LOCK
1549  if (for_gc && use_mach_exception_handling) {
1550#if SUSPEND_RESUME_VERBOSE
1551    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
1552#endif
1553    pthread_mutex_unlock(mach_exception_lock);
1554  }
1555#endif
1556#endif
1557
1558  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1559}
1560
1561void
1562lisp_resume_other_threads()
1563{
1564  resume_other_threads(false);
1565}
1566
1567
1568
1569rwlock *
1570rwlock_new()
1571{
1572  extern int cache_block_size;
1573
1574  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1575  rwlock *rw = NULL;
1576 
1577  if (p) {
1578    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1579    rw->malloced_ptr = p;
1580#ifndef USE_FUTEX
1581    rw->reader_signal = new_semaphore(0);
1582    rw->writer_signal = new_semaphore(0);
1583    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1584      if (rw->reader_signal) {
1585        destroy_semaphore(&(rw->reader_signal));
1586      } else {
1587        destroy_semaphore(&(rw->writer_signal));
1588      }
1589      free(p);
1590      rw = NULL;
1591    }
1592#endif
1593  }
1594  return rw;
1595}
1596
1597     
1598/*
1599  Try to get read access to a multiple-readers/single-writer lock.  If
1600  we already have read access, return success (indicating that the
1601  lock is held another time).  If we already have write access to the
1602  lock ... that won't work; return EDEADLK.  Wait until no other
1603  thread has or is waiting for write access, then indicate that we
1604  hold read access once.  (A usage sketch follows the definitions below.)
1605*/
1606#ifndef USE_FUTEX
1607int
1608rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1609{
1610  int err = 0;
1611 
1612  LOCK_SPINLOCK(rw->spin, tcr);
1613
1614  if (rw->writer == tcr) {
1615    RELEASE_SPINLOCK(rw->spin);
1616    return EDEADLK;
1617  }
1618
1619  while (rw->blocked_writers || (rw->state > 0)) {
1620    rw->blocked_readers++;
1621    RELEASE_SPINLOCK(rw->spin);
1622    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
1623    LOCK_SPINLOCK(rw->spin,tcr);
1624    rw->blocked_readers--;
1625    if (err == EINTR) {
1626      err = 0;
1627    }
1628    if (err) {
1629      RELEASE_SPINLOCK(rw->spin);
1630      return err;
1631    }
1632  }
1633  rw->state--;
1634  RELEASE_SPINLOCK(rw->spin);
1635  return err;
1636}
1637#else
1638int
1639rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1640{
1641  natural waitval;
1642
1643  lock_futex(&rw->spin);
1644
1645  if (rw->writer == tcr) {
1646    unlock_futex(&rw->spin);
1647    return EDEADLOCK;
1648  }
1649  while (1) {
1650    if (rw->writer == NULL) {
1651      --rw->state;
1652      unlock_futex(&rw->spin);
1653      return 0;
1654    }
1655    rw->blocked_readers++;
1656    waitval = rw->reader_signal;
1657    unlock_futex(&rw->spin);
1658    futex_wait(&rw->reader_signal,waitval);
1659    lock_futex(&rw->spin);
1660    rw->blocked_readers--;
1661  }
1662  return 0;
1663}
1664#endif   
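
/*
  Illustrative sketch (not part of the original file): acquiring and
  releasing read access as described above.  The function name
  read_under_rwlock and the reader callback are hypothetical;
  rwlock_unlock is defined further below in this file.  Left disabled.
*/
#if 0
static int
read_under_rwlock(rwlock *rw, TCR *tcr, void (*reader)(void))
{
  int err = rwlock_rlock(rw, tcr, NULL); /* NULL timespec: wait indefinitely */

  if (err == 0) {               /* nonzero (e.g. EDEADLK) if we hold write access */
    reader();
    err = rwlock_unlock(rw, tcr);
  }
  return err;
}
#endif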
1665
1666
1667/*
1668  Try to obtain write access to the lock.
1669  It is an error if we already have read access, but it's hard to
1670  detect that.
1671  If we already have write access, increment the count that indicates
1672  that.
1673  Otherwise, wait until the lock is not held for reading or writing,
1674  then assert write access.  (A usage sketch follows the definitions below.)
1675*/
1676
1677#ifndef USE_FUTEX
1678int
1679rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1680{
1681  int err = 0;
1682
1683  LOCK_SPINLOCK(rw->spin,tcr);
1684  if (rw->writer == tcr) {
1685    rw->state++;
1686    RELEASE_SPINLOCK(rw->spin);
1687    return 0;
1688  }
1689
1690  while (rw->state != 0) {
1691    rw->blocked_writers++;
1692    RELEASE_SPINLOCK(rw->spin);
1693    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
1694    LOCK_SPINLOCK(rw->spin,tcr);
1695    rw->blocked_writers--;
1696    if (err == EINTR) {
1697      err = 0;
1698    }
1699    if (err) {
1700      RELEASE_SPINLOCK(rw->spin);
1701      return err;
1702    }
1703  }
1704  rw->state = 1;
1705  rw->writer = tcr;
1706  RELEASE_SPINLOCK(rw->spin);
1707  return err;
1708}
1709
1710#else
1711int
1712rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1713{
1714  int err = 0;
1715  natural waitval;
1716
1717  lock_futex(&rw->spin);
1718  if (rw->writer == tcr) {
1719    rw->state++;
1720    unlock_futex(&rw->spin);
1721    return 0;
1722  }
1723
1724  while (rw->state != 0) {
1725    rw->blocked_writers++;
1726    waitval = rw->writer_signal;
1727    unlock_futex(&rw->spin);
1728    futex_wait(&rw->writer_signal,waitval);
1729    lock_futex(&rw->spin);
1730    rw->blocked_writers--;
1731  }
1732  rw->state = 1;
1733  rw->writer = tcr;
1734  unlock_futex(&rw->spin);
1735  return err;
1736}
1737#endif
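
/*
  Illustrative sketch (not part of the original file): obtaining write
  access as described above, with rwlock_try_wlock (next, below) as the
  non-blocking variant.  The function name write_under_rwlock and the
  writer callback are hypothetical; rwlock_unlock is defined further
  below.  Left disabled.
*/
#if 0
static int
write_under_rwlock(rwlock *rw, TCR *tcr, void (*writer)(void))
{
  int err = rwlock_wlock(rw, tcr, NULL); /* NULL timespec: block until granted */

  if (err == 0) {
    writer();
    err = rwlock_unlock(rw, tcr);
  }
  return err;
}
#endif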
1738
1739/*
1740  Sort of the same as above, only return EBUSY if we'd have to wait.
1741*/
1742#ifndef USE_FUTEX
1743int
1744rwlock_try_wlock(rwlock *rw, TCR *tcr)
1745{
1746  int ret = EBUSY;
1747
1748  LOCK_SPINLOCK(rw->spin,tcr);
1749  if (rw->writer == tcr) {
1750    rw->state++;
1751    ret = 0;
1752  } else {
1753    if (rw->state == 0) {
1754      rw->writer = tcr;
1755      rw->state = 1;
1756      ret = 0;
1757    }
1758  }
1759  RELEASE_SPINLOCK(rw->spin);
1760  return ret;
1761}
1762#else
1763int
1764rwlock_try_wlock(rwlock *rw, TCR *tcr)
1765{
1766  int ret = EBUSY;
1767
1768  lock_futex(&rw->spin);
1769  if (rw->writer == tcr) {
1770    rw->state++;
1771    ret = 0;
1772  } else {
1773    if (rw->state == 0) {
1774      rw->writer = tcr;
1775      rw->state = 1;
1776      ret = 0;
1777    }
1778  }
1779  unlock_futex(&rw->spin);
1780  return ret;
1781}
1782#endif
1783
1784#ifndef USE_FUTEX
1785int
1786rwlock_try_rlock(rwlock *rw, TCR *tcr)
1787{
1788  int ret = EBUSY;
1789
1790  LOCK_SPINLOCK(rw->spin,tcr);
1791  if (rw->state <= 0) {
1792    --rw->state;
1793    ret = 0;
1794  }
1795  RELEASE_SPINLOCK(rw->spin);
1796  return ret;
1797}
1798#else
1799int
1800rwlock_try_rlock(rwlock *rw, TCR *tcr)
1801{
1802  int ret = EBUSY;
1803
1804  lock_futex(&rw->spin);
1805  if (rw->state <= 0) {
1806    --rw->state;
1807    ret = 0;
1808  }
1809  unlock_futex(&rw->spin);
1810  return ret;
1811}
1812#endif
1813
1814
1815
1816#ifndef USE_FUTEX
1817int
1818rwlock_unlock(rwlock *rw, TCR *tcr)
1819{
1820
1821  int err = 0;
1822  natural blocked_readers = 0;
1823
1824  LOCK_SPINLOCK(rw->spin,tcr);
1825  if (rw->state > 0) {
1826    if (rw->writer != tcr) {
1827      err = EINVAL;
1828    } else {
1829      --rw->state;
1830      if (rw->state == 0) {
1831        rw->writer = NULL;
1832      }
1833    }
1834  } else {
1835    if (rw->state < 0) {
1836      ++rw->state;
1837    } else {
1838      err = EINVAL;
1839    }
1840  }
1841  if (err) {
1842    RELEASE_SPINLOCK(rw->spin);
1843    return err;
1844  }
1845 
1846  if (rw->state == 0) {
1847    if (rw->blocked_writers) {
1848      SEM_RAISE(rw->writer_signal);
1849    } else {
1850      blocked_readers = rw->blocked_readers;
1851      if (blocked_readers) {
1852        SEM_BROADCAST(rw->reader_signal, blocked_readers);
1853      }
1854    }
1855  }
1856  RELEASE_SPINLOCK(rw->spin);
1857  return 0;
1858}
1859#else
1860int
1861rwlock_unlock(rwlock *rw, TCR *tcr)
1862{
1863
1864  int err = 0;
1865
1866  lock_futex(&rw->spin);
1867  if (rw->state > 0) {
1868    if (rw->writer != tcr) {
1869      err = EINVAL;
1870    } else {
1871      --rw->state;
1872      if (rw->state == 0) {
1873        rw->writer = NULL;
1874      }
1875    }
1876  } else {
1877    if (rw->state < 0) {
1878      ++rw->state;
1879    } else {
1880      err = EINVAL;
1881    }
1882  }
1883  if (err) {
1884    unlock_futex(&rw->spin);
1885    return err;
1886  }
1887 
1888  if (rw->state == 0) {
1889    if (rw->blocked_writers) {
1890      ++rw->writer_signal;
1891      unlock_futex(&rw->spin);
1892      futex_wake(&rw->writer_signal,1);
1893      return 0;
1894    }
1895    if (rw->blocked_readers) {
1896      ++rw->reader_signal;
1897      unlock_futex(&rw->spin);
1898      futex_wake(&rw->reader_signal, INT_MAX);
1899      return 0;
1900    }
1901  }
1902  unlock_futex(&rw->spin);
1903  return 0;
1904}
1905#endif
1906
1907       
1908void
1909rwlock_destroy(rwlock *rw)
1910{
1911#ifndef USE_FUTEX
1912  destroy_semaphore((void **)&rw->reader_signal);
1913  destroy_semaphore((void **)&rw->writer_signal);
1914#endif
1915  postGCfree((void *)(rw->malloced_ptr));
1916}
1917
1918
1919