source: release/1.2/source/lisp-kernel/thread_manager.c @ 10178

Last change on this file since 10178 was 10178, checked in by gb, 12 years ago

Propagate r10177 (locking in
lisp_suspend_other_threads/lisp_resume_other_threads) to 1.2.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 37.5 KB
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
20/*
21   If we suspend via signals - and if the "suspend" signal is masked
22   in the handler for that signal - then it's not possible to suspend
23   a thread that's still waiting to be resumed (which is what
24   WAIT_FOR_RESUME_ACK is all about.)
25*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
30typedef struct {
31  TCR *tcr;
32  natural vsize, tsize;
33  void *created;
34} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46#ifdef USE_FUTEX
47#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
48#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
49#define FUTEX_AVAIL (0)
50#define FUTEX_LOCKED (1)
51#define FUTEX_CONTENDED (2)
52#endif
53
54#ifdef WINDOWS
55int
56raise_thread_interrupt(TCR *target)
57{
58}
59#else
60int
61raise_thread_interrupt(TCR *target)
62{
63  pthread_t thread = (pthread_t)target->osid;
64#ifdef DARWIN_not_yet
65  if (use_mach_exception_handling) {
66    return mach_raise_thread_interrupt(target);
67  }
68#endif
69  if (thread != (pthread_t) 0) {
70    return pthread_kill(thread, SIGNAL_FOR_PROCESS_INTERRUPT);
71  }
72  return ESRCH;
73}
74#endif
75
76signed_natural
77atomic_incf_by(signed_natural *ptr, signed_natural by)
78{
79  signed_natural old, new;
80  do {
81    old = *ptr;
82    new = old+by;
83  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
84           (natural) old);
85  return new;
86}
87
88signed_natural
89atomic_incf(signed_natural *ptr)
90{
91  return atomic_incf_by(ptr, 1);
92}
93
94signed_natural
95atomic_decf(signed_natural *ptr)
96{
97  signed_natural old, new;
98  do {
99    old = *ptr;
100    new = old == 0 ? old : old-1;
101  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
102           (natural) old);
103  return old-1;
104}
105
106
107#ifndef USE_FUTEX
108int spin_lock_tries = 1;
109
110void
111get_spin_lock(signed_natural *p, TCR *tcr)
112{
113  int i, n = spin_lock_tries;
114 
115  while (1) {
116    for (i = 0; i < n; i++) {
117      if (atomic_swap(p,(signed_natural)tcr) == 0) {
118        return;
119      }
120    }
121#ifndef WINDOWS
122    sched_yield();
123#endif
124  }
125}
126#endif
127
128#ifndef USE_FUTEX
129int
130lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
131{
132
133  if (tcr == NULL) {
134    tcr = get_tcr(true);
135  }
136  if (m->owner == tcr) {
137    m->count++;
138    return 0;
139  }
140  while (1) {
141    LOCK_SPINLOCK(m->spinlock,tcr);
142    ++m->avail;
143    if (m->avail == 1) {
144      m->owner = tcr;
145      m->count = 1;
146      RELEASE_SPINLOCK(m->spinlock);
147      break;
148    }
149    RELEASE_SPINLOCK(m->spinlock);
150    SEM_WAIT_FOREVER(m->signal);
151  }
152  return 0;
153}
154
155#else /* USE_FUTEX */
156
157static void inline
158lock_futex(natural *p)
159{
160 
161  while (1) {
162    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
163      return;
164    }
165    while (1) {
166      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
167        return;
168      }
169      futex_wait(p,FUTEX_CONTENDED);
170    }
171  }
172}
173
174static void inline
175unlock_futex(natural *p)
176{
177  if (atomic_decf(p) != FUTEX_AVAIL) {
178    *p = FUTEX_AVAIL;
179    futex_wake(p,INT_MAX);
180  }
181}
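
/*
  Illustrative sketch (not compiled; names below are hypothetical) of
  how lock_futex()/unlock_futex() above might guard a critical
  section.  The futex word cycles through FUTEX_AVAIL (0),
  FUTEX_LOCKED (1) and FUTEX_CONTENDED (2); unlock_futex() only
  issues a futex_wake() when the old value indicated contention.
*/
#if 0
static natural example_futex_word = FUTEX_AVAIL;

static void
example_futex_critical_section(void)
{
  lock_futex(&example_futex_word);   /* AVAIL->LOCKED, or mark CONTENDED
                                        and futex_wait() */
  /* ... touch shared state ... */
  unlock_futex(&example_futex_word); /* futex_wake()s only if contended */
}
#endif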
182   
183int
184lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
185{
186  if (tcr == NULL) {
187    tcr = get_tcr(true);
188  }
189  if (m->owner == tcr) {
190    m->count++;
191    return 0;
192  }
193  lock_futex(&m->avail);
194  m->owner = tcr;
195  m->count = 1;
196  return 0;
197}
198#endif /* USE_FUTEX */
199
200
201#ifndef USE_FUTEX 
202int
203unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
204{
205  int ret = EPERM, pending;
206
207  if (tcr == NULL) {
208    tcr = get_tcr(true);
209  }
210
211  if (m->owner == tcr) {
212    --m->count;
213    if (m->count == 0) {
214      LOCK_SPINLOCK(m->spinlock,tcr);
215      m->owner = NULL;
216      pending = m->avail-1 + m->waiting;     /* Don't count us */
217      m->avail = 0;
218      --pending;
219      if (pending > 0) {
220        m->waiting = pending;
221      } else {
222        m->waiting = 0;
223      }
224      RELEASE_SPINLOCK(m->spinlock);
225      if (pending >= 0) {
226        SEM_RAISE(m->signal);
227      }
228    }
229    ret = 0;
230  }
231  return ret;
232}
233#else /* USE_FUTEX */
234int
235unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
236{
237  int ret = EPERM;
238
239   if (tcr == NULL) {
240    tcr = get_tcr(true);
241  }
242
243  if (m->owner == tcr) {
244    --m->count;
245    if (m->count == 0) {
246      m->owner = NULL;
247      unlock_futex(&m->avail);
248    }
249    ret = 0;
250  }
251  return ret;
252}
253#endif /* USE_FUTEX */
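
/*
  Illustrative sketch (not compiled; the helper name and counter are
  hypothetical) of the recursive-lock protocol implemented above: the
  owning TCR may reacquire the lock, and the lock is only released
  when its count returns to zero.
*/
#if 0
static natural example_shared_count;

static void
example_bump_shared_count(RECURSIVE_LOCK m, TCR *tcr)
{
  lock_recursive_lock(m, tcr);    /* nests if tcr already owns m */
  example_shared_count++;
  unlock_recursive_lock(m, tcr);  /* frees m only when count hits 0 */
}
#endif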
254
255void
256destroy_recursive_lock(RECURSIVE_LOCK m)
257{
258#ifndef USE_FUTEX
259  destroy_semaphore((void **)&m->signal);
260#endif
261  postGCfree((void *)(m->malloced_ptr));
262}
263
264/*
265  If we're already the owner (or if the lock is free), lock it
266  and increment the lock count; otherwise, return EBUSY without
267  waiting.
268*/
269
270#ifndef USE_FUTEX
271int
272recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
273{
274  TCR *owner = m->owner;
275
276  LOCK_SPINLOCK(m->spinlock,tcr);
277  if (owner == tcr) {
278    m->count++;
279    if (was_free) {
280      *was_free = 0;
281    }
282    RELEASE_SPINLOCK(m->spinlock);
283    return 0;
284  }
285  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
286    m->owner = tcr;
287    m->count = 1;
288    if (was_free) {
289      *was_free = 1;
290    }
291    RELEASE_SPINLOCK(m->spinlock);
292    return 0;
293  }
294
295  RELEASE_SPINLOCK(m->spinlock);
296  return EBUSY;
297}
298#else
299int
300recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
301{
302  TCR *owner = m->owner;
303
304  if (owner == tcr) {
305    m->count++;
306    if (was_free) {
307      *was_free = 0;
308    }
309    return 0;
310  }
311  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
312    m->owner = tcr;
313    m->count = 1;
314    if (was_free) {
315      *was_free = 1;
316    }
317    return 0;
318  }
319
320  return EBUSY;
321}
322#endif
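
/*
  Illustrative sketch (not compiled; the helper name is hypothetical)
  of using recursive_lock_trylock() above: a caller that must not
  block can test for EBUSY and fall back to other work.
*/
#if 0
static Boolean
example_try_critical_section(RECURSIVE_LOCK m, TCR *tcr)
{
  int was_free;

  if (recursive_lock_trylock(m, tcr, &was_free) == EBUSY) {
    return false;               /* someone else holds m; don't wait */
  }
  /* ... critical section; was_free says whether this was the
     outermost (first) acquisition ... */
  unlock_recursive_lock(m, tcr);
  return true;
}
#endif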
323
324void
325sem_wait_forever(SEMAPHORE s)
326{
327  int status;
328
329  do {
330#ifdef USE_MACH_SEMAPHORES
331    mach_timespec_t q = {1,0};
332    status = SEM_TIMEDWAIT(s,q);
333#endif
334#ifdef USE_POSIX_SEMAPHORES
335    struct timespec q;
336    gettimeofday((struct timeval *)&q, NULL);
337    q.tv_sec += 1;
338    status = SEM_TIMEDWAIT(s,&q);
339#endif
340  } while (status != 0);
341}
342
343int
344wait_on_semaphore(void *s, int seconds, int millis)
345{
346  int nanos = (millis % 1000) * 1000000;
347#ifdef USE_POSIX_SEMAPHORES
348  int status;
349
350  struct timespec q;
351  gettimeofday((struct timeval *)&q, NULL);
352  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
353   
354  q.tv_nsec += nanos;
355  if (q.tv_nsec >= 1000000000L) {
356    q.tv_nsec -= 1000000000L;
357    seconds += 1;
358  }
359  q.tv_sec += seconds;
360  status = SEM_TIMEDWAIT(s, &q);
361  if (status < 0) {
362    return errno;
363  }
364  return status;
365#endif
366#ifdef USE_MACH_SEMAPHORES
367  mach_timespec_t q = {seconds, nanos};
368  int status = SEM_TIMEDWAIT(s, q);
369
370 
371  switch (status) {
372  case 0: return 0;
373  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
374  case KERN_ABORTED: return EINTR;
375  default: return EINVAL;
376  }
377
378#endif
379}
380
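/*
  Illustrative sketch (not compiled; the helper name is hypothetical)
  of calling wait_on_semaphore() above: the timeout is split into
  whole seconds and milliseconds, so waiting roughly 1.5 seconds
  means seconds = 1, millis = 500.
*/
#if 0
static Boolean
example_wait_up_to_1500ms(void *s)
{
  int err = wait_on_semaphore(s, 1, 500);

  return (err == 0);            /* 0: signaled; ETIMEDOUT: gave up */
}
#endif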
381
382int
383semaphore_maybe_timedwait(void *s, struct timespec *t)
384{
385  if (t) {
386    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
387  }
388  SEM_WAIT_FOREVER(s);
389  return 0;
390}
391
392void
393signal_semaphore(SEMAPHORE s)
394{
395  SEM_RAISE(s);
396}
397
398 
399#ifdef WINDOWS
400LispObj
401current_thread_osid()
402{
403}
404#else
405LispObj
406current_thread_osid()
407{
408  return (LispObj)ptr_to_lispobj(pthread_self());
409}
410#endif
411
412
413int thread_suspend_signal = 0, thread_resume_signal = 0;
414
415
416
417void
418linux_exception_init(TCR *tcr)
419{
420}
421
422
423TCR *
424get_interrupt_tcr(Boolean create)
425{
426  return get_tcr(create);
427}
428 
429void
430suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
431{
432#ifdef DARWIN_GS_HACK
433  Boolean gs_was_tcr = ensure_gs_pthread();
434#endif
435  TCR *tcr = get_interrupt_tcr(false);
436
437  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
438    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
439  } else {
440    if (signo == thread_suspend_signal) {
441#if 0
442      sigset_t wait_for;
443#endif
444
445      tcr->suspend_context = context;
446#if 0
447      sigfillset(&wait_for);
448#endif
449      SEM_RAISE(tcr->suspend);
450#if 0
451      sigdelset(&wait_for, thread_resume_signal);
452#endif
453#if 1
454#if RESUME_VIA_RESUME_SEMAPHORE
455      SEM_WAIT_FOREVER(tcr->resume);
456#if SUSPEND_RESUME_VERBOSE
457      fprintf(stderr, "got resume in 0x%x\n",tcr);
458#endif
459      tcr->suspend_context = NULL;
460#else
461      sigsuspend(&wait_for);
462#endif
463#else
464    do {
465      sigsuspend(&wait_for);
466    } while (tcr->suspend_context);
467#endif 
468    } else {
469      tcr->suspend_context = NULL;
470#if SUSPEND_RESUME_VERBOSE
471      fprintf(stderr,"got resume in 0x%x\n",tcr);
472#endif
473    }
474#if WAIT_FOR_RESUME_ACK
475    SEM_RAISE(tcr->suspend);
476#endif
477  }
478#ifdef DARWIN_GS_HACK
479  if (gs_was_tcr) {
480    set_gs_address(tcr);
481  }
482#endif
483#ifdef DARWIN
484  DarwinSigReturn(context);
485#endif
486#ifdef FREEBSD
487  freebsd_sigreturn(context);
488#endif
489}
490
491 
492
493/*
494  'base' should be set to the bottom (origin) of the stack, e.g., the
495  end from which it grows.
496*/
497 
498#ifdef WINDOWS
499void
500os_get_stack_bounds(LispObj q,void **base, natural *size)
501{
502}
503#else
504void
505os_get_stack_bounds(LispObj q,void **base, natural *size)
506{
507  pthread_t p = (pthread_t)(q);
508#ifdef DARWIN
509  *base = pthread_get_stackaddr_np(p);
510  *size = pthread_get_stacksize_np(p);
511#endif
512#ifdef LINUX
513  pthread_attr_t attr;
514
515  pthread_getattr_np(p,&attr);
516  pthread_attr_getstack(&attr, base, size);
517  *(natural *)base += *size;
518#endif
519#ifdef FREEBSD
520  pthread_attr_t attr;
521  void * temp_base;
522  size_t temp_size;
523 
524
525  pthread_attr_init(&attr); 
526  pthread_attr_get_np(p, &attr);
527  pthread_attr_getstackaddr(&attr,&temp_base);
528  pthread_attr_getstacksize(&attr,&temp_size);
529  *base = (void *)((natural)temp_base + temp_size);
530  *size = temp_size;
531#endif
532
533}
534#endif
535
536void *
537new_semaphore(int count)
538{
539#ifdef USE_POSIX_SEMAPHORES
540  sem_t *s = malloc(sizeof(sem_t));
541  sem_init(s, 0, count);
542  return s;
543#endif
544#ifdef USE_MACH_SEMAPHORES
545  semaphore_t s = (semaphore_t)0;
546  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
547  return (void *)(natural)s;
548#endif
549}
550
551RECURSIVE_LOCK
552new_recursive_lock()
553{
554  extern int cache_block_size;
555  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
556  RECURSIVE_LOCK m = NULL;
557#ifndef USE_FUTEX
558  void *signal = new_semaphore(0);
559#endif
560
561  if (p) {
562    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
563    m->malloced_ptr = p;
564  }
565
566#ifdef USE_FUTEX
567  if (m) {
568    return m;
569  }
570#else
571  if (m && signal) {
572    m->signal = signal;
573    return m;
574  }
575  if (m) {
576    free(p);
577  }
578  if (signal) {
579    destroy_semaphore(&signal);
580  }
581#endif
582  return NULL;
583}
584
585void
586destroy_semaphore(void **s)
587{
588  if (*s) {
589#ifdef USE_POSIX_SEMAPHORES
590    sem_destroy((sem_t *)*s);
591#endif
592#ifdef USE_MACH_SEMAPHORES
593    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
594#endif
595    *s=NULL;
596  }
597}
598
599#ifdef WINDOWS
600void
601tsd_set(LispObj key, void *datum)
602{
603}
604
605void *
606tsd_get(LispObj key)
607{
608}
609#else
610void
611tsd_set(LispObj key, void *datum)
612{
613  pthread_setspecific((pthread_key_t)key, datum);
614}
615
616void *
617tsd_get(LispObj key)
618{
619  return pthread_getspecific((pthread_key_t)key);
620}
621#endif
622
623void
624dequeue_tcr(TCR *tcr)
625{
626  TCR *next, *prev;
627
628  next = tcr->next;
629  prev = tcr->prev;
630
631  prev->next = next;
632  next->prev = prev;
633  tcr->prev = tcr->next = NULL;
634#ifdef X8664
635  tcr->linear = NULL;
636#endif
637}
638 
639void
640enqueue_tcr(TCR *new)
641{
642  TCR *head, *tail;
643 
644  LOCK(lisp_global(TCR_AREA_LOCK),new);
645  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
646  tail = head->prev;
647  tail->next = new;
648  head->prev = new;
649  new->prev = tail;
650  new->next = head;
651  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
652}
653
654TCR *
655allocate_tcr()
656{
657  TCR *tcr, *chain = NULL, *next;
658#ifdef DARWIN
659  extern Boolean use_mach_exception_handling;
660  kern_return_t kret;
661  mach_port_t
662    thread_exception_port,
663    task_self = mach_task_self();
664#endif
665  for (;;) {
666    tcr = calloc(1, sizeof(TCR));
667#ifdef DARWIN
668#if WORD_SIZE == 64
669    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
670      tcr->next = chain;
671      chain = tcr;
672      continue;
673    }
674#endif
675    if (use_mach_exception_handling) {
676      thread_exception_port = (mach_port_t)((natural)tcr);
677      kret = mach_port_allocate_name(task_self,
678                                     MACH_PORT_RIGHT_RECEIVE,
679                                     thread_exception_port);
680    } else {
681      kret = KERN_SUCCESS;
682    }
683
684    if (kret != KERN_SUCCESS) {
685      tcr->next = chain;
686      chain = tcr;
687      continue;
688    }
689#endif
690    for (next = chain; next;) {
691      next = next->next;
692      free(chain);
693    }
694    return tcr;
695  }
696}
697
698#ifdef X8664
699#ifdef LINUX
700#include <asm/prctl.h>
701#include <sys/prctl.h>
702#endif
703#ifdef FREEBSD
704#include <machine/sysarch.h>
705#endif
706
707void
708setup_tcr_extra_segment(TCR *tcr)
709{
710#ifdef FREEBSD
711  amd64_set_gsbase(tcr);
712#endif
713#ifdef LINUX
714  arch_prctl(ARCH_SET_GS, (natural)tcr);
715#endif
716#ifdef DARWIN
717  /* There's no way to do this yet.  See DARWIN_GS_HACK */
718  /* darwin_set_x8664_fs_reg(tcr); */
719#endif
720}
721
722#endif
723
724
725
726/*
727  Caller must hold the area_lock.
728*/
729#ifdef WINDOWS
730TCR *
731new_tcr(natural vstack_size, natural tstack_size)
732{
733}
734#else
735TCR *
736new_tcr(natural vstack_size, natural tstack_size)
737{
738  extern area
739    *allocate_vstack_holding_area_lock(natural),
740    *allocate_tstack_holding_area_lock(natural);
741  area *a;
742  int i;
743  sigset_t sigmask;
744
745  sigemptyset(&sigmask);
746  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
747#ifdef HAVE_TLS
748  TCR *tcr = &current_tcr;
749#else
750  TCR *tcr = allocate_tcr();
751#endif
752
753#ifdef X8664
754  setup_tcr_extra_segment(tcr);
755  tcr->linear = tcr;
756#endif
757
758#if (WORD_SIZE == 64)
759  tcr->single_float_convert.tag = subtag_single_float;
760#endif
761  lisp_global(TCR_COUNT) += (1<<fixnumshift);
762  tcr->suspend = new_semaphore(0);
763  tcr->resume = new_semaphore(0);
764  tcr->reset_completion = new_semaphore(0);
765  tcr->activate = new_semaphore(0);
766  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
767  a = allocate_vstack_holding_area_lock(vstack_size);
768  tcr->vs_area = a;
769  a->owner = tcr;
770  tcr->save_vsp = (LispObj *) a->active; 
771  a = allocate_tstack_holding_area_lock(tstack_size);
772  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
773  tcr->ts_area = a;
774  a->owner = tcr;
775  tcr->save_tsp = (LispObj *) a->active;
776#ifdef X86
777  tcr->next_tsp = tcr->save_tsp;
778#endif
779
780  tcr->valence = TCR_STATE_FOREIGN;
781#ifdef PPC
782  tcr->lisp_fpscr.words.l = 0xd0;
783#endif
784#ifdef X86
785  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
786#if 1                           /* Mask underflow; too hard to
787                                   deal with denorms if underflow is
788                                   enabled */
789    (1 << MXCSR_UM_BIT) | 
790#endif
791    (1 << MXCSR_PM_BIT);
792#endif
793  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
794  tcr->tlb_limit = 2048<<fixnumshift;
795  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
796  for (i = 0; i < 2048; i++) {
797    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
798  }
799  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
800  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
801  return tcr;
802}
803#endif
804
805void
806shutdown_thread_tcr(void *arg)
807{
808  TCR *tcr = TCR_FROM_TSD(arg);
809
810  area *vs, *ts, *cs;
811  void *termination_semaphore;
812 
813  if (--(tcr->shutdown_count) == 0) {
814    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
815      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
816        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
817   
818      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
819      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
820      tsd_set(lisp_global(TCR_KEY), NULL);
821    }
822#ifdef DARWIN
823    darwin_exception_cleanup(tcr);
824#endif
825    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
826    vs = tcr->vs_area;
827    tcr->vs_area = NULL;
828    ts = tcr->ts_area;
829    tcr->ts_area = NULL;
830    cs = tcr->cs_area;
831    tcr->cs_area = NULL;
832    if (vs) {
833      condemn_area_holding_area_lock(vs);
834    }
835    if (ts) {
836      condemn_area_holding_area_lock(ts);
837    }
838    if (cs) {
839      condemn_area_holding_area_lock(cs);
840    }
841    destroy_semaphore(&tcr->suspend);
842    destroy_semaphore(&tcr->resume);
843    destroy_semaphore(&tcr->reset_completion);
844    destroy_semaphore(&tcr->activate);
845    free(tcr->tlb_pointer);
846    tcr->tlb_pointer = NULL;
847    tcr->tlb_limit = 0;
848    tcr->osid = 0;
849    tcr->interrupt_pending = 0;
850    termination_semaphore = tcr->termination_semaphore;
851    tcr->termination_semaphore = NULL;
852#ifdef HAVE_TLS
853    dequeue_tcr(tcr);
854#endif
855    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
856    if (termination_semaphore) {
857      SEM_RAISE(termination_semaphore);
858    }
859  } else {
860    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
861  }
862}
863
864void
865tcr_cleanup(void *arg)
866{
867  TCR *tcr = (TCR *)arg;
868  area *a;
869
870  a = tcr->vs_area;
871  if (a) {
872    a->active = a->high;
873  }
874  a = tcr->ts_area;
875  if (a) {
876    a->active = a->high;
877  }
878  a = tcr->cs_area;
879  if (a) {
880    a->active = a->high;
881  }
882  tcr->valence = TCR_STATE_FOREIGN;
883  tcr->shutdown_count = 1;
884  shutdown_thread_tcr(tcr);
885  tsd_set(lisp_global(TCR_KEY), NULL);
886}
887
888void *
889current_native_thread_id()
890{
891  return ((void *) (natural)
892#ifdef LINUX
893          getpid()
894#endif
895#ifdef DARWIN
896          mach_thread_self()
897#endif
898#ifdef FREEBSD
899          pthread_self()
900#endif
901#ifdef SOLARIS
902          pthread_self()
903#endif
904#ifdef WINDOWS
905          /* ThreadSelf() */ 23
906#endif
907          );
908}
909
910
911void
912thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
913{
914  area *a, *register_cstack_holding_area_lock(BytePtr, natural);
915
916  tcr->osid = current_thread_osid();
917  tcr->native_thread_id = current_native_thread_id();
918  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
919  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
920  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
921  tcr->cs_area = a;
922  a->owner = tcr;
923  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
924    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
925  }
926#ifdef LINUX
927#ifdef PPC
928#ifndef PPC64
929  tcr->native_thread_info = current_r2;
930#endif
931#endif
932#endif
933  tcr->errno_loc = &errno;
934  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
935#ifdef DARWIN
936  extern Boolean use_mach_exception_handling;
937  if (use_mach_exception_handling) {
938    darwin_exception_init(tcr);
939  }
940#endif
941#ifdef LINUX
942  linux_exception_init(tcr);
943#endif
944  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
945}
946
947/*
948  Register the specified tcr as "belonging to" the current thread.
949  Under Darwin, setup Mach exception handling for the thread.
950  Install cleanup handlers for thread termination.
951*/
952void
953register_thread_tcr(TCR *tcr)
954{
955  void *stack_base = NULL;
956  natural stack_size = 0;
957
958  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
959  thread_init_tcr(tcr, stack_base, stack_size);
960  enqueue_tcr(tcr);
961}
962
963
964 
965 
966#ifndef MAP_GROWSDOWN
967#define MAP_GROWSDOWN 0
968#endif
969
970#ifdef WINDOWS
971Ptr
972create_stack(int size)
973{
974}
975#else
976Ptr
977create_stack(natural size)
978{
979  Ptr p;
980  size=align_to_power_of_2(size, log2_page_size);
981  p = (Ptr) mmap(NULL,
982                 (size_t)size,
983                 PROT_READ | PROT_WRITE | PROT_EXEC,
984                 MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
985                 -1,    /* Darwin insists on this when not mmap()ing
986                           a real fd */
987                 0);
988  if (p != (Ptr)(-1)) {
989    *((size_t *)p) = size;
990    return p;
991  }
992  allocation_failure(true, size);
993  return NULL;
994}
995#endif
996
997void *
998allocate_stack(natural size)
999{
1000  return create_stack(size);
1001}
1002
1003#ifdef WINDOWS
1004void
1005free_stack(void *s)
1006{
1007}
1008#else
1009void
1010free_stack(void *s)
1011{
1012  size_t size = *((size_t *)s);
1013  munmap(s, size);
1014}
1015#endif
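
/*
  Illustrative sketch (not compiled; the helper name and the 128KB
  figure are arbitrary) of the stack helpers above: create_stack()
  records the page-aligned size in the first word of the mapping and
  free_stack() reads that word back to munmap() the right amount.
*/
#if 0
static void
example_stack_roundtrip()
{
  Ptr p = create_stack(128*1024);  /* rounded up to a page multiple */

  /* ... use the region ... */
  free_stack(p);                   /* size recovered from *(size_t *)p */
}
#endif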
1016
1017Boolean threads_initialized = false;
1018
1019#ifndef USE_FUTEX
1020#ifdef WINDOWS
1021void
1022count_cpus()
1023{
1024}
1025#else
1026void
1027count_cpus()
1028{
1029#ifdef DARWIN
1030  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
1031#include <mach/host_info.h>
1032
1033  struct host_basic_info info;
1034  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1035 
1036  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
1037    if (info.max_cpus > 1) {
1038      spin_lock_tries = 1024;
1039    }
1040  }
1041#else
1042  int n = sysconf(_SC_NPROCESSORS_ONLN);
1043 
1044  if (n > 1) {
1045    spin_lock_tries = 1024;
1046  }
1047#endif
1048}
1049#endif
1050#endif
1051
1052#ifdef WINDOWS
1053void
1054init_threads(void * stack_base, TCR *tcr)
1055{
1056}
1057void *
1058lisp_thread_entry(void *param)
1059{
1060}
1061#else
1062void
1063init_threads(void * stack_base, TCR *tcr)
1064{
1065  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
1066  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
1067  thread_signal_setup();
1068
1069#ifndef USE_FUTEX
1070  count_cpus();
1071#endif
1072  threads_initialized = true;
1073}
1074
1075
1076void *
1077lisp_thread_entry(void *param)
1078{
1079  thread_activation *activation = (thread_activation *)param;
1080  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
1081  sigset_t mask, old_mask;
1082
1083  sigemptyset(&mask);
1084  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
1085
1086  register_thread_tcr(tcr);
1087
1088  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
1089  tcr->vs_area->active -= node_size;
1090  *(--tcr->save_vsp) = lisp_nil;
1091  enable_fp_exceptions();
1092  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
1093  activation->tcr = tcr;
1094  SEM_RAISE(activation->created);
1095  do {
1096    SEM_RAISE(tcr->reset_completion);
1097    SEM_WAIT_FOREVER(tcr->activate);
1098    /* Now go run some lisp code */
1099    start_lisp(TCR_TO_TSD(tcr),0);
1100  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
1101  pthread_cleanup_pop(true);
1102
1103}
1104#endif
1105
1106void *
1107xNewThread(natural control_stack_size,
1108           natural value_stack_size,
1109           natural temp_stack_size)
1110
1111{
1112  thread_activation activation;
1113
1114
1115  activation.tsize = temp_stack_size;
1116  activation.vsize = value_stack_size;
1117  activation.tcr = 0;
1118  activation.created = new_semaphore(0);
1119  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
1120                           NULL, 
1121                           lisp_thread_entry,
1122                           (void *) &activation)) {
1123   
1124    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
1125  }
1126  destroy_semaphore(&activation.created); 
1127  return TCR_TO_TSD(activation.tcr);
1128}
1129
1130Boolean
1131active_tcr_p(TCR *q)
1132{
1133  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1134 
1135  do {
1136    if (p == q) {
1137      return true;
1138    }
1139    p = p->next;
1140  } while (p != head);
1141  return false;
1142}
1143
1144#ifdef WINDOWS
1145OSErr
1146xDisposeThread(TCR *tcr)
1147{
1148}
1149#else
1150OSErr
1151xDisposeThread(TCR *tcr)
1152{
1153  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
1154    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
1155      pthread_cancel((pthread_t)(tcr->osid));
1156      return 0;
1157    }
1158  }
1159  return -50;
1160}
1161#endif
1162
1163OSErr
1164xYieldToThread(TCR *target)
1165{
1166  Bug(NULL, "xYieldToThread ?");
1167  return 0;
1168}
1169 
1170OSErr
1171xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
1172{
1173  Bug(NULL, "xThreadCurrentStackSpace ?");
1174  return 0;
1175}
1176
1177
1178#ifdef WINDOWS
1179LispObj
1180create_system_thread(size_t stack_size,
1181                     void* stackaddr,
1182                     void* (*start_routine)(void *),
1183                     void* param)
1184{
1185}
1186#else
1187LispObj
1188create_system_thread(size_t stack_size,
1189                     void* stackaddr,
1190                     void* (*start_routine)(void *),
1191                     void* param)
1192{
1193  pthread_attr_t attr;
1194  pthread_t returned_thread = (pthread_t) 0;
1195
1196  pthread_attr_init(&attr);
1197  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
1198
1199  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
1200    stack_size = PTHREAD_STACK_MIN;
1201  }
1202
1203  stack_size = ensure_stack_limit(stack_size);
1204  if (stackaddr != NULL) {
1205    /* Size must have been specified.  Sort of makes sense ... */
1206#ifdef DARWIN
1207    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
1208#else
1209    pthread_attr_setstack(&attr, stackaddr, stack_size);
1210#endif
1211  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
1212    pthread_attr_setstacksize(&attr,stack_size);
1213  }
1214
1215  /*
1216     I think that's just about enough ... create the thread.
1217  */
1218  pthread_create(&returned_thread, &attr, start_routine, param);
1219  return (LispObj) ptr_to_lispobj(returned_thread);
1220}
1221#endif
1222
1223TCR *
1224get_tcr(Boolean create)
1225{
1226#ifdef HAVE_TLS
1227  TCR *current = current_tcr.linear;
1228#else
1229  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
1230  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
1231#endif
1232
1233  if ((current == NULL) && create) {
1234    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
1235      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1236    int i, nbindwords = 0;
1237    extern unsigned initial_stack_size;
1238   
1239    /* Make one. */
1240    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
1241    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
1242    register_thread_tcr(current);
1243#ifdef DEBUG_TCR_CREATION
1244#ifndef WINDOWS
1245    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
1246#endif
1247#endif
1248    current->vs_area->active -= node_size;
1249    *(--current->save_vsp) = lisp_nil;
1250#ifdef PPC
1251#define NSAVEREGS 8
1252#endif
1253#ifdef X8664
1254#define NSAVEREGS 4
1255#endif
1256    for (i = 0; i < NSAVEREGS; i++) {
1257      *(--current->save_vsp) = 0;
1258      current->vs_area->active -= node_size;
1259    }
1260    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
1261    for (i = 0; i < nbindwords; i++) {
1262      *(--current->save_vsp) = 0;
1263      current->vs_area->active -= node_size;
1264    }
1265    current->shutdown_count = 1;
1266    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
1267
1268  }
1269 
1270  return current;
1271}
1272
1273#ifdef WINDOWS
1274Boolean
1275suspend_tcr(TCR *tcr)
1276{
1277}
1278#else
1279Boolean
1280suspend_tcr(TCR *tcr)
1281{
1282  int suspend_count = atomic_incf(&(tcr->suspend_count));
1283  pthread_t thread;
1284  if (suspend_count == 1) {
1285#if SUSPEND_RESUME_VERBOSE
1286    fprintf(stderr,"Suspending 0x%x\n", tcr);
1287#endif
1288#ifdef DARWIN_nope
1289    if (mach_suspend_tcr(tcr)) {
1290      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
1291      return true;
1292    }
1293#endif
1294    thread = (pthread_t)(tcr->osid);
1295    if ((thread != (pthread_t) 0) &&
1296        (pthread_kill(thread, thread_suspend_signal) == 0)) {
1297      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1298    } else {
1299      /* A problem using pthread_kill.  On Darwin, this can happen
1300         if the thread has had its signal mask surgically removed
1301         by pthread_exit.  If the native (Mach) thread can be suspended,
1302         do that and return true; otherwise, flag the tcr as belonging
1303         to a dead thread by setting tcr->osid to 0.
1304      */
1305      tcr->osid = 0;
1306      return false;
1307    }
1308    return true;
1309  }
1310  return false;
1311}
1312#endif
1313
1314Boolean
1315tcr_suspend_ack(TCR *tcr)
1316{
1317  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
1318    SEM_WAIT_FOREVER(tcr->suspend);
1319    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1320#if SUSPEND_RESUME_VERBOSE
1321    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
1322#endif
1323
1324  }
1325  return true;
1326}
1327
1328     
1329
1330
1331Boolean
1332lisp_suspend_tcr(TCR *tcr)
1333{
1334  Boolean suspended;
1335  TCR *current = get_tcr(true);
1336 
1337  LOCK(lisp_global(TCR_AREA_LOCK),current);
1338#ifdef DARWIN
1339#if USE_MACH_EXCEPTION_LOCK
1340  if (use_mach_exception_handling) {
1341    pthread_mutex_lock(mach_exception_lock);
1342  }
1343#endif
1344#endif
1345  suspended = suspend_tcr(tcr);
1346  if (suspended) {
1347    while (!tcr_suspend_ack(tcr));
1348  }
1349#ifdef DARWIN
1350#if USE_MACH_EXCEPTION_LOCK
1351  if (use_mach_exception_handling) {
1352    pthread_mutex_unlock(mach_exception_lock);
1353  }
1354#endif
1355#endif
1356  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1357  return suspended;
1358}
1359         
1360
1361Boolean
1362resume_tcr(TCR *tcr)
1363{
1364  int suspend_count = atomic_decf(&(tcr->suspend_count));
1365  if (suspend_count == 0) {
1366#ifdef DARWIN
1367    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1368#if SUSPEND_RESUME_VERBOSE
1369    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
1370#endif
1371      mach_resume_tcr(tcr);
1372      return true;
1373    }
1374#endif
1375#if RESUME_VIA_RESUME_SEMAPHORE
1376    SEM_RAISE(tcr->resume);
1377#else
1378    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
1379      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
1380    }
1381#endif
1382#if SUSPEND_RESUME_VERBOSE
1383    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
1384#endif
1385    return true;
1386  }
1387  return false;
1388}
1389
1390void
1391wait_for_resumption(TCR *tcr)
1392{
1393  if (tcr->suspend_count == 0) {
1394#ifdef DARWIN
1395    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1396      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
1397      return;
1398  }
1399#endif
1400#if WAIT_FOR_RESUME_ACK
1401#if SUSPEND_RESUME_VERBOSE
1402    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
1403#endif
1404    SEM_WAIT_FOREVER(tcr->suspend);
1405#endif
1406  }
1407}
1408   
1409
1410
1411Boolean
1412lisp_resume_tcr(TCR *tcr)
1413{
1414  Boolean resumed;
1415  TCR *current = get_tcr(true);
1416 
1417  LOCK(lisp_global(TCR_AREA_LOCK),current);
1418  resumed = resume_tcr(tcr);
1419  wait_for_resumption(tcr);
1420  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1421  return resumed;
1422}
1423
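/*
  Illustrative sketch (not compiled; the helper name is hypothetical)
  of the suspend/resume pairing implemented above: lisp_suspend_tcr()
  returns true only if it actually suspended the target and has seen
  its ack, and a successful suspension should eventually be matched
  by lisp_resume_tcr().
*/
#if 0
static void
example_poke_suspended_thread(TCR *other)
{
  if (lisp_suspend_tcr(other)) {
    /* ... examine or adjust the stopped thread's state via its TCR ... */
    lisp_resume_tcr(other);
  }
}
#endif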
1424
1425TCR *freed_tcrs = NULL;
1426
1427void
1428enqueue_freed_tcr (TCR *tcr)
1429{
1430#ifndef HAVE_TLS
1431  tcr->next = freed_tcrs;
1432  freed_tcrs = tcr;
1433#endif
1434}
1435
1436/* It's not clear that we can safely condemn a dead tcr's areas, since
1437   we may not be able to call free() if a suspended thread owns a
1438   malloc lock. At least make the areas appear to be empty.
1439*/
1440   
1441
1442void
1443normalize_dead_tcr_areas(TCR *tcr)
1444{
1445  area *a;
1446
1447  a = tcr->vs_area;
1448  if (a) {
1449    a->active = a->high;
1450  }
1451
1452  a = tcr->ts_area;
1453  if (a) {
1454    a->active = a->high;
1455  }
1456
1457  a = tcr->cs_area;
1458  if (a) {
1459    a->active = a->high;
1460  }
1461}
1462   
1463void
1464free_freed_tcrs ()
1465{
1466  TCR *current, *next;
1467
1468  for (current = freed_tcrs; current; current = next) {
1469    next = current->next;
1470#ifndef HAVE_TLS
1471    free(current);
1472#endif
1473  }
1474  freed_tcrs = NULL;
1475}
1476
1477void
1478suspend_other_threads(Boolean for_gc)
1479{
1480  TCR *current = get_tcr(true), *other, *next;
1481  int dead_tcr_count = 0;
1482  Boolean all_acked;
1483
1484  LOCK(lisp_global(TCR_AREA_LOCK), current);
1485#ifdef DARWIN
1486#if USE_MACH_EXCEPTION_LOCK
1487  if (for_gc && use_mach_exception_handling) {
1488#if SUSPEND_RESUME_VERBOSE
1489    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
1490#endif
1491    pthread_mutex_lock(mach_exception_lock);
1492  }
1493#endif
1494#endif
1495  for (other = current->next; other != current; other = other->next) {
1496    if ((other->osid != 0)) {
1497      suspend_tcr(other);
1498      if (other->osid == 0) {
1499        dead_tcr_count++;
1500      }
1501    } else {
1502      dead_tcr_count++;
1503    }
1504  }
1505
1506  do {
1507    all_acked = true;
1508    for (other = current->next; other != current; other = other->next) {
1509      if ((other->osid != 0)) {
1510        if (!tcr_suspend_ack(other)) {
1511          all_acked = false;
1512        }
1513      }
1514    }
1515  } while(! all_acked);
1516
1517     
1518
1519  /* All other threads are suspended; can safely delete dead tcrs now */
1520  if (dead_tcr_count) {
1521    for (other = current->next; other != current; other = next) {
1522      next = other->next;
1523      if ((other->osid == 0))  {
1524        normalize_dead_tcr_areas(other);
1525        dequeue_tcr(other);
1526        enqueue_freed_tcr(other);
1527      }
1528    }
1529  }
1530}
1531
1532void
1533lisp_suspend_other_threads()
1534{
1535  TCR *current = get_tcr(true);
1536  LOCK(lisp_global(TCR_AREA_LOCK),current);
1537  suspend_other_threads(false);
1538}
1539
1540void
1541resume_other_threads(Boolean for_gc)
1542{
1543  TCR *current = get_tcr(true), *other;
1544  for (other = current->next; other != current; other = other->next) {
1545    if ((other->osid != 0)) {
1546      resume_tcr(other);
1547    }
1548  }
1549  for (other = current->next; other != current; other = other->next) {
1550    if ((other->osid != 0)) {
1551      wait_for_resumption(other);
1552    }
1553  }
1554  free_freed_tcrs();
1555#ifdef DARWIN
1556#if USE_MACH_EXCEPTION_LOCK
1557  if (for_gc && use_mach_exception_handling) {
1558#if SUSPEND_RESUME_VERBOSE
1559    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
1560#endif
1561    pthread_mutex_unlock(mach_exception_lock);
1562  }
1563#endif
1564#endif
1565
1566  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1567}
1568
1569void
1570lisp_resume_other_threads()
1571{
1572  TCR *current = get_tcr(true);
1573  resume_other_threads(false);
1574  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1575}
1576
1577
1578
1579rwlock *
1580rwlock_new()
1581{
1582  extern int cache_block_size;
1583
1584  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1585  rwlock *rw = NULL;
1586 
1587  if (p) {
1588    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1589    rw->malloced_ptr = p;
1590#ifndef USE_FUTEX
1591    rw->reader_signal = new_semaphore(0);
1592    rw->writer_signal = new_semaphore(0);
1593    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1594      if (rw->reader_signal) {
1595        destroy_semaphore(&(rw->reader_signal));
1596      } else {
1597        destroy_semaphore(&(rw->writer_signal));
1598      }
1599      free(rw);
1600      rw = NULL;
1601    }
1602#endif
1603  }
1604  return rw;
1605}
1606
1607     
1608/*
1609  Try to get read access to a multiple-readers/single-writer lock.  If
1610  we already have read access, return success (indicating that the
1611  lock is held another time).  If we already have write access to the
1612  lock ... that won't work; return EDEADLK.  Wait until no other
1613  thread has or is waiting for write access, then indicate that we
1614  hold read access once.
1615*/
1616#ifndef USE_FUTEX
1617int
1618rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1619{
1620  int err = 0;
1621 
1622  LOCK_SPINLOCK(rw->spin, tcr);
1623
1624  if (rw->writer == tcr) {
1625    RELEASE_SPINLOCK(rw->spin);
1626    return EDEADLK;
1627  }
1628
1629  while (rw->blocked_writers || (rw->state > 0)) {
1630    rw->blocked_readers++;
1631    RELEASE_SPINLOCK(rw->spin);
1632    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
1633    LOCK_SPINLOCK(rw->spin,tcr);
1634    rw->blocked_readers--;
1635    if (err == EINTR) {
1636      err = 0;
1637    }
1638    if (err) {
1639      RELEASE_SPINLOCK(rw->spin);
1640      return err;
1641    }
1642  }
1643  rw->state--;
1644  RELEASE_SPINLOCK(rw->spin);
1645  return err;
1646}
1647#else
1648int
1649rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1650{
1651  natural waitval;
1652
1653  lock_futex(&rw->spin);
1654
1655  if (rw->writer == tcr) {
1656    unlock_futex(&rw->spin);
1657    return EDEADLOCK;
1658  }
1659  while (1) {
1660    if (rw->writer == NULL) {
1661      --rw->state;
1662      unlock_futex(&rw->spin);
1663      return 0;
1664    }
1665    rw->blocked_readers++;
1666    waitval = rw->reader_signal;
1667    unlock_futex(&rw->spin);
1668    futex_wait(&rw->reader_signal,waitval);
1669    lock_futex(&rw->spin);
1670    rw->blocked_readers--;
1671  }
1672  return 0;
1673}
1674#endif   
1675
1676
1677/*
1678  Try to obtain write access to the lock.
1679  It is an error if we already have read access, but it's hard to
1680  detect that.
1681  If we already have write access, increment the count that indicates
1682  that.
1683  Otherwise, wait until the lock is not held for reading or writing,
1684  then assert write access.
1685*/
1686
1687#ifndef USE_FUTEX
1688int
1689rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1690{
1691  int err = 0;
1692
1693  LOCK_SPINLOCK(rw->spin,tcr);
1694  if (rw->writer == tcr) {
1695    rw->state++;
1696    RELEASE_SPINLOCK(rw->spin);
1697    return 0;
1698  }
1699
1700  while (rw->state != 0) {
1701    rw->blocked_writers++;
1702    RELEASE_SPINLOCK(rw->spin);
1703    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
1704    LOCK_SPINLOCK(rw->spin,tcr);
1705    rw->blocked_writers--;
1706    if (err == EINTR) {
1707      err = 0;
1708    }
1709    if (err) {
1710      RELEASE_SPINLOCK(rw->spin);
1711      return err;
1712    }
1713  }
1714  rw->state = 1;
1715  rw->writer = tcr;
1716  RELEASE_SPINLOCK(rw->spin);
1717  return err;
1718}
1719
1720#else
1721int
1722rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1723{
1724  int err = 0;
1725  natural waitval;
1726
1727  lock_futex(&rw->spin);
1728  if (rw->writer == tcr) {
1729    rw->state++;
1730    unlock_futex(&rw->spin);
1731    return 0;
1732  }
1733
1734  while (rw->state != 0) {
1735    rw->blocked_writers++;
1736    waitval = rw->writer_signal;
1737    unlock_futex(&rw->spin);
1738    futex_wait(&rw->writer_signal,waitval);
1739    lock_futex(&rw->spin);
1740    rw->blocked_writers--;
1741  }
1742  rw->state = 1;
1743  rw->writer = tcr;
1744  unlock_futex(&rw->spin);
1745  return err;
1746}
1747#endif
1748
1749/*
1750  Sort of the same as above, only return EBUSY if we'd have to wait.
1751*/
1752#ifndef USE_FUTEX
1753int
1754rwlock_try_wlock(rwlock *rw, TCR *tcr)
1755{
1756  int ret = EBUSY;
1757
1758  LOCK_SPINLOCK(rw->spin,tcr);
1759  if (rw->writer == tcr) {
1760    rw->state++;
1761    ret = 0;
1762  } else {
1763    if (rw->state == 0) {
1764      rw->writer = tcr;
1765      rw->state = 1;
1766      ret = 0;
1767    }
1768  }
1769  RELEASE_SPINLOCK(rw->spin);
1770  return ret;
1771}
1772#else
1773int
1774rwlock_try_wlock(rwlock *rw, TCR *tcr)
1775{
1776  int ret = EBUSY;
1777
1778  lock_futex(&rw->spin);
1779  if (rw->writer == tcr) {
1780    rw->state++;
1781    ret = 0;
1782  } else {
1783    if (rw->state == 0) {
1784      rw->writer = tcr;
1785      rw->state = 1;
1786      ret = 0;
1787    }
1788  }
1789  unlock_futex(&rw->spin);
1790  return ret;
1791}
1792#endif
1793
1794#ifndef USE_FUTEX
1795int
1796rwlock_try_rlock(rwlock *rw, TCR *tcr)
1797{
1798  int ret = EBUSY;
1799
1800  LOCK_SPINLOCK(rw->spin,tcr);
1801  if (rw->state <= 0) {
1802    --rw->state;
1803    ret = 0;
1804  }
1805  RELEASE_SPINLOCK(rw->spin);
1806  return ret;
1807}
1808#else
1809int
1810rwlock_try_rlock(rwlock *rw, TCR *tcr)
1811{
1812  int ret = EBUSY;
1813
1814  lock_futex(&rw->spin);
1815  if (rw->state <= 0) {
1816    --rw->state;
1817    ret = 0;
1818  }
1819  unlock_futex(&rw->spin);
1820  return ret;
1821}
1822#endif
1823
1824
1825
1826#ifndef USE_FUTEX
1827int
1828rwlock_unlock(rwlock *rw, TCR *tcr)
1829{
1830
1831  int err = 0;
1832  natural blocked_readers = 0;
1833
1834  LOCK_SPINLOCK(rw->spin,tcr);
1835  if (rw->state > 0) {
1836    if (rw->writer != tcr) {
1837      err = EINVAL;
1838    } else {
1839      --rw->state;
1840      if (rw->state == 0) {
1841        rw->writer = NULL;
1842      }
1843    }
1844  } else {
1845    if (rw->state < 0) {
1846      ++rw->state;
1847    } else {
1848      err = EINVAL;
1849    }
1850  }
1851  if (err) {
1852    RELEASE_SPINLOCK(rw->spin);
1853    return err;
1854  }
1855 
1856  if (rw->state == 0) {
1857    if (rw->blocked_writers) {
1858      SEM_RAISE(rw->writer_signal);
1859    } else {
1860      blocked_readers = rw->blocked_readers;
1861      if (blocked_readers) {
1862        SEM_BROADCAST(rw->reader_signal, blocked_readers);
1863      }
1864    }
1865  }
1866  RELEASE_SPINLOCK(rw->spin);
1867  return 0;
1868}
1869#else
1870int
1871rwlock_unlock(rwlock *rw, TCR *tcr)
1872{
1873
1874  int err = 0;
1875
1876  lock_futex(&rw->spin);
1877  if (rw->state > 0) {
1878    if (rw->writer != tcr) {
1879      err = EINVAL;
1880    } else {
1881      --rw->state;
1882      if (rw->state == 0) {
1883        rw->writer = NULL;
1884      }
1885    }
1886  } else {
1887    if (rw->state < 0) {
1888      ++rw->state;
1889    } else {
1890      err = EINVAL;
1891    }
1892  }
1893  if (err) {
1894    unlock_futex(&rw->spin);
1895    return err;
1896  }
1897 
1898  if (rw->state == 0) {
1899    if (rw->blocked_writers) {
1900      ++rw->writer_signal;
1901      unlock_futex(&rw->spin);
1902      futex_wake(&rw->writer_signal,1);
1903      return 0;
1904    }
1905    if (rw->blocked_readers) {
1906      ++rw->reader_signal;
1907      unlock_futex(&rw->spin);
1908      futex_wake(&rw->reader_signal, INT_MAX);
1909      return 0;
1910    }
1911  }
1912  unlock_futex(&rw->spin);
1913  return 0;
1914}
1915#endif
1916
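/*
  Illustrative sketch (not compiled; the helper names are
  hypothetical) of the reader/writer lock protocol above: rw->state
  goes negative while readers hold the lock and positive while the
  single writer holds it, and rwlock_unlock() works for either kind
  of holder.
*/
#if 0
static int
example_read_shared_state(rwlock *rw, TCR *tcr)
{
  int err = rwlock_rlock(rw, tcr, NULL);  /* NULL timespec: wait forever */

  if (err == 0) {
    /* ... read the data the rwlock protects ... */
    err = rwlock_unlock(rw, tcr);
  }
  return err;
}

static int
example_write_shared_state(rwlock *rw, TCR *tcr)
{
  int err = rwlock_wlock(rw, tcr, NULL);

  if (err == 0) {
    /* ... modify the data the rwlock protects ... */
    err = rwlock_unlock(rw, tcr);
  }
  return err;
}
#endif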
1917       
1918void
1919rwlock_destroy(rwlock *rw)
1920{
1921#ifndef USE_FUTEX
1922  destroy_semaphore((void **)&rw->reader_signal);
1923  destroy_semaphore((void **)&rw->writer_signal);
1924#endif
1925  postGCfree((void *)(rw->malloced_ptr));
1926}
1927
1928
1929