source: branches/working-0711/ccl/lisp-kernel/thread_manager.c @ 9983

Last change on this file since 9983 was 9983, checked in by gb, 11 years ago

Allow natural-sized stacks.
Try to check for pthread = 0 in pthread_kill calls.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 37.4 KB
Line 
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
/*
   If we suspend via signals - and if the "suspend" signal is masked
   in the handler for that signal - then it's not possible to suspend
   a thread that's still waiting to be resumed (which is what
   WAIT_FOR_RESUME_ACK is all about.)
*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
/* Arguments passed from xNewThread to lisp_thread_entry. */
typedef struct {
  TCR *tcr;                     /* filled in by the new thread */
  natural vsize, tsize;         /* value-stack and temp-stack sizes */
  void *created;                /* semaphore raised once the thread is up */
} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46#ifdef USE_FUTEX
47#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
48#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
49#define FUTEX_AVAIL (0)
50#define FUTEX_LOCKED (1)
51#define FUTEX_CONTENDED (2)
52#endif
53
54#ifdef WINDOWS
55int
56raise_thread_interrupt(TCR *target)
57{
58}
59#else
60int
61raise_thread_interrupt(TCR *target)
62{
63  pthread_t thread = (pthread_t)target->osid;
64#ifdef DARWIN_not_yet
65  if (use_mach_exception_handling) {
66    return mach_raise_thread_interrupt(target);
67  }
68#endif
69  if (thread != (pthread_t) 0) {
70    return pthread_kill(thread, SIGNAL_FOR_PROCESS_INTERRUPT);
71  }
72  return ESRCH;
73}
74#endif
75
76signed_natural
77atomic_incf_by(signed_natural *ptr, signed_natural by)
78{
79  signed_natural old, new;
80  do {
81    old = *ptr;
82    new = old+by;
83  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
84           (natural) old);
85  return new;
86}
87
88signed_natural
89atomic_incf(signed_natural *ptr)
90{
91  return atomic_incf_by(ptr, 1);
92}
93
/* Atomically decrement *ptr, saturating at zero (a zero value is stored
   back unchanged).  Returns old-1, i.e. returns -1 when *ptr was
   already zero even though the stored value remains 0. */
signed_natural
atomic_decf(signed_natural *ptr)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old == 0 ? old : old-1;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return old-1;
}
105
106
#ifndef USE_FUTEX
/* Attempts per sched_yield() in get_spin_lock; raised to 1024 by
   count_cpus() on multiprocessor systems. */
int spin_lock_tries = 1;

/* Spin until the lock word at p can be claimed by swapping this
   thread's TCR into it (0 means free), yielding the CPU between
   bursts of attempts. */
void
get_spin_lock(signed_natural *p, TCR *tcr)
{
  int i, n = spin_lock_tries;
 
  while (1) {
    for (i = 0; i < n; i++) {
      if (atomic_swap(p,(signed_natural)tcr) == 0) {
        return;
      }
    }
#ifndef WINDOWS
    sched_yield();
#endif
  }
}
#endif
127
#ifndef USE_FUTEX
/* Acquire recursive lock m on behalf of tcr (looked up if NULL).
   Re-entry by the current owner just bumps the count.  Otherwise
   contend: under the spinlock, bump m->avail; the thread that takes
   it from 0 to 1 becomes owner, everyone else waits on m->signal and
   retries.  Always returns 0. */
int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;
    return 0;
  }
  while (1) {
    LOCK_SPINLOCK(m->spinlock,tcr);
    ++m->avail;
    if (m->avail == 1) {
      m->owner = tcr;
      m->count = 1;
      RELEASE_SPINLOCK(m->spinlock);
      break;
    }
    RELEASE_SPINLOCK(m->spinlock);
    SEM_WAIT_FOREVER(m->signal);
  }
  return 0;
}
154
#else /* USE_FUTEX */

/* Acquire the futex-based lock at p.  Fast path: CAS AVAIL -> LOCKED.
   On contention, swap in CONTENDED (so the unlocker knows to wake
   someone) and sleep in futex_wait until the word changes; a slow-path
   acquisition leaves the word CONTENDED, which unlock_futex handles. */
static void inline
lock_futex(signed_natural *p)
{
 
  while (1) {
    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
      return;
    }
    while (1) {
      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
        return;
      }
      futex_wait(p,FUTEX_CONTENDED);
    }
  }
}
173
/* Release the futex-based lock at p.  atomic_decf takes LOCKED (1) to
   AVAIL (0); any other result means the lock was CONTENDED, so force it
   to AVAIL and wake all waiters. */
static void inline
unlock_futex(signed_natural *p)
{
  if (atomic_decf(p) != FUTEX_AVAIL) {
    *p = FUTEX_AVAIL;
    futex_wake(p,INT_MAX);
  }
}
182   
/* Futex version: acquire recursive lock m for tcr (looked up if NULL);
   reentrant acquisition just bumps the count.  Always returns 0. */
int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;
    return 0;
  }
  lock_futex(&m->avail);
  m->owner = tcr;
  m->count = 1;
  return 0;
}
#endif /* USE_FUTEX */
199
200
#ifndef USE_FUTEX 
/* Release one level of recursive lock m.  Returns 0 on success, EPERM
   if the caller doesn't own the lock.  When the count reaches zero,
   ownership is cleared and the signal semaphore is raised if any
   waiters remain.
   NOTE(review): "pending" is reduced both by the "-1" ("Don't count
   us") and again by "--pending" below -- confirm the double adjustment
   is intended. */
int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM, pending;

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      LOCK_SPINLOCK(m->spinlock,tcr);
      m->owner = NULL;
      pending = m->avail-1 + m->waiting;     /* Don't count us */
      m->avail = 0;
      --pending;
      if (pending > 0) {
        m->waiting = pending;
      } else {
        m->waiting = 0;
      }
      RELEASE_SPINLOCK(m->spinlock);
      if (pending >= 0) {
        SEM_RAISE(m->signal);
      }
    }
    ret = 0;
  }
  return ret;
}
233#else /* USE_FUTEX */
234int
235unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
236{
237  int ret = EPERM;
238
239   if (tcr == NULL) {
240    tcr = get_tcr(true);
241  }
242
243  if (m->owner == tcr) {
244    --m->count;
245    if (m->count == 0) {
246      m->owner = NULL;
247      unlock_futex(&m->avail);
248    }
249    ret = 0;
250  }
251  return ret;
252}
253#endif /* USE_FUTEX */
254
/* Tear down a recursive lock: destroy its wakeup semaphore (non-futex
   builds) and release the cache-aligned malloced region recorded in
   m->malloced_ptr via postGCfree. */
void
destroy_recursive_lock(RECURSIVE_LOCK m)
{
#ifndef USE_FUTEX
  destroy_semaphore((void **)&m->signal);
#endif
  postGCfree((void *)(m->malloced_ptr));
}
263
264/*
265  If we're already the owner (or if the lock is free), lock it
266  and increment the lock count; otherwise, return EBUSY without
267  waiting.
268*/
269
270#ifndef USE_FUTEX
271int
272recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
273{
274  TCR *owner = m->owner;
275
276  LOCK_SPINLOCK(m->spinlock,tcr);
277  if (owner == tcr) {
278    m->count++;
279    if (was_free) {
280      *was_free = 0;
281      RELEASE_SPINLOCK(m->spinlock);
282      return 0;
283    }
284  }
285  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
286    m->owner = tcr;
287    m->count = 1;
288    if (was_free) {
289      *was_free = 1;
290    }
291    RELEASE_SPINLOCK(m->spinlock);
292    return 0;
293  }
294
295  RELEASE_SPINLOCK(m->spinlock);
296  return EBUSY;
297}
298#else
299int
300recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
301{
302  TCR *owner = m->owner;
303
304  if (owner == tcr) {
305    m->count++;
306    if (was_free) {
307      *was_free = 0;
308      return 0;
309    }
310  }
311  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
312    m->owner = tcr;
313    m->count = 1;
314    if (was_free) {
315      *was_free = 1;
316    }
317    return 0;
318  }
319
320  return EBUSY;
321}
322#endif
323
324void
325sem_wait_forever(SEMAPHORE s)
326{
327  int status;
328
329  do {
330#ifdef USE_MACH_SEMAPHORES
331    mach_timespec_t q = {1,0};
332    status = SEM_TIMEDWAIT(s,q);
333#endif
334#ifdef USE_POSIX_SEMAPHORES
335    struct timespec q;
336    gettimeofday((struct timeval *)&q, NULL);
337    q.tv_sec += 1;
338    status = SEM_TIMEDWAIT(s,&q);
339#endif
340  } while (status != 0);
341}
342
/* Wait on semaphore s for at most the given number of seconds plus
   millis milliseconds.  Returns 0 on success, or an errno-style code
   (ETIMEDOUT, EINTR, ...) on failure. */
int
wait_on_semaphore(void *s, int seconds, int millis)
{
  int nanos = (millis % 1000) * 1000000;
#ifdef USE_POSIX_SEMAPHORES
  int status;

  /* sem_timedwait takes an absolute deadline: current time plus the
     requested interval, normalized so tv_nsec stays below 1e9. */
  struct timespec q;
  gettimeofday((struct timeval *)&q, NULL);
  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
   
  q.tv_nsec += nanos;
  if (q.tv_nsec >= 1000000000L) {
    q.tv_nsec -= 1000000000L;
    seconds += 1;
  }
  q.tv_sec += seconds;
  status = SEM_TIMEDWAIT(s, &q);
  if (status < 0) {
    return errno;
  }
  return status;
#endif
#ifdef USE_MACH_SEMAPHORES
  /* Mach timed waits take a relative timeout; translate the kernel
     status codes to errno-style values. */
  mach_timespec_t q = {seconds, nanos};
  int status = SEM_TIMEDWAIT(s, q);

 
  switch (status) {
  case 0: return 0;
  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
  case KERN_ABORTED: return EINTR;
  default: return EINVAL;
  }

#endif
}
380
381
/* Wait on s with an optional relative timeout.  A NULL t means wait
   indefinitely (always returns 0 in that case); otherwise the timespec
   is translated to seconds/milliseconds for wait_on_semaphore. */
int
semaphore_maybe_timedwait(void *s, struct timespec *t)
{
  if (t == NULL) {
    SEM_WAIT_FOREVER(s);
    return 0;
  }
  return wait_on_semaphore(s, t->tv_sec, t->tv_nsec / 1000000L);
}
391
/* Post (raise) the semaphore s. */
void
signal_semaphore(SEMAPHORE s)
{
  SEM_RAISE(s);
}
397
398 
399#ifdef WINDOWS
400LispObj
401current_thread_osid()
402{
403}
404#else
405LispObj
406current_thread_osid()
407{
408  return (LispObj)ptr_to_lispobj(pthread_self());
409}
410#endif
411
412
413int thread_suspend_signal = 0, thread_resume_signal = 0;
414
415
416
/* Per-thread exception-handling setup on Linux: currently nothing to
   do (placeholder kept so thread_init_tcr has a uniform call site). */
void
linux_exception_init(TCR *tcr)
{
}
421
422
/* TCR lookup used from signal handlers; simply defers to get_tcr. */
TCR *
get_interrupt_tcr(Boolean create)
{
  return get_tcr(create);
}
428 
429  void
430suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
431{
432#ifdef DARWIN_GS_HACK
433  Boolean gs_was_tcr = ensure_gs_pthread();
434#endif
435  TCR *tcr = get_interrupt_tcr(false);
436
437  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
438    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
439  } else {
440    if (signo == thread_suspend_signal) {
441#if 0
442      sigset_t wait_for;
443#endif
444
445      tcr->suspend_context = context;
446#if 0
447      sigfillset(&wait_for);
448#endif
449      SEM_RAISE(tcr->suspend);
450#if 0
451      sigdelset(&wait_for, thread_resume_signal);
452#endif
453#if 1
454#if RESUME_VIA_RESUME_SEMAPHORE
455      SEM_WAIT_FOREVER(tcr->resume);
456#if SUSPEND_RESUME_VERBOSE
457      fprintf(stderr, "got  resume in 0x%x\n",tcr);
458#endif
459      tcr->suspend_context = NULL;
460#else
461      sigsuspend(&wait_for);
462#endif
463#else
464    do {
465      sigsuspend(&wait_for);
466    } while (tcr->suspend_context);
467#endif 
468    } else {
469      tcr->suspend_context = NULL;
470#if SUSEPEND_RESUME_VERBOSE
471      fprintf(stderr,"got  resume in in 0x%x\n",tcr);
472#endif
473    }
474#if WAIT_FOR_RESUME_ACK
475    SEM_RAISE(tcr->suspend);
476#endif
477  }
478#ifdef DARWIN_GS_HACK
479  if (gs_was_tcr) {
480    set_gs_address(tcr);
481  }
482#endif
483#ifdef DARWIN
484  DarwinSigReturn(context);
485#endif
486#ifdef FREEBSD
487  freebsd_sigreturn(context);
488#endif
489}
490
491 
492
493/*
494  'base' should be set to the bottom (origin) of the stack, e.g., the
495  end from which it grows.
496*/
497 
#ifdef WINDOWS
void
os_get_stack_bounds(LispObj q,void **base, natural *size)
{
  /* NOTE(review): unimplemented Windows stub. */
}
#else
/* Report the stack bounds of the thread whose pthread id is q: *base
   is set to the bottom (origin) of the stack -- its high end, since
   these stacks grow downward -- and *size to its extent. */
void
os_get_stack_bounds(LispObj q,void **base, natural *size)
{
  pthread_t p = (pthread_t)(q);
#ifdef DARWIN
  *base = pthread_get_stackaddr_np(p);
  *size = pthread_get_stacksize_np(p);
#endif
#ifdef LINUX
  pthread_attr_t attr;

  pthread_getattr_np(p,&attr);
  pthread_attr_getstack(&attr, base, size);
  /* pthread_attr_getstack yields the lowest address; convert to the
     stack's origin by adding the size. */
  *(natural *)base += *size;
#endif
#ifdef FREEBSD
  pthread_attr_t attr;
  void * temp_base;
  size_t temp_size;
 

  pthread_attr_init(&attr); 
  pthread_attr_get_np(p, &attr);
  pthread_attr_getstackaddr(&attr,&temp_base);
  pthread_attr_getstacksize(&attr,&temp_size);
  *base = (void *)((natural)temp_base + temp_size);
  *size = temp_size;
#endif

}
#endif
535
/* Create a counting semaphore with the given initial count.  Returns
   an opaque handle, or NULL if allocation fails (POSIX build). */
void *
new_semaphore(int count)
{
#ifdef USE_POSIX_SEMAPHORES
  sem_t *s = malloc(sizeof(sem_t));
  if (s == NULL) {
    /* Bug fix: malloc was unchecked; sem_init on NULL would crash. */
    return NULL;
  }
  sem_init(s, 0, count);
  return s;
#endif
#ifdef USE_MACH_SEMAPHORES
  semaphore_t s = (semaphore_t)0;
  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
  return (void *)(natural)s;
#endif
}
550
/* Allocate and initialize a recursive lock, aligned to a cache line
   (to avoid false sharing).  Returns NULL if any allocation fails;
   partially-created resources are released first. */
RECURSIVE_LOCK
new_recursive_lock()
{
  extern int cache_block_size;
  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
  RECURSIVE_LOCK m = NULL;
#ifndef USE_FUTEX
  void *signal = new_semaphore(0);
#endif

  if (p) {
    /* Round up into the malloced block to a cache-line boundary and
       remember the raw pointer for destroy_recursive_lock. */
    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
    m->malloced_ptr = p;
  }

#ifdef USE_FUTEX
  if (m) {
    return m;
  }
#else
  if (m && signal) {
    m->signal = signal;
    return m;
  }
  if (m) {
    free(p);
  }
  if (signal) {
    destroy_semaphore(&signal);
  }
#endif
  return NULL;
}
584
585void
586destroy_semaphore(void **s)
587{
588  if (*s) {
589#ifdef USE_POSIX_SEMAPHORES
590    sem_destroy((sem_t *)*s);
591#endif
592#ifdef USE_MACH_SEMAPHORES
593    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
594#endif
595    *s=NULL;
596  }
597}
598
599#ifdef WINDOWS
600void
601tsd_set(LispObj key, void *datum)
602{
603}
604
605void *
606tsd_get(LispObj key)
607{
608}
609#else
610void
611tsd_set(LispObj key, void *datum)
612{
613  pthread_setspecific((pthread_key_t)key, datum);
614}
615
616void *
617tsd_get(LispObj key)
618{
619  return pthread_getspecific((pthread_key_t)key);
620}
621#endif
622
/* Unlink tcr from the circular doubly-linked list of TCRs and clear
   its link fields.  Caller is assumed to hold whatever lock protects
   the list. */
void
dequeue_tcr(TCR *tcr)
{
  TCR *next, *prev;

  next = tcr->next;
  prev = tcr->prev;

  prev->next = next;
  next->prev = prev;
  tcr->prev = tcr->next = NULL;
#ifdef X8664
  /* Clear the linear self-pointer (set in new_tcr) as well. */
  tcr->linear = NULL;
#endif
}
638 
639void
640enqueue_tcr(TCR *new)
641{
642  TCR *head, *tail;
643 
644  LOCK(lisp_global(TCR_AREA_LOCK),new);
645  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
646  tail = head->prev;
647  tail->next = new;
648  head->prev = new;
649  new->prev = tail;
650  new->next = head;
651  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
652}
653
654TCR *
655allocate_tcr()
656{
657  TCR *tcr, *chain = NULL, *next;
658#ifdef DARWIN
659  extern Boolean use_mach_exception_handling;
660  kern_return_t kret;
661  mach_port_t
662    thread_exception_port,
663    task_self = mach_task_self();
664#endif
665  for (;;) {
666    tcr = calloc(1, sizeof(TCR));
667#ifdef DARWIN
668#if WORD_SIZE == 64
669    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
670      tcr->next = chain;
671      chain = tcr;
672      continue;
673    }
674#endif
675    if (use_mach_exception_handling) {
676      thread_exception_port = (mach_port_t)((natural)tcr);
677      kret = mach_port_allocate_name(task_self,
678                                     MACH_PORT_RIGHT_RECEIVE,
679                                     thread_exception_port);
680    } else {
681      kret = KERN_SUCCESS;
682    }
683
684    if (kret != KERN_SUCCESS) {
685      tcr->next = chain;
686      chain = tcr;
687      continue;
688    }
689#endif
690    for (next = chain; next;) {
691      next = next->next;
692      free(chain);
693    }
694    return tcr;
695  }
696}
697
698#ifdef X8664
699#ifdef LINUX
700#include <asm/prctl.h>
701#include <sys/prctl.h>
702#endif
703#ifdef FREEBSD
704#include <machine/sysarch.h>
705#endif
706
/* Point the x86-64 "extra" segment register (gs on Linux/FreeBSD) at
   this thread's TCR so compiled code can address it directly.  No-op
   on Darwin (see DARWIN_GS_HACK). */
void
setup_tcr_extra_segment(TCR *tcr)
{
#ifdef FREEBSD
  amd64_set_gsbase(tcr);
#endif
#ifdef LINUX
  arch_prctl(ARCH_SET_GS, (natural)tcr);
#endif
#ifdef DARWIN
  /* There's no way to do this yet.  See DARWIN_GS_HACK */
  /* darwin_set_x8664_fs_reg(tcr); */
#endif
}
721
722#endif
723
724
725
726/*
727  Caller must hold the area_lock.
728*/
#ifdef WINDOWS
TCR *
new_tcr(natural vstack_size, natural tstack_size)
{
  /* NOTE(review): unimplemented stub; falls off the end of a non-void
     function, which is undefined behavior if the result is used. */
}
#else
/* Allocate and initialize a TCR for a new thread: unblock all signals,
   set up the gs-segment alias (x86-64), create the per-thread
   semaphores, allocate the value and temp stacks (under
   TCR_AREA_LOCK), initialize FP control state and the thread-local
   binding table, and mark the thread as foreign until lisp code runs. */
TCR *
new_tcr(natural vstack_size, natural tstack_size)
{
  extern area
    *allocate_vstack_holding_area_lock(natural),
    *allocate_tstack_holding_area_lock(natural);
  area *a;
  int i;
  sigset_t sigmask;

  /* Run the thread with all signals unblocked. */
  sigemptyset(&sigmask);
  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
#ifdef HAVE_TLS
  TCR *tcr = &current_tcr;
#else
  TCR *tcr = allocate_tcr();
#endif

#ifdef X8664
  setup_tcr_extra_segment(tcr);
  tcr->linear = tcr;
#endif

#if (WORD_SIZE == 64)
  tcr->single_float_convert.tag = subtag_single_float;
#endif
  lisp_global(TCR_COUNT) += (1<<fixnumshift);
  tcr->suspend = new_semaphore(0);
  tcr->resume = new_semaphore(0);
  tcr->reset_completion = new_semaphore(0);
  tcr->activate = new_semaphore(0);
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = allocate_vstack_holding_area_lock(vstack_size);
  tcr->vs_area = a;
  a->owner = tcr;
  tcr->save_vsp = (LispObj *) a->active; 
  a = allocate_tstack_holding_area_lock(tstack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->ts_area = a;
  a->owner = tcr;
  tcr->save_tsp = (LispObj *) a->active;
#ifdef X86
  tcr->next_tsp = tcr->save_tsp;
#endif

  tcr->valence = TCR_STATE_FOREIGN;
#ifdef PPC
  tcr->lisp_fpscr.words.l = 0xd0;
#endif
#ifdef X86
  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
#if 1                           /* Mask underflow; too hard to
                                   deal with denorms if underflow is
                                   enabled */
    (1 << MXCSR_UM_BIT) | 
#endif
    (1 << MXCSR_PM_BIT);
#endif
  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
  /* Thread-local binding table: 2048 entries, all initially unbound. */
  tcr->tlb_limit = 2048<<fixnumshift;
  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
  for (i = 0; i < 2048; i++) {
    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
  }
  /* NOTE(review): "fixnum_shift" here vs "fixnumshift" above -- confirm
     both are defined and equal. */
  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
  return tcr;
}
#endif
804
/* TLS destructor (and explicit teardown path) for a thread's TCR.
   shutdown_count allows the destructor to be re-run for several
   PTHREAD_DESTRUCTOR_ITERATIONS; only the final call tears the TCR
   down: it runs the foreign-thread exit callback if appropriate,
   condemns the thread's stack areas, destroys its semaphores, and
   finally raises the termination semaphore (if any) for waiters. */
void
shutdown_thread_tcr(void *arg)
{
  TCR *tcr = TCR_FROM_TSD(arg);

  area *vs, *ts, *cs;
  void *termination_semaphore;
 
  if (--(tcr->shutdown_count) == 0) {
    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
      /* Tell lisp a foreign thread is exiting; the TCR is temporarily
         reinstated in TLS for the callback's benefit. */
      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
   
      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
      tsd_set(lisp_global(TCR_KEY), NULL);
    }
#ifdef DARWIN
    darwin_exception_cleanup(tcr);
#endif
    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
    vs = tcr->vs_area;
    tcr->vs_area = NULL;
    ts = tcr->ts_area;
    tcr->ts_area = NULL;
    cs = tcr->cs_area;
    tcr->cs_area = NULL;
    if (vs) {
      condemn_area_holding_area_lock(vs);
    }
    if (ts) {
      condemn_area_holding_area_lock(ts);
    }
    if (cs) {
      condemn_area_holding_area_lock(cs);
    }
    destroy_semaphore(&tcr->suspend);
    destroy_semaphore(&tcr->resume);
    destroy_semaphore(&tcr->reset_completion);
    destroy_semaphore(&tcr->activate);
    free(tcr->tlb_pointer);
    tcr->tlb_pointer = NULL;
    tcr->tlb_limit = 0;
    tcr->osid = 0;
    tcr->interrupt_pending = 0;
    termination_semaphore = tcr->termination_semaphore;
    tcr->termination_semaphore = NULL;
#ifdef HAVE_TLS
    dequeue_tcr(tcr);
#endif
    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
    if (termination_semaphore) {
      SEM_RAISE(termination_semaphore);
    }
  } else {
    /* Not the last destructor iteration: reinstall the TSD value so
       the destructor runs again. */
    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
  }
}
863
864void
865tcr_cleanup(void *arg)
866{
867  TCR *tcr = (TCR *)arg;
868  area *a;
869
870  a = tcr->vs_area;
871  if (a) {
872    a->active = a->high;
873  }
874  a = tcr->ts_area;
875  if (a) {
876    a->active = a->high;
877  }
878  a = tcr->cs_area;
879  if (a) {
880    a->active = a->high;
881  }
882  tcr->valence = TCR_STATE_FOREIGN;
883  tcr->shutdown_count = 1;
884  shutdown_thread_tcr(tcr);
885  tsd_set(lisp_global(TCR_KEY), NULL);
886}
887
/* Return a platform-specific identifier for the current native thread:
   getpid() on Linux, the Mach thread port on Darwin, the pthread id on
   FreeBSD/Solaris.
   NOTE(review): the Windows branch is a placeholder constant. */
void *
current_native_thread_id()
{
  return ((void *) (natural)
#ifdef LINUX
          getpid()
#endif
#ifdef DARWIN
          mach_thread_self()
#endif
#ifdef FREEBSD
          pthread_self()
#endif
#ifdef SOLARIS
          pthread_self()
#endif
#ifdef WINDOWS
          /* ThreadSelf() */ 23
#endif
          );
}
909
910
/* Bind tcr to the calling thread: record its OS ids, register its C
   stack as an area, install the TCR in thread-specific data, and do
   per-platform exception setup. */
void
thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
{
  area *a, *register_cstack_holding_area_lock(BytePtr, natural);

  tcr->osid = current_thread_osid();
  tcr->native_thread_id = current_native_thread_id();
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->cs_area = a;
  a->owner = tcr;
  /* Foreign threads don't get a lisp-visible control-stack limit. */
  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
  }
#ifdef LINUX
#ifdef PPC
#ifndef PPC64
  /* Remember the current r2 value on 32-bit PPC Linux. */
  tcr->native_thread_info = current_r2;
#endif
#endif
#endif
  tcr->errno_loc = &errno;
  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
  if (use_mach_exception_handling) {
    darwin_exception_init(tcr);
  }
#endif
#ifdef LINUX
  linux_exception_init(tcr);
#endif
  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
}
946
947/*
948  Register the specified tcr as "belonging to" the current thread.
949  Under Darwin, setup Mach exception handling for the thread.
950  Install cleanup handlers for thread termination.
951*/
952void
953register_thread_tcr(TCR *tcr)
954{
955  void *stack_base = NULL;
956  natural stack_size = 0;
957
958  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
959  thread_init_tcr(tcr, stack_base, stack_size);
960  enqueue_tcr(tcr);
961}
962
963
964 
965 
966#ifndef MAP_GROWSDOWN
967#define MAP_GROWSDOWN 0
968#endif
969
970#ifdef WINDOWS
971Ptr
972create_stack(int size)
973{
974}
975#else
976Ptr
977create_stack(natural size)
978{
979  Ptr p;
980  size=align_to_power_of_2(size, log2_page_size);
981  p = (Ptr) mmap(NULL,
982                 (size_t)size,
983                 PROT_READ | PROT_WRITE | PROT_EXEC,
984                 MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
985                 -1,    /* Darwin insists on this when not mmap()ing
986                           a real fd */
987                 0);
988  if (p != (Ptr)(-1)) {
989    *((size_t *)p) = size;
990    return p;
991  }
992  allocation_failure(true, size);
993
994}
995#endif
996
/* Allocate a stack; currently a synonym for create_stack. */
void *
allocate_stack(natural size)
{
  return create_stack(size);
}
1002
#ifdef WINDOWS
void
free_stack(void *s)
{
  /* NOTE(review): unimplemented Windows stub. */
}
#else
/* Unmap a stack created by create_stack; its total size was stored in
   its first word. */
void
free_stack(void *s)
{
  size_t size = *((size_t *)s);
  munmap(s, size);
}
#endif
1016
1017Boolean threads_initialized = false;
1018
#ifndef USE_FUTEX
#ifdef WINDOWS
void
count_cpus()
{
}
#else
/* If more than one CPU is available, spin longer (1024 attempts)
   before yielding in get_spin_lock. */
void
count_cpus()
{
#ifdef DARWIN
  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
#include <mach/host_info.h>

  struct host_basic_info info;
  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 
  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
    if (info.max_cpus > 1) {
      spin_lock_tries = 1024;
    }
  }
#else
  int n = sysconf(_SC_NPROCESSORS_ONLN);
 
  if (n > 1) {
    spin_lock_tries = 1024;
  }
#endif
}
#endif
#endif
1051
#ifdef WINDOWS
void
init_threads(void * stack_base, TCR *tcr)
{
}
void *
lisp_thread_entry(void *param)
{
  /* NOTE(review): unimplemented stub; falls off the end of a non-void
     function. */
}
#else
/* One-time thread-system initialization: record the initial TCR,
   create the TSD key (whose destructor tears down a thread's TCR),
   install the suspend/resume signal handlers, and tune spin counts. */
void
init_threads(void * stack_base, TCR *tcr)
{
  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
  thread_signal_setup();

#ifndef USE_FUTEX
  count_cpus();
#endif
  threads_initialized = true;
}


/* pthread start routine for lisp threads (see xNewThread).  Builds and
   registers a TCR, signals the creator via activation->created, then
   loops: each iteration announces reset completion, waits for
   activation, and calls into lisp; a thread left "awaiting preset"
   goes around the loop again instead of exiting.
   NOTE(review): falls off the end of a void* function -- the pthread
   exit value is indeterminate (apparently unused). */
void *
lisp_thread_entry(void *param)
{
  thread_activation *activation = (thread_activation *)param;
  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
  sigset_t mask, old_mask;

  /* Run with all signals unblocked. */
  sigemptyset(&mask);
  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);

  register_thread_tcr(tcr);

  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
  tcr->vs_area->active -= node_size;
  *(--tcr->save_vsp) = lisp_nil;
  enable_fp_exceptions();
  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
  activation->tcr = tcr;
  SEM_RAISE(activation->created);
  do {
    SEM_RAISE(tcr->reset_completion);
    SEM_WAIT_FOREVER(tcr->activate);
    /* Now go run some lisp code */
    start_lisp(TCR_TO_TSD(tcr),0);
  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
  pthread_cleanup_pop(true);

}
#endif
1105
/* Create a new lisp thread with the requested stack sizes and wait
   until it has built its TCR; returns that TCR's TSD handle.
   NOTE(review): if create_system_thread fails, activation.tcr stays 0
   and TCR_TO_TSD(0) is still returned -- confirm callers cope. */
void *
xNewThread(natural control_stack_size,
           natural value_stack_size,
           natural temp_stack_size)

{
  thread_activation activation;


  activation.tsize = temp_stack_size;
  activation.vsize = value_stack_size;
  activation.tcr = 0;
  activation.created = new_semaphore(0);
  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
                           NULL, 
                           lisp_thread_entry,
                           (void *) &activation)) {
   
    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
  }
  destroy_semaphore(&activation.created); 
  return TCR_TO_TSD(activation.tcr);
}
1129
1130Boolean
1131active_tcr_p(TCR *q)
1132{
1133  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1134 
1135  do {
1136    if (p == q) {
1137      return true;
1138    }
1139    p = p->next;
1140  } while (p != head);
1141  return false;
1142}
1143
#ifdef WINDOWS
OSErr
xDisposeThread(TCR *tcr)
{
  /* NOTE(review): unimplemented stub; falls off the end of a non-void
     function. */
}
#else
/* Cancel the thread owning tcr, unless it is the initial thread, the
   calling thread, or no longer active.  Returns 0 on success, -50
   otherwise. */
OSErr
xDisposeThread(TCR *tcr)
{
  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
      pthread_cancel((pthread_t)(tcr->osid));
      return 0;
    }
  }
  return -50;
}
#endif
1162
/* Yielding to a specific thread isn't supported; any call is a bug. */
OSErr
xYieldToThread(TCR *target)
{
  Bug(NULL, "xYieldToThread ?");
  return 0;
}
1169 
/* Querying another thread's remaining stack space isn't supported; any
   call is a bug. */
OSErr
xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
{
  Bug(NULL, "xThreadCurrentStackSpace ?");
  return 0;
}
1176
1177
#ifdef WINDOWS
LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
  /* NOTE(review): unimplemented stub; falls off the end of a non-void
     function. */
}
#else
/* Create a detached pthread running start_routine(param).  A
   MINIMAL_THREAD_STACK_SIZE request maps to PTHREAD_STACK_MIN; an
   explicit stackaddr requires a size.  Returns the new pthread_t boxed
   as a LispObj; returned_thread stays 0 if pthread_create fails.
   NOTE(review): pthread_create's return code itself is ignored. */
LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
  pthread_attr_t attr;
  pthread_t returned_thread = (pthread_t) 0;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 

  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
    stack_size = PTHREAD_STACK_MIN;
  }

  stack_size = ensure_stack_limit(stack_size);
  if (stackaddr != NULL) {
    /* Size must have been specified.  Sort of makes sense ... */
#ifdef DARWIN
    Fatal("no pthread_attr_setsetstack. "," Which end of stack does address refer to?");
#else
    pthread_attr_setstack(&attr, stackaddr, stack_size);
#endif
  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
    pthread_attr_setstacksize(&attr,stack_size);
  }

  /*
     I think that's just about enough ... create the thread.
  */
  pthread_create(&returned_thread, &attr, start_routine, param);
  return (LispObj) ptr_to_lispobj(returned_thread);
}
#endif
1222
/* Return the calling thread's TCR.  If none exists and "create" is
   true, build one for this foreign thread: make and register a TCR,
   reserve value-stack slots for the saved registers and for the
   thread's special-binding words (count obtained by calling the
   FOREIGN-THREAD-CONTROL callback with -1), then call the callback
   with 0 to finish lisp-side setup. */
TCR *
get_tcr(Boolean create)
{
#ifdef HAVE_TLS
  TCR *current = current_tcr.linear;
#else
  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
#endif

  if ((current == NULL) && create) {
    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
    int i, nbindwords = 0;
    extern unsigned initial_stack_size;
   
    /* Make one. */
    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
    register_thread_tcr(current);
#ifdef DEBUG_TCR_CREATION
#ifndef WINDOWS
    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
#endif
#endif
    current->vs_area->active -= node_size;
    *(--current->save_vsp) = lisp_nil;
    /* NOTE(review): NSAVEREGS is only defined for PPC and X8664; other
       platforms would fail to compile the loop below. */
#ifdef PPC
#define NSAVEREGS 8
#endif
#ifdef X8664
#define NSAVEREGS 4
#endif
    for (i = 0; i < NSAVEREGS; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
    for (i = 0; i < nbindwords; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    current->shutdown_count = 1;
    ((void (*)())ptr_from_lispobj(callback_ptr))(0);

  }
 
  return current;
}
1272
#ifdef WINDOWS
Boolean
suspend_tcr(TCR *tcr)
{
  /* NOTE(review): unimplemented stub; falls off the end of a non-void
     function. */
}
#else
/* Ask the thread owning tcr to suspend itself by sending the suspend
   signal; only the first suspender (count 0 -> 1) actually signals.
   Returns true if this call initiated a suspension (the caller should
   then wait for the ack), false otherwise. */
Boolean
suspend_tcr(TCR *tcr)
{
  int suspend_count = atomic_incf(&(tcr->suspend_count));
  pthread_t thread;
  if (suspend_count == 1) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Suspending 0x%x\n", tcr);
#endif
#ifdef DARWIN_nope
    if (mach_suspend_tcr(tcr)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
      return true;
    }
#endif
    thread = (pthread_t)(tcr->osid);
    if ((thread != (pthread_t) 0) &&
        (pthread_kill(thread, thread_suspend_signal) == 0)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
    } else {
      /* A problem using pthread_kill.  On Darwin, this can happen
         if the thread has had its signal mask surgically removed
         by pthread_exit.  If the native (Mach) thread can be suspended,
         do that and return true; otherwise, flag the tcr as belonging
         to a dead thread by setting tcr->osid to 0.
      */
      tcr->osid = 0;
      return false;
    }
    return true;
  }
  return false;
}
#endif
1313
1314Boolean
1315tcr_suspend_ack(TCR *tcr)
1316{
1317  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
1318    SEM_WAIT_FOREVER(tcr->suspend);
1319    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1320#if SUSPEND_RESUME_VERBOSE
1321    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
1322#endif
1323
1324  }
1325  return true;
1326}
1327
1328     
1329
1330
/* Suspend the thread owning tcr and wait until it acknowledges,
   holding TCR_AREA_LOCK (and, if configured, the Mach exception lock)
   throughout.  Returns true if the target was actually suspended by
   this call. */
Boolean
lisp_suspend_tcr(TCR *tcr)
{
  Boolean suspended;
  TCR *current = get_tcr(true);
 
  LOCK(lisp_global(TCR_AREA_LOCK),current);
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (use_mach_exception_handling) {
    pthread_mutex_lock(mach_exception_lock);
  }
#endif
#endif
  suspended = suspend_tcr(tcr);
  if (suspended) {
    while (!tcr_suspend_ack(tcr));
  }
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (use_mach_exception_handling) {
    pthread_mutex_unlock(mach_exception_lock);
  }
#endif
#endif
  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
  return suspended;
}
1359         
1360
1361Boolean
1362resume_tcr(TCR *tcr)
1363{
1364  int suspend_count = atomic_decf(&(tcr->suspend_count));
1365  if (suspend_count == 0) {
1366#ifdef DARWIN
1367    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1368#if SUSPEND_RESUME_VERBOSE
1369    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
1370#endif
1371      mach_resume_tcr(tcr);
1372      return true;
1373    }
1374#endif
1375#if RESUME_VIA_RESUME_SEMAPHORE
1376    SEM_RAISE(tcr->resume);
1377#else
1378    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
1379      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
1380    }
1381#endif
1382#if SUSPEND_RESUME_VERBOSE
1383    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
1384#endif
1385    return true;
1386  }
1387  return false;
1388}
1389
void
wait_for_resumption(TCR *tcr)
{
  /* Called after resume_tcr(): if the target is now fully resumed
     (suspend_count dropped to 0), finish the handshake.  On Darwin a
     thread suspended via the Mach alternative just has its flag
     cleared; otherwise we optionally wait for the resumed thread to
     acknowledge (only when WAIT_FOR_RESUME_ACK is configured on,
     which it currently is not - see comment at top of file). */
  if (tcr->suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
      return;
  }
#endif
#if WAIT_FOR_RESUME_ACK
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
#endif
    SEM_WAIT_FOREVER(tcr->suspend);
#endif
  }
}
1408   
1409
1410
1411Boolean
1412lisp_resume_tcr(TCR *tcr)
1413{
1414  Boolean resumed;
1415  TCR *current = get_tcr(true);
1416 
1417  LOCK(lisp_global(TCR_AREA_LOCK),current);
1418  resumed = resume_tcr(tcr);
1419  wait_for_resumption(tcr);
1420  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1421  return resumed;
1422}
1423
1424
/* Dead TCRs are queued here until it is safe to free() them
   (see free_freed_tcrs()). */
TCR *freed_tcrs = NULL;

void
enqueue_freed_tcr (TCR *tcr)
{
  /* Push TCR onto the freed list.  With thread-local-storage TCRs the
     TCR memory was not separately malloc'd, so there is nothing to
     queue. */
#ifndef HAVE_TLS
  tcr->next = freed_tcrs;
  freed_tcrs = tcr;
#endif
}
1435
1436/* It's not clear that we can safely condemn a dead tcr's areas, since
1437   we may not be able to call free() if a suspended thread owns a
1438   malloc lock. At least make the areas appear to be empty.
1439*/
1440   
1441
1442void
1443normalize_dead_tcr_areas(TCR *tcr)
1444{
1445  area *a;
1446
1447  a = tcr->vs_area;
1448  if (a) {
1449    a->active = a->high;
1450  }
1451
1452  a = tcr->ts_area;
1453  if (a) {
1454    a->active = a->high;
1455  }
1456
1457  a = tcr->cs_area;
1458  if (a) {
1459    a->active = a->high;
1460  }
1461}
1462   
1463void
1464free_freed_tcrs ()
1465{
1466  TCR *current, *next;
1467
1468  for (current = freed_tcrs; current; current = next) {
1469    next = current->next;
1470#ifndef HAVE_TLS
1471    free(current);
1472#endif
1473  }
1474  freed_tcrs = NULL;
1475}
1476
void
suspend_other_threads(Boolean for_gc)
{
  /* Suspend every thread on the circular TCR list except the caller,
     wait until all of them have acknowledged, then unlink (and queue
     for freeing) any TCRs found to belong to dead threads.  The
     TCR-area lock is acquired here and remains held until
     resume_other_threads().  FOR_GC additionally takes the Mach
     exception lock on Darwin so the exception thread can't run
     meanwhile. */
  TCR *current = get_tcr(true), *other, *next;
  int dead_tcr_count = 0;
  Boolean all_acked;

  LOCK(lisp_global(TCR_AREA_LOCK), current);
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (for_gc && use_mach_exception_handling) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
#endif
    pthread_mutex_lock(mach_exception_lock);
  }
#endif
#endif
  /* Pass 1: ask everyone to suspend.  osid == 0 marks a dead thread,
     either already known or discovered by suspend_tcr(). */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      suspend_tcr(other);
      if (other->osid == 0) {
        dead_tcr_count++;
      }
    } else {
      dead_tcr_count++;
    }
  }

  /* Pass 2: keep polling until every live thread has acknowledged its
     suspension. */
  do {
    all_acked = true;
    for (other = current->next; other != current; other = other->next) {
      if ((other->osid != 0)) {
        if (!tcr_suspend_ack(other)) {
          all_acked = false;
        }
      }
    }
  } while(! all_acked);

     

  /* All other threads are suspended; can safely delete dead tcrs now */
  if (dead_tcr_count) {
    for (other = current->next; other != current; other = next) {
      next = other->next;
      if ((other->osid == 0))  {
        normalize_dead_tcr_areas(other);
        dequeue_tcr(other);
        enqueue_freed_tcr(other);
      }
    }
  }
}
1531
/* Non-GC entry point: suspend all other threads (leaves the TCR-area
   lock held until lisp_resume_other_threads()). */
void
lisp_suspend_other_threads()
{
  suspend_other_threads(false);
}
1537
void
resume_other_threads(Boolean for_gc)
{
  /* Counterpart of suspend_other_threads(): resume every other live
     thread, wait for their resumption handshakes, free any dead TCRs
     collected earlier, release the Mach exception lock if FOR_GC took
     it, and finally drop the TCR-area lock acquired in
     suspend_other_threads(). */
  TCR *current = get_tcr(true), *other;
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      resume_tcr(other);
    }
  }
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      wait_for_resumption(other);
    }
  }
  free_freed_tcrs();
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (for_gc && use_mach_exception_handling) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
#endif
    pthread_mutex_unlock(mach_exception_lock);
  }
#endif
#endif

  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
}
1566
/* Non-GC entry point: resume all other threads and release the
   TCR-area lock taken by lisp_suspend_other_threads(). */
void
lisp_resume_other_threads()
{
  resume_other_threads(false);
}
1572
1573
1574
1575rwlock *
1576rwlock_new()
1577{
1578  extern int cache_block_size;
1579
1580  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1581  rwlock *rw = NULL;;
1582 
1583  if (p) {
1584    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1585    rw->malloced_ptr = p;
1586#ifndef USE_FUTEX
1587    rw->reader_signal = new_semaphore(0);
1588    rw->writer_signal = new_semaphore(0);
1589    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1590      if (rw->reader_signal) {
1591        destroy_semaphore(&(rw->reader_signal));
1592      } else {
1593        destroy_semaphore(&(rw->writer_signal));
1594      }
1595      free(rw);
1596      rw = NULL;
1597    }
1598#endif
1599  }
1600  return rw;
1601}
1602
1603     
/*
  Try to get read access to a multiple-readers/single-writer lock.  If
  we already have read access, return success (indicating that the
  lock is held another time).  If we already have write access to the
  lock ... that won't work; return EDEADLK.  Wait until no other
  thread has or is waiting for write access, then indicate that we
  hold read access once.
*/
1612#ifndef USE_FUTEX
int
rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  /* Acquire (or re-acquire) read access to RW.  rw->state is -1 per
     active reader and positive while a writer holds the lock; readers
     defer to blocked writers to avoid writer starvation.  Returns 0
     on success, EDEADLK if the caller already holds write access, or
     an error from the timed semaphore wait (e.g. ETIMEDOUT when
     WAITFOR expires). */
  int err = 0;
 
  LOCK_SPINLOCK(rw->spin, tcr);

  if (rw->writer == tcr) {
    /* Can't add read access while we hold write access. */
    RELEASE_SPINLOCK(rw->spin);
    return EDEADLK;
  }

  while (rw->blocked_writers || (rw->state > 0)) {
    /* A writer is active or waiting: sleep on the reader semaphore,
       dropping the spinlock while we wait. */
    rw->blocked_readers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_readers--;
    if (err == EINTR) {
      err = 0;                  /* interrupted waits just mean "retry" */
    }
    if (err) {
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  rw->state--;                  /* one more active reader */
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1643#else
1644int
1645rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1646{
1647  natural waitval;
1648
1649  lock_futex(&rw->spin);
1650
1651  if (rw->writer == tcr) {
1652    unlock_futex(&rw->spin);
1653    return EDEADLOCK;
1654  }
1655  while (1) {
1656    if (rw->writer == NULL) {
1657      --rw->state;
1658      unlock_futex(&rw->spin);
1659      return 0;
1660    }
1661    rw->blocked_readers++;
1662    waitval = rw->reader_signal;
1663    unlock_futex(&rw->spin);
1664    futex_wait(&rw->reader_signal,waitval);
1665    lock_futex(&rw->spin);
1666    rw->blocked_readers--;
1667  }
1668  return 0;
1669}
1670#endif   
1671
1672
1673/*
1674  Try to obtain write access to the lock.
1675  It is an error if we already have read access, but it's hard to
1676  detect that.
1677  If we already have write access, increment the count that indicates
1678  that.
1679  Otherwise, wait until the lock is not held for reading or writing,
1680  then assert write access.
1681*/
1682
1683#ifndef USE_FUTEX
int
rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  /* Obtain (or re-obtain) write access to RW.  rw->state is a
     positive recursion count while a writer holds the lock and -1 per
     active reader.  Returns 0 on success or an error from the timed
     semaphore wait (e.g. ETIMEDOUT when WAITFOR expires).  Already
     holding read access is an error, but one this code can't
     detect - see comment above. */
  int err = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->writer == tcr) {
    /* Recursive write acquisition: just bump the count. */
    rw->state++;
    RELEASE_SPINLOCK(rw->spin);
    return 0;
  }

  while (rw->state != 0) {
    /* Lock busy (readers or another writer): sleep on the writer
       semaphore, dropping the spinlock while we wait. */
    rw->blocked_writers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_writers--;
    if (err == EINTR) {
      err = 0;                  /* interrupted waits just mean "retry" */
    }
    if (err) {
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  rw->state = 1;
  rw->writer = tcr;
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1715
1716#else
1717int
1718rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1719{
1720  int err = 0;
1721  natural waitval;
1722
1723  lock_futex(&rw->spin);
1724  if (rw->writer == tcr) {
1725    rw->state++;
1726    unlock_futex(&rw->spin);
1727    return 0;
1728  }
1729
1730  while (rw->state != 0) {
1731    rw->blocked_writers++;
1732    waitval = rw->writer_signal;
1733    unlock_futex(&rw->spin);
1734    futex_wait(&rw->writer_signal,waitval);
1735    lock_futex(&rw->spin);
1736    rw->blocked_writers--;
1737  }
1738  rw->state = 1;
1739  rw->writer = tcr;
1740  unlock_futex(&rw->spin);
1741  return err;
1742}
1743#endif
1744
1745/*
1746  Sort of the same as above, only return EBUSY if we'd have to wait.
1747*/
1748#ifndef USE_FUTEX
1749int
1750rwlock_try_wlock(rwlock *rw, TCR *tcr)
1751{
1752  int ret = EBUSY;
1753
1754  LOCK_SPINLOCK(rw->spin,tcr);
1755  if (rw->writer == tcr) {
1756    rw->state++;
1757    ret = 0;
1758  } else {
1759    if (rw->state == 0) {
1760      rw->writer = tcr;
1761      rw->state = 1;
1762      ret = 0;
1763    }
1764  }
1765  RELEASE_SPINLOCK(rw->spin);
1766  return ret;
1767}
1768#else
1769int
1770rwlock_try_wlock(rwlock *rw, TCR *tcr)
1771{
1772  int ret = EBUSY;
1773
1774  lock_futex(&rw->spin);
1775  if (rw->writer == tcr) {
1776    rw->state++;
1777    ret = 0;
1778  } else {
1779    if (rw->state == 0) {
1780      rw->writer = tcr;
1781      rw->state = 1;
1782      ret = 0;
1783    }
1784  }
1785  unlock_futex(&rw->spin);
1786  return ret;
1787}
1788#endif
1789
1790#ifndef USE_FUTEX
1791int
1792rwlock_try_rlock(rwlock *rw, TCR *tcr)
1793{
1794  int ret = EBUSY;
1795
1796  LOCK_SPINLOCK(rw->spin,tcr);
1797  if (rw->state <= 0) {
1798    --rw->state;
1799    ret = 0;
1800  }
1801  RELEASE_SPINLOCK(rw->spin);
1802  return ret;
1803}
1804#else
1805int
1806rwlock_try_rlock(rwlock *rw, TCR *tcr)
1807{
1808  int ret = EBUSY;
1809
1810  lock_futex(&rw->spin);
1811  if (rw->state <= 0) {
1812    --rw->state;
1813    ret = 0;
1814  }
1815  unlock_futex(&rw->spin);
1816  return ret;
1817}
1818#endif
1819
1820
1821
1822#ifndef USE_FUTEX
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{
  /* Release one level of read or write access to RW.  Returns EINVAL
     if the lock isn't held at all (state == 0) or if the write lock
     is held by some other thread; otherwise 0.  When the lock becomes
     completely free, wake one blocked writer (writers have priority)
     or else all blocked readers. */

  int err = 0;
  natural blocked_readers = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->state > 0) {
    /* Write-locked: only the owning thread may unlock. */
    if (rw->writer != tcr) {
      err = EINVAL;
    } else {
      --rw->state;
      if (rw->state == 0) {
        rw->writer = NULL;
      }
    }
  } else {
    /* Read-locked (state < 0): drop one reader. */
    if (rw->state < 0) {
      ++rw->state;
    } else {
      err = EINVAL;
    }
  }
  if (err) {
    RELEASE_SPINLOCK(rw->spin);
    return err;
  }
 
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      SEM_RAISE(rw->writer_signal);
    } else {
      blocked_readers = rw->blocked_readers;
      if (blocked_readers) {
        SEM_BROADCAST(rw->reader_signal, blocked_readers);
      }
    }
  }
  RELEASE_SPINLOCK(rw->spin);
  return 0;
}
1865#else
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{
  /* Futex-based variant of rwlock_unlock().  Same protocol as above;
     wakeups are done by bumping the signal word (so sleeping waiters
     see a changed value) and then calling futex_wake - one waiter for
     a writer, everyone (INT_MAX) for readers. */

  int err = 0;

  lock_futex(&rw->spin);
  if (rw->state > 0) {
    /* Write-locked: only the owning thread may unlock. */
    if (rw->writer != tcr) {
      err = EINVAL;
    } else {
      --rw->state;
      if (rw->state == 0) {
        rw->writer = NULL;
      }
    }
  } else {
    /* Read-locked (state < 0): drop one reader. */
    if (rw->state < 0) {
      ++rw->state;
    } else {
      err = EINVAL;
    }
  }
  if (err) {
    unlock_futex(&rw->spin);
    return err;
  }
 
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      ++rw->writer_signal;
      unlock_futex(&rw->spin);
      futex_wake(&rw->writer_signal,1);
      return 0;
    }
    if (rw->blocked_readers) {
      ++rw->reader_signal;
      unlock_futex(&rw->spin);
      futex_wake(&rw->reader_signal, INT_MAX);
      return 0;
    }
  }
  unlock_futex(&rw->spin);
  return 0;
}
1911#endif
1912
1913       
void
rwlock_destroy(rwlock *rw)
{
  /* Tear down a lock created by rwlock_new(): destroy its semaphores
     (futex builds have none) and release the original, unaligned
     allocation recorded in malloced_ptr. */
#ifndef USE_FUTEX
  destroy_semaphore((void **)&rw->reader_signal);
  destroy_semaphore((void **)&rw->writer_signal);
#endif
  postGCfree((void *)(rw->malloced_ptr));
}
1923
1924
1925
Note: See TracBrowser for help on using the repository browser.