source: trunk/ccl/lisp-kernel/thread_manager.c @ 6904

Last change on this file since 6904 was 6904, checked in by gb, 13 years ago

ensure_stack_limit() when creating a stack for a pthread.
Foreign exception callbacks: put some 0s on the stack for the callback.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 35.5 KB
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
20/*
21   If we suspend via signals - and if the "suspend" signal is masked
22   in the handler for that signal - then it's not possible to suspend
23   a thread that's still waiting to be resumed (which is what
24   WAIT_FOR_RESUME_ACK is all about.)
25*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
30typedef struct {
31  TCR *tcr;
32  natural vsize, tsize;
33  void *created;
34} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46
47int
48raise_thread_interrupt(TCR *target)
49{
50#ifdef DARWIN_not_yet
51  if (use_mach_exception_handling) {
52    return mach_raise_thread_interrupt(target);
53  }
54#endif
55  return pthread_kill((pthread_t)target->osid, SIGNAL_FOR_PROCESS_INTERRUPT);
56}
57
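/*
  Atomic arithmetic built on store_conditional(): retry a
  compare-and-swap until it succeeds.  atomic_incf_by() returns the
  new value; atomic_decf() never stores a value below zero.
*/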
58signed_natural
59atomic_incf_by(signed_natural *ptr, signed_natural by)
60{
61  signed_natural old, new;
62  do {
63    old = *ptr;
64    new = old+by;
65  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
66           (natural) old);
67  return new;
68}
69
70signed_natural
71atomic_incf(signed_natural *ptr)
72{
73  return atomic_incf_by(ptr, 1);
74}
75
76signed_natural
77atomic_decf(signed_natural *ptr)
78{
79  signed_natural old, new;
80  do {
81    old = *ptr;
82    new = old == 0 ? old : old-1;
83  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
84           (natural) old);
85  return old-1;
86}
87
88
89int spin_lock_tries = 1;
90
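/*
  Spin until we can atomically swap our TCR into the lock word *p
  (i.e., until we observe it to be 0); after spin_lock_tries failed
  attempts, yield the scheduler and start over.
*/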
91void
92get_spin_lock(signed_natural *p, TCR *tcr)
93{
94  int i, n = spin_lock_tries;
95 
96  while (1) {
97    for (i = 0; i < n; i++) {
98      if (atomic_swap(p,(signed_natural)tcr) == 0) {
99        return;
100      }
101    }
102    sched_yield();
103  }
104}
105
106
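/*
  Recursive lock, built from a spinlock, a count of claimants
  ("avail"), and a semaphore.  If the calling TCR already owns M, just
  bump the recursion count; otherwise claim the lock if we're the
  first claimant, else sleep on M's semaphore and try again.
*/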
107int
108lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
109{
110
111  if (tcr == NULL) {
112    tcr = get_tcr(true);
113  }
114  if (m->owner == tcr) {
115    m->count++;
116    return 0;
117  }
118  while (1) {
119    get_spin_lock(&(m->spinlock),tcr);
120    ++m->avail;
121    if (m->avail == 1) {
122      m->owner = tcr;
123      m->count = 1;
124      m->spinlock = 0;
125      break;
126    }
127    m->spinlock = 0;
128    SEM_WAIT_FOREVER(m->signal);
129  }
130  return 0;
131}
132
133 
134int
135unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
136{
137  int ret = EPERM, pending;
138
139  if (tcr == NULL) {
140    tcr = get_tcr(true);
141  }
142
143  if (m->owner == tcr) {
144    --m->count;
145    if (m->count == 0) {
146      get_spin_lock(&(m->spinlock),tcr);
147      m->owner = NULL;
148      pending = m->avail-1 + m->waiting;     /* Don't count us */
149      m->avail = 0;
150      --pending;
151      if (pending > 0) {
152        m->waiting = pending;
153      } else {
154        m->waiting = 0;
155      }
156      m->spinlock = 0;
157      if (pending >= 0) {
158        SEM_RAISE(m->signal);
159      }
160    }
161    ret = 0;
162  }
163  return ret;
164}
165
166void
167destroy_recursive_lock(RECURSIVE_LOCK m)
168{
169  destroy_semaphore((void **)&m->signal);
170  postGCfree((void *)(m->malloced_ptr));
171}
172
173/*
174  If we're already the owner (or if the lock is free), lock it
175  and increment the lock count; otherwise, return EBUSY without
176  waiting.
177*/
178
179int
180recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
181{
182  TCR *owner = m->owner;
183
184  if (owner == tcr) {
185    m->count++;
186    if (was_free) {
187      *was_free = 0;
188    }
189    return 0;
190  }
191  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
192    m->owner = tcr;
193    m->count = 1;
194    if (was_free) {
195      *was_free = 1;
196    }
197    return 0;
198  }
199
200  return EBUSY;
201}
202
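/*
  Wait on S until it is signaled, using repeated one-second timed
  waits; a wait that times out or is interrupted is simply retried.
*/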
203void
204sem_wait_forever(SEMAPHORE s)
205{
206  int status;
207
208  do {
209#ifdef USE_MACH_SEMAPHORES
210    mach_timespec_t q = {1,0};
211    status = SEM_TIMEDWAIT(s,q);
212#endif
213#ifdef USE_POSIX_SEMAPHORES
214    struct timespec q;
215    gettimeofday((struct timeval *)&q, NULL);
216    q.tv_sec += 1;
217    status = SEM_TIMEDWAIT(s,&q);
218#endif
219  } while (status != 0);
220}
221
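/*
  Wait on S for at most SECONDS + MILLIS.  Returns 0 if the semaphore
  was obtained, or an errno-style code (e.g., ETIMEDOUT or EINTR)
  otherwise.
*/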
222int
223wait_on_semaphore(SEMAPHORE s, int seconds, int millis)
224{
225  int nanos = (millis % 1000) * 1000000;
226#if defined(LINUX) || defined(FREEBSD)
227  int status;
228
229  struct timespec q;
230  gettimeofday((struct timeval *)&q, NULL);
231  q.tv_nsec *= 1000L;
232   
233  q.tv_nsec += nanos;
234  if (q.tv_nsec >= 1000000000L) {
235    q.tv_nsec -= 1000000000L;
236    seconds += 1;
237  }
238  q.tv_sec += seconds;
239  status = SEM_TIMEDWAIT(s, &q);
240  if (status < 0) {
241    return errno;
242  }
243  return status;
244#endif
245#ifdef USE_MACH_SEMAPHORES
246  mach_timespec_t q = {seconds, nanos};
247  int status = SEM_TIMEDWAIT(s, q);
248
249 
250  switch (status) {
251  case 0: return 0;
252  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
253  case KERN_ABORTED: return EINTR;
254  default: return EINVAL;
255  }
256
257#endif
258}
259
260
261void
262signal_semaphore(SEMAPHORE s)
263{
264  SEM_RAISE(s);
265}
266
267 
268LispObj
269current_thread_osid()
270{
271  return (LispObj)ptr_to_lispobj(pthread_self());
272}
273
274
275
276int thread_suspend_signal = 0, thread_resume_signal = 0;
277
278
279
280void
281linux_exception_init(TCR *tcr)
282{
283}
284
285
286TCR *
287get_interrupt_tcr(Boolean create)
288{
289  return get_tcr(create);
290}
291 
292void
293suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
294{
295#ifdef DARWIN_GS_HACK
296  Boolean gs_was_tcr = ensure_gs_pthread();
297#endif
298  TCR *tcr = get_interrupt_tcr(false);
299
300  if (signo == thread_suspend_signal) {
301#if 0
302    sigset_t wait_for;
303#endif
304
305    tcr->suspend_context = context;
306#if 0
307    sigfillset(&wait_for);
308#endif
309    SEM_RAISE(tcr->suspend);
310#if 0
311    sigdelset(&wait_for, thread_resume_signal);
312#endif
313#if 1
314#if RESUME_VIA_RESUME_SEMAPHORE
315    SEM_WAIT_FOREVER(tcr->resume);
316#if SUSPEND_RESUME_VERBOSE
317    fprintf(stderr, "got  resume in 0x%x\n",tcr);
318#endif
319    tcr->suspend_context = NULL;
320#else
321    sigsuspend(&wait_for);
322#endif
323#else
324    do {
325      sigsuspend(&wait_for);
326    } while (tcr->suspend_context);
327#endif 
328  } else {
329    tcr->suspend_context = NULL;
330#if SUSPEND_RESUME_VERBOSE
331    fprintf(stderr,"got resume in 0x%x\n",tcr);
332#endif
333  }
334#if WAIT_FOR_RESUME_ACK
335  SEM_RAISE(tcr->suspend);
336#endif
337#ifdef DARWIN_GS_HACK
338  if (gs_was_tcr) {
339    set_gs_address(tcr);
340  }
341#endif
342#ifdef DARWIN
343  DarwinSigReturn(context);
344#endif
345#ifdef FREEBSD
346  freebsd_sigreturn(context);
347#endif
348}
349
350 
351
352/*
353  'base' should be set to the bottom (origin) of the stack, i.e., the
354  end from which it grows.
355*/
356 
357void
358os_get_stack_bounds(LispObj q,void **base, natural *size)
359{
360  pthread_t p = (pthread_t)(q);
361#ifdef DARWIN
362  *base = pthread_get_stackaddr_np(p);
363  *size = pthread_get_stacksize_np(p);
364#endif
365#ifdef LINUX
366  pthread_attr_t attr;
367
368  pthread_getattr_np(p,&attr);
369  pthread_attr_getstack(&attr, base, size);
370  *(natural *)base += *size;
371#endif
372#ifdef FREEBSD
373  pthread_attr_t attr;
374  void * temp_base;
375  size_t temp_size;
376 
377
378  pthread_attr_init(&attr); 
379  pthread_attr_get_np(p, &attr);
380  pthread_attr_getstackaddr(&attr,&temp_base);
381  pthread_attr_getstacksize(&attr,&temp_size);
382  *base = (void *)((natural)temp_base + temp_size);
383  *size = temp_size;
384#endif
385
386}
387
388void *
389new_semaphore(int count)
390{
391#ifdef USE_POSIX_SEMAPHORES
392  sem_t *s = malloc(sizeof(sem_t));
393  sem_init(s, 0, count);
394  return s;
395#endif
396#ifdef USE_MACH_SEMAPHORES
397  semaphore_t s = (semaphore_t)0;
398  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
399  return (void *)(natural)s;
400#endif
401}
402
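/*
  Allocate a recursive lock, aligned to a cache line, along with the
  semaphore used to wake waiters.  If either allocation fails, release
  whatever was allocated and return NULL.
*/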
403RECURSIVE_LOCK
404new_recursive_lock()
405{
406  extern int cache_block_size;
407  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
408  RECURSIVE_LOCK m = NULL;
409  void *signal = new_semaphore(0);
410
411  if (p) {
412    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
413    m->malloced_ptr = p;
414  }
415
416  if (m && signal) {
417    m->signal = signal;
418    return m;
419  }
420  if (m) {
421    free(p);
422  }
423  if (signal) {
424    destroy_semaphore(&signal);
425  }
426  return NULL;
427}
428
429void
430destroy_semaphore(void **s)
431{
432  if (*s) {
433#ifdef USE_POSIX_SEMAPHORES
434    sem_destroy((sem_t *)*s);
435#endif
436#ifdef USE_MACH_SEMAPHORES
437    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
438#endif
439    *s=NULL;
440  }
441}
442
443void
444tsd_set(LispObj key, void *datum)
445{
446  pthread_setspecific((pthread_key_t)key, datum);
447}
448
449void *
450tsd_get(LispObj key)
451{
452  return pthread_getspecific((pthread_key_t)key);
453}
454
455void
456dequeue_tcr(TCR *tcr)
457{
458  TCR *next, *prev;
459
460  next = tcr->next;
461  prev = tcr->prev;
462
463  prev->next = next;
464  next->prev = prev;
465  tcr->prev = tcr->next = NULL;
466#ifdef X8664
467  tcr->linear = NULL;
468#endif
469}
470 
471void
472enqueue_tcr(TCR *new)
473{
474  TCR *head, *tail;
475 
476  LOCK(lisp_global(TCR_AREA_LOCK),new);
477  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
478  tail = head->prev;
479  tail->next = new;
480  head->prev = new;
481  new->prev = tail;
482  new->next = head;
483  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
484}
485
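/*
  Allocate a zeroed TCR.  On 64-bit Darwin, retry until calloc returns
  one whose address fits in 32 bits (so that the address can double as
  the name of the thread's Mach exception port), keeping unusable
  candidates on a chain that's freed before returning.
*/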
486TCR *
487allocate_tcr()
488{
489  TCR *tcr, *chain = NULL, *next;
490#ifdef DARWIN
491  extern Boolean use_mach_exception_handling;
492  kern_return_t kret;
493  mach_port_t
494    thread_exception_port,
495    task_self = mach_task_self();
496#endif
497  for (;;) {
498    tcr = calloc(1, sizeof(TCR));
499#ifdef DARWIN
500#if WORD_SIZE == 64
501    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
502      tcr->next = chain;
503      chain = tcr;
504      continue;
505    }
506#endif
507    if (use_mach_exception_handling) {
508      thread_exception_port = (mach_port_t)((natural)tcr);
509      kret = mach_port_allocate_name(task_self,
510                                     MACH_PORT_RIGHT_RECEIVE,
511                                     thread_exception_port);
512    } else {
513      kret = KERN_SUCCESS;
514    }
515
516    if (kret != KERN_SUCCESS) {
517      tcr->next = chain;
518      chain = tcr;
519      continue;
520    }
521#endif
522    for (; chain; chain = next) {
523      next = chain->next;
524      free(chain);
525    }
526    return tcr;
527  }
528}
529
530#ifdef X8664
531#ifdef LINUX
532#include <asm/prctl.h>
533#include <sys/prctl.h>
534#endif
535#ifdef FREEBSD
536#include <machine/sysarch.h>
537#endif
538
539void
540setup_tcr_extra_segment(TCR *tcr)
541{
542#ifdef FREEBSD
543  amd64_set_gsbase(tcr);
544#endif
545#ifdef LINUX
546  arch_prctl(ARCH_SET_GS, (natural)tcr);
547#endif
548#ifdef DARWIN
549  /* There's no way to do this yet.  See DARWIN_GS_HACK */
550  /* darwin_set_x8664_fs_reg(tcr); */
551#endif
552}
553
554#endif
555
556
557
558/*
559  Caller must hold the area_lock.
560*/
561TCR *
562new_tcr(natural vstack_size, natural tstack_size)
563{
564  extern area
565    *allocate_vstack_holding_area_lock(unsigned),
566    *allocate_tstack_holding_area_lock(unsigned);
567  area *a;
568  int i;
569  sigset_t sigmask;
570
571  sigemptyset(&sigmask);
572  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
573#ifdef HAVE_TLS
574  TCR *tcr = &current_tcr;
575#ifdef X8664
576  setup_tcr_extra_segment(tcr);
577#endif
578#else
579  TCR *tcr = allocate_tcr();
580#endif
581
582#ifdef X8664
583  tcr->linear = tcr;
584#endif
585
586#if (WORD_SIZE == 64)
587  tcr->single_float_convert.tag = subtag_single_float;
588#endif
589  lisp_global(TCR_COUNT) += (1<<fixnumshift);
590  tcr->suspend = new_semaphore(0);
591  tcr->resume = new_semaphore(0);
592  tcr->reset_completion = new_semaphore(0);
593  tcr->activate = new_semaphore(0);
594  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
595  a = allocate_vstack_holding_area_lock(vstack_size);
596  tcr->vs_area = a;
597  a->owner = tcr;
598  tcr->save_vsp = (LispObj *) a->active; 
599  a = allocate_tstack_holding_area_lock(tstack_size);
600  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
601  tcr->ts_area = a;
602  a->owner = tcr;
603  tcr->save_tsp = (LispObj *) a->active;
604#ifdef X86
605  tcr->next_tsp = tcr->save_tsp;
606#endif
607
608  tcr->valence = TCR_STATE_FOREIGN;
609#ifdef PPC
610  tcr->lisp_fpscr.words.l = 0xd0;
611#endif
612#ifdef X86
613  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
614#if 1                           /* Mask underflow; too hard to
615                                   deal with denorms if underflow is
616                                   enabled */
617    (1 << MXCSR_UM_BIT) | 
618#endif
619    (1 << MXCSR_PM_BIT);
620#endif
621  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
622  tcr->tlb_limit = 2048<<fixnumshift;
623  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
624  for (i = 0; i < 2048; i++) {
625    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
626  }
627  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
628  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
629  return tcr;
630}
631
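/*
  Called (as the destructor of the TCR_KEY thread-specific-data key,
  or directly) when a thread is shutting down.  Once the shutdown
  count reaches zero: for foreign threads, notify lisp via the
  FOREIGN-THREAD-CONTROL callback; then condemn the thread's stack
  areas, destroy its semaphores and thread-local binding vector, and
  raise its termination semaphore, if it has one.
*/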
632void
633shutdown_thread_tcr(void *arg)
634{
635  TCR *tcr = TCR_FROM_TSD(arg);
636
637  area *vs, *ts, *cs;
638  void *termination_semaphore;
639 
640  if (--(tcr->shutdown_count) == 0) {
641    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
642      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
643        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
644   
645      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
646      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
647      tsd_set(lisp_global(TCR_KEY), NULL);
648    }
649#ifdef DARWIN
650    darwin_exception_cleanup(tcr);
651#endif
652    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
653    vs = tcr->vs_area;
654    tcr->vs_area = NULL;
655    ts = tcr->ts_area;
656    tcr->ts_area = NULL;
657    cs = tcr->cs_area;
658    tcr->cs_area = NULL;
659    if (vs) {
660      condemn_area_holding_area_lock(vs);
661    }
662    if (ts) {
663      condemn_area_holding_area_lock(ts);
664    }
665    if (cs) {
666      condemn_area_holding_area_lock(cs);
667    }
668    destroy_semaphore(&tcr->suspend);
669    destroy_semaphore(&tcr->resume);
670    destroy_semaphore(&tcr->reset_completion);
671    destroy_semaphore(&tcr->activate);
672    free(tcr->tlb_pointer);
673    tcr->tlb_pointer = NULL;
674    tcr->tlb_limit = 0;
675    tcr->osid = 0;
676    tcr->interrupt_pending = 0;
677    termination_semaphore = tcr->termination_semaphore;
678    tcr->termination_semaphore = NULL;
679#ifdef HAVE_TLS
680    dequeue_tcr(tcr);
681#endif
682    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
683    if (termination_semaphore) {
684      SEM_RAISE(termination_semaphore);
685    }
686  } else {
687    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
688  }
689}
690
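/*
  pthread cleanup handler for lisp threads (run when the thread exits
  or is canceled): empty the thread's stack areas, mark the TCR as
  being in foreign code, and force shutdown_thread_tcr() to run
  immediately.
*/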
691void
692tcr_cleanup(void *arg)
693{
694  TCR *tcr = (TCR *)arg;
695  area *a;
696
697  a = tcr->vs_area;
698  if (a) {
699    a->active = a->high;
700  }
701  a = tcr->ts_area;
702  if (a) {
703    a->active = a->high;
704  }
705  a = tcr->cs_area;
706  if (a) {
707    a->active = a->high;
708  }
709  tcr->valence = TCR_STATE_FOREIGN;
710  tcr->shutdown_count = 1;
711  shutdown_thread_tcr(tcr);
712  tsd_set(lisp_global(TCR_KEY), NULL);
713}
714
715void *
716current_native_thread_id()
717{
718  return ((void *) (natural)
719#ifdef LINUX
720          getpid()
721#endif
722#ifdef DARWIN
723          mach_thread_self()
724#endif
725#ifdef FREEBSD
726          pthread_self()
727#endif
728#ifdef SOLARIS
729          pthread_self()
730#endif
731          );
732}
733
734
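/*
  Initialize the parts of TCR that depend on the calling thread:
  record its OS thread ids, register its C stack as an area (under
  TCR_AREA_LOCK), point errno_loc at the thread's errno, install the
  TCR in thread-specific data, and set up exception handling.
*/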
735void
736thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
737{
738  area *a, *register_cstack_holding_area_lock(BytePtr, natural);
739
740  tcr->osid = current_thread_osid();
741  tcr->native_thread_id = current_native_thread_id();
742  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
743  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
744  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
745  tcr->cs_area = a;
746  a->owner = tcr;
747  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
748    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
749  }
750#ifdef LINUX
751#ifdef PPC
752#ifndef PPC64
753  tcr->native_thread_info = current_r2;
754#endif
755#endif
756#endif
757  tcr->errno_loc = &errno;
758  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
759#ifdef DARWIN
760  extern Boolean use_mach_exception_handling;
761  if (use_mach_exception_handling) {
762    darwin_exception_init(tcr);
763  }
764#endif
765#ifdef LINUX
766  linux_exception_init(tcr);
767#endif
768  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
769}
770
771/*
772  Register the specified tcr as "belonging to" the current thread.
773  Under Darwin, setup Mach exception handling for the thread.
774  Install cleanup handlers for thread termination.
775*/
776void
777register_thread_tcr(TCR *tcr)
778{
779  void *stack_base = NULL;
780  natural stack_size = 0;
781
782  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
783  thread_init_tcr(tcr, stack_base, stack_size);
784  enqueue_tcr(tcr);
785}
786
787
788 
789 
790#ifndef MAP_GROWSDOWN
791#define MAP_GROWSDOWN 0
792#endif
793
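/*
  mmap() a stack of the given size (rounded up to a whole number of
  pages).  The size is stored in the first word of the mapping so that
  free_stack() can unmap it later; report an allocation failure if the
  mapping can't be created.
*/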
794Ptr
795create_stack(int size)
796{
797  Ptr p;
798  size=align_to_power_of_2(size, log2_page_size);
799  p = (Ptr) mmap(NULL,
800                     (size_t)size,
801                     PROT_READ | PROT_WRITE | PROT_EXEC,
802                     MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
803                     -1,        /* Darwin insists on this when not mmap()ing
804                                 a real fd */
805                     0);
806  if (p != (Ptr)(-1)) {
807    *((size_t *)p) = size;
808    return p;
809  }
810  allocation_failure(true, size);
811
812}
813 
814void *
815allocate_stack(unsigned size)
816{
817  return create_stack(size);
818}
819
820void
821free_stack(void *s)
822{
823  size_t size = *((size_t *)s);
824  munmap(s, size);
825}
826
827Boolean threads_initialized = false;
828
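/*
  If the machine has more than one CPU, let spin locks spin longer
  (1024 tries) before yielding.
*/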
829void
830count_cpus()
831{
832#ifdef DARWIN
833  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
834#include <mach/host_info.h>
835
836  struct host_basic_info info;
837  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
838 
839  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
840    if (info.max_cpus > 1) {
841      spin_lock_tries = 1024;
842    }
843  }
844#else
845  int n = sysconf(_SC_NPROCESSORS_ONLN);
846 
847  if (n > 1) {
848    spin_lock_tries = 1024;
849  }
850#endif
851}
852
853void
854init_threads(void * stack_base, TCR *tcr)
855{
856  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
857  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
858  thread_signal_setup();
859  count_cpus();
860  threads_initialized = true;
861}
862
863
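/*
  Start routine for lisp threads created via create_system_thread():
  build and register a TCR, push NIL on the value stack, signal the
  creating thread (via activation->created), and then repeatedly wait
  on the "activate" semaphore and run lisp code for as long as the
  thread is flagged as awaiting a preset.
*/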
864void *
865lisp_thread_entry(void *param)
866{
867  thread_activation *activation = (thread_activation *)param;
868  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
869  sigset_t mask, old_mask;
870
871  sigemptyset(&mask);
872  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
873
874  register_thread_tcr(tcr);
875
876  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
877  tcr->vs_area->active -= node_size;
878  *(--tcr->save_vsp) = lisp_nil;
879  enable_fp_exceptions();
880  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
881  activation->tcr = tcr;
882  SEM_RAISE(activation->created);
883  do {
884    SEM_RAISE(tcr->reset_completion);
885    SEM_WAIT_FOREVER(tcr->activate);
886    /* Now go run some lisp code */
887    start_lisp(TCR_TO_TSD(tcr),0);
888  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
889  pthread_cleanup_pop(true);
890  return NULL;
891}
892
893
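/*
  Create a new lisp thread with the given stack sizes, wait until it
  has entered lisp_thread_entry() and built its TCR, and return that
  TCR's thread-specific-data handle.
*/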
894void *
895xNewThread(natural control_stack_size,
896           natural value_stack_size,
897           natural temp_stack_size)
898
899{
900  thread_activation activation;
901  TCR *current = get_tcr(false);
902
903
904  activation.tsize = temp_stack_size;
905  activation.vsize = value_stack_size;
906  activation.tcr = 0;
907  activation.created = new_semaphore(0);
908  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
909                           NULL, 
910                           lisp_thread_entry,
911                           (void *) &activation)) {
912   
913    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
914  }
915  destroy_semaphore(&activation.created); 
916  return TCR_TO_TSD(activation.tcr);
917}
918
919Boolean
920active_tcr_p(TCR *q)
921{
922  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
923 
924  do {
925    if (p == q) {
926      return true;
927    }
928    p = p->next;
929  } while (p != head);
930  return false;
931}
932
933
934OSErr
935xDisposeThread(TCR *tcr)
936{
937  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
938    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
939      pthread_cancel((pthread_t)(tcr->osid));
940      return 0;
941    }
942  }
943  return -50;
944}
945
946OSErr
947xYieldToThread(TCR *target)
948{
949  Bug(NULL, "xYieldToThread ?");
950  return 0;
951}
952 
953OSErr
954xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
955{
956  Bug(NULL, "xThreadCurrentStackSpace ?");
957  return 0;
958}
959
960
961
962LispObj
963create_system_thread(size_t stack_size,
964                     void* stackaddr,
965                     void* (*start_routine)(void *),
966                     void* param)
967{
968  pthread_attr_t attr;
969  pthread_t returned_thread = (pthread_t) 0;
970
971  pthread_attr_init(&attr);
972  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
973
974  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
975    stack_size = PTHREAD_STACK_MIN;
976  }
977
978  stack_size = ensure_stack_limit(stack_size);
979  if (stackaddr != NULL) {
980    /* Size must have been specified.  Sort of makes sense ... */
981#ifdef DARWIN
982    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
983#else
984    pthread_attr_setstack(&attr, stackaddr, stack_size);
985#endif
986  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
987    pthread_attr_setstacksize(&attr,stack_size);
988  }
989
990  /*
991     I think that's just about enough ... create the thread.
992  */
993  pthread_create(&returned_thread, &attr, start_routine, param);
994  return (LispObj) ptr_to_lispobj(returned_thread);
995}
996
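/*
  Return the calling thread's TCR.  If CREATE is true and the thread
  doesn't have one (a foreign thread calling into lisp), make and
  register one, reserve value-stack slots for saved registers and
  thread-local bindings, and inform lisp via the
  FOREIGN-THREAD-CONTROL callback.
*/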
997TCR *
998get_tcr(Boolean create)
999{
1000#ifdef HAVE_TLS
1001  TCR *current = current_tcr.linear;
1002#else
1003  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
1004  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
1005#endif
1006
1007  if ((current == NULL) && create) {
1008    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
1009      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1010    int i, nbindwords = 0;
1011    extern unsigned initial_stack_size;
1012   
1013    /* Make one. */
1014    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
1015    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
1016    register_thread_tcr(current);
1017#ifdef DEBUG_TCR_CREATION
1018    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
1019#endif
1020    current->vs_area->active -= node_size;
1021    *(--current->save_vsp) = lisp_nil;
1022#ifdef PPC
1023#define NSAVEREGS 8
1024#endif
1025#ifdef X8664
1026#define NSAVEREGS 4
1027#endif
1028    for (i = 0; i < NSAVEREGS; i++) {
1029      *(--current->save_vsp) = 0;
1030      current->vs_area->active -= node_size;
1031    }
1032    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
1033    for (i = 0; i < nbindwords; i++) {
1034      *(--current->save_vsp) = 0;
1035      current->vs_area->active -= node_size;
1036    }
1037    current->shutdown_count = 1;
1038    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
1039
1040  }
1041 
1042  return current;
1043}
1044
1045
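/*
  Ask the thread that owns TCR to suspend itself by sending it
  thread_suspend_signal.  Only the transition of the suspend count
  from 0 to 1 actually signals the thread; returns true if a
  suspension was initiated.
*/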
1046Boolean
1047suspend_tcr(TCR *tcr)
1048{
1049  int suspend_count = atomic_incf(&(tcr->suspend_count));
1050  if (suspend_count == 1) {
1051#if SUSPEND_RESUME_VERBOSE
1052    fprintf(stderr,"Suspending 0x%x\n", tcr);
1053#endif
1054#ifdef DARWIN_nope
1055    if (mach_suspend_tcr(tcr)) {
1056      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
1057      return true;
1058    }
1059#endif
1060    if (pthread_kill((pthread_t)(tcr->osid), thread_suspend_signal) == 0) {
1061      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1062    } else {
1063      /* A problem using pthread_kill.  On Darwin, this can happen
1064         if the thread has had its signal mask surgically removed
1065         by pthread_exit.  If the native (Mach) thread can be suspended,
1066         do that and return true; otherwise, flag the tcr as belonging
1067         to a dead thread by setting tcr->osid to 0.
1068      */
1069      tcr->osid = 0;
1070      return false;
1071    }
1072    return true;
1073  }
1074  return false;
1075}
1076
1077Boolean
1078tcr_suspend_ack(TCR *tcr)
1079{
1080  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
1081    SEM_WAIT_FOREVER(tcr->suspend);
1082    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1083#if SUSPEND_RESUME_VERBOSE
1084    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
1085#endif
1086
1087  }
1088  return true;
1089}
1090
1091     
1092
1093
1094Boolean
1095lisp_suspend_tcr(TCR *tcr)
1096{
1097  Boolean suspended;
1098  TCR *current = get_tcr(true);
1099 
1100  LOCK(lisp_global(TCR_AREA_LOCK),current);
1101#ifdef DARWIN
1102#if USE_MACH_EXCEPTION_LOCK
1103  if (use_mach_exception_handling) {
1104    pthread_mutex_lock(mach_exception_lock);
1105  }
1106#endif
1107#endif
1108  suspended = suspend_tcr(tcr);
1109  if (suspended) {
1110    while (!tcr_suspend_ack(tcr));
1111  }
1112#ifdef DARWIN
1113#if USE_MACH_EXCEPTION_LOCK
1114  if (use_mach_exception_handling) {
1115    pthread_mutex_unlock(mach_exception_lock);
1116  }
1117#endif
1118#endif
1119  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1120  return suspended;
1121}
1122         
1123
1124Boolean
1125resume_tcr(TCR *tcr)
1126{
1127  int suspend_count = atomic_decf(&(tcr->suspend_count)), err;
1128  if (suspend_count == 0) {
1129#ifdef DARWIN
1130    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1131#if SUSPEND_RESUME_VERBOSE
1132    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
1133#endif
1134      mach_resume_tcr(tcr);
1135      return true;
1136    }
1137#endif
1138#if RESUME_VIA_RESUME_SEMAPHORE
1139    SEM_RAISE(tcr->resume);
1140#else
1141    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
1142      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
1143    }
1144#endif
1145#if SUSPEND_RESUME_VERBOSE
1146    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
1147#endif
1148    return true;
1149  }
1150  return false;
1151}
1152
1153void
1154wait_for_resumption(TCR *tcr)
1155{
1156  if (tcr->suspend_count == 0) {
1157#ifdef DARWIN
1158    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1159      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
1160      return;
1161    }
1162#endif
1163#if WAIT_FOR_RESUME_ACK
1164#if SUSPEND_RESUME_VERBOSE
1165    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
1166#endif
1167    SEM_WAIT_FOREVER(tcr->suspend);
1168#endif
1169  }
1170}
1171   
1172
1173
1174Boolean
1175lisp_resume_tcr(TCR *tcr)
1176{
1177  Boolean resumed;
1178  TCR *current = get_tcr(true);
1179 
1180  LOCK(lisp_global(TCR_AREA_LOCK),current);
1181  resumed = resume_tcr(tcr);
1182  wait_for_resumption(tcr);
1183  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1184  return resumed;
1185}
1186
1187
1188TCR *freed_tcrs = NULL;
1189
1190void
1191enqueue_freed_tcr (TCR *tcr)
1192{
1193#ifndef HAVE_TLS
1194  tcr->next = freed_tcrs;
1195  freed_tcrs = tcr;
1196#endif
1197}
1198
1199/* It's not clear that we can safely condemn a dead tcr's areas, since
1200   we may not be able to call free() if a suspended thread owns a
1201   malloc lock. At least make the areas appear to be empty.
1202*/
1203   
1204
1205void
1206normalize_dead_tcr_areas(TCR *tcr)
1207{
1208  area *a;
1209
1210  a = tcr->vs_area;
1211  if (a) {
1212    a->active = a->high;
1213  }
1214
1215  a = tcr->ts_area;
1216  if (a) {
1217    a->active = a->high;
1218  }
1219
1220  a = tcr->cs_area;
1221  if (a) {
1222    a->active = a->high;
1223  }
1224}
1225   
1226void
1227free_freed_tcrs ()
1228{
1229  TCR *current, *next;
1230
1231  for (current = freed_tcrs; current; current = next) {
1232    next = current->next;
1233#ifndef HAVE_TLS
1234    free(current);
1235#endif
1236  }
1237  freed_tcrs = NULL;
1238}
1239
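/*
  Stop the world (e.g., before a GC): take TCR_AREA_LOCK (released
  later by resume_other_threads()), suspend every other thread, wait
  until each has acknowledged, then dequeue the TCRs of threads that
  turned out to be dead.
*/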
1240void
1241suspend_other_threads(Boolean for_gc)
1242{
1243  TCR *current = get_tcr(true), *other, *next;
1244  int dead_tcr_count = 0;
1245  Boolean all_acked;
1246
1247  LOCK(lisp_global(TCR_AREA_LOCK), current);
1248#ifdef DARWIN
1249#if USE_MACH_EXCEPTION_LOCK
1250  if (for_gc && use_mach_exception_handling) {
1251#if SUSPEND_RESUME_VERBOSE
1252    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
1253#endif
1254    pthread_mutex_lock(mach_exception_lock);
1255  }
1256#endif
1257#endif
1258  for (other = current->next; other != current; other = other->next) {
1259    if ((other->osid != 0)) {
1260      suspend_tcr(other);
1261      if (other->osid == 0) {
1262        dead_tcr_count++;
1263      }
1264    } else {
1265      dead_tcr_count++;
1266    }
1267  }
1268
1269  do {
1270    all_acked = true;
1271    for (other = current->next; other != current; other = other->next) {
1272      if ((other->osid != 0)) {
1273        if (!tcr_suspend_ack(other)) {
1274          all_acked = false;
1275        }
1276      }
1277    }
1278  } while(! all_acked);
1279
1280     
1281
1282  /* All other threads are suspended; can safely delete dead tcrs now */
1283  if (dead_tcr_count) {
1284    for (other = current->next; other != current; other = next) {
1285      next = other->next;
1286      if ((other->osid == 0))  {
1287        normalize_dead_tcr_areas(other);
1288        dequeue_tcr(other);
1289        enqueue_freed_tcr(other);
1290      }
1291    }
1292  }
1293}
1294
1295void
1296lisp_suspend_other_threads()
1297{
1298  suspend_other_threads(false);
1299}
1300
1301void
1302resume_other_threads(Boolean for_gc)
1303{
1304  TCR *current = get_tcr(true), *other;
1305  for (other = current->next; other != current; other = other->next) {
1306    if ((other->osid != 0)) {
1307      resume_tcr(other);
1308    }
1309  }
1310  for (other = current->next; other != current; other = other->next) {
1311    if ((other->osid != 0)) {
1312      wait_for_resumption(other);
1313    }
1314  }
1315  free_freed_tcrs();
1316#ifdef DARWIN
1317#if USE_MACH_EXCEPTION_LOCK
1318  if (for_gc && use_mach_exception_handling) {
1319#if SUSPEND_RESUME_VERBOSE
1320    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
1321#endif
1322    pthread_mutex_unlock(mach_exception_lock);
1323  }
1324#endif
1325#endif
1326
1327  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1328}
1329
1330void
1331lisp_resume_other_threads()
1332{
1333  resume_other_threads(false);
1334}
1335
1336
1337/*
1338  Try to take an rwquentry off of the rwlock's freelist; failing that,
1339  malloc one.  The caller owns the lock on the rwlock itself, of course.
1340
1341*/
1342rwquentry *
1343recover_rwquentry(rwlock *rw)
1344{
1345  rwquentry *freelist = &(rw->freelist), 
1346    *p = freelist->next, 
1347    *follow = p->next;
1348
1349  if (p == freelist) {
1350    p = NULL;
1351  } else {
1352    follow->prev = freelist;
1353    freelist->next = follow;
1354    p->prev = p->next = NULL;
1355    p->tcr = NULL;
1356    p->count = 0;
1357  }
1358  return p;
1359}
1360
1361rwquentry *
1362new_rwquentry(rwlock *rw)
1363{
1364  rwquentry *p = recover_rwquentry(rw);
1365
1366  if (p == NULL) {
1367    p = calloc(1, sizeof(rwquentry));
1368  }
1369  return p;
1370}
1371
1372
1373void
1374free_rwquentry(rwquentry *p, rwlock *rw)
1375{
1376  rwquentry
1377    *prev = p->prev, 
1378    *next = p->next, 
1379    *freelist = &(rw->freelist),
1380    *follow = freelist->next;
1381 
1382  prev->next = next;
1383  next->prev = prev;
1384  p->prev = freelist;
1385  freelist->next = p;
1386  follow->prev = p;
1387  p->next = follow;
1388  p->prev = freelist;
1389}
1390 
1391void
1392add_rwquentry(rwquentry *p, rwlock *rw)
1393{
1394  rwquentry
1395    *head = &(rw->head),
1396    *follow = head->next;
1397 
1398  head->next = p;
1399  follow->prev = p;
1400  p->next = follow;
1401  p->prev = head;
1402}
1403
1404rwquentry *
1405find_enqueued_tcr(TCR *target, rwlock *rw)
1406{
1407  rwquentry
1408    *head = &(rw->head),
1409    *p = head->next;
1410
1411  do {
1412    if (p->tcr == target) {
1413      return p;
1414    }
1415    p = p->next;
1416  } while (p != head);
1417  return NULL;
1418}
1419   
1420rwlock *
1421rwlock_new()
1422{
1423  rwlock *rw = calloc(1, sizeof(rwlock));
1424 
1425  if (rw) {
1426    pthread_mutex_t *lock = calloc(1, sizeof(pthread_mutex_t));
1427    if (lock == NULL) {
1428      free (rw);
1429      rw = NULL;
1430    } else {
1431      pthread_cond_t *reader_signal = calloc(1, sizeof(pthread_cond_t));
1432      pthread_cond_t *writer_signal = calloc(1, sizeof(pthread_cond_t));
1433      if ((reader_signal == NULL) || (writer_signal == NULL)) {
1434        if (reader_signal) {
1435          free(reader_signal);
1436        } else {
1437          free(writer_signal);
1438        }
1439       
1440        free(lock);
1441        free(rw);
1442        rw = NULL;
1443      } else {
1444        pthread_mutex_init(lock, NULL);
1445        pthread_cond_init(reader_signal, NULL);
1446        pthread_cond_init(writer_signal, NULL);
1447        rw->lock = lock;
1448        rw->reader_signal = reader_signal;
1449        rw->writer_signal = writer_signal;
1450        rw->head.prev = rw->head.next = &(rw->head);
1451        rw->freelist.prev = rw->freelist.next = &(rw->freelist);
1452      }
1453    }
1454  }
1455  return rw;
1456}
1457
1458/*
1459  no thread should be waiting on the lock, and the caller has just
1460  unlocked it.
1461*/
1462static void
1463rwlock_delete(rwlock *rw)
1464{
1465  pthread_mutex_t *lock = rw->lock;
1466  pthread_cond_t *cond;
1467  rwquentry *entry;
1468
1469  rw->lock = NULL;
1470  cond = rw->reader_signal;
1471  rw->reader_signal = NULL;
1472  pthread_cond_destroy(cond);
1473  free(cond);
1474  cond = rw->writer_signal;
1475  rw->writer_signal = NULL;
1476  pthread_cond_destroy(cond);
1477  free(cond);
1478  while ((entry = recover_rwquentry(rw)) != NULL) {
1479    free(entry);
1480  }
1481  free(rw);
1482  pthread_mutex_unlock(lock);
1483  free(lock);
1484}
1485
1486void
1487rwlock_rlock_cleanup(void *arg)
1488{
1489  pthread_mutex_unlock((pthread_mutex_t *)arg);
1490}
1491     
1492/*
1493  Try to get read access to a multiple-readers/single-writer lock.  If
1494  we already have read access, return success (indicating that the
1495  lock is held another time).  If we already have write access to the
1496  lock ... that won't work; return EDEADLK.  Wait until no other
1497  thread has or is waiting for write access, then indicate that we
1498  hold read access once.
1499*/
1500int
1501rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1502{
1503  pthread_mutex_t *lock = rw->lock;
1504  rwquentry *entry;
1505  int err = 0;
1506
1507
1508  pthread_mutex_lock(lock);
1509
1510  if (RWLOCK_WRITER(rw) == tcr) {
1511    pthread_mutex_unlock(lock);
1512    return EDEADLK;
1513  }
1514
1515  if (rw->state > 0) {
1516    /* already some readers, we may be one of them */
1517    entry = find_enqueued_tcr(tcr, rw);
1518    if (entry) {
1519      entry->count++;
1520      rw->state++;
1521      pthread_mutex_unlock(lock);
1522      return 0;
1523    }
1524  }
1525  entry = new_rwquentry(rw);
1526  entry->tcr = tcr;
1527  entry->count = 1;
1528
1529  pthread_cleanup_push(rwlock_rlock_cleanup,lock);
1530
1531  /* Wait for current and pending writers */
1532  while ((err == 0) && ((rw->state < 0) || (rw->write_wait_count > 0))) {
1533    if (waitfor) {
1534      /* pthread_cond_timedwait() returns an error code directly;
1535         it doesn't set errno */
1536      err = pthread_cond_timedwait(rw->reader_signal, lock, waitfor);
1537    } else {
1538      pthread_cond_wait(rw->reader_signal, lock);
1539    }
1540  }
1541 
1542  if (err == 0) {
1543    add_rwquentry(entry, rw);
1544    rw->state++;
1545  }
1546
1547  pthread_cleanup_pop(1);
1548  return err;
1549}
1550
1551
1552/*
1553   This is here to support cancelation.  Cancelation is evil.
1554*/
1555
1556void
1557rwlock_wlock_cleanup(void *arg)
1558{
1559  rwlock *rw = (rwlock *)arg;
1560
1561  /* If this thread was the only queued writer and the lock
1562     is now available for reading, tell any threads that're
1563     waiting for read access.
1564     This thread owns the lock on the rwlock itself.
1565  */
1566  if ((--(rw->write_wait_count) == 0) &&
1567      (rw->state >= 0)) {
1568    pthread_cond_broadcast(rw->reader_signal);
1569  }
1570 
1571  pthread_mutex_unlock(rw->lock);
1572}
1573
1574/*
1575  Try to obtain write access to the lock.
1576  If we already have read access, fail with EDEADLK.
1577  If we already have write access, increment the count that indicates
1578  that.
1579  Otherwise, wait until the lock is not held for reading or writing,
1580  then assert write access.
1581*/
1582
1583int
1584rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1585{
1586  pthread_mutex_t *lock = rw->lock;
1587  rwquentry *entry;
1588  int err = 0;
1589
1590
1591  pthread_mutex_lock(lock);
1592  if (RWLOCK_WRITER(rw) == tcr) {
1593    --RWLOCK_WRITE_COUNT(rw);
1594    --rw->state;
1595    pthread_mutex_unlock(lock);
1596    return 0;
1597  }
1598 
1599  if (rw->state > 0) {
1600    /* already some readers, we may be one of them */
1601    entry = find_enqueued_tcr(tcr, rw);
1602    if (entry) {
1603      pthread_mutex_unlock(lock);
1604      return EDEADLK;
1605    }
1606  }
1607  rw->write_wait_count++;
1608  pthread_cleanup_push(rwlock_wlock_cleanup,rw);
1609
1610  while ((err == 0) && (rw->state) != 0) {
1611    if (waitfor) {
1612      /* pthread_cond_timedwait() returns an error code directly;
1613         it doesn't set errno */
1614      err = pthread_cond_timedwait(rw->writer_signal, lock, waitfor);
1615    } else {
1616      pthread_cond_wait(rw->writer_signal, lock);
1617    }
1618  }
1619  if (err == 0) {
1620    RWLOCK_WRITER(rw) = tcr;
1621    RWLOCK_WRITE_COUNT(rw) = -1;
1622    rw->state = -1;
1623  }
1624  pthread_cleanup_pop(1);
1625  return err;
1626}
1627
1628/*
1629  Sort of the same as above, only return EBUSY if we'd have to wait.
1630  In particular, distinguish between the cases of "some other readers"
1631  (EBUSY), "another writer/queued writer(s)" (EWOULDBLOCK), and "we hold a
1632  read lock" (EDEADLK).
1633*/
1634int
1635rwlock_try_wlock(rwlock *rw, TCR *tcr)
1636{
1637  pthread_mutex_t *lock = rw->lock;
1638  rwquentry *entry;
1639  int ret = EBUSY;
1640
1641  pthread_mutex_lock(lock);
1642  if ((RWLOCK_WRITER(rw) == tcr) ||
1643      ((rw->state == 0) && (rw->write_wait_count == 0))) {
1644    RWLOCK_WRITER(rw) = tcr;
1645    --RWLOCK_WRITE_COUNT(rw);
1646    --rw->state;
1647    pthread_mutex_unlock(lock);
1648    return 0;
1649  }
1650 
1651  if (rw->state > 0) {
1652    /* already some readers, we may be one of them */
1653    entry = find_enqueued_tcr(tcr, rw);
1654    if (entry) {
1655      ret = EDEADLK;
1656    }
1657  } else {
1658    /* another writer or queued writers */
1659    ret = EWOULDBLOCK;
1660  }
1661  pthread_mutex_unlock(rw->lock);
1662  return ret;
1663}
1664
1665/*
1666  "Upgrade" a lock held once or more for reading to one held the same
1667  number of times for writing.
1668  Upgraders have higher priority than writers do
1669*/
1670
1671int
1672rwlock_read_to_write(rwlock *rw, TCR *tcr)
1673{
1674}
1675
1676
1677int
1678rwlock_unlock(rwlock *rw, TCR *tcr)
1679{
1680  rwquentry *entry;
1681
1682  pthread_mutex_lock(rw->lock);
1683  if (rw->state < 0) {
1684    /* Locked for writing.  By us ? */
1685    if (RWLOCK_WRITER(rw) != tcr) {
1686      pthread_mutex_unlock(rw->lock);
1687      /* Can't unlock: locked for writing by another thread. */
1688      return EPERM;
1689    }
1690    if (++RWLOCK_WRITE_COUNT(rw) == 0) {
1691      rw->state = 0;
1692      RWLOCK_WRITER(rw) = NULL;
1693      if (rw->write_wait_count) {
1694        pthread_cond_signal(rw->writer_signal);
1695      } else {
1696        pthread_cond_broadcast(rw->reader_signal);
1697      }
1698    }
1699    pthread_mutex_unlock(rw->lock);
1700    return 0;
1701  }
1702  entry = find_enqueued_tcr(tcr, rw);
1703  if (entry == NULL) {
1704    /* Not locked for reading by us, so why are we unlocking it ? */
1705    pthread_mutex_unlock(rw->lock);
1706    return EPERM;
1707  }
1708  if (--entry->count == 0) {
1709    free_rwquentry(entry, rw);
1710  }
1711  if (--rw->state == 0) {
1712    pthread_cond_signal(rw->writer_signal);
1713  }
1714  pthread_mutex_unlock(rw->lock);
1715  return 0;
1716}
1717
1718       
1719int
1720rwlock_destroy(rwlock *rw)
1721{
1722  return 0;                     /* for now. */
1723}
1724
1725
1726