source: trunk/source/lisp-kernel/thread_manager.c @ 10464

Last change on this file since 10464 was 10464, checked in by gb, 12 years ago

Call pthread_attr_destroy() on pthread_attr objects when done with
them. (Even though the pthread_attr objects are stack-allocated,
they may contain malloc'ed data when initialized.)

In destroy_semaphore(), free the malloc'ed semaphore in the
USE_POSIX_SEMAPHORES case (after calling sem_destroy().)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.0 KB
Line 
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
20
/*
  Arguments passed from xNewThread() to lisp_thread_entry():
  requested value-stack and temp-stack sizes, a semaphore ('created')
  raised once the new thread has built its TCR, and the TCR itself,
  filled in by the new thread before raising 'created'.
*/
typedef struct {
  TCR *tcr;
  natural vsize, tsize;
  void *created;
} thread_activation;
26
#ifdef HAVE_TLS
/* With compiler-supported thread-local storage, each thread's TCR
   lives directly in this __thread variable. */
__thread TCR current_tcr;
#endif

/* Primitive atomic operations, implemented elsewhere (in the
   kernel's platform-specific code). */
extern natural
store_conditional(natural*, natural, natural);

extern signed_natural
atomic_swap(signed_natural*, signed_natural);

#ifdef USE_FUTEX
/* Thin wrappers around the Linux futex system call.  A futex word
   used as a lock here is in one of the three states below. */
#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
#define FUTEX_AVAIL (0)
#define FUTEX_LOCKED (1)
#define FUTEX_CONTENDED (2)
#endif
44
#ifdef WINDOWS
/* Windows port: not yet implemented. */
int
raise_thread_interrupt(TCR *target)
{
}
#else
/*
  Deliver the process-interrupt signal to the thread that owns
  'target'.  Returns 0 on success, an errno value from pthread_kill()
  on failure, or ESRCH if the target's thread id has been cleared
  (i.e., the thread is dead).
*/
int
raise_thread_interrupt(TCR *target)
{
  pthread_t thread = (pthread_t)target->osid;
#ifdef DARWIN_not_yet
  if (use_mach_exception_handling) {
    return mach_raise_thread_interrupt(target);
  }
#endif
  if (thread != (pthread_t) 0) {
    return pthread_kill(thread, SIGNAL_FOR_PROCESS_INTERRUPT);
  }
  return ESRCH;
}
#endif
66
67signed_natural
68atomic_incf_by(signed_natural *ptr, signed_natural by)
69{
70  signed_natural old, new;
71  do {
72    old = *ptr;
73    new = old+by;
74  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
75           (natural) old);
76  return new;
77}
78
/* Atomically increment *ptr by 1, returning the new value. */
signed_natural
atomic_incf(signed_natural *ptr)
{
  return atomic_incf_by(ptr, 1);
}
84
/*
  Atomically decrement *ptr, saturating at zero: if *ptr is already 0
  it's left unchanged.  NOTE: the return value is always old-1, so a
  caller sees -1 (not 0) when the count was already zero.
*/
signed_natural
atomic_decf(signed_natural *ptr)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old == 0 ? old : old-1;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return old-1;
}
96
97
#ifndef USE_FUTEX
/* Number of swap attempts between sched_yield() calls; raised to
   1024 by count_cpus() on multiprocessor machines. */
int spin_lock_tries = 1;

/*
  Acquire the spinlock at *p (0 = free) by swapping in a nonzero
  value (the caller's TCR), spinning 'spin_lock_tries' times between
  yields.  Released by storing 0 (RELEASE_SPINLOCK).
*/
void
get_spin_lock(signed_natural *p, TCR *tcr)
{
  int i, n = spin_lock_tries;
 
  while (1) {
    for (i = 0; i < n; i++) {
      if (atomic_swap(p,(signed_natural)tcr) == 0) {
        return;
      }
    }
#ifndef WINDOWS
    sched_yield();
#endif
  }
}
#endif
118
#ifndef USE_FUTEX
/*
  Acquire the recursive lock 'm' for 'tcr' (looked up if NULL),
  blocking until available; if 'tcr' already owns the lock, just bump
  the recursion count.  'm->avail' is incremented (under the
  spinlock) on each acquisition attempt; the thread that raises it
  from 0 to 1 becomes the owner, everyone else sleeps on m->signal
  and retries when the owner releases.  Always returns 0.
*/
int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;
    return 0;
  }
  while (1) {
    LOCK_SPINLOCK(m->spinlock,tcr);
    ++m->avail;
    if (m->avail == 1) {
      m->owner = tcr;
      m->count = 1;
      RELEASE_SPINLOCK(m->spinlock);
      break;
    }
    RELEASE_SPINLOCK(m->spinlock);
    SEM_WAIT_FOREVER(m->signal);
  }
  return 0;
}
145
146#else /* USE_FUTEX */
147
/*
  Acquire a futex-based lock: try the uncontended AVAIL->LOCKED
  transition first; on failure, mark the word CONTENDED and sleep in
  the kernel until the holder releases it, retrying on wakeup.
*/
static void inline
lock_futex(signed_natural *p)
{
 
  while (1) {
    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
      return;
    }
    while (1) {
      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
        return;
      }
      futex_wait(p,FUTEX_CONTENDED);
    }
  }
}
164
/*
  Release a futex-based lock.  atomic_decf() takes LOCKED (1) down to
  AVAIL (0); if the result isn't AVAIL the word was CONTENDED, so
  force it to AVAIL and wake all kernel waiters.
*/
static void inline
unlock_futex(signed_natural *p)
{
  if (atomic_decf(p) != FUTEX_AVAIL) {
    *p = FUTEX_AVAIL;
    futex_wake(p,INT_MAX);
  }
}
173   
/*
  Futex flavor of lock_recursive_lock(): recursion is handled by the
  owner check; otherwise m->avail is used directly as a futex lock
  word.  Always returns 0.
*/
int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;
    return 0;
  }
  lock_futex(&m->avail);
  m->owner = tcr;
  m->count = 1;
  return 0;
}
189#endif /* USE_FUTEX */
190
191
#ifndef USE_FUTEX 
/*
  Release one recursion level of 'm'; must be called by the owner
  (looked up if 'tcr' is NULL), otherwise EPERM is returned.  When
  the outermost level is released, compute the number of outstanding
  acquisition attempts/waiters, reset m->avail, and raise m->signal
  so one waiter can retry.  Returns 0 on success.
*/
int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM, pending;

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      LOCK_SPINLOCK(m->spinlock,tcr);
      m->owner = NULL;
      pending = m->avail-1 + m->waiting;     /* Don't count us */
      m->avail = 0;
      /* NOTE(review): 'pending' is decremented a second time here,
         then tested >= 0 below; this interacts with the per-retry
         ++m->avail in lock_recursive_lock() — verify the accounting
         before changing either side. */
      --pending;
      if (pending > 0) {
        m->waiting = pending;
      } else {
        m->waiting = 0;
      }
      RELEASE_SPINLOCK(m->spinlock);
      if (pending >= 0) {
        SEM_RAISE(m->signal);
      }
    }
    ret = 0;
  }
  return ret;
}
224#else /* USE_FUTEX */
/*
  Futex flavor of unlock_recursive_lock(): returns EPERM unless
  'tcr' (looked up if NULL) owns the lock; releases the futex word
  when the outermost recursion level exits.  Returns 0 on success.
*/
int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM;

   if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      m->owner = NULL;
      unlock_futex(&m->avail);
    }
    ret = 0;
  }
  return ret;
}
244#endif /* USE_FUTEX */
245
/*
  Free a recursive lock's resources: its semaphore (when semaphores
  rather than futexes are used) and the malloc'ed block the
  cache-line-aligned lock structure lives in.
*/
void
destroy_recursive_lock(RECURSIVE_LOCK m)
{
#ifndef USE_FUTEX
  destroy_semaphore((void **)&m->signal);
#endif
  postGCfree((void *)(m->malloced_ptr));
}
254
255/*
256  If we're already the owner (or if the lock is free), lock it
257  and increment the lock count; otherwise, return EBUSY without
258  waiting.
259*/
260
261#ifndef USE_FUTEX
262int
263recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
264{
265  TCR *owner = m->owner;
266
267  LOCK_SPINLOCK(m->spinlock,tcr);
268  if (owner == tcr) {
269    m->count++;
270    if (was_free) {
271      *was_free = 0;
272      RELEASE_SPINLOCK(m->spinlock);
273      return 0;
274    }
275  }
276  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
277    m->owner = tcr;
278    m->count = 1;
279    if (was_free) {
280      *was_free = 1;
281    }
282    RELEASE_SPINLOCK(m->spinlock);
283    return 0;
284  }
285
286  RELEASE_SPINLOCK(m->spinlock);
287  return EBUSY;
288}
289#else
290int
291recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
292{
293  TCR *owner = m->owner;
294
295  if (owner == tcr) {
296    m->count++;
297    if (was_free) {
298      *was_free = 0;
299      return 0;
300    }
301  }
302  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
303    m->owner = tcr;
304    m->count = 1;
305    if (was_free) {
306      *was_free = 1;
307    }
308    return 0;
309  }
310
311  return EBUSY;
312}
313#endif
314
/*
  Wait on the semaphore until it's raised.  The wait is done in
  roughly one-second timed slices and retried on timeout or
  interruption.
*/
void
sem_wait_forever(SEMAPHORE s)
{
  int status;

  do {
#ifdef USE_MACH_SEMAPHORES
    mach_timespec_t q = {1,0};
    status = SEM_TIMEDWAIT(s,q);
#endif
#ifdef USE_POSIX_SEMAPHORES
    struct timespec q;
    /* NOTE(review): gettimeofday() leaves microseconds where the
       timespec expects nanoseconds (wait_on_semaphore() converts;
       this doesn't).  Harmless for a "about a second from now"
       deadline, but verify. */
    gettimeofday((struct timeval *)&q, NULL);
    q.tv_sec += 1;
    status = SEM_TIMEDWAIT(s,&q);
#endif
  } while (status != 0);
}
333
/*
  Wait on the semaphore with a timeout of 'seconds' + 'millis'
  (milliseconds).  Returns 0 on success; otherwise an errno-style
  code (ETIMEDOUT/EINTR/EINVAL on Mach; errno on POSIX failure).
*/
int
wait_on_semaphore(void *s, int seconds, int millis)
{
  int nanos = (millis % 1000) * 1000000;
#ifdef USE_POSIX_SEMAPHORES
  int status;

  struct timespec q;
  /* sem_timedwait() takes an absolute deadline: start from the
     current time (gettimeofday() fills tv_sec/tv_usec, which overlay
     the timespec's fields) ... */
  gettimeofday((struct timeval *)&q, NULL);
  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
   
  /* ... then add the requested interval, normalizing tv_nsec. */
  q.tv_nsec += nanos;
  if (q.tv_nsec >= 1000000000L) {
    q.tv_nsec -= 1000000000L;
    seconds += 1;
  }
  q.tv_sec += seconds;
  status = SEM_TIMEDWAIT(s, &q);
  if (status < 0) {
    return errno;
  }
  return status;
#endif
#ifdef USE_MACH_SEMAPHORES
  /* Mach takes a relative timeout and its own status codes, which
     are mapped to errno values here. */
  mach_timespec_t q = {seconds, nanos};
  int status = SEM_TIMEDWAIT(s, q);

 
  switch (status) {
  case 0: return 0;
  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
  case KERN_ABORTED: return EINTR;
  default: return EINVAL;
  }

#endif
}
371
372
373int
374semaphore_maybe_timedwait(void *s, struct timespec *t)
375{
376  if (t) {
377    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
378  }
379  SEM_WAIT_FOREVER(s);
380  return 0;
381}
382
/* Raise (post) the semaphore. */
void
signal_semaphore(SEMAPHORE s)
{
  SEM_RAISE(s);
}
388
389 
#ifdef WINDOWS
/* Windows port: not yet implemented. */
LispObj
current_thread_osid()
{
}
#else
/* Return the calling thread's OS-level id (its pthread_t) as a
   LispObj. */
LispObj
current_thread_osid()
{
  return (LispObj)ptr_to_lispobj(pthread_self());
}
#endif
402
403
404int thread_suspend_signal = 0, thread_resume_signal = 0;
405
406
407
/* Per-thread exception setup on Linux: currently nothing to do. */
void
linux_exception_init(TCR *tcr)
{
}
412
413
/*
  Return the current thread's TCR from an interrupt (signal) context;
  just delegates to get_tcr().
*/
TCR *
get_interrupt_tcr(Boolean create)
{
  return get_tcr(create);
}
419 
/*
  Signal handler for the thread-suspend signal.  If the thread has
  interrupts deferred deeply enough (level <= -2), just record that a
  suspend is pending; otherwise publish the interrupted context,
  acknowledge the suspension by raising tcr->suspend, and block on
  tcr->resume until resume_tcr() releases us.
*/
void
suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
{
#ifdef DARWIN_GS_HACK
  Boolean gs_was_tcr = ensure_gs_pthread();
#endif
  TCR *tcr = get_interrupt_tcr(false);

  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
  } else {
    /* Let the suspender see the context we were stopped in, then
       sleep until resumed. */
    tcr->suspend_context = context;
    SEM_RAISE(tcr->suspend);
    SEM_WAIT_FOREVER(tcr->resume);
    tcr->suspend_context = NULL;
  }
#ifdef DARWIN_GS_HACK
  if (gs_was_tcr) {
    set_gs_address(tcr);
  }
#endif
  SIGRETURN(context);
}
443
444 
445
446/*
447  'base' should be set to the bottom (origin) of the stack, e.g., the
448  end from which it grows.
449*/
450 
#ifdef WINDOWS
/* Windows port: not yet implemented. */
void
os_get_stack_bounds(LispObj q,void **base, natural *size)
{
}
#else
/*
  Report the stack origin (*base, the high end the stack grows down
  from) and size of the thread whose pthread_t is encoded in 'q',
  using whichever platform API is available.
*/
void
os_get_stack_bounds(LispObj q,void **base, natural *size)
{
  pthread_t p = (pthread_t)(q);
#ifdef DARWIN
  *base = pthread_get_stackaddr_np(p);
  *size = pthread_get_stacksize_np(p);
#endif
#ifdef LINUX
  pthread_attr_t attr;

  pthread_getattr_np(p,&attr);
  pthread_attr_getstack(&attr, base, size);
  pthread_attr_destroy(&attr);
  /* pthread_attr_getstack() returns the low end; convert to the
     origin by adding the size. */
  *(natural *)base += *size;
#endif
#ifdef FREEBSD
  pthread_attr_t attr;
  void * temp_base;
  size_t temp_size;
 

  pthread_attr_init(&attr); 
  pthread_attr_get_np(p, &attr);
  pthread_attr_getstackaddr(&attr,&temp_base);
  pthread_attr_getstacksize(&attr,&temp_size);
  *base = (void *)((natural)temp_base + temp_size);
  *size = temp_size;
  pthread_attr_destroy(&attr);
#endif
#ifdef SOLARIS
  stack_t st;
 
  thr_stksegment(&st);
  *size = st.ss_size;
  *base = st.ss_sp;
 
#endif
}
#endif
497
/*
  Create a counting semaphore with the given initial count.  POSIX:
  returns a malloc'ed sem_t (freed again by destroy_semaphore());
  Mach: returns the semaphore port itself, cast to a pointer.
  NOTE(review): malloc/sem_init/semaphore_create results are not
  checked here.
*/
void *
new_semaphore(int count)
{
#ifdef USE_POSIX_SEMAPHORES
  sem_t *s = malloc(sizeof(sem_t));
  sem_init(s, 0, count);
  return s;
#endif
#ifdef USE_MACH_SEMAPHORES
  semaphore_t s = (semaphore_t)0;
  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
  return (void *)(natural)s;
#endif
}
512
/*
  Allocate and initialize a recursive lock.  The lock structure is
  aligned to a cache-line boundary inside an over-allocated malloc
  block (kept in m->malloced_ptr for later freeing); the non-futex
  flavor also needs a wait semaphore.  Returns NULL (after undoing
  any partial allocation) on failure.
*/
RECURSIVE_LOCK
new_recursive_lock()
{
  extern int cache_block_size;
  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
  RECURSIVE_LOCK m = NULL;
#ifndef USE_FUTEX
  void *signal = new_semaphore(0);
#endif

  if (p) {
    /* Round up to the next cache-line boundary within the block. */
    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
    m->malloced_ptr = p;
  }

#ifdef USE_FUTEX
  if (m) {
    return m;
  }
#else
  if (m && signal) {
    m->signal = signal;
    return m;
  }
  if (m) {
    free(p);
  }
  if (signal) {
    destroy_semaphore(&signal);
  }
#endif
  return NULL;
}
546
/*
  Destroy the semaphore pointed at by *s (as created by
  new_semaphore()) and clear *s.  In the POSIX case the malloc'ed
  sem_t is also freed.  A NULL *s is a no-op.
*/
void
destroy_semaphore(void **s)
{
  if (*s) {
#ifdef USE_POSIX_SEMAPHORES
    sem_destroy((sem_t *)*s);
    free(*s);
#endif
#ifdef USE_MACH_SEMAPHORES
    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
#endif
    *s=NULL;
  }
}
561
#ifdef WINDOWS
/* Windows port: not yet implemented. */
void
tsd_set(LispObj key, void *datum)
{
}

void *
tsd_get(LispObj key)
{
}
#else
/* Store 'datum' in the thread-specific slot named by 'key' (a
   pthread_key_t). */
void
tsd_set(LispObj key, void *datum)
{
  pthread_setspecific((pthread_key_t)key, datum);
}

/* Fetch the calling thread's value for the slot named by 'key'. */
void *
tsd_get(LispObj key)
{
  return pthread_getspecific((pthread_key_t)key);
}
#endif
585
586void
587dequeue_tcr(TCR *tcr)
588{
589  TCR *next, *prev;
590
591  next = tcr->next;
592  prev = tcr->prev;
593
594  prev->next = next;
595  next->prev = prev;
596  tcr->prev = tcr->next = NULL;
597#ifdef X8664
598  tcr->linear = NULL;
599#endif
600}
601 
602void
603enqueue_tcr(TCR *new)
604{
605  TCR *head, *tail;
606 
607  LOCK(lisp_global(TCR_AREA_LOCK),new);
608  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
609  tail = head->prev;
610  tail->next = new;
611  head->prev = new;
612  new->prev = tail;
613  new->next = head;
614  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
615}
616
617TCR *
618allocate_tcr()
619{
620  TCR *tcr, *chain = NULL, *next;
621#ifdef DARWIN
622  extern Boolean use_mach_exception_handling;
623  kern_return_t kret;
624  mach_port_t
625    thread_exception_port,
626    task_self = mach_task_self();
627#endif
628  for (;;) {
629    tcr = calloc(1, sizeof(TCR));
630#ifdef DARWIN
631#if WORD_SIZE == 64
632    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
633      tcr->next = chain;
634      chain = tcr;
635      continue;
636    }
637#endif
638    if (use_mach_exception_handling) {
639      thread_exception_port = (mach_port_t)((natural)tcr);
640      kret = mach_port_allocate_name(task_self,
641                                     MACH_PORT_RIGHT_RECEIVE,
642                                     thread_exception_port);
643    } else {
644      kret = KERN_SUCCESS;
645    }
646
647    if (kret != KERN_SUCCESS) {
648      tcr->next = chain;
649      chain = tcr;
650      continue;
651    }
652#endif
653    for (next = chain; next;) {
654      next = next->next;
655      free(chain);
656    }
657    return tcr;
658  }
659}
660
661#ifdef X8664
662#ifdef LINUX
663#include <asm/prctl.h>
664#include <sys/prctl.h>
665#endif
666#ifdef FREEBSD
667#include <machine/sysarch.h>
668#endif
669
/*
  Arrange for the TCR to be reachable through a segment register
  (%gs) on x8664, using whatever mechanism the OS provides.
*/
void
setup_tcr_extra_segment(TCR *tcr)
{
#ifdef FREEBSD
  amd64_set_gsbase(tcr);
#endif
#ifdef LINUX
  arch_prctl(ARCH_SET_GS, (natural)tcr);
#endif
#ifdef DARWIN
  /* There's no way to do this yet.  See DARWIN_GS_HACK */
  /* darwin_set_x8664_fs_reg(tcr); */
#endif
#ifdef SOLARIS
  /* Chris Curtis found this and suggested the use of syscall here */
  syscall(SYS_lwp_private,_LWP_SETPRIVATE, _LWP_GSBASE, tcr);
#endif
}
688
689#endif
690
691#ifdef X8632
692#ifdef DARWIN
693#include <architecture/i386/table.h>
694#include <architecture/i386/sel.h>
695#include <i386/user_ldt.h>
696
/*
  x8632 Darwin: make the TCR addressable via %fs by building an LDT
  data-segment descriptor that covers exactly the TCR and installing
  it with i386_set_ldt(); the resulting selector is saved in
  tcr->ldt_selector for later use (and teardown).
*/
void setup_tcr_extra_segment(TCR *tcr)
{
    uintptr_t addr = (uintptr_t)tcr;
    unsigned int size = sizeof(*tcr);
    ldt_entry_t desc;
    sel_t sel;
    int i;

    /* Describe a byte-granular, writable, present, user-privilege
       segment whose base is the TCR and whose limit is its size. */
    desc.data.limit00 = (size - 1) & 0xffff;
    desc.data.limit16 = ((size - 1) >> 16) & 0xf;
    desc.data.base00 = addr & 0xffff;
    desc.data.base16 = (addr >> 16) & 0xff;
    desc.data.base24 = (addr >> 24) & 0xff;
    desc.data.type = DESC_DATA_WRITE;
    desc.data.dpl = USER_PRIV;
    desc.data.present = 1;
    desc.data.stksz = DESC_CODE_32B;
    desc.data.granular = DESC_GRAN_BYTE;
   
    i = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);

    if (i < 0) {
        perror("i386_set_ldt");
    } else {
        sel.index = i;
        sel.rpl = USER_PRIV;
        sel.ti = SEL_LDT;
        tcr->ldt_selector = sel;
    }
}
727
/*
  Undo setup_tcr_extra_segment(): detach %fs from the TCR's LDT
  entry, release the entry, and clear the saved selector.
*/
void free_tcr_extra_segment(TCR *tcr)
{
  /* load %fs with null segment selector */
  __asm__ volatile ("mov %0,%%fs" : : "r"(0));
  if (i386_set_ldt(tcr->ldt_selector.index, NULL, 1) < 0)
    perror("i386_set_ldt");
  tcr->ldt_selector = NULL_SEL;
}
736#endif
737#endif
738
739/*
740  Caller must hold the area_lock.
741*/
#ifdef WINDOWS
/* Windows port: not yet implemented. */
TCR *
new_tcr(natural vstack_size, natural tstack_size)
{
}
#else
/*
  Create and initialize a TCR for a new lisp thread: clear the signal
  mask, obtain TCR storage (TLS slot or heap), set up segment-register
  access on x86, allocate the control semaphores, the value and temp
  stack areas (under the TCR-area lock), the thread-local-binding
  table, and the initial FP/interrupt state.  Caller must hold the
  area lock per the comment above.
*/
TCR *
new_tcr(natural vstack_size, natural tstack_size)
{
  extern area
    *allocate_vstack_holding_area_lock(natural),
    *allocate_tstack_holding_area_lock(natural);
  area *a;
  int i;
  sigset_t sigmask;

  sigemptyset(&sigmask);
  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
#ifdef HAVE_TLS
  TCR *tcr = &current_tcr;
#else /* no TLS */
  TCR *tcr = allocate_tcr();
#ifdef X8632
  /* NOTE(review): on x8632 without TLS, setup_tcr_extra_segment() is
     called here AND again in the #ifdef X86 block below — verify the
     double call is harmless (e.g. doesn't leak an LDT entry on
     Darwin). */
  setup_tcr_extra_segment(tcr);
#endif
#endif

#ifdef X86
  setup_tcr_extra_segment(tcr);
  tcr->linear = tcr;
#ifdef X8632
  tcr->node_regs_mask = X8632_DEFAULT_NODE_REGS_MASK;
#endif
#endif

#if (WORD_SIZE == 64)
  tcr->single_float_convert.tag = subtag_single_float;
#endif
  lisp_global(TCR_COUNT) += (1<<fixnumshift);
  tcr->suspend = new_semaphore(0);
  tcr->resume = new_semaphore(0);
  tcr->reset_completion = new_semaphore(0);
  tcr->activate = new_semaphore(0);
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = allocate_vstack_holding_area_lock(vstack_size);
  tcr->vs_area = a;
  a->owner = tcr;
  tcr->save_vsp = (LispObj *) a->active; 
  a = allocate_tstack_holding_area_lock(tstack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->ts_area = a;
  a->owner = tcr;
  tcr->save_tsp = (LispObj *) a->active;
#ifdef X86
  tcr->next_tsp = tcr->save_tsp;
#endif

  tcr->valence = TCR_STATE_FOREIGN;
#ifdef PPC
  tcr->lisp_fpscr.words.l = 0xd0;
#endif
#ifdef X86
  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
#if 1                           /* Mask underflow; too hard to
                                   deal with denorms if underflow is
                                   enabled */
    (1 << MXCSR_UM_BIT) | 
#endif
    (1 << MXCSR_PM_BIT);
#endif
  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
  /* Thread-local binding table: 2048 slots, all initially unbound. */
  tcr->tlb_limit = 2048<<fixnumshift;
  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
  for (i = 0; i < 2048; i++) {
    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
  }
  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
  return tcr;
}
#endif
823
/*
  pthread TSD destructor for the TCR key.  pthreads may call the
  destructor several times; tcr->shutdown_count counts down those
  calls, and until it reaches zero the TCR is simply re-registered.
  On the final call: give lisp a chance to clean up foreign threads
  via the FOREIGN_THREAD_CONTROL callback, condemn the thread's stack
  areas, destroy its semaphores and binding table, and finally raise
  the termination semaphore (if any) for whoever is waiting on this
  thread's death.
*/
void
shutdown_thread_tcr(void *arg)
{
  TCR *tcr = TCR_FROM_TSD(arg);

  area *vs, *ts, *cs;
  void *termination_semaphore;
 
  if (--(tcr->shutdown_count) == 0) {
    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
   
      /* Run the lisp-side "foreign thread exiting" callback with the
         TCR temporarily re-installed. */
      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
      tsd_set(lisp_global(TCR_KEY), NULL);
    }
#ifdef DARWIN
    darwin_exception_cleanup(tcr);
#endif
    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
    vs = tcr->vs_area;
    tcr->vs_area = NULL;
    ts = tcr->ts_area;
    tcr->ts_area = NULL;
    cs = tcr->cs_area;
    tcr->cs_area = NULL;
    if (vs) {
      condemn_area_holding_area_lock(vs);
    }
    if (ts) {
      condemn_area_holding_area_lock(ts);
    }
    if (cs) {
      condemn_area_holding_area_lock(cs);
    }
    destroy_semaphore(&tcr->suspend);
    destroy_semaphore(&tcr->resume);
    destroy_semaphore(&tcr->reset_completion);
    destroy_semaphore(&tcr->activate);
    tcr->tlb_limit = 0;
    free(tcr->tlb_pointer);
    tcr->tlb_pointer = NULL;
    tcr->osid = 0;
    tcr->interrupt_pending = 0;
    termination_semaphore = tcr->termination_semaphore;
    tcr->termination_semaphore = NULL;
#ifdef HAVE_TLS
    dequeue_tcr(tcr);
#endif
#ifdef DARWIN
#ifdef X8632
    free_tcr_extra_segment(tcr);
#endif
#endif
    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
    if (termination_semaphore) {
      SEM_RAISE(termination_semaphore);
    }
  } else {
    /* Not the last destructor call: keep the key alive so pthreads
       calls us again. */
    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
  }
}
887
888void
889tcr_cleanup(void *arg)
890{
891  TCR *tcr = (TCR *)arg;
892  area *a;
893
894  a = tcr->vs_area;
895  if (a) {
896    a->active = a->high;
897  }
898  a = tcr->ts_area;
899  if (a) {
900    a->active = a->high;
901  }
902  a = tcr->cs_area;
903  if (a) {
904    a->active = a->high;
905  }
906  tcr->valence = TCR_STATE_FOREIGN;
907  tcr->shutdown_count = 1;
908  shutdown_thread_tcr(tcr);
909  tsd_set(lisp_global(TCR_KEY), NULL);
910}
911
/*
  Return a platform-specific identifier for the current native
  thread, as an opaque pointer-sized value.
*/
void *
current_native_thread_id()
{
  return ((void *) (natural)
#ifdef LINUX
          getpid()
#endif
#ifdef DARWIN
          mach_thread_self()
#endif
#ifdef FREEBSD
          pthread_self()
#endif
#ifdef SOLARIS
          pthread_self()
#endif
#ifdef WINDOWS
          /* ThreadSelf() */ 23
#endif
          );
}
933
934
/*
  Finish initializing 'tcr' for the current thread: record its OS and
  native ids, register its control stack (bounds supplied by the
  caller), stash the TCR in thread-specific data, and do per-platform
  exception setup.
*/
void
thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
{
  area *a, *register_cstack_holding_area_lock(BytePtr, natural);

  tcr->osid = current_thread_osid();
  tcr->native_thread_id = current_native_thread_id();
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->cs_area = a;
  a->owner = tcr;
  /* Foreign threads don't get a lisp-visible control-stack limit. */
  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
  }
#ifdef LINUX
#ifdef PPC
#ifndef PPC64
  tcr->native_thread_info = current_r2;
#endif
#endif
#endif
  tcr->errno_loc = &errno;
  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
  if (use_mach_exception_handling) {
    darwin_exception_init(tcr);
  }
#endif
#ifdef LINUX
  linux_exception_init(tcr);
#endif
  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
}
970
971/*
972  Register the specified tcr as "belonging to" the current thread.
973  Under Darwin, setup Mach exception handling for the thread.
974  Install cleanup handlers for thread termination.
975*/
void
register_thread_tcr(TCR *tcr)
{
  void *stack_base = NULL;
  natural stack_size = 0;

  /* Discover this thread's stack bounds, finish TCR setup, and link
     the TCR into the global list. */
  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
  thread_init_tcr(tcr, stack_base, stack_size);
  enqueue_tcr(tcr);
}
986
987
988 
989 
990#ifndef MAP_GROWSDOWN
991#define MAP_GROWSDOWN 0
992#endif
993
#ifdef WINDOWS
/* Windows port: not yet implemented. */
Ptr
create_stack(int size)
{
}
#else
/*
  mmap() a page-aligned region of (at least) 'size' bytes for use as
  a stack.  The region's total size is stored in its first word so
  free_stack() can unmap it later.  On failure, reports an
  allocation failure (NOTE(review): there is no return after
  allocation_failure(); presumably it doesn't return — verify).
*/
Ptr
create_stack(natural size)
{
  Ptr p;
  size=align_to_power_of_2(size, log2_page_size);
  p = (Ptr) mmap(NULL,
                 (size_t)size,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
                 -1,    /* Darwin insists on this when not mmap()ing
                           a real fd */
                 0);
  if (p != (Ptr)(-1)) {
    *((size_t *)p) = size;
    return p;
  }
  allocation_failure(true, size);

}
#endif
1020
/* Allocate a stack region; just delegates to create_stack(). */
void *
allocate_stack(natural size)
{
  return create_stack(size);
}
1026
#ifdef WINDOWS
/* Windows port: not yet implemented. */
void
free_stack(void *s)
{
}
#else
/*
  Unmap a stack created by create_stack(); the region's size was
  stored in its first word.
*/
void
free_stack(void *s)
{
  size_t size = *((size_t *)s);
  munmap(s, size);
}
#endif
1040
1041Boolean threads_initialized = false;
1042
#ifndef USE_FUTEX
#ifdef WINDOWS
/* Windows port: not yet implemented. */
void
count_cpus()
{
}
#else
/*
  If the machine has more than one CPU, raise spin_lock_tries so
  spinlocks actually spin before yielding.
*/
void
count_cpus()
{
#ifdef DARWIN
  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
#include <mach/host_info.h>

  struct host_basic_info info;
  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 
  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
    if (info.max_cpus > 1) {
      spin_lock_tries = 1024;
    }
  }
#else
  int n = sysconf(_SC_NPROCESSORS_ONLN);
 
  if (n > 1) {
    spin_lock_tries = 1024;
  }
#endif
}
#endif
#endif
1075
1076#ifdef WINDOWS
1077void
1078init_threads(void * stack_base, TCR *tcr)
1079{
1080}
1081void *
1082lisp_thread_entry(void *param)
1083{
1084}
1085#else
1086void
1087init_threads(void * stack_base, TCR *tcr)
1088{
1089  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
1090  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
1091  thread_signal_setup();
1092
1093#ifndef USE_FUTEX
1094  count_cpus();
1095#endif
1096  threads_initialized = true;
1097}
1098
1099
1100void *
1101lisp_thread_entry(void *param)
1102{
1103  thread_activation *activation = (thread_activation *)param;
1104  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
1105  sigset_t mask, old_mask;
1106
1107  sigemptyset(&mask);
1108  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
1109
1110  register_thread_tcr(tcr);
1111
1112  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
1113  tcr->vs_area->active -= node_size;
1114  *(--tcr->save_vsp) = lisp_nil;
1115  enable_fp_exceptions();
1116  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
1117  activation->tcr = tcr;
1118  SEM_RAISE(activation->created);
1119  do {
1120    SEM_RAISE(tcr->reset_completion);
1121    SEM_WAIT_FOREVER(tcr->activate);
1122    /* Now go run some lisp code */
1123    start_lisp(TCR_TO_TSD(tcr),0);
1124  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
1125  pthread_cleanup_pop(true);
1126
1127}
1128#endif
1129
/*
  Create a new lisp thread with the requested stack sizes and wait
  (on a temporary semaphore) until it has built its TCR.  Returns the
  new thread's TCR in TSD form.  NOTE(review): if
  create_system_thread() fails, activation.tcr stays 0 and
  TCR_TO_TSD(0) is returned — verify callers handle that.
*/
void *
xNewThread(natural control_stack_size,
           natural value_stack_size,
           natural temp_stack_size)

{
  thread_activation activation;


  activation.tsize = temp_stack_size;
  activation.vsize = value_stack_size;
  activation.tcr = 0;
  activation.created = new_semaphore(0);
  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
                           NULL, 
                           lisp_thread_entry,
                           (void *) &activation)) {
   
    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
  }
  destroy_semaphore(&activation.created); 
  return TCR_TO_TSD(activation.tcr);
}
1153
1154Boolean
1155active_tcr_p(TCR *q)
1156{
1157  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1158 
1159  do {
1160    if (p == q) {
1161      return true;
1162    }
1163    p = p->next;
1164  } while (p != head);
1165  return false;
1166}
1167
#ifdef WINDOWS
/* Windows port: not yet implemented. */
OSErr
xDisposeThread(TCR *tcr)
{
}
#else
/*
  Kill the thread owning 'tcr' via pthread_cancel(), unless it's the
  initial thread, already dead, or the calling thread itself.
  Returns 0 on success, -50 (a Mac OSErr-style "bad parameter")
  otherwise.
*/
OSErr
xDisposeThread(TCR *tcr)
{
  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
      pthread_cancel((pthread_t)(tcr->osid));
      return 0;
    }
  }
  return -50;
}
#endif
1186
/* Unsupported legacy operation: reports a bug if ever called. */
OSErr
xYieldToThread(TCR *target)
{
  Bug(NULL, "xYieldToThread ?");
  return 0;
}
1193 
/* Unsupported legacy operation: reports a bug if ever called. */
OSErr
xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
{
  Bug(NULL, "xThreadCurrentStackSpace ?");
  return 0;
}
1200
1201
#ifdef WINDOWS
/* Windows port: not yet implemented. */
LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
}
#else
/*
  Create a detached pthread running 'start_routine(param)' with the
  requested stack size (and, where supported, explicit stack
  address).  Returns the new pthread_t as a LispObj; if
  pthread_create() fails the result is 0.  The pthread_attr object
  is destroyed before returning (it may hold malloc'ed state even
  though it's stack-allocated).
*/
LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
  pthread_attr_t attr;
  pthread_t returned_thread = (pthread_t) 0;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 

  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
    stack_size = PTHREAD_STACK_MIN;
  }

  stack_size = ensure_stack_limit(stack_size);
  if (stackaddr != NULL) {
    /* Size must have been specified.  Sort of makes sense ... */
#ifdef DARWIN
    Fatal("no pthread_attr_setsetstack. "," Which end of stack does address refer to?");
#else
    pthread_attr_setstack(&attr, stackaddr, stack_size);
#endif
  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
    pthread_attr_setstacksize(&attr,stack_size);
  }

  /*
     I think that's just about enough ... create the thread.
  */
  pthread_create(&returned_thread, &attr, start_routine, param);
  pthread_attr_destroy(&attr);
  return (LispObj) ptr_to_lispobj(returned_thread);
}
#endif
1247
/*
  Return the calling thread's TCR.  If the thread has none and
  'create' is true, build one: allocate and register a TCR flagged
  FOREIGN, push NIL plus enough zeroed save-register and
  thread-binding slots onto its value stack (the counts come from the
  architecture and from the lisp-side FOREIGN_THREAD_CONTROL callback
  called with -1), then invoke that callback with 0 to let lisp
  finish initializing the thread.  Returns NULL when there's no TCR
  and 'create' is false.
*/
TCR *
get_tcr(Boolean create)
{
#ifdef HAVE_TLS
  TCR *current = current_tcr.linear;
#else
  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
#endif

  if ((current == NULL) && create) {
    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
    int i, nbindwords = 0;
    extern unsigned initial_stack_size;
   
    /* Make one. */
    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
    register_thread_tcr(current);
#ifdef DEBUG_TCR_CREATION
#ifndef WINDOWS
    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
#endif
#endif
    current->vs_area->active -= node_size;
    *(--current->save_vsp) = lisp_nil;
#ifdef PPC
#define NSAVEREGS 8
#endif
#ifdef X8664
#define NSAVEREGS 4
#endif
#ifdef X8632
#define NSAVEREGS 0
#endif
    /* Reserve (zeroed) space for saved node registers ... */
    for (i = 0; i < NSAVEREGS; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    /* ... and for however many binding words lisp says it needs. */
    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
    for (i = 0; i < nbindwords; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    current->shutdown_count = 1;
    ((void (*)())ptr_from_lispobj(callback_ptr))(0);

  }
 
  return current;
}
1300
#ifdef WINDOWS
/* Windows port: not yet implemented. */
Boolean
suspend_tcr(TCR *tcr)
{
}
#else
/*
  Ask the thread owning 'tcr' to suspend itself by sending it the
  suspend signal; suspensions nest via tcr->suspend_count, and only
  the 0->1 transition actually signals.  Returns true if a signal was
  sent (the caller should then wait via tcr_suspend_ack()); returns
  false on a nested suspend or if the thread is dead.
*/
Boolean
suspend_tcr(TCR *tcr)
{
  int suspend_count = atomic_incf(&(tcr->suspend_count));
  pthread_t thread;
  if (suspend_count == 1) {
    thread = (pthread_t)(tcr->osid);
    if ((thread != (pthread_t) 0) &&
        (pthread_kill(thread, thread_suspend_signal) == 0)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
    } else {
      /* A problem using pthread_kill.  On Darwin, this can happen
         if the thread has had its signal mask surgically removed
         by pthread_exit.  If the native (Mach) thread can be suspended,
         do that and return true; otherwise, flag the tcr as belonging
         to a dead thread by setting tcr->osid to 0.
      */
      tcr->osid = 0;
      return false;
    }
    return true;
  }
  return false;
}
#endif
1332
/*
  If an acknowledgment from a suspend signal is still pending, wait
  for the target thread to raise tcr->suspend (it does so in
  suspend_resume_handler()) and clear the pending flag.  Always
  returns true.
*/
Boolean
tcr_suspend_ack(TCR *tcr)
{
  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
    SEM_WAIT_FOREVER(tcr->suspend);
    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
  }
  return true;
}
1342
1343     
1344
1345
1346Boolean
1347lisp_suspend_tcr(TCR *tcr)
1348{
1349  Boolean suspended;
1350  TCR *current = get_tcr(true);
1351 
1352  LOCK(lisp_global(TCR_AREA_LOCK),current);
1353  suspended = suspend_tcr(tcr);
1354  if (suspended) {
1355    while (!tcr_suspend_ack(tcr));
1356  }
1357  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1358  return suspended;
1359}
1360         
1361
1362Boolean
1363resume_tcr(TCR *tcr)
1364{
1365  int suspend_count = atomic_decf(&(tcr->suspend_count));
1366  if (suspend_count == 0) {
1367    void *s = (tcr->resume);
1368    if (s != NULL) {
1369      SEM_RAISE(s);
1370      return true;
1371    }
1372  }
1373  return false;
1374}
1375
1376   
1377
1378
1379Boolean
1380lisp_resume_tcr(TCR *tcr)
1381{
1382  Boolean resumed;
1383  TCR *current = get_tcr(true);
1384 
1385  LOCK(lisp_global(TCR_AREA_LOCK),current);
1386  resumed = resume_tcr(tcr);
1387  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1388  return resumed;
1389}
1390
1391
/* Singly-linked list (through tcr->next) of TCRs whose threads have
   exited; entries are deallocated later by free_freed_tcrs(). */
TCR *freed_tcrs = NULL;

/* Push TCR onto the freed list for later deallocation.  When TLS
   holds the TCR (see "__thread TCR current_tcr" above), the TCR is
   not separately malloc'ed, so there is nothing to queue. */
void
enqueue_freed_tcr (TCR *tcr)
{
#ifndef HAVE_TLS
  tcr->next = freed_tcrs;
  freed_tcrs = tcr;
#endif
}
1402
1403/* It's not clear that we can safely condemn a dead tcr's areas, since
1404   we may not be able to call free() if a suspended thread owns a
1405   malloc lock. At least make the areas appear to be empty.
1406*/
1407   
1408
1409void
1410normalize_dead_tcr_areas(TCR *tcr)
1411{
1412  area *a;
1413
1414  a = tcr->vs_area;
1415  if (a) {
1416    a->active = a->high;
1417  }
1418
1419  a = tcr->ts_area;
1420  if (a) {
1421    a->active = a->high;
1422  }
1423
1424  a = tcr->cs_area;
1425  if (a) {
1426    a->active = a->high;
1427  }
1428}
1429   
1430void
1431free_freed_tcrs ()
1432{
1433  TCR *current, *next;
1434
1435  for (current = freed_tcrs; current; current = next) {
1436    next = current->next;
1437#ifndef HAVE_TLS
1438    free(current);
1439#endif
1440  }
1441  freed_tcrs = NULL;
1442}
1443
/* Suspend every thread on the circular TCR list except the caller.
   Phase 1: send each live thread (osid != 0) a suspend request,
   counting dead threads (including any that die during the attempt).
   Phase 2: loop until every live thread has acknowledged its
   suspension.  Phase 3: with all other threads stopped, it is safe
   to unlink dead TCRs and queue them for deallocation.
   NOTE: TCR_AREA_LOCK is acquired here and deliberately left held;
   resume_other_threads() releases it.
   "for_gc" is not examined in this function's visible code. */
void
suspend_other_threads(Boolean for_gc)
{
  TCR *current = get_tcr(true), *other, *next;
  int dead_tcr_count = 0;
  Boolean all_acked;

  LOCK(lisp_global(TCR_AREA_LOCK), current);
  /* Phase 1: request suspension of each live thread. */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      suspend_tcr(other);
      if (other->osid == 0) {
        /* suspend_tcr discovered the thread was dead and cleared osid. */
        dead_tcr_count++;
      }
    } else {
      dead_tcr_count++;
    }
  }

  /* Phase 2: wait until every live thread has acknowledged. */
  do {
    all_acked = true;
    for (other = current->next; other != current; other = other->next) {
      if ((other->osid != 0)) {
        if (!tcr_suspend_ack(other)) {
          all_acked = false;
        }
      }
    }
  } while(! all_acked);

     

  /* All other threads are suspended; can safely delete dead tcrs now */
  if (dead_tcr_count) {
    for (other = current->next; other != current; other = next) {
      next = other->next;
      if ((other->osid == 0))  {
        normalize_dead_tcr_areas(other);
        dequeue_tcr(other);
        enqueue_freed_tcr(other);
      }
    }
  }
}
1488
/* Entry point called from lisp: suspend all other threads for a
   reason other than GC. */
void
lisp_suspend_other_threads()
{
  suspend_other_threads(false);
}
1494
/* Resume every other live thread, free any TCRs queued by
   suspend_other_threads(), and release TCR_AREA_LOCK (which
   suspend_other_threads() left held — the two functions bracket a
   stop-the-world section).
   "for_gc" is not examined in this function's visible code. */
void
resume_other_threads(Boolean for_gc)
{
  TCR *current = get_tcr(true), *other;
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      resume_tcr(other);
    }
  }
  free_freed_tcrs();
  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
}
1507
/* Entry point called from lisp: resume all other threads after a
   non-GC stop-the-world section. */
void
lisp_resume_other_threads()
{
  resume_other_threads(false);
}
1513
1514
1515
1516rwlock *
1517rwlock_new()
1518{
1519  extern int cache_block_size;
1520
1521  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1522  rwlock *rw = NULL;;
1523 
1524  if (p) {
1525    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1526    rw->malloced_ptr = p;
1527#ifndef USE_FUTEX
1528    rw->reader_signal = new_semaphore(0);
1529    rw->writer_signal = new_semaphore(0);
1530    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1531      if (rw->reader_signal) {
1532        destroy_semaphore(&(rw->reader_signal));
1533      } else {
1534        destroy_semaphore(&(rw->writer_signal));
1535      }
1536      free(rw);
1537      rw = NULL;
1538    }
1539#endif
1540  }
1541  return rw;
1542}
1543
1544     
/*
  Try to get read access to a multiple-readers/single-writer lock.  If
  we already have read access, return success (indicating that the
  lock is held another time).  If we already have write access to the
  lock ... that won't work; return EDEADLK.  Wait until no other
  thread has or is waiting for write access, then indicate that we
  hold read access once.
*/
#ifndef USE_FUTEX
/* Lock state convention (shared with rwlock_wlock/rwlock_unlock):
   rw->state > 0  : write-locked; the value is the writer's recursion
                    depth;
   rw->state < 0  : read-locked; the value is the negated reader count;
   rw->state == 0 : free.
   Returns 0 on success, EDEADLK if the caller holds write access, or
   an error (e.g. timeout) from semaphore_maybe_timedwait(). */
int
rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  int err = 0;
 
  LOCK_SPINLOCK(rw->spin, tcr);

  if (rw->writer == tcr) {
    /* We hold the lock for writing; read access would deadlock. */
    RELEASE_SPINLOCK(rw->spin);
    return EDEADLK;
  }

  /* Active or queued writers take precedence over new readers. */
  while (rw->blocked_writers || (rw->state > 0)) {
    rw->blocked_readers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_readers--;
    if (err == EINTR) {
      err = 0;                  /* interrupted waits are retried */
    }
    if (err) {
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  rw->state--;                  /* one more reader */
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1584#else
1585int
1586rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1587{
1588  natural waitval;
1589
1590  lock_futex(&rw->spin);
1591
1592  if (rw->writer == tcr) {
1593    unlock_futex(&rw->spin);
1594    return EDEADLOCK;
1595  }
1596  while (1) {
1597    if (rw->writer == NULL) {
1598      --rw->state;
1599      unlock_futex(&rw->spin);
1600      return 0;
1601    }
1602    rw->blocked_readers++;
1603    waitval = rw->reader_signal;
1604    unlock_futex(&rw->spin);
1605    futex_wait(&rw->reader_signal,waitval);
1606    lock_futex(&rw->spin);
1607    rw->blocked_readers--;
1608  }
1609  return 0;
1610}
1611#endif   
1612
1613
1614/*
1615  Try to obtain write access to the lock.
1616  It is an error if we already have read access, but it's hard to
1617  detect that.
1618  If we already have write access, increment the count that indicates
1619  that.
1620  Otherwise, wait until the lock is not held for reading or writing,
1621  then assert write access.
1622*/
1623
#ifndef USE_FUTEX
/* Obtain write access to RW.  If the calling thread already holds
   write access, just deepen the recursion count (rw->state).
   Otherwise wait on rw->writer_signal (optionally bounded by
   "waitfor") until the lock is neither read- nor write-locked
   (rw->state == 0), then claim it.
   Returns 0 on success, or an error (e.g. timeout) from
   semaphore_maybe_timedwait(). */
int
rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  int err = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->writer == tcr) {
    /* Recursive write lock. */
    rw->state++;
    RELEASE_SPINLOCK(rw->spin);
    return 0;
  }

  while (rw->state != 0) {
    rw->blocked_writers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_writers--;
    if (err == EINTR) {
      err = 0;                  /* interrupted waits are retried */
    }
    if (err) {
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  rw->state = 1;
  rw->writer = tcr;
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1656
#else
/* Futex-based variant of rwlock_wlock: recursive if the caller is
   already the writer, otherwise sleeps on writer_signal until the
   lock is free (rw->state == 0) and claims it.
   NOTE(review): "waitfor" is ignored in this variant, and "err" is
   always 0 here (kept for symmetry with the semaphore version). */
int
rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  int err = 0;
  natural waitval;

  lock_futex(&rw->spin);
  if (rw->writer == tcr) {
    /* Recursive write lock. */
    rw->state++;
    unlock_futex(&rw->spin);
    return 0;
  }

  while (rw->state != 0) {
    rw->blocked_writers++;
    waitval = rw->writer_signal;
    unlock_futex(&rw->spin);
    futex_wait(&rw->writer_signal,waitval);
    lock_futex(&rw->spin);
    rw->blocked_writers--;
  }
  rw->state = 1;
  rw->writer = tcr;
  unlock_futex(&rw->spin);
  return err;
}
#endif
1685
1686/*
1687  Sort of the same as above, only return EBUSY if we'd have to wait.
1688*/
1689#ifndef USE_FUTEX
1690int
1691rwlock_try_wlock(rwlock *rw, TCR *tcr)
1692{
1693  int ret = EBUSY;
1694
1695  LOCK_SPINLOCK(rw->spin,tcr);
1696  if (rw->writer == tcr) {
1697    rw->state++;
1698    ret = 0;
1699  } else {
1700    if (rw->state == 0) {
1701      rw->writer = tcr;
1702      rw->state = 1;
1703      ret = 0;
1704    }
1705  }
1706  RELEASE_SPINLOCK(rw->spin);
1707  return ret;
1708}
1709#else
1710int
1711rwlock_try_wlock(rwlock *rw, TCR *tcr)
1712{
1713  int ret = EBUSY;
1714
1715  lock_futex(&rw->spin);
1716  if (rw->writer == tcr) {
1717    rw->state++;
1718    ret = 0;
1719  } else {
1720    if (rw->state == 0) {
1721      rw->writer = tcr;
1722      rw->state = 1;
1723      ret = 0;
1724    }
1725  }
1726  unlock_futex(&rw->spin);
1727  return ret;
1728}
1729#endif
1730
1731#ifndef USE_FUTEX
1732int
1733rwlock_try_rlock(rwlock *rw, TCR *tcr)
1734{
1735  int ret = EBUSY;
1736
1737  LOCK_SPINLOCK(rw->spin,tcr);
1738  if (rw->state <= 0) {
1739    --rw->state;
1740    ret = 0;
1741  }
1742  RELEASE_SPINLOCK(rw->spin);
1743  return ret;
1744}
1745#else
1746int
1747rwlock_try_rlock(rwlock *rw, TCR *tcr)
1748{
1749  int ret = EBUSY;
1750
1751  lock_futex(&rw->spin);
1752  if (rw->state <= 0) {
1753    --rw->state;
1754    ret = 0;
1755  }
1756  unlock_futex(&rw->spin);
1757  return ret;
1758}
1759#endif
1760
1761
1762
#ifndef USE_FUTEX
/* Release one level of read or write access to RW.
   state > 0: must be the current writer (else EINVAL); decrement the
   write-recursion count, clearing rw->writer at zero.
   state < 0: drop one reader.
   state == 0: nothing to unlock — EINVAL.
   When the lock becomes free, blocked writers are preferred: wake one
   writer if any are queued, otherwise wake all blocked readers.
   Returns 0 on success. */
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{

  int err = 0;
  natural blocked_readers = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->state > 0) {
    if (rw->writer != tcr) {
      err = EINVAL;             /* write-locked by someone else */
    } else {
      --rw->state;
      if (rw->state == 0) {
        rw->writer = NULL;
      }
    }
  } else {
    if (rw->state < 0) {
      ++rw->state;              /* one fewer reader */
    } else {
      err = EINVAL;             /* not locked at all */
    }
  }
  if (err) {
    RELEASE_SPINLOCK(rw->spin);
    return err;
  }
 
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      SEM_RAISE(rw->writer_signal);
    } else {
      blocked_readers = rw->blocked_readers;
      if (blocked_readers) {
        SEM_BROADCAST(rw->reader_signal, blocked_readers);
      }
    }
  }
  RELEASE_SPINLOCK(rw->spin);
  return 0;
}
#else
/* Futex-based variant of rwlock_unlock.  Same state handling as the
   semaphore version; when the lock becomes free, the relevant futex
   word is bumped (so sleepers' compare-values go stale) and either
   one blocked writer or all blocked readers are woken. */
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{

  int err = 0;

  lock_futex(&rw->spin);
  if (rw->state > 0) {
    if (rw->writer != tcr) {
      err = EINVAL;             /* write-locked by someone else */
    } else {
      --rw->state;
      if (rw->state == 0) {
        rw->writer = NULL;
      }
    }
  } else {
    if (rw->state < 0) {
      ++rw->state;              /* one fewer reader */
    } else {
      err = EINVAL;             /* not locked at all */
    }
  }
  if (err) {
    unlock_futex(&rw->spin);
    return err;
  }
 
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      /* Prefer a blocked writer: wake exactly one. */
      ++rw->writer_signal;
      unlock_futex(&rw->spin);
      futex_wake(&rw->writer_signal,1);
      return 0;
    }
    if (rw->blocked_readers) {
      /* No writers waiting: wake every blocked reader. */
      ++rw->reader_signal;
      unlock_futex(&rw->spin);
      futex_wake(&rw->reader_signal, INT_MAX);
      return 0;
    }
  }
  unlock_futex(&rw->spin);
  return 0;
}
#endif
1853
1854       
/* Free the resources owned by RW: the two semaphores (when not using
   futexes), then the underlying allocation via its remembered
   unaligned pointer (rwlock_new aligned RW within a larger block).
   NOTE(review): presumably postGCfree defers or wraps free() for
   GC-safety — confirm against its definition. */
void
rwlock_destroy(rwlock *rw)
{
#ifndef USE_FUTEX
  destroy_semaphore((void **)&rw->reader_signal);
  destroy_semaphore((void **)&rw->writer_signal);
#endif
  postGCfree((void *)(rw->malloced_ptr));
}
1864
1865
1866
Note: See TracBrowser for help on using the repository browser.