source: branches/working-0711/ccl/lisp-kernel/thread_manager.c @ 9337

Last change on this file since 9337 was 9337, checked in by gz, 11 years ago

Propagate r9303 and r9286 from the trunk

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 36.2 KB
Line 
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
/*
   If we suspend via signals - and if the "suspend" signal is masked
   in the handler for that signal - then it's not possible to suspend
   a thread that's still waiting to be resumed (which is what
   WAIT_FOR_RESUME_ACK is all about.)
*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
/* Parameter block passed to lisp_thread_entry() when a new lisp
   thread is created; the creator waits on 'created' until the new
   thread has stored its TCR in 'tcr'. */
typedef struct {
  TCR *tcr;                     /* filled in by the new thread */
  natural vsize, tsize;         /* value-stack and temp-stack sizes */
  void *created;                /* semaphore raised once 'tcr' is valid */
} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46#ifdef USE_FUTEX
47#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
48#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
49#define FUTEX_AVAIL (0)
50#define FUTEX_LOCKED (1)
51#define FUTEX_CONTENDED (2)
52#endif
53
/* Deliver the process-interrupt signal to the target thread.
   Returns the pthread_kill() result (0 on success). */
int
raise_thread_interrupt(TCR *target)
{
#ifdef DARWIN_not_yet
  /* Disabled: would use Mach-level delivery instead of signals. */
  if (use_mach_exception_handling) {
    return mach_raise_thread_interrupt(target);
  }
#endif
  return pthread_kill((pthread_t)target->osid, SIGNAL_FOR_PROCESS_INTERRUPT);
}
64
/* Atomically add 'by' to *ptr via a compare-and-swap retry loop;
   returns the new value.  store_conditional() returns the value it
   observed, so the loop repeats until no other thread intervened. */
signed_natural
atomic_incf_by(signed_natural *ptr, signed_natural by)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old+by;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return new;
}
76
77signed_natural
78atomic_incf(signed_natural *ptr)
79{
80  return atomic_incf_by(ptr, 1);
81}
82
/* Atomically decrement *ptr, but never below zero (the stored value
   saturates at 0).  Returns old-1, i.e. -1 when *ptr was already 0;
   callers such as unlock_futex() rely on that distinction. */
signed_natural
atomic_decf(signed_natural *ptr)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old == 0 ? old : old-1;    /* clamp: don't go negative */
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return old-1;
}
94
95
96#ifndef USE_FUTEX
97int spin_lock_tries = 1;
98
99void
100get_spin_lock(signed_natural *p, TCR *tcr)
101{
102  int i, n = spin_lock_tries;
103 
104  while (1) {
105    for (i = 0; i < n; i++) {
106      if (atomic_swap(p,(signed_natural)tcr) == 0) {
107        return;
108      }
109    }
110    sched_yield();
111  }
112}
113#endif
114
115#ifndef USE_FUTEX
/* Acquire the recursive lock 'm' for 'tcr' (looked up if NULL),
   blocking until available.  Recursive entry just bumps the count.
   m->avail counts acquirers: the first to push it to 1 owns the
   lock; later comers release the spinlock and sleep on m->signal,
   re-checking after each wakeup.  Always returns 0. */
int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;                 /* already ours: recursive entry */
    return 0;
  }
  while (1) {
    LOCK_SPINLOCK(m->spinlock,tcr);
    ++m->avail;
    if (m->avail == 1) {
      /* We're the only claimant: take ownership. */
      m->owner = tcr;
      m->count = 1;
      RELEASE_SPINLOCK(m->spinlock);
      break;
    }
    RELEASE_SPINLOCK(m->spinlock);
    SEM_WAIT_FOREVER(m->signal);  /* raised by unlock_recursive_lock() */
  }
  return 0;
}
141
142#else /* USE_FUTEX */
143
/* Classic three-state futex lock (cf. Drepper, "Futexes Are
   Tricky"): *p is FUTEX_AVAIL, FUTEX_LOCKED, or FUTEX_CONTENDED.
   Fast path: CAS AVAIL->LOCKED.  Slow path: mark CONTENDED so the
   unlocker knows to wake us, then sleep in the kernel. */
static void inline
lock_futex(natural *p)
{
 
  while (1) {
    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
      return;                   /* uncontended acquisition */
    }
    while (1) {
      /* Force the contended state; if the lock happened to be free
         we just acquired it (in the contended state, which is safe -
         unlock will simply do a spurious wake). */
      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
        return;
      }
      futex_wait(p,FUTEX_CONTENDED);  /* sleep until woken or value changes */
    }
  }
}
160
/* Release a futex taken by lock_futex().  atomic_decf() returns
   old-1: 0 (== FUTEX_AVAIL) means it was LOCKED with no waiters and
   we're done; anything else means contention, so force AVAIL and
   wake all sleepers to re-contend. */
static void inline
unlock_futex(natural *p)
{
  if (atomic_decf(p) != FUTEX_AVAIL) {
    *p = FUTEX_AVAIL;
    futex_wake(p,INT_MAX);
  }
}
169   
170int
171lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
172{
173  natural val;
174  if (tcr == NULL) {
175    tcr = get_tcr(true);
176  }
177  if (m->owner == tcr) {
178    m->count++;
179    return 0;
180  }
181  lock_futex(&m->avail);
182  m->owner = tcr;
183  m->count = 1;
184  return 0;
185}
186#endif /* USE_FUTEX */
187
188
189#ifndef USE_FUTEX 
/* Release one level of the recursive lock 'm'.  Returns 0 if the
   calling thread owns the lock, EPERM otherwise.  When the count
   reaches 0 the lock is handed off: m->avail is folded into
   m->waiting and one sleeper (if any) is signalled. */
int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM, pending;

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      LOCK_SPINLOCK(m->spinlock,tcr);
      m->owner = NULL;
      pending = m->avail-1 + m->waiting;     /* Don't count us */
      m->avail = 0;
      /* NOTE(review): 'pending' is decremented again here even though
         the line above already subtracted 1 "for us"; presumably the
         second decrement accounts for the single waiter we're about
         to wake with SEM_RAISE - confirm against trunk history. */
      --pending;
      if (pending > 0) {
        m->waiting = pending;
      } else {
        m->waiting = 0;
      }
      RELEASE_SPINLOCK(m->spinlock);
      if (pending >= 0) {
        SEM_RAISE(m->signal);   /* wake one blocked acquirer */
      }
    }
    ret = 0;
  }
  return ret;
}
221#else /* USE_FUTEX */
222int
223unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
224{
225  int ret = EPERM, pending;
226
227   if (tcr == NULL) {
228    tcr = get_tcr(true);
229  }
230
231  if (m->owner == tcr) {
232    --m->count;
233    if (m->count == 0) {
234      m->owner = NULL;
235      unlock_futex(&m->avail);
236    }
237    ret = 0;
238  }
239  return ret;
240}
241#endif /* USE_FUTEX */
242
/* Free all resources of a lock made by new_recursive_lock().  The
   lock structure itself lives inside the cache-line-aligned block
   whose true malloc() pointer was stashed in malloced_ptr. */
void
destroy_recursive_lock(RECURSIVE_LOCK m)
{
#ifndef USE_FUTEX
  destroy_semaphore((void **)&m->signal);
#endif
  postGCfree((void *)(m->malloced_ptr));
}
251
252/*
253  If we're already the owner (or if the lock is free), lock it
254  and increment the lock count; otherwise, return EBUSY without
255  waiting.
256*/
257
258#ifndef USE_FUTEX
259int
260recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
261{
262  TCR *owner = m->owner;
263
264  LOCK_SPINLOCK(m->spinlock,tcr);
265  if (owner == tcr) {
266    m->count++;
267    if (was_free) {
268      *was_free = 0;
269      RELEASE_SPINLOCK(m->spinlock);
270      return 0;
271    }
272  }
273  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
274    m->owner = tcr;
275    m->count = 1;
276    if (was_free) {
277      *was_free = 1;
278    }
279    RELEASE_SPINLOCK(m->spinlock);
280    return 0;
281  }
282
283  RELEASE_SPINLOCK(m->spinlock);
284  return EBUSY;
285}
286#else
287int
288recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
289{
290  TCR *owner = m->owner;
291
292  if (owner == tcr) {
293    m->count++;
294    if (was_free) {
295      *was_free = 0;
296      return 0;
297    }
298  }
299  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
300    m->owner = tcr;
301    m->count = 1;
302    if (was_free) {
303      *was_free = 1;
304    }
305    return 0;
306  }
307
308  return EBUSY;
309}
310#endif
311
/* Wait on semaphore 's' until it is raised, polling with a one-
   second timeout so the wait can't get permanently stuck on a
   missed wakeup or EINTR. */
void
sem_wait_forever(SEMAPHORE s)
{
  int status;

  do {
#ifdef USE_MACH_SEMAPHORES
    mach_timespec_t q = {1,0};  /* relative one-second timeout */
    status = SEM_TIMEDWAIT(s,q);
#endif
#ifdef USE_POSIX_SEMAPHORES
    struct timespec q;
    /* NOTE(review): gettimeofday() fills a struct timeval (tv_usec in
       microseconds) that's punned into a timespec (tv_nsec expected
       in nanoseconds); the sub-second part is therefore off by 1000x.
       Harmless for this 1s retry loop, but confirm intent. */
    gettimeofday((struct timeval *)&q, NULL);
    q.tv_sec += 1;
    status = SEM_TIMEDWAIT(s,&q);
#endif
  } while (status != 0);        /* retry on timeout or interrupt */
}
330
/* Wait on semaphore 's' for at most 'seconds' + 'millis'.  Returns
   0 on success, ETIMEDOUT/EINTR/other errno-style code on failure.
   POSIX semaphores take an ABSOLUTE deadline, Mach semaphores a
   RELATIVE timeout, hence the two very different branches. */
int
wait_on_semaphore(void *s, int seconds, int millis)
{
  int nanos = (millis % 1000) * 1000000;
#ifdef USE_POSIX_SEMAPHORES
  int status;

  struct timespec q;
  /* Pun a timeval into the timespec, then scale tv_usec (micro-
     seconds) up to the nanoseconds sem_timedwait() expects. */
  gettimeofday((struct timeval *)&q, NULL);
  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
   
  q.tv_nsec += nanos;
  if (q.tv_nsec >= 1000000000L) {
    q.tv_nsec -= 1000000000L;   /* carry into the seconds field */
    seconds += 1;
  }
  q.tv_sec += seconds;
  status = SEM_TIMEDWAIT(s, &q);
  if (status < 0) {
    return errno;
  }
  return status;
#endif
#ifdef USE_MACH_SEMAPHORES
  mach_timespec_t q = {seconds, nanos};
  int status = SEM_TIMEDWAIT(s, q);

 
  /* Map Mach kern_return_t codes onto errno-style values. */
  switch (status) {
  case 0: return 0;
  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
  case KERN_ABORTED: return EINTR;
  default: return EINVAL;
  }

#endif
}
368
369
370int
371semaphore_maybe_timedwait(void *s, struct timespec *t)
372{
373  if (t) {
374    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
375  }
376  SEM_WAIT_FOREVER(s);
377  return 0;
378}
379
/* Raise (post) semaphore 's', waking one waiter. */
void
signal_semaphore(SEMAPHORE s)
{
  SEM_RAISE(s);
}
385
386 
/* Return the calling thread's OS-level identity (its pthread_t)
   encoded as a LispObj, as stored in tcr->osid. */
LispObj
current_thread_osid()
{
  return (LispObj)ptr_to_lispobj(pthread_self());
}
392
393
394
395int thread_suspend_signal = 0, thread_resume_signal = 0;
396
397
398
/* Per-thread Linux exception setup: intentionally a no-op here
   (kept so thread_init_tcr() has a uniform hook to call). */
void
linux_exception_init(TCR *tcr)
{
}
403
404
/* Return the current thread's TCR from within a signal handler;
   just get_tcr() on this platform. */
TCR *
get_interrupt_tcr(Boolean create)
{
  return get_tcr(create);
}
410 
411  void
412suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
413{
414#ifdef DARWIN_GS_HACK
415  Boolean gs_was_tcr = ensure_gs_pthread();
416#endif
417  TCR *tcr = get_interrupt_tcr(false);
418
419  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
420    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
421  } else {
422    if (signo == thread_suspend_signal) {
423#if 0
424      sigset_t wait_for;
425#endif
426
427      tcr->suspend_context = context;
428#if 0
429      sigfillset(&wait_for);
430#endif
431      SEM_RAISE(tcr->suspend);
432#if 0
433      sigdelset(&wait_for, thread_resume_signal);
434#endif
435#if 1
436#if RESUME_VIA_RESUME_SEMAPHORE
437      SEM_WAIT_FOREVER(tcr->resume);
438#if SUSPEND_RESUME_VERBOSE
439      fprintf(stderr, "got  resume in 0x%x\n",tcr);
440#endif
441      tcr->suspend_context = NULL;
442#else
443      sigsuspend(&wait_for);
444#endif
445#else
446    do {
447      sigsuspend(&wait_for);
448    } while (tcr->suspend_context);
449#endif 
450    } else {
451      tcr->suspend_context = NULL;
452#if SUSEPEND_RESUME_VERBOSE
453      fprintf(stderr,"got  resume in in 0x%x\n",tcr);
454#endif
455    }
456#if WAIT_FOR_RESUME_ACK
457    SEM_RAISE(tcr->suspend);
458#endif
459  }
460#ifdef DARWIN_GS_HACK
461  if (gs_was_tcr) {
462    set_gs_address(tcr);
463  }
464#endif
465#ifdef DARWIN
466  DarwinSigReturn(context);
467#endif
468#ifdef FREEBSD
469  freebsd_sigreturn(context);
470#endif
471}
472
473 
474
/*
  'base' should be set to the bottom (origin) of the stack, e.g., the
  end from which it grows.
*/
 
/* Query the OS for thread q's stack base and size.  Note that Linux
   and FreeBSD report the LOW address, so the size is added to yield
   the base; Darwin's pthread_get_stackaddr_np() already returns it. */
void
os_get_stack_bounds(LispObj q,void **base, natural *size)
{
  pthread_t p = (pthread_t)(q);
#ifdef DARWIN
  *base = pthread_get_stackaddr_np(p);
  *size = pthread_get_stacksize_np(p);
#endif
#ifdef LINUX
  pthread_attr_t attr;

  pthread_getattr_np(p,&attr);
  pthread_attr_getstack(&attr, base, size);
  /* Convert low address to stack origin (base = low + size). */
  *(natural *)base += *size;
#endif
#ifdef FREEBSD
  pthread_attr_t attr;
  void * temp_base;
  size_t temp_size;
 

  pthread_attr_init(&attr); 
  pthread_attr_get_np(p, &attr);
  pthread_attr_getstackaddr(&attr,&temp_base);
  pthread_attr_getstacksize(&attr,&temp_size);
  *base = (void *)((natural)temp_base + temp_size);
  *size = temp_size;
#endif

}
510
/* Allocate a counting semaphore with initial value 'count'.
   Returns an opaque handle: a malloc()ed sem_t* (POSIX) or a
   semaphore_t cast to a pointer (Mach).
   NOTE(review): neither sem_init() nor semaphore_create() results
   are checked - failure yields an unusable handle. */
void *
new_semaphore(int count)
{
#ifdef USE_POSIX_SEMAPHORES
  sem_t *s = malloc(sizeof(sem_t));
  sem_init(s, 0, count);
  return s;
#endif
#ifdef USE_MACH_SEMAPHORES
  semaphore_t s = (semaphore_t)0;
  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
  return (void *)(natural)s;
#endif
}
525
/* Allocate and initialize a recursive lock.  The structure is
   aligned to a cache-line boundary (to avoid false sharing), so a
   padded block is calloc()ed and the real pointer is remembered in
   malloced_ptr for later freeing.  Returns NULL on any failure. */
RECURSIVE_LOCK
new_recursive_lock()
{
  extern int cache_block_size;
  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
  RECURSIVE_LOCK m = NULL;
#ifndef USE_FUTEX
  void *signal = new_semaphore(0);
#endif

  if (p) {
    /* Round the allocation up to the next cache-line boundary. */
    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
    m->malloced_ptr = p;
  }

#ifdef USE_FUTEX
  if (m) {
    return m;
  }
#else
  if (m && signal) {
    m->signal = signal;
    return m;
  }
  /* Partial failure: undo whichever allocation succeeded. */
  if (m) {
    free(p);
  }
  if (signal) {
    destroy_semaphore(&signal);
  }
#endif
  return NULL;
}
559
/* Destroy a semaphore made by new_semaphore() and NULL out the
   caller's handle; safe to call with *s already NULL.
   NOTE(review): under POSIX the malloc()ed sem_t is sem_destroy()ed
   but never free()d - confirm whether that leak is intentional. */
void
destroy_semaphore(void **s)
{
  if (*s) {
#ifdef USE_POSIX_SEMAPHORES
    sem_destroy((sem_t *)*s);
#endif
#ifdef USE_MACH_SEMAPHORES
    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
#endif
    *s=NULL;
  }
}
573
/* Store 'datum' in this thread's thread-specific-data slot 'key'. */
void
tsd_set(LispObj key, void *datum)
{
  pthread_setspecific((pthread_key_t)key, datum);
}
579
/* Fetch this thread's thread-specific-data value for slot 'key'. */
void *
tsd_get(LispObj key)
{
  return pthread_getspecific((pthread_key_t)key);
}
585
586void
587dequeue_tcr(TCR *tcr)
588{
589  TCR *next, *prev;
590
591  next = tcr->next;
592  prev = tcr->prev;
593
594  prev->next = next;
595  next->prev = prev;
596  tcr->prev = tcr->next = NULL;
597#ifdef X8664
598  tcr->linear = NULL;
599#endif
600}
601 
/* Insert 'new' into the circular TCR ring, just before the initial
   TCR (i.e. at the tail), under the TCR-area lock. */
void
enqueue_tcr(TCR *new)
{
  TCR *head, *tail;
 
  LOCK(lisp_global(TCR_AREA_LOCK),new);
  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
  tail = head->prev;
  tail->next = new;
  head->prev = new;
  new->prev = tail;
  new->next = head;
  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
}
616
617TCR *
618allocate_tcr()
619{
620  TCR *tcr, *chain = NULL, *next;
621#ifdef DARWIN
622  extern Boolean use_mach_exception_handling;
623  kern_return_t kret;
624  mach_port_t
625    thread_exception_port,
626    task_self = mach_task_self();
627#endif
628  for (;;) {
629    tcr = calloc(1, sizeof(TCR));
630#ifdef DARWIN
631#if WORD_SIZE == 64
632    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
633      tcr->next = chain;
634      chain = tcr;
635      continue;
636    }
637#endif
638    if (use_mach_exception_handling) {
639      thread_exception_port = (mach_port_t)((natural)tcr);
640      kret = mach_port_allocate_name(task_self,
641                                     MACH_PORT_RIGHT_RECEIVE,
642                                     thread_exception_port);
643    } else {
644      kret = KERN_SUCCESS;
645    }
646
647    if (kret != KERN_SUCCESS) {
648      tcr->next = chain;
649      chain = tcr;
650      continue;
651    }
652#endif
653    for (next = chain; next;) {
654      next = next->next;
655      free(chain);
656    }
657    return tcr;
658  }
659}
660
661#ifdef X8664
662#ifdef LINUX
663#include <asm/prctl.h>
664#include <sys/prctl.h>
665#endif
666#ifdef FREEBSD
667#include <machine/sysarch.h>
668#endif
669
/* x86-64 only: point the GS segment register at this thread's TCR
   so lisp code can address it with a segment override. */
void
setup_tcr_extra_segment(TCR *tcr)
{
#ifdef FREEBSD
  amd64_set_gsbase(tcr);
#endif
#ifdef LINUX
  arch_prctl(ARCH_SET_GS, (natural)tcr);
#endif
#ifdef DARWIN
  /* There's no way to do this yet.  See DARWIN_GS_HACK */
  /* darwin_set_x8664_fs_reg(tcr); */
#endif
}
684
685#endif
686
687
688
689/*
690  Caller must hold the area_lock.
691*/
/*
  Caller must hold the area_lock.
*/
/* Create and initialize a TCR for a new thread: allocate its
   semaphores, value/temp stacks (under the TCR-area lock), FP
   state, and thread-local-binding table.  The new TCR starts in
   the FOREIGN valence.  Returns the initialized TCR. */
TCR *
new_tcr(natural vstack_size, natural tstack_size)
{
  extern area
    *allocate_vstack_holding_area_lock(unsigned),
    *allocate_tstack_holding_area_lock(unsigned);
  area *a;
  int i;
  sigset_t sigmask;

  /* Start with all signals unblocked in this thread. */
  sigemptyset(&sigmask);
  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
#ifdef HAVE_TLS
  TCR *tcr = &current_tcr;
#else
  TCR *tcr = allocate_tcr();
#endif

#ifdef X8664
  setup_tcr_extra_segment(tcr);
  tcr->linear = tcr;
#endif

#if (WORD_SIZE == 64)
  tcr->single_float_convert.tag = subtag_single_float;
#endif
  lisp_global(TCR_COUNT) += (1<<fixnumshift);
  tcr->suspend = new_semaphore(0);
  tcr->resume = new_semaphore(0);
  tcr->reset_completion = new_semaphore(0);
  tcr->activate = new_semaphore(0);
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = allocate_vstack_holding_area_lock(vstack_size);
  tcr->vs_area = a;
  a->owner = tcr;
  tcr->save_vsp = (LispObj *) a->active; 
  a = allocate_tstack_holding_area_lock(tstack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->ts_area = a;
  a->owner = tcr;
  tcr->save_tsp = (LispObj *) a->active;
#ifdef X86
  tcr->next_tsp = tcr->save_tsp;
#endif

  tcr->valence = TCR_STATE_FOREIGN;
#ifdef PPC
  tcr->lisp_fpscr.words.l = 0xd0;
#endif
#ifdef X86
  /* Mask denormal and precision exceptions in lisp code. */
  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
#if 1                           /* Mask underflow; too hard to
                                   deal with denorms if underflow is
                                   enabled */
    (1 << MXCSR_UM_BIT) | 
#endif
    (1 << MXCSR_PM_BIT);
#endif
  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
  /* 2048 thread-local binding slots, all initially unbound. */
  tcr->tlb_limit = 2048<<fixnumshift;
  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
  for (i = 0; i < 2048; i++) {
    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
  }
  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
  return tcr;
}
760
/* pthread TSD destructor for a thread's TCR.  Runs up to
   PTHREAD_DESTRUCTOR_ITERATIONS times; until the countdown hits 0
   it just re-installs the TSD value to be called again.  On the
   final call it runs the foreign-thread-exit callback (if the
   thread was foreign), tears down the thread's stack areas,
   semaphores, and TLB, and finally raises the termination
   semaphore (if any) for whoever is waiting on thread exit. */
void
shutdown_thread_tcr(void *arg)
{
  TCR *tcr = TCR_FROM_TSD(arg);

  area *vs, *ts, *cs;
  void *termination_semaphore;
 
  if (--(tcr->shutdown_count) == 0) {
    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
      /* Let lisp unwind its per-thread state for this foreign thread. */
      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
   
      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
      tsd_set(lisp_global(TCR_KEY), NULL);
    }
#ifdef DARWIN
    darwin_exception_cleanup(tcr);
#endif
    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
    /* Detach the three stack areas before condemning them. */
    vs = tcr->vs_area;
    tcr->vs_area = NULL;
    ts = tcr->ts_area;
    tcr->ts_area = NULL;
    cs = tcr->cs_area;
    tcr->cs_area = NULL;
    if (vs) {
      condemn_area_holding_area_lock(vs);
    }
    if (ts) {
      condemn_area_holding_area_lock(ts);
    }
    if (cs) {
      condemn_area_holding_area_lock(cs);
    }
    destroy_semaphore(&tcr->suspend);
    destroy_semaphore(&tcr->resume);
    destroy_semaphore(&tcr->reset_completion);
    destroy_semaphore(&tcr->activate);
    free(tcr->tlb_pointer);
    tcr->tlb_pointer = NULL;
    tcr->tlb_limit = 0;
    tcr->osid = 0;              /* marks this TCR as dead */
    tcr->interrupt_pending = 0;
    termination_semaphore = tcr->termination_semaphore;
    tcr->termination_semaphore = NULL;
#ifdef HAVE_TLS
    dequeue_tcr(tcr);
#endif
    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
    if (termination_semaphore) {
      SEM_RAISE(termination_semaphore);
    }
  } else {
    /* Not yet: reinstall the TSD so the destructor fires again. */
    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
  }
}
819
820void
821tcr_cleanup(void *arg)
822{
823  TCR *tcr = (TCR *)arg;
824  area *a;
825
826  a = tcr->vs_area;
827  if (a) {
828    a->active = a->high;
829  }
830  a = tcr->ts_area;
831  if (a) {
832    a->active = a->high;
833  }
834  a = tcr->cs_area;
835  if (a) {
836    a->active = a->high;
837  }
838  tcr->valence = TCR_STATE_FOREIGN;
839  tcr->shutdown_count = 1;
840  shutdown_thread_tcr(tcr);
841  tsd_set(lisp_global(TCR_KEY), NULL);
842}
843
/* Return a platform-specific native identifier for the calling
   thread (pid on Linux, Mach thread port on Darwin, pthread_t on
   FreeBSD/Solaris), cast to a void*. */
void *
current_native_thread_id()
{
  return ((void *) (natural)
#ifdef LINUX
          getpid()
#endif
#ifdef DARWIN
          mach_thread_self()
#endif
#ifdef FREEBSD
          pthread_self()
#endif
#ifdef SOLARIS
          pthread_self()
#endif
          );
}
862
863
/* Finish initializing 'tcr' for the calling thread: record its OS
   ids, register its C stack as an area (under the TCR-area lock),
   install the TCR in thread-specific data, and do per-platform
   exception setup. */
void
thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
{
  area *a, *register_cstack_holding_area_lock(BytePtr, natural);

  tcr->osid = current_thread_osid();
  tcr->native_thread_id = current_native_thread_id();
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->cs_area = a;
  a->owner = tcr;
  /* Foreign threads don't get a lisp-visible stack limit. */
  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
  }
#ifdef LINUX
#ifdef PPC
#ifndef PPC64
  /* Remember the TOC pointer (r2) for foreign calls. */
  tcr->native_thread_info = current_r2;
#endif
#endif
#endif
  tcr->errno_loc = &errno;      /* per-thread errno location */
  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
  if (use_mach_exception_handling) {
    darwin_exception_init(tcr);
  }
#endif
#ifdef LINUX
  linux_exception_init(tcr);
#endif
  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
}
899
/*
  Register the specified tcr as "belonging to" the current thread.
  Under Darwin, setup Mach exception handling for the thread.
  Install cleanup handlers for thread termination.
*/
void
register_thread_tcr(TCR *tcr)
{
  void *stack_base = NULL;
  natural stack_size = 0;

  /* Discover this thread's C stack, initialize the TCR with it,
     then link the TCR into the global ring. */
  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
  thread_init_tcr(tcr, stack_base, stack_size);
  enqueue_tcr(tcr);
}
915
916
917 
918 
919#ifndef MAP_GROWSDOWN
920#define MAP_GROWSDOWN 0
921#endif
922
923Ptr
924create_stack(int size)
925{
926  Ptr p;
927  size=align_to_power_of_2(size, log2_page_size);
928  p = (Ptr) mmap(NULL,
929                     (size_t)size,
930                     PROT_READ | PROT_WRITE | PROT_EXEC,
931                     MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
932                     -1,        /* Darwin insists on this when not mmap()ing
933                                 a real fd */
934                     0);
935  if (p != (Ptr)(-1)) {
936    *((size_t *)p) = size;
937    return p;
938  }
939  allocation_failure(true, size);
940
941}
942 
/* Allocate a stack of 'size' bytes; simply defers to create_stack(). */
void *
allocate_stack(unsigned size)
{
  return (void *)create_stack(size);
}
948
949void
950free_stack(void *s)
951{
952  size_t size = *((size_t *)s);
953  munmap(s, size);
954}
955
956Boolean threads_initialized = false;
957
958#ifndef USE_FUTEX
/* If the machine has more than one CPU, raise spin_lock_tries so
   spinlocks spin longer before yielding (spinning is pointless on
   a uniprocessor). */
void
count_cpus()
{
#ifdef DARWIN
  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
#include <mach/host_info.h>

  struct host_basic_info info;
  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 
  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
    if (info.max_cpus > 1) {
      spin_lock_tries = 1024;
    }
  }
#else
  int n = sysconf(_SC_NPROCESSORS_ONLN);
 
  if (n > 1) {
    spin_lock_tries = 1024;
  }
#endif
}
982#endif
983
984
/* One-time thread-system initialization: record the initial TCR,
   create the TSD key whose destructor tears TCRs down, install the
   suspend/resume signal handlers, and tune spinlocks for SMP. */
void
init_threads(void * stack_base, TCR *tcr)
{
  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
  thread_signal_setup();

#ifndef USE_FUTEX
  count_cpus();
#endif
  threads_initialized = true;
}
997
998
999void *
1000lisp_thread_entry(void *param)
1001{
1002  thread_activation *activation = (thread_activation *)param;
1003  TCR *tcr = new_tcr(activation->vsize, activation->vsize);
1004  sigset_t mask, old_mask;
1005
1006  sigemptyset(&mask);
1007  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
1008
1009  register_thread_tcr(tcr);
1010
1011  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
1012  tcr->vs_area->active -= node_size;
1013  *(--tcr->save_vsp) = lisp_nil;
1014  enable_fp_exceptions();
1015  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
1016  activation->tcr = tcr;
1017  SEM_RAISE(activation->created);
1018  do {
1019    SEM_RAISE(tcr->reset_completion);
1020    SEM_WAIT_FOREVER(tcr->activate);
1021    /* Now go run some lisp code */
1022    start_lisp(TCR_TO_TSD(tcr),0);
1023  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
1024  pthread_cleanup_pop(true);
1025
1026}
1027
1028
/* Create a new lisp thread with the given stack sizes and wait for
   it to come up.  Returns the new thread's TCR as a TSD handle.
   NOTE(review): if create_system_thread() fails, activation.tcr is
   still 0 and TCR_TO_TSD(0) is returned - confirm callers check.
   'current' is fetched but otherwise unused. */
void *
xNewThread(natural control_stack_size,
           natural value_stack_size,
           natural temp_stack_size)

{
  thread_activation activation;
  TCR *current = get_tcr(false);


  activation.tsize = temp_stack_size;
  activation.vsize = value_stack_size;
  activation.tcr = 0;
  activation.created = new_semaphore(0);
  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
                           NULL, 
                           lisp_thread_entry,
                           (void *) &activation)) {
   
    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
  }
  destroy_semaphore(&activation.created); 
  return TCR_TO_TSD(activation.tcr);
}
1053
1054Boolean
1055active_tcr_p(TCR *q)
1056{
1057  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1058 
1059  do {
1060    if (p == q) {
1061      return true;
1062    }
1063    p = p->next;
1064  } while (p != head);
1065  return false;
1066}
1067
1068
/* Forcibly terminate another lisp thread via pthread_cancel().
   Refuses to kill the initial thread, the calling thread, or a
   TCR that's no longer on the live ring; returns 0 on success,
   -50 (classic MacOS paramErr-style code) otherwise. */
OSErr
xDisposeThread(TCR *tcr)
{
  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
      pthread_cancel((pthread_t)(tcr->osid));
      return 0;
    }
  }
  return -50;
}
1080
/* Unimplemented legacy entry point; traps into the kernel debugger. */
OSErr
xYieldToThread(TCR *target)
{
  Bug(NULL, "xYieldToThread ?");
  return 0;
}
1087 
/* Unimplemented legacy entry point; traps into the kernel debugger. */
OSErr
xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
{
  Bug(NULL, "xThreadCurrentStackSpace ?");
  return 0;
}
1094
1095
1096
1097LispObj
1098create_system_thread(size_t stack_size,
1099                     void* stackaddr,
1100                     void* (*start_routine)(void *),
1101                     void* param)
1102{
1103  pthread_attr_t attr;
1104  pthread_t returned_thread = (pthread_t) 0;
1105
1106  pthread_attr_init(&attr);
1107  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
1108
1109  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
1110    stack_size = PTHREAD_STACK_MIN;
1111  }
1112
1113  stack_size = ensure_stack_limit(stack_size);
1114  if (stackaddr != NULL) {
1115    /* Size must have been specified.  Sort of makes sense ... */
1116#ifdef DARWIN
1117    Fatal("no pthread_attr_setsetstack. "," Which end of stack does address refer to?");
1118#else
1119    pthread_attr_setstack(&attr, stackaddr, stack_size);
1120#endif
1121  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
1122    pthread_attr_setstacksize(&attr,stack_size);
1123  }
1124
1125  /*
1126     I think that's just about enough ... create the thread.
1127  */
1128  pthread_create(&returned_thread, &attr, start_routine, param);
1129  return (LispObj) ptr_to_lispobj(returned_thread);
1130}
1131
/* Return the calling thread's TCR.  If there is none and 'create'
   is true, this is a foreign thread entering lisp for the first
   time: build a TCR, push NIL, saved-register slots, and the
   thread-binding words the lisp callback asks for onto the value
   stack, then let the callback finish per-thread setup. */
TCR *
get_tcr(Boolean create)
{
#ifdef HAVE_TLS
  TCR *current = current_tcr.linear;
#else
  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
#endif

  if ((current == NULL) && create) {
    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
    int i, nbindwords = 0;
    extern unsigned initial_stack_size;
   
    /* Make one. */
    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
    register_thread_tcr(current);
#ifdef DEBUG_TCR_CREATION
    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
#endif
    current->vs_area->active -= node_size;
    *(--current->save_vsp) = lisp_nil;
#ifdef PPC
#define NSAVEREGS 8
#endif
#ifdef X8664
#define NSAVEREGS 4
#endif
    /* Reserve vstack slots for saved registers ... */
    for (i = 0; i < NSAVEREGS; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    /* ... and however many binding words lisp says it needs
       (callback with -1 is a query). */
    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
    for (i = 0; i < nbindwords; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    current->shutdown_count = 1;
    /* Callback with 0 finishes foreign-thread initialization. */
    ((void (*)())ptr_from_lispobj(callback_ptr))(0);

  }
 
  return current;
}
1179
1180
/* Ask the thread owning 'tcr' to suspend itself by sending the
   suspend signal.  Only the transition from 0 to 1 of the nested
   suspend count actually signals; deeper nesting returns false.
   Returns true if a suspension was initiated (the caller should
   then wait via tcr_suspend_ack()). */
Boolean
suspend_tcr(TCR *tcr)
{
  int suspend_count = atomic_incf(&(tcr->suspend_count));
  if (suspend_count == 1) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Suspending 0x%x\n", tcr);
#endif
#ifdef DARWIN_nope
    if (mach_suspend_tcr(tcr)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
      return true;
    }
#endif
    if (pthread_kill((pthread_t)(tcr->osid), thread_suspend_signal) == 0) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
    } else {
      /* A problem using pthread_kill.  On Darwin, this can happen
         if the thread has had its signal mask surgically removed
         by pthread_exit.  If the native (Mach) thread can be suspended,
         do that and return true; otherwise, flag the tcr as belonging
         to a dead thread by setting tcr->osid to 0.
      */
      tcr->osid = 0;
      return false;
    }
    return true;
  }
  return false;
}
1211
/* Wait (if an ack is still pending) for the suspended thread to
   raise its 'suspend' semaphore from the signal handler, then
   clear the pending flag.  Always returns true. */
Boolean
tcr_suspend_ack(TCR *tcr)
{
  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
    SEM_WAIT_FOREVER(tcr->suspend);
    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
#endif

  }
  return true;
}
1225
1226     
1227
1228
/* Suspend another lisp thread and wait until it has actually
   stopped.  Holds the TCR-area lock (and, where configured, the
   Mach exception lock) around the operation.  Returns true if this
   call performed the suspension. */
Boolean
lisp_suspend_tcr(TCR *tcr)
{
  Boolean suspended;
  TCR *current = get_tcr(true);
 
  LOCK(lisp_global(TCR_AREA_LOCK),current);
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (use_mach_exception_handling) {
    pthread_mutex_lock(mach_exception_lock);
  }
#endif
#endif
  suspended = suspend_tcr(tcr);
  if (suspended) {
    while (!tcr_suspend_ack(tcr));
  }
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (use_mach_exception_handling) {
    pthread_mutex_unlock(mach_exception_lock);
  }
#endif
#endif
  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
  return suspended;
}
1257         
1258
/* Undo one level of suspension.  Only when the nested suspend
   count returns to 0 is the thread actually resumed (via its
   'resume' semaphore or, if so configured, the resume signal).
   Returns true if the thread was actually resumed. */
Boolean
resume_tcr(TCR *tcr)
{
  int suspend_count = atomic_decf(&(tcr->suspend_count)), err;
  if (suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
      /* Thread was suspended at the Mach level, not via signals. */
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
#endif
      mach_resume_tcr(tcr);
      return true;
    }
#endif
#if RESUME_VIA_RESUME_SEMAPHORE
    SEM_RAISE(tcr->resume);
#else
    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
    }
#endif
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
#endif
    return true;
  }
  return false;
}
1287
/* After resume_tcr() brought the suspend count back to 0, wait (if
   WAIT_FOR_RESUME_ACK is configured) for the resumed thread to
   acknowledge; also clears the Mach alternate-suspend flag. */
void
wait_for_resumption(TCR *tcr)
{
  if (tcr->suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
      return;
  }
#endif
#if WAIT_FOR_RESUME_ACK
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
#endif
    SEM_WAIT_FOREVER(tcr->suspend);
#endif
  }
}
1306   
1307
1308
1309Boolean
1310lisp_resume_tcr(TCR *tcr)
1311{
1312  Boolean resumed;
1313  TCR *current = get_tcr(true);
1314 
1315  LOCK(lisp_global(TCR_AREA_LOCK),current);
1316  resumed = resume_tcr(tcr);
1317  wait_for_resumption(tcr);
1318  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1319  return resumed;
1320}
1321
1322
1323TCR *freed_tcrs = NULL;
1324
/* Push a dead thread's TCR onto the freed_tcrs list so its storage
   can be reclaimed later (by free_freed_tcrs()).  With thread-local
   TCRs (HAVE_TLS) the TCR wasn't malloced, so nothing is queued. */
void
enqueue_freed_tcr (TCR *tcr)
{
#ifndef HAVE_TLS
  tcr->next = freed_tcrs;
  freed_tcrs = tcr;
#endif
}
1333
1334/* It's not clear that we can safely condemn a dead tcr's areas, since
1335   we may not be able to call free() if a suspended thread owns a
1336   malloc lock. At least make the areas appear to be empty.
1337*/
1338   
1339
1340void
1341normalize_dead_tcr_areas(TCR *tcr)
1342{
1343  area *a;
1344
1345  a = tcr->vs_area;
1346  if (a) {
1347    a->active = a->high;
1348  }
1349
1350  a = tcr->ts_area;
1351  if (a) {
1352    a->active = a->high;
1353  }
1354
1355  a = tcr->cs_area;
1356  if (a) {
1357    a->active = a->high;
1358  }
1359}
1360   
1361void
1362free_freed_tcrs ()
1363{
1364  TCR *current, *next;
1365
1366  for (current = freed_tcrs; current; current = next) {
1367    next = current->next;
1368#ifndef HAVE_TLS
1369    free(current);
1370#endif
1371  }
1372  freed_tcrs = NULL;
1373}
1374
/* Suspend every thread but the caller.  for_gc is true when called
   on behalf of the garbage collector.  On return, all other live
   threads have acknowledged suspension, dead TCRs have been dequeued
   (safe only while everyone else is stopped), and the caller still
   holds TCR_AREA_LOCK -- it's released by resume_other_threads(). */
void
suspend_other_threads(Boolean for_gc)
{
  TCR *current = get_tcr(true), *other, *next;
  int dead_tcr_count = 0;
  Boolean all_acked;

  LOCK(lisp_global(TCR_AREA_LOCK), current);
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  /* Keep Mach exception handling quiescent while the GC runs. */
  if (for_gc && use_mach_exception_handling) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
#endif
    pthread_mutex_lock(mach_exception_lock);
  }
#endif
#endif
  /* TCRs form a circular list; visit everything but ourselves.
     osid == 0 marks a thread that has already exited. */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      suspend_tcr(other);
      if (other->osid == 0) {
        /* Thread died while we were suspending it. */
        dead_tcr_count++;
      }
    } else {
      dead_tcr_count++;
    }
  }

  /* Spin until every live thread has acknowledged suspension. */
  do {
    all_acked = true;
    for (other = current->next; other != current; other = other->next) {
      if ((other->osid != 0)) {
        if (!tcr_suspend_ack(other)) {
          all_acked = false;
        }
      }
    }
  } while(! all_acked);

     

  /* All other threads are suspended; can safely delete dead tcrs now */
  if (dead_tcr_count) {
    for (other = current->next; other != current; other = next) {
      next = other->next;
      if ((other->osid == 0))  {
        normalize_dead_tcr_areas(other);
        dequeue_tcr(other);
        enqueue_freed_tcr(other);
      }
    }
  }
}
1429
/* Entry point for lisp: suspend all other threads (not on behalf of GC). */
void
lisp_suspend_other_threads()
{
  suspend_other_threads(false);
}
1435
/* Undo suspend_other_threads(): resume every other live thread, then
   wait for each to acknowledge, free the TCRs of dead threads, drop
   the Mach exception lock if the GC took it, and finally release
   TCR_AREA_LOCK (acquired in suspend_other_threads()). */
void
resume_other_threads(Boolean for_gc)
{
  TCR *current = get_tcr(true), *other;
  /* First pass: send every live thread its resume. */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      resume_tcr(other);
    }
  }
  /* Second pass: wait for each of them to be running again. */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      wait_for_resumption(other);
    }
  }
  /* Dead TCRs were dequeued while all threads were stopped, so
     freeing them now can't race with anyone. */
  free_freed_tcrs();
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (for_gc && use_mach_exception_handling) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
#endif
    pthread_mutex_unlock(mach_exception_lock);
  }
#endif
#endif

  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
}
1464
/* Entry point for lisp: resume all other threads (not on behalf of GC). */
void
lisp_resume_other_threads()
{
  resume_other_threads(false);
}
1470
1471
1472
1473rwlock *
1474rwlock_new()
1475{
1476  extern int cache_block_size;
1477
1478  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1479  rwlock *rw;
1480 
1481  if (p) {
1482    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1483    rw->malloced_ptr = p;
1484#ifndef USE_FUTEX
1485    rw->reader_signal = new_semaphore(0);
1486    rw->writer_signal = new_semaphore(0);
1487    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1488      if (rw->reader_signal) {
1489        destroy_semaphore(&(rw->reader_signal));
1490      } else {
1491        destroy_semaphore(&(rw->writer_signal));
1492      }
1493      free(rw);
1494      rw = NULL;
1495    }
1496#endif
1497  }
1498  return rw;
1499}
1500
1501     
1502/*
1503  Try to get read access to a multiple-readers/single-writer lock.  If
1504  we already have read access, return success (indicating that the
1505  lock is held another time.  If we already have write access to the
1506  lock ... that won't work; return EDEADLK.  Wait until no other
1507  thread has or is waiting for write access, then indicate that we
1508  hold read access once.
1509*/
1510#ifndef USE_FUTEX
int
rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  /* rw->state < 0: -state readers hold the lock; > 0: a writer holds it. */
  int err = 0;
 
  LOCK_SPINLOCK(rw->spin, tcr);

  if (rw->writer == tcr) {
    /* We already hold the lock for writing; read access would deadlock. */
    RELEASE_SPINLOCK(rw->spin);
    return EDEADLK;
  }

  /* Writers have priority: wait while one holds or is waiting for
     the lock.  The spinlock is dropped around the semaphore wait. */
  while (rw->blocked_writers || (rw->state > 0)) {
    rw->blocked_readers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_readers--;
    if (err == EINTR) {
      err = 0;                  /* interrupted: just retry */
    }
    if (err) {
      /* Timeout or real error: give up. */
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  rw->state--;                  /* one more reader */
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1541#else
1542int
1543rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1544{
1545  natural waitval;
1546
1547  lock_futex(&rw->spin);
1548
1549  if (rw->writer == tcr) {
1550    unlock_futex(&rw->spin);
1551    return EDEADLOCK;
1552  }
1553  while (1) {
1554    if (rw->writer == NULL) {
1555      --rw->state;
1556      unlock_futex(&rw->spin);
1557      return 0;
1558    }
1559    rw->blocked_readers++;
1560    waitval = rw->reader_signal;
1561    unlock_futex(&rw->spin);
1562    futex_wait(&rw->reader_signal,waitval);
1563    lock_futex(&rw->spin);
1564    rw->blocked_readers--;
1565  }
1566  return 0;
1567}
1568#endif   
1569
1570
1571/*
1572  Try to obtain write access to the lock.
1573  It is an error if we already have read access, but it's hard to
1574  detect that.
1575  If we already have write access, increment the count that indicates
1576  that.
1577  Otherwise, wait until the lock is not held for reading or writing,
1578  then assert write access.
1579*/
1580
1581#ifndef USE_FUTEX
int
rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  int err = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->writer == tcr) {
    /* Recursive write lock: just bump the hold count. */
    rw->state++;
    RELEASE_SPINLOCK(rw->spin);
    return 0;
  }

  /* Wait until the lock is neither read-held (state < 0) nor
     write-held (state > 0); drop the spinlock around the wait. */
  while (rw->state != 0) {
    rw->blocked_writers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_writers--;
    if (err == EINTR) {
      err = 0;                  /* interrupted: just retry */
    }
    if (err) {
      /* Timeout or real error: give up. */
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  rw->state = 1;
  rw->writer = tcr;
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1613
1614#else
1615int
1616rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1617{
1618  int err = 0;
1619  natural waitval;
1620
1621  lock_futex(&rw->spin);
1622  if (rw->writer == tcr) {
1623    rw->state++;
1624    unlock_futex(&rw->spin);
1625    return 0;
1626  }
1627
1628  while (rw->state != 0) {
1629    rw->blocked_writers++;
1630    waitval = rw->writer_signal;
1631    unlock_futex(&rw->spin);
1632    futex_wait(&rw->writer_signal,waitval);
1633    lock_futex(&rw->spin);
1634    rw->blocked_writers--;
1635  }
1636  rw->state = 1;
1637  rw->writer = tcr;
1638  unlock_futex(&rw->spin);
1639  return err;
1640}
1641#endif
1642
1643/*
1644  Sort of the same as above, only return EBUSY if we'd have to wait.
1645*/
1646#ifndef USE_FUTEX
1647int
1648rwlock_try_wlock(rwlock *rw, TCR *tcr)
1649{
1650  int ret = EBUSY;
1651
1652  LOCK_SPINLOCK(rw->spin,tcr);
1653  if (rw->writer == tcr) {
1654    rw->state++;
1655    ret = 0;
1656  } else {
1657    if (rw->state == 0) {
1658      rw->writer = tcr;
1659      rw->state = 1;
1660      ret = 0;
1661    }
1662  }
1663  RELEASE_SPINLOCK(rw->spin);
1664  return ret;
1665}
1666#else
1667int
1668rwlock_try_wlock(rwlock *rw, TCR *tcr)
1669{
1670  int ret = EBUSY;
1671
1672  lock_futex(&rw->spin);
1673  if (rw->writer == tcr) {
1674    rw->state++;
1675    ret = 0;
1676  } else {
1677    if (rw->state == 0) {
1678      rw->writer = tcr;
1679      rw->state = 1;
1680      ret = 0;
1681    }
1682  }
1683  unlock_futex(&rw->spin);
1684  return ret;
1685}
1686#endif
1687
1688#ifndef USE_FUTEX
1689int
1690rwlock_try_rlock(rwlock *rw, TCR *tcr)
1691{
1692  int ret = EBUSY;
1693
1694  LOCK_SPINLOCK(rw->spin,tcr);
1695  if (rw->state <= 0) {
1696    --rw->state;
1697    ret = 0;
1698  }
1699  RELEASE_SPINLOCK(rw->spin);
1700  return ret;
1701}
1702#else
1703int
1704rwlock_try_rlock(rwlock *rw, TCR *tcr)
1705{
1706  int ret = EBUSY;
1707
1708  lock_futex(&rw->spin);
1709  if (rw->state <= 0) {
1710    --rw->state;
1711    ret = 0;
1712  }
1713  unlock_futex(&rw->spin);
1714  return ret;
1715}
1716#endif
1717
1718
1719
1720#ifndef USE_FUTEX
/* Release one hold on the lock: a write hold if state > 0 (caller
   must be the writer), a read hold if state < 0.  Returns EINVAL if
   the lock isn't held appropriately.  When the lock becomes free,
   wake one blocked writer if any (writers have priority), else wake
   all blocked readers. */
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{

  int err = 0;
  natural blocked_readers = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->state > 0) {
    /* Write-held: only the owning writer may unlock. */
    if (rw->writer != tcr) {
      err = EINVAL;
    } else {
      --rw->state;
      if (rw->state == 0) {
        rw->writer = NULL;
      }
    }
  } else {
    if (rw->state < 0) {
      /* Read-held: one fewer reader. */
      ++rw->state;
    } else {
      /* Not locked at all. */
      err = EINVAL;
    }
  }
  if (err) {
    RELEASE_SPINLOCK(rw->spin);
    return err;
  }
 
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      SEM_RAISE(rw->writer_signal);
    } else {
      blocked_readers = rw->blocked_readers;
      if (blocked_readers) {
        SEM_BROADCAST(rw->reader_signal, blocked_readers);
      }
    }
  }
  RELEASE_SPINLOCK(rw->spin);
  return 0;
}
1763#else
/* Futex-based version of rwlock_unlock(): same protocol as the
   semaphore version, but waking waiters means bumping the relevant
   futex word (so sleepers' FUTEX_WAIT compare fails) and calling
   futex_wake(). */
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{

  int err = 0;

  lock_futex(&rw->spin);
  if (rw->state > 0) {
    /* Write-held: only the owning writer may unlock. */
    if (rw->writer != tcr) {
      err = EINVAL;
    } else {
      --rw->state;
      if (rw->state == 0) {
        rw->writer = NULL;
      }
    }
  } else {
    if (rw->state < 0) {
      /* Read-held: one fewer reader. */
      ++rw->state;
    } else {
      /* Not locked at all. */
      err = EINVAL;
    }
  }
  if (err) {
    unlock_futex(&rw->spin);
    return err;
  }
 
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      /* Writers have priority: wake exactly one. */
      ++rw->writer_signal;
      unlock_futex(&rw->spin);
      futex_wake(&rw->writer_signal,1);
      return 0;
    }
    if (rw->blocked_readers) {
      /* No writers waiting: wake all readers. */
      ++rw->reader_signal;
      unlock_futex(&rw->spin);
      futex_wake(&rw->reader_signal, INT_MAX);
      return 0;
    }
  }
  unlock_futex(&rw->spin);
  return 0;
}
1809#endif
1810
1811       
/* Destroy a lock created by rwlock_new(): release its semaphores
   (when not using futexes) and free the original unaligned
   allocation via postGCfree(). */
void
rwlock_destroy(rwlock *rw)
{
#ifndef USE_FUTEX
  destroy_semaphore((void **)&rw->reader_signal);
  destroy_semaphore((void **)&rw->writer_signal);
#endif
  postGCfree((void *)(rw->malloced_ptr));
}
1821
1822
1823
/* Note: See TracBrowser for help on using the repository browser. */