source: trunk/source/lisp-kernel/thread_manager.c @ 8547

Last change on this file since 8547 was 8547, checked in by andreas, 12 years ago

Big #ifdef WINDOWS hack&slash: stub out every function that references
things unimplemented on Windows. This is mainly exception (a.k.a.
signal) handling stuff, pthread-related things, and memory management.

Using this code, and the compiler from:


http://downloads.sourceforge.net/mingw-w64/mingw-w64-bin_x86_64-linux_20080203.tar.bz2?modtime=1202034209&big_mirror=0

I am able to cross-compile a Win64 executable from my Ubuntu system. Of
course, it crashes pretty quickly, but that was to be expected.

1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
20/*
21   If we suspend via signals - and if the "suspend" signal is masked
22   in the handler for that signal - then it's not possible to suspend
23   a thread that's still waiting to be resumed (which is what
24   WAIT_FOR_RESUME_ACK is all about.)
25*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
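/*
  Sketch of the signal-based suspend/resume handshake implemented below,
  given the settings above (RESUME_VIA_RESUME_SEMAPHORE, no resume ack):

    suspending thread                       target thread
    -----------------                       -------------
    suspend_tcr():
      pthread_kill(suspend signal)  ---->   suspend_resume_handler():
    tcr_suspend_ack():                        tcr->suspend_context = context
      SEM_WAIT_FOREVER(tcr->suspend) <----    SEM_RAISE(tcr->suspend)
      ... target is now parked ...            SEM_WAIT_FOREVER(tcr->resume)
    resume_tcr():
      SEM_RAISE(tcr->resume)        ---->     handler clears suspend_context
                                              and returns
*/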
30typedef struct {
31  TCR *tcr;
32  natural vsize, tsize;
33  void *created;
34} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46#ifdef USE_FUTEX
47#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
48#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
49#define FUTEX_AVAIL (0)
50#define FUTEX_LOCKED (1)
51#define FUTEX_CONTENDED (2)
52#endif
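/*
  These constants implement the usual three-state futex protocol:
  FUTEX_AVAIL means unlocked, FUTEX_LOCKED means locked with no waiters,
  and FUTEX_CONTENDED means locked with possible waiters.  lock_futex()
  (below) first tries a simple AVAIL->LOCKED transition and only calls
  futex_wait() after marking the word CONTENDED, so unlock_futex() needs
  to futex_wake() waiters only when the word was left CONTENDED.
*/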
53
54#ifdef WINDOWS
55int
56raise_thread_interrupt(TCR *target)
57{
58}
59#else
60int
61raise_thread_interrupt(TCR *target)
62{
63#ifdef DARWIN_not_yet
64  if (use_mach_exception_handling) {
65    return mach_raise_thread_interrupt(target);
66  }
67#endif
68 return pthread_kill((pthread_t)target->osid, SIGNAL_FOR_PROCESS_INTERRUPT);
69}
70#endif
71
72signed_natural
73atomic_incf_by(signed_natural *ptr, signed_natural by)
74{
75  signed_natural old, new;
76  do {
77    old = *ptr;
78    new = old+by;
79  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
80           (natural) old);
81  return new;
82}
83
84signed_natural
85atomic_incf(signed_natural *ptr)
86{
87  return atomic_incf_by(ptr, 1);
88}
89
90signed_natural
91atomic_decf(signed_natural *ptr)
92{
93  signed_natural old, new;
94  do {
95    old = *ptr;
96    new = old == 0 ? old : old-1;
97  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
98           (natural) old);
99  return old-1;
100}
101
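/*
  Both atomic operations above are compare-and-swap loops built on
  store_conditional(): read the old value, compute the new one, and retry
  until the swap succeeds.  Note that atomic_decf() never takes the count
  below zero (the stored value stays at zero), although it still returns
  old-1 in that case.
*/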
102
103#ifndef USE_FUTEX
104int spin_lock_tries = 1;
105
106void
107get_spin_lock(signed_natural *p, TCR *tcr)
108{
109  int i, n = spin_lock_tries;
110 
111  while (1) {
112    for (i = 0; i < n; i++) {
113      if (atomic_swap(p,(signed_natural)tcr) == 0) {
114        return;
115      }
116    }
117#ifndef WINDOWS
118    sched_yield();
119#endif
120  }
121}
122#endif
123
124#ifndef USE_FUTEX
125int
126lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
127{
128
129  if (tcr == NULL) {
130    tcr = get_tcr(true);
131  }
132  if (m->owner == tcr) {
133    m->count++;
134    return 0;
135  }
136  while (1) {
137    LOCK_SPINLOCK(m->spinlock,tcr);
138    ++m->avail;
139    if (m->avail == 1) {
140      m->owner = tcr;
141      m->count = 1;
142      RELEASE_SPINLOCK(m->spinlock);
143      break;
144    }
145    RELEASE_SPINLOCK(m->spinlock);
146    SEM_WAIT_FOREVER(m->signal);
147  }
148  return 0;
149}
150
151#else /* USE_FUTEX */
152
153static void inline
154lock_futex(natural *p)
155{
156 
157  while (1) {
158    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
159      return;
160    }
161    while (1) {
162      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
163        return;
164      }
165      futex_wait(p,FUTEX_CONTENDED);
166    }
167  }
168}
169
170static void inline
171unlock_futex(natural *p)
172{
173  if (atomic_decf(p) != FUTEX_AVAIL) {
174    *p = FUTEX_AVAIL;
175    futex_wake(p,INT_MAX);
176  }
177}
178   
179int
180lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
181{
182  natural val;
183  if (tcr == NULL) {
184    tcr = get_tcr(true);
185  }
186  if (m->owner == tcr) {
187    m->count++;
188    return 0;
189  }
190  lock_futex(&m->avail);
191  m->owner = tcr;
192  m->count = 1;
193  return 0;
194}
195#endif /* USE_FUTEX */
196
197
198#ifndef USE_FUTEX 
199int
200unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
201{
202  int ret = EPERM, pending;
203
204  if (tcr == NULL) {
205    tcr = get_tcr(true);
206  }
207
208  if (m->owner == tcr) {
209    --m->count;
210    if (m->count == 0) {
211      LOCK_SPINLOCK(m->spinlock,tcr);
212      m->owner = NULL;
213      pending = m->avail-1 + m->waiting;     /* Don't count us */
214      m->avail = 0;
215      --pending;
216      if (pending > 0) {
217        m->waiting = pending;
218      } else {
219        m->waiting = 0;
220      }
221      RELEASE_SPINLOCK(m->spinlock);
222      if (pending >= 0) {
223        SEM_RAISE(m->signal);
224      }
225    }
226    ret = 0;
227  }
228  return ret;
229}
230#else /* USE_FUTEX */
231int
232unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
233{
234  int ret = EPERM, pending;
235
236   if (tcr == NULL) {
237    tcr = get_tcr(true);
238  }
239
240  if (m->owner == tcr) {
241    --m->count;
242    if (m->count == 0) {
243      m->owner = NULL;
244      unlock_futex(&m->avail);
245    }
246    ret = 0;
247  }
248  return ret;
249}
250#endif /* USE_FUTEX */
251
252void
253destroy_recursive_lock(RECURSIVE_LOCK m)
254{
255#ifndef USE_FUTEX
256  destroy_semaphore((void **)&m->signal);
257#endif
258  postGCfree((void *)(m->malloced_ptr));
259}
260
261/*
262  If we're already the owner (or if the lock is free), lock it
263  and increment the lock count; otherwise, return EBUSY without
264  waiting.
265*/
266
267#ifndef USE_FUTEX
268int
269recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
270{
271  TCR *owner = m->owner;
272
273  LOCK_SPINLOCK(m->spinlock,tcr);
274  if (owner == tcr) {
275    m->count++;
276    if (was_free) {
277      *was_free = 0;
278      RELEASE_SPINLOCK(m->spinlock);
279      return 0;
280    }
281  }
282  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
283    m->owner = tcr;
284    m->count = 1;
285    if (was_free) {
286      *was_free = 1;
287    }
288    RELEASE_SPINLOCK(m->spinlock);
289    return 0;
290  }
291
292  RELEASE_SPINLOCK(m->spinlock);
293  return EBUSY;
294}
295#else
296int
297recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
298{
299  TCR *owner = m->owner;
300
301  if (owner == tcr) {
302    m->count++;
303    if (was_free) {
304      *was_free = 0;
305      return 0;
306    }
307  }
308  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
309    m->owner = tcr;
310    m->count = 1;
311    if (was_free) {
312      *was_free = 1;
313    }
314    return 0;
315  }
316
317  return EBUSY;
318}
319#endif
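/*
  Illustrative (hypothetical) use of the trylock interface above:

    int was_free;
    if (recursive_lock_trylock(m, get_tcr(true), &was_free) == 0) {
      ... got the lock; was_free says whether this was the first claim ...
      unlock_recursive_lock(m, NULL);
    } else {
      ... EBUSY: another thread owns the lock; don't wait ...
    }
*/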
320
321void
322sem_wait_forever(SEMAPHORE s)
323{
324  int status;
325
326  do {
327#ifdef USE_MACH_SEMAPHORES
328    mach_timespec_t q = {1,0};
329    status = SEM_TIMEDWAIT(s,q);
330#endif
331#ifdef USE_POSIX_SEMAPHORES
332    struct timespec q;
333    gettimeofday((struct timeval *)&q, NULL);
334    q.tv_sec += 1;
335    status = SEM_TIMEDWAIT(s,&q);
336#endif
337  } while (status != 0);
338}
339
340int
341wait_on_semaphore(void *s, int seconds, int millis)
342{
343  int nanos = (millis % 1000) * 1000000;
344#ifdef USE_POSIX_SEMAPHORES
345  int status;
346
347  struct timespec q;
348  gettimeofday((struct timeval *)&q, NULL);
349  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
350   
351  q.tv_nsec += nanos;
352  if (q.tv_nsec >= 1000000000L) {
353    q.tv_nsec -= 1000000000L;
354    seconds += 1;
355  }
356  q.tv_sec += seconds;
357  status = SEM_TIMEDWAIT(s, &q);
358  if (status < 0) {
359    return errno;
360  }
361  return status;
362#endif
363#ifdef USE_MACH_SEMAPHORES
364  mach_timespec_t q = {seconds, nanos};
365  int status = SEM_TIMEDWAIT(s, q);
366
367 
368  switch (status) {
369  case 0: return 0;
370  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
371  case KERN_ABORTED: return EINTR;
372  default: return EINVAL;
373  }
374
375#endif
376}
377
378
379int
380semaphore_maybe_timedwait(void *s, struct timespec *t)
381{
382  if (t) {
383    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
384  }
385  SEM_WAIT_FOREVER(s);
386  return 0;
387}
388
389void
390signal_semaphore(SEMAPHORE s)
391{
392  SEM_RAISE(s);
393}
394
395 
396#ifdef WINDOWS
397LispObj
398current_thread_osid()
399{
400}
401#else
402LispObj
403current_thread_osid()
404{
405  return (LispObj)ptr_to_lispobj(pthread_self());
406}
407#endif
408
409
410int thread_suspend_signal = 0, thread_resume_signal = 0;
411
412
413
414void
415linux_exception_init(TCR *tcr)
416{
417}
418
419
420TCR *
421get_interrupt_tcr(Boolean create)
422{
423  return get_tcr(create);
424}
425 
426  void
427suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
428{
429#ifdef DARWIN_GS_HACK
430  Boolean gs_was_tcr = ensure_gs_pthread();
431#endif
432  TCR *tcr = get_interrupt_tcr(false);
433
434  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
435    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
436  } else {
437    if (signo == thread_suspend_signal) {
438#if 0
439      sigset_t wait_for;
440#endif
441
442      tcr->suspend_context = context;
443#if 0
444      sigfillset(&wait_for);
445#endif
446      SEM_RAISE(tcr->suspend);
447#if 0
448      sigdelset(&wait_for, thread_resume_signal);
449#endif
450#if 1
451#if RESUME_VIA_RESUME_SEMAPHORE
452      SEM_WAIT_FOREVER(tcr->resume);
453#if SUSPEND_RESUME_VERBOSE
454      fprintf(stderr, "got  resume in 0x%x\n",tcr);
455#endif
456      tcr->suspend_context = NULL;
457#else
458      sigsuspend(&wait_for);
459#endif
460#else
461    do {
462      sigsuspend(&wait_for);
463    } while (tcr->suspend_context);
464#endif 
465    } else {
466      tcr->suspend_context = NULL;
467#if SUSPEND_RESUME_VERBOSE
468      fprintf(stderr,"got resume in 0x%x\n",tcr);
469#endif
470    }
471#if WAIT_FOR_RESUME_ACK
472    SEM_RAISE(tcr->suspend);
473#endif
474  }
475#ifdef DARWIN_GS_HACK
476  if (gs_was_tcr) {
477    set_gs_address(tcr);
478  }
479#endif
480#ifdef DARWIN
481  DarwinSigReturn(context);
482#endif
483#ifdef FREEBSD
484  freebsd_sigreturn(context);
485#endif
486}
487
488 
489
490/*
491  'base' should be set to the bottom (origin) of the stack, e.g., the
492  end from which it grows.
493*/
494 
495#ifdef WINDOWS
496void
497os_get_stack_bounds(LispObj q,void **base, natural *size)
498{
499}
500#else
501void
502os_get_stack_bounds(LispObj q,void **base, natural *size)
503{
504  pthread_t p = (pthread_t)(q);
505#ifdef DARWIN
506  *base = pthread_get_stackaddr_np(p);
507  *size = pthread_get_stacksize_np(p);
508#endif
509#ifdef LINUX
510  pthread_attr_t attr;
511
512  pthread_getattr_np(p,&attr);
513  pthread_attr_getstack(&attr, base, size);
514  *(natural *)base += *size;
515#endif
516#ifdef FREEBSD
517  pthread_attr_t attr;
518  void * temp_base;
519  size_t temp_size;
520 
521
522  pthread_attr_init(&attr); 
523  pthread_attr_get_np(p, &attr);
524  pthread_attr_getstackaddr(&attr,&temp_base);
525  pthread_attr_getstacksize(&attr,&temp_size);
526  *base = (void *)((natural)temp_base + temp_size);
527  *size = temp_size;
528#endif
529
530}
531#endif
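/*
  Note that the pthread_attr_getstack()/pthread_attr_getstackaddr() calls
  above report the lowest address of the stack region, so the Linux and
  FreeBSD cases add the size to obtain the stack's origin (the end it
  grows from); pthread_get_stackaddr_np() on Darwin already returns that
  origin.
*/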
532
533void *
534new_semaphore(int count)
535{
536#ifdef USE_POSIX_SEMAPHORES
537  sem_t *s = malloc(sizeof(sem_t));
538  sem_init(s, 0, count);
539  return s;
540#endif
541#ifdef USE_MACH_SEMAPHORES
542  semaphore_t s = (semaphore_t)0;
543  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
544  return (void *)(natural)s;
545#endif
546}
547
548RECURSIVE_LOCK
549new_recursive_lock()
550{
551  extern int cache_block_size;
552  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
553  RECURSIVE_LOCK m = NULL;
554#ifndef USE_FUTEX
555  void *signal = new_semaphore(0);
556#endif
557
558  if (p) {
559    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
560    m->malloced_ptr = p;
561  }
562
563#ifdef USE_FUTEX
564  if (m) {
565    return m;
566  }
567#else
568  if (m && signal) {
569    m->signal = signal;
570    return m;
571  }
572  if (m) {
573    free(p);
574  }
575  if (signal) {
576    destroy_semaphore(&signal);
577  }
578#endif
579  return NULL;
580}
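/*
  new_recursive_lock() over-allocates by cache_block_size-1 bytes and
  rounds the pointer up to a cache_block_size boundary, so each lock sits
  in its own cache block; the original pointer is stashed in malloced_ptr
  so that destroy_recursive_lock() can free the whole allocation.
*/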
581
582void
583destroy_semaphore(void **s)
584{
585  if (*s) {
586#ifdef USE_POSIX_SEMAPHORES
587    sem_destroy((sem_t *)*s);
588#endif
589#ifdef USE_MACH_SEMAPHORES
590    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
591#endif
592    *s=NULL;
593  }
594}
595
596#ifdef WINDOWS
597void
598tsd_set(LispObj key, void *datum)
599{
600}
601
602void *
603tsd_get(LispObj key)
604{
605}
606#else
607void
608tsd_set(LispObj key, void *datum)
609{
610  pthread_setspecific((pthread_key_t)key, datum);
611}
612
613void *
614tsd_get(LispObj key)
615{
616  return pthread_getspecific((pthread_key_t)key);
617}
618#endif
619
620void
621dequeue_tcr(TCR *tcr)
622{
623  TCR *next, *prev;
624
625  next = tcr->next;
626  prev = tcr->prev;
627
628  prev->next = next;
629  next->prev = prev;
630  tcr->prev = tcr->next = NULL;
631#ifdef X8664
632  tcr->linear = NULL;
633#endif
634}
635 
636void
637enqueue_tcr(TCR *new)
638{
639  TCR *head, *tail;
640 
641  LOCK(lisp_global(TCR_AREA_LOCK),new);
642  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
643  tail = head->prev;
644  tail->next = new;
645  head->prev = new;
646  new->prev = tail;
647  new->next = head;
648  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
649}
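/*
  TCRs live on a circular, doubly-linked list whose head is the initial
  thread's TCR (lisp_global(INITIAL_TCR)); enqueue_tcr() links the new TCR
  in just before the head (i.e., at the tail) while holding TCR_AREA_LOCK,
  and dequeue_tcr() unlinks without taking the lock (its callers are
  expected to hold it).
*/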
650
651TCR *
652allocate_tcr()
653{
654  TCR *tcr, *chain = NULL, *next;
655#ifdef DARWIN
656  extern Boolean use_mach_exception_handling;
657  kern_return_t kret;
658  mach_port_t
659    thread_exception_port,
660    task_self = mach_task_self();
661#endif
662  for (;;) {
663    tcr = calloc(1, sizeof(TCR));
664#ifdef DARWIN
665#if WORD_SIZE == 64
666    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
667      tcr->next = chain;
668      chain = tcr;
669      continue;
670    }
671#endif
672    if (use_mach_exception_handling) {
673      thread_exception_port = (mach_port_t)((natural)tcr);
674      kret = mach_port_allocate_name(task_self,
675                                     MACH_PORT_RIGHT_RECEIVE,
676                                     thread_exception_port);
677    } else {
678      kret = KERN_SUCCESS;
679    }
680
681    if (kret != KERN_SUCCESS) {
682      tcr->next = chain;
683      chain = tcr;
684      continue;
685    }
686#endif
687    for (next = chain; next;) {
688      next = next->next;
689      free(chain);
690    }
691    return tcr;
692  }
693}
694
695#ifdef X8664
696#ifdef LINUX
697#include <asm/prctl.h>
698#include <sys/prctl.h>
699#endif
700#ifdef FREEBSD
701#include <machine/sysarch.h>
702#endif
703
704void
705setup_tcr_extra_segment(TCR *tcr)
706{
707#ifdef FREEBSD
708  amd64_set_gsbase(tcr);
709#endif
710#ifdef LINUX
711  arch_prctl(ARCH_SET_GS, (natural)tcr);
712#endif
713#ifdef DARWIN
714  /* There's no way to do this yet.  See DARWIN_GS_HACK */
715  /* darwin_set_x8664_fs_reg(tcr); */
716#endif
717}
718
719#endif
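/*
  On x8664 the TCR is reached through a segment register, so the code
  above points the GS base at the TCR (amd64_set_gsbase() on FreeBSD,
  arch_prctl(ARCH_SET_GS, ...) on Linux).  Darwin provides no equivalent
  yet, which is what the DARWIN_GS_HACK mentioned above works around.
*/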
720
721
722
723/*
724  Caller must hold the area_lock.
725*/
726#ifdef WINDOWS
727TCR *
728new_tcr(natural vstack_size, natural tstack_size)
729{
730}
731#else
732TCR *
733new_tcr(natural vstack_size, natural tstack_size)
734{
735  extern area
736    *allocate_vstack_holding_area_lock(unsigned),
737    *allocate_tstack_holding_area_lock(unsigned);
738  area *a;
739  int i;
740  sigset_t sigmask;
741
742  sigemptyset(&sigmask);
743  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
744#ifdef HAVE_TLS
745  TCR *tcr = &current_tcr;
746#else
747  TCR *tcr = allocate_tcr();
748#endif
749
750#ifdef X8664
751  setup_tcr_extra_segment(tcr);
752  tcr->linear = tcr;
753#endif
754
755#if (WORD_SIZE == 64)
756  tcr->single_float_convert.tag = subtag_single_float;
757#endif
758  lisp_global(TCR_COUNT) += (1<<fixnumshift);
759  tcr->suspend = new_semaphore(0);
760  tcr->resume = new_semaphore(0);
761  tcr->reset_completion = new_semaphore(0);
762  tcr->activate = new_semaphore(0);
763  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
764  a = allocate_vstack_holding_area_lock(vstack_size);
765  tcr->vs_area = a;
766  a->owner = tcr;
767  tcr->save_vsp = (LispObj *) a->active; 
768  a = allocate_tstack_holding_area_lock(tstack_size);
769  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
770  tcr->ts_area = a;
771  a->owner = tcr;
772  tcr->save_tsp = (LispObj *) a->active;
773#ifdef X86
774  tcr->next_tsp = tcr->save_tsp;
775#endif
776
777  tcr->valence = TCR_STATE_FOREIGN;
778#ifdef PPC
779  tcr->lisp_fpscr.words.l = 0xd0;
780#endif
781#ifdef X86
782  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
783#if 1                           /* Mask underflow; too hard to
784                                   deal with denorms if underflow is
785                                   enabled */
786    (1 << MXCSR_UM_BIT) | 
787#endif
788    (1 << MXCSR_PM_BIT);
789#endif
790  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
791  tcr->tlb_limit = 2048<<fixnumshift;
792  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
793  for (i = 0; i < 2048; i++) {
794    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
795  }
796  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
797  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
798  return tcr;
799}
800#endif
801
802void
803shutdown_thread_tcr(void *arg)
804{
805  TCR *tcr = TCR_FROM_TSD(arg);
806
807  area *vs, *ts, *cs;
808  void *termination_semaphore;
809 
810  if (--(tcr->shutdown_count) == 0) {
811    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
812      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
813        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
814   
815      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
816      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
817      tsd_set(lisp_global(TCR_KEY), NULL);
818    }
819#ifdef DARWIN
820    darwin_exception_cleanup(tcr);
821#endif
822    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
823    vs = tcr->vs_area;
824    tcr->vs_area = NULL;
825    ts = tcr->ts_area;
826    tcr->ts_area = NULL;
827    cs = tcr->cs_area;
828    tcr->cs_area = NULL;
829    if (vs) {
830      condemn_area_holding_area_lock(vs);
831    }
832    if (ts) {
833      condemn_area_holding_area_lock(ts);
834    }
835    if (cs) {
836      condemn_area_holding_area_lock(cs);
837    }
838    destroy_semaphore(&tcr->suspend);
839    destroy_semaphore(&tcr->resume);
840    destroy_semaphore(&tcr->reset_completion);
841    destroy_semaphore(&tcr->activate);
842    free(tcr->tlb_pointer);
843    tcr->tlb_pointer = NULL;
844    tcr->tlb_limit = 0;
845    tcr->osid = 0;
846    tcr->interrupt_pending = 0;
847    termination_semaphore = tcr->termination_semaphore;
848    tcr->termination_semaphore = NULL;
849#ifdef HAVE_TLS
850    dequeue_tcr(tcr);
851#endif
852    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
853    if (termination_semaphore) {
854      SEM_RAISE(termination_semaphore);
855    }
856  } else {
857    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
858  }
859}
860
861void
862tcr_cleanup(void *arg)
863{
864  TCR *tcr = (TCR *)arg;
865  area *a;
866
867  a = tcr->vs_area;
868  if (a) {
869    a->active = a->high;
870  }
871  a = tcr->ts_area;
872  if (a) {
873    a->active = a->high;
874  }
875  a = tcr->cs_area;
876  if (a) {
877    a->active = a->high;
878  }
879  tcr->valence = TCR_STATE_FOREIGN;
880  tcr->shutdown_count = 1;
881  shutdown_thread_tcr(tcr);
882  tsd_set(lisp_global(TCR_KEY), NULL);
883}
884
885void *
886current_native_thread_id()
887{
888  return ((void *) (natural)
889#ifdef LINUX
890          getpid()
891#endif
892#ifdef DARWIN
893          mach_thread_self()
894#endif
895#ifdef FREEBSD
896          pthread_self()
897#endif
898#ifdef SOLARIS
899          pthread_self()
900#endif
901#ifdef WINDOWS
902          /* ThreadSelf() */ 23
903#endif
904          );
905}
906
907
908void
909thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
910{
911  area *a, *register_cstack_holding_area_lock(BytePtr, natural);
912
913  tcr->osid = current_thread_osid();
914  tcr->native_thread_id = current_native_thread_id();
915  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
916  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
917  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
918  tcr->cs_area = a;
919  a->owner = tcr;
920  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
921    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
922  }
923#ifdef LINUX
924#ifdef PPC
925#ifndef PPC64
926  tcr->native_thread_info = current_r2;
927#endif
928#endif
929#endif
930  tcr->errno_loc = &errno;
931  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
932#ifdef DARWIN
933  extern Boolean use_mach_exception_handling;
934  if (use_mach_exception_handling) {
935    darwin_exception_init(tcr);
936  }
937#endif
938#ifdef LINUX
939  linux_exception_init(tcr);
940#endif
941  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
942}
943
944/*
945  Register the specified tcr as "belonging to" the current thread.
946  Under Darwin, setup Mach exception handling for the thread.
947  Install cleanup handlers for thread termination.
948*/
949void
950register_thread_tcr(TCR *tcr)
951{
952  void *stack_base = NULL;
953  natural stack_size = 0;
954
955  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
956  thread_init_tcr(tcr, stack_base, stack_size);
957  enqueue_tcr(tcr);
958}
959
960
961 
962 
963#ifndef MAP_GROWSDOWN
964#define MAP_GROWSDOWN 0
965#endif
966
967#ifdef WINDOWS
968Ptr
969create_stack(int size)
970{
971}
972#else
973Ptr
974create_stack(int size)
975{
976  Ptr p;
977  size=align_to_power_of_2(size, log2_page_size);
978  p = (Ptr) mmap(NULL,
979                     (size_t)size,
980                     PROT_READ | PROT_WRITE | PROT_EXEC,
981                     MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
982                     -1,        /* Darwin insists on this when not mmap()ing
983                                 a real fd */
984                     0);
985  if (p != (Ptr)(-1)) {
986    *((size_t *)p) = size;
987    return p;
988  }
989  allocation_failure(true, size);
990
991}
992#endif
993
994void *
995allocate_stack(unsigned size)
996{
997  return create_stack(size);
998}
999
1000#ifdef WINDOWS
1001void
1002free_stack(void *s)
1003{
1004}
1005#else
1006void
1007free_stack(void *s)
1008{
1009  size_t size = *((size_t *)s);
1010  munmap(s, size);
1011}
1012#endif
1013
1014Boolean threads_initialized = false;
1015
1016#ifndef USE_FUTEX
1017#ifdef WINDOWS
1018void
1019count_cpus()
1020{
1021}
1022#else
1023void
1024count_cpus()
1025{
1026#ifdef DARWIN
1027  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
1028#include <mach/host_info.h>
1029
1030  struct host_basic_info info;
1031  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1032 
1033  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
1034    if (info.max_cpus > 1) {
1035      spin_lock_tries = 1024;
1036    }
1037  }
1038#else
1039  int n = sysconf(_SC_NPROCESSORS_ONLN);
1040 
1041  if (n > 1) {
1042    spin_lock_tries = 1024;
1043  }
1044#endif
1045}
1046#endif
1047#endif
1048
1049#ifdef WINDOWS
1050void
1051init_threads(void * stack_base, TCR *tcr)
1052{
1053}
1054void *
1055lisp_thread_entry(void *param)
1056{
1057}
1058#else
1059void
1060init_threads(void * stack_base, TCR *tcr)
1061{
1062  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
1063  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
1064  thread_signal_setup();
1065
1066#ifndef USE_FUTEX
1067  count_cpus();
1068#endif
1069  threads_initialized = true;
1070}
1071
1072
1073void *
1074lisp_thread_entry(void *param)
1075{
1076  thread_activation *activation = (thread_activation *)param;
1077  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
1078  sigset_t mask, old_mask;
1079
1080  sigemptyset(&mask);
1081  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
1082
1083  register_thread_tcr(tcr);
1084
1085  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
1086  tcr->vs_area->active -= node_size;
1087  *(--tcr->save_vsp) = lisp_nil;
1088  enable_fp_exceptions();
1089  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
1090  activation->tcr = tcr;
1091  SEM_RAISE(activation->created);
1092  do {
1093    SEM_RAISE(tcr->reset_completion);
1094    SEM_WAIT_FOREVER(tcr->activate);
1095    /* Now go run some lisp code */
1096    start_lisp(TCR_TO_TSD(tcr),0);
1097  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
1098  pthread_cleanup_pop(true);
1099
1100}
1101#endif
1102
1103void *
1104xNewThread(natural control_stack_size,
1105           natural value_stack_size,
1106           natural temp_stack_size)
1107
1108{
1109  thread_activation activation;
1110  TCR *current = get_tcr(false);
1111
1112
1113  activation.tsize = temp_stack_size;
1114  activation.vsize = value_stack_size;
1115  activation.tcr = 0;
1116  activation.created = new_semaphore(0);
1117  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
1118                           NULL, 
1119                           lisp_thread_entry,
1120                           (void *) &activation)) {
1121   
1122    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
1123  }
1124  destroy_semaphore(&activation.created); 
1125  return TCR_TO_TSD(activation.tcr);
1126}
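/*
  xNewThread() rendezvous: the creator fills in a thread_activation on its
  own stack, starts the new system thread, and blocks on the 'created'
  semaphore until lisp_thread_entry() has built the new thread's TCR and
  raised that semaphore; only then does it destroy the semaphore and
  return the new TCR's TSD handle.
*/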
1127
1128Boolean
1129active_tcr_p(TCR *q)
1130{
1131  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1132 
1133  do {
1134    if (p == q) {
1135      return true;
1136    }
1137    p = p->next;
1138  } while (p != head);
1139  return false;
1140}
1141
1142#ifdef WINDOWS
1143OSErr
1144xDisposeThread(TCR *tcr)
1145{
1146}
1147#else
1148OSErr
1149xDisposeThread(TCR *tcr)
1150{
1151  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
1152    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
1153      pthread_cancel((pthread_t)(tcr->osid));
1154      return 0;
1155    }
1156  }
1157  return -50;
1158}
1159#endif
1160
1161OSErr
1162xYieldToThread(TCR *target)
1163{
1164  Bug(NULL, "xYieldToThread ?");
1165  return 0;
1166}
1167 
1168OSErr
1169xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
1170{
1171  Bug(NULL, "xThreadCurrentStackSpace ?");
1172  return 0;
1173}
1174
1175
1176#ifdef WINDOWS
1177LispObj
1178create_system_thread(size_t stack_size,
1179                     void* stackaddr,
1180                     void* (*start_routine)(void *),
1181                     void* param)
1182{
1183}
1184#else
1185LispObj
1186create_system_thread(size_t stack_size,
1187                     void* stackaddr,
1188                     void* (*start_routine)(void *),
1189                     void* param)
1190{
1191  pthread_attr_t attr;
1192  pthread_t returned_thread = (pthread_t) 0;
1193
1194  pthread_attr_init(&attr);
1195  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
1196
1197  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
1198    stack_size = PTHREAD_STACK_MIN;
1199  }
1200
1201  stack_size = ensure_stack_limit(stack_size);
1202  if (stackaddr != NULL) {
1203    /* Size must have been specified.  Sort of makes sense ... */
1204#ifdef DARWIN
1205    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
1206#else
1207    pthread_attr_setstack(&attr, stackaddr, stack_size);
1208#endif
1209  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
1210    pthread_attr_setstacksize(&attr,stack_size);
1211  }
1212
1213  /*
1214     I think that's just about enough ... create the thread.
1215  */
1216  pthread_create(&returned_thread, &attr, start_routine, param);
1217  return (LispObj) ptr_to_lispobj(returned_thread);
1218}
1219#endif
1220
1221TCR *
1222get_tcr(Boolean create)
1223{
1224#ifdef HAVE_TLS
1225  TCR *current = current_tcr.linear;
1226#else
1227  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
1228  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
1229#endif
1230
1231  if ((current == NULL) && create) {
1232    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
1233      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1234    int i, nbindwords = 0;
1235    extern unsigned initial_stack_size;
1236   
1237    /* Make one. */
1238    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
1239    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
1240    register_thread_tcr(current);
1241#ifdef DEBUG_TCR_CREATION
1242#ifndef WINDOWS
1243    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
1244#endif
1245#endif
1246    current->vs_area->active -= node_size;
1247    *(--current->save_vsp) = lisp_nil;
1248#ifdef PPC
1249#define NSAVEREGS 8
1250#endif
1251#ifdef X8664
1252#define NSAVEREGS 4
1253#endif
1254    for (i = 0; i < NSAVEREGS; i++) {
1255      *(--current->save_vsp) = 0;
1256      current->vs_area->active -= node_size;
1257    }
1258    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
1259    for (i = 0; i < nbindwords; i++) {
1260      *(--current->save_vsp) = 0;
1261      current->vs_area->active -= node_size;
1262    }
1263    current->shutdown_count = 1;
1264    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
1265
1266  }
1267 
1268  return current;
1269}
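/*
  The code above handles "foreign" threads (threads not created by
  xNewThread()): when get_tcr(true) finds no TCR, it builds one using the
  callback stored in nrs_FOREIGN_THREAD_CONTROL.  Judging from the calls
  here and in shutdown_thread_tcr(), invoking the callback with -1 returns
  the number of binding words to reserve on the value stack, invoking it
  with 0 finishes setting up lisp state for the thread, and invoking it
  with 1 tears that state down again.
*/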
1270
1271#ifdef WINDOWS
1272Boolean
1273suspend_tcr(TCR *tcr)
1274{
1275}
1276#else
1277Boolean
1278suspend_tcr(TCR *tcr)
1279{
1280  int suspend_count = atomic_incf(&(tcr->suspend_count));
1281  if (suspend_count == 1) {
1282#if SUSPEND_RESUME_VERBOSE
1283    fprintf(stderr,"Suspending 0x%x\n", tcr);
1284#endif
1285#ifdef DARWIN_nope
1286    if (mach_suspend_tcr(tcr)) {
1287      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
1288      return true;
1289    }
1290#endif
1291    if (pthread_kill((pthread_t)(tcr->osid), thread_suspend_signal) == 0) {
1292      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1293    } else {
1294      /* A problem using pthread_kill.  On Darwin, this can happen
1295         if the thread has had its signal mask surgically removed
1296         by pthread_exit.  If the native (Mach) thread can be suspended,
1297         do that and return true; otherwise, flag the tcr as belonging
1298         to a dead thread by setting tcr->osid to 0.
1299      */
1300      tcr->osid = 0;
1301      return false;
1302    }
1303    return true;
1304  }
1305  return false;
1306}
1307#endif
1308
1309Boolean
1310tcr_suspend_ack(TCR *tcr)
1311{
1312  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
1313    SEM_WAIT_FOREVER(tcr->suspend);
1314    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1315#if SUSPEND_RESUME_VERBOSE
1316    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
1317#endif
1318
1319  }
1320  return true;
1321}
1322
1323     
1324
1325
1326Boolean
1327lisp_suspend_tcr(TCR *tcr)
1328{
1329  Boolean suspended;
1330  TCR *current = get_tcr(true);
1331 
1332  LOCK(lisp_global(TCR_AREA_LOCK),current);
1333#ifdef DARWIN
1334#if USE_MACH_EXCEPTION_LOCK
1335  if (use_mach_exception_handling) {
1336    pthread_mutex_lock(mach_exception_lock);
1337  }
1338#endif
1339#endif
1340  suspended = suspend_tcr(tcr);
1341  if (suspended) {
1342    while (!tcr_suspend_ack(tcr));
1343  }
1344#ifdef DARWIN
1345#if USE_MACH_EXCEPTION_LOCK
1346  if (use_mach_exception_handling) {
1347    pthread_mutex_unlock(mach_exception_lock);
1348  }
1349#endif
1350#endif
1351  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1352  return suspended;
1353}
1354         
1355
1356Boolean
1357resume_tcr(TCR *tcr)
1358{
1359  int suspend_count = atomic_decf(&(tcr->suspend_count)), err;
1360  if (suspend_count == 0) {
1361#ifdef DARWIN
1362    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1363#if SUSPEND_RESUME_VERBOSE
1364    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
1365#endif
1366      mach_resume_tcr(tcr);
1367      return true;
1368    }
1369#endif
1370#if RESUME_VIA_RESUME_SEMAPHORE
1371    SEM_RAISE(tcr->resume);
1372#else
1373    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
1374      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
1375    }
1376#endif
1377#if SUSPEND_RESUME_VERBOSE
1378    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
1379#endif
1380    return true;
1381  }
1382  return false;
1383}
1384
1385void
1386wait_for_resumption(TCR *tcr)
1387{
1388  if (tcr->suspend_count == 0) {
1389#ifdef DARWIN
1390    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1391      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
1392      return;
1393  }
1394#endif
1395#if WAIT_FOR_RESUME_ACK
1396#if SUSPEND_RESUME_VERBOSE
1397    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
1398#endif
1399    SEM_WAIT_FOREVER(tcr->suspend);
1400#endif
1401  }
1402}
1403   
1404
1405
1406Boolean
1407lisp_resume_tcr(TCR *tcr)
1408{
1409  Boolean resumed;
1410  TCR *current = get_tcr(true);
1411 
1412  LOCK(lisp_global(TCR_AREA_LOCK),current);
1413  resumed = resume_tcr(tcr);
1414  wait_for_resumption(tcr);
1415  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1416  return resumed;
1417}
1418
1419
1420TCR *freed_tcrs = NULL;
1421
1422void
1423enqueue_freed_tcr (TCR *tcr)
1424{
1425#ifndef HAVE_TLS
1426  tcr->next = freed_tcrs;
1427  freed_tcrs = tcr;
1428#endif
1429}
1430
1431/* It's not clear that we can safely condemn a dead tcr's areas, since
1432   we may not be able to call free() if a suspended thread owns a
1433   malloc lock. At least make the areas appear to be empty.
1434*/
1435   
1436
1437void
1438normalize_dead_tcr_areas(TCR *tcr)
1439{
1440  area *a;
1441
1442  a = tcr->vs_area;
1443  if (a) {
1444    a->active = a->high;
1445  }
1446
1447  a = tcr->ts_area;
1448  if (a) {
1449    a->active = a->high;
1450  }
1451
1452  a = tcr->cs_area;
1453  if (a) {
1454    a->active = a->high;
1455  }
1456}
1457   
1458void
1459free_freed_tcrs ()
1460{
1461  TCR *current, *next;
1462
1463  for (current = freed_tcrs; current; current = next) {
1464    next = current->next;
1465#ifndef HAVE_TLS
1466    free(current);
1467#endif
1468  }
1469  freed_tcrs = NULL;
1470}
1471
1472void
1473suspend_other_threads(Boolean for_gc)
1474{
1475  TCR *current = get_tcr(true), *other, *next;
1476  int dead_tcr_count = 0;
1477  Boolean all_acked;
1478
1479  LOCK(lisp_global(TCR_AREA_LOCK), current);
1480#ifdef DARWIN
1481#if USE_MACH_EXCEPTION_LOCK
1482  if (for_gc && use_mach_exception_handling) {
1483#if SUSPEND_RESUME_VERBOSE
1484    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
1485#endif
1486    pthread_mutex_lock(mach_exception_lock);
1487  }
1488#endif
1489#endif
1490  for (other = current->next; other != current; other = other->next) {
1491    if ((other->osid != 0)) {
1492      suspend_tcr(other);
1493      if (other->osid == 0) {
1494        dead_tcr_count++;
1495      }
1496    } else {
1497      dead_tcr_count++;
1498    }
1499  }
1500
1501  do {
1502    all_acked = true;
1503    for (other = current->next; other != current; other = other->next) {
1504      if ((other->osid != 0)) {
1505        if (!tcr_suspend_ack(other)) {
1506          all_acked = false;
1507        }
1508      }
1509    }
1510  } while(! all_acked);
1511
1512     
1513
1514  /* All other threads are suspended; can safely delete dead tcrs now */
1515  if (dead_tcr_count) {
1516    for (other = current->next; other != current; other = next) {
1517      next = other->next;
1518      if ((other->osid == 0))  {
1519        normalize_dead_tcr_areas(other);
1520        dequeue_tcr(other);
1521        enqueue_freed_tcr(other);
1522      }
1523    }
1524  }
1525}
1526
1527void
1528lisp_suspend_other_threads()
1529{
1530  suspend_other_threads(false);
1531}
1532
1533void
1534resume_other_threads(Boolean for_gc)
1535{
1536  TCR *current = get_tcr(true), *other;
1537  for (other = current->next; other != current; other = other->next) {
1538    if ((other->osid != 0)) {
1539      resume_tcr(other);
1540    }
1541  }
1542  for (other = current->next; other != current; other = other->next) {
1543    if ((other->osid != 0)) {
1544      wait_for_resumption(other);
1545    }
1546  }
1547  free_freed_tcrs();
1548#ifdef DARWIN
1549#if USE_MACH_EXCEPTION_LOCK
1550  if (for_gc && use_mach_exception_handling) {
1551#if SUSPEND_RESUME_VERBOSE
1552    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
1553#endif
1554    pthread_mutex_unlock(mach_exception_lock);
1555  }
1556#endif
1557#endif
1558
1559  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1560}
1561
1562void
1563lisp_resume_other_threads()
1564{
1565  resume_other_threads(false);
1566}
1567
1568
1569
1570rwlock *
1571rwlock_new()
1572{
1573  extern int cache_block_size;
1574
1575  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1576  rwlock *rw;
1577 
1578  if (p) {
1579    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1580    rw->malloced_ptr = p;
1581#ifndef USE_FUTEX
1582    rw->reader_signal = new_semaphore(0);
1583    rw->writer_signal = new_semaphore(0);
1584    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1585      if (rw->reader_signal) {
1586        destroy_semaphore(&(rw->reader_signal));
1587      } else {
1588        destroy_semaphore(&(rw->writer_signal));
1589      }
1590      free(rw);
1591      rw = NULL;
1592    }
1593#endif
1594  }
1595  return rw;
1596}
1597
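/*
  State encoding used by the rwlock operations below: rw->state > 0 means
  the lock is held for writing (the value is the writer's recursion
  count), rw->state < 0 means it is held for reading (-state readers hold
  it), and rw->state == 0 means it is free.  blocked_readers and
  blocked_writers count waiters so that rwlock_unlock() knows whom to
  wake.
*/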
1598     
1599/*
1600  Try to get read access to a multiple-readers/single-writer lock.  If
1601  we already have read access, return success (indicating that the
1602  lock is held another time).  If we already have write access to the
1603  lock ... that won't work; return EDEADLK.  Wait until no other
1604  thread has or is waiting for write access, then indicate that we
1605  hold read access once.
1606*/
1607#ifndef USE_FUTEX
1608int
1609rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1610{
1611  int err = 0;
1612 
1613  LOCK_SPINLOCK(rw->spin, tcr);
1614
1615  if (rw->writer == tcr) {
1616    RELEASE_SPINLOCK(rw->spin);
1617    return EDEADLK;
1618  }
1619
1620  while (rw->blocked_writers || (rw->state > 0)) {
1621    rw->blocked_readers++;
1622    RELEASE_SPINLOCK(rw->spin);
1623    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
1624    LOCK_SPINLOCK(rw->spin,tcr);
1625    rw->blocked_readers--;
1626    if (err == EINTR) {
1627      err = 0;
1628    }
1629    if (err) {
1630      RELEASE_SPINLOCK(rw->spin);
1631      return err;
1632    }
1633  }
1634  rw->state--;
1635  RELEASE_SPINLOCK(rw->spin);
1636  return err;
1637}
1638#else
1639int
1640rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1641{
1642  natural waitval;
1643
1644  lock_futex(&rw->spin);
1645
1646  if (rw->writer == tcr) {
1647    unlock_futex(&rw->spin);
1648    return EDEADLOCK;
1649  }
1650  while (1) {
1651    if (rw->writer == NULL) {
1652      --rw->state;
1653      unlock_futex(&rw->spin);
1654      return 0;
1655    }
1656    rw->blocked_readers++;
1657    waitval = rw->reader_signal;
1658    unlock_futex(&rw->spin);
1659    futex_wait(&rw->reader_signal,waitval);
1660    lock_futex(&rw->spin);
1661    rw->blocked_readers--;
1662  }
1663  return 0;
1664}
1665#endif   
1666
1667
1668/*
1669  Try to obtain write access to the lock.
1670  It is an error if we already have read access, but it's hard to
1671  detect that.
1672  If we already have write access, increment the count that indicates
1673  that.
1674  Otherwise, wait until the lock is not held for reading or writing,
1675  then assert write access.
1676*/
1677
1678#ifndef USE_FUTEX
1679int
1680rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1681{
1682  int err = 0;
1683
1684  LOCK_SPINLOCK(rw->spin,tcr);
1685  if (rw->writer == tcr) {
1686    rw->state++;
1687    RELEASE_SPINLOCK(rw->spin);
1688    return 0;
1689  }
1690
1691  while (rw->state != 0) {
1692    rw->blocked_writers++;
1693    RELEASE_SPINLOCK(rw->spin);
1694    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
1695    LOCK_SPINLOCK(rw->spin,tcr);
1696    rw->blocked_writers--;
1697    if (err == EINTR) {
1698      err = 0;
1699    }
1700    if (err) {
1701      RELEASE_SPINLOCK(rw->spin);
1702      return err;
1703    }
1704  }
1705  rw->state = 1;
1706  rw->writer = tcr;
1707  RELEASE_SPINLOCK(rw->spin);
1708  return err;
1709}
1710
1711#else
1712int
1713rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1714{
1715  int err = 0;
1716  natural waitval;
1717
1718  lock_futex(&rw->spin);
1719  if (rw->writer == tcr) {
1720    rw->state++;
1721    unlock_futex(&rw->spin);
1722    return 0;
1723  }
1724
1725  while (rw->state != 0) {
1726    rw->blocked_writers++;
1727    waitval = rw->writer_signal;
1728    unlock_futex(&rw->spin);
1729    futex_wait(&rw->writer_signal,waitval);
1730    lock_futex(&rw->spin);
1731    rw->blocked_writers--;
1732  }
1733  rw->state = 1;
1734  rw->writer = tcr;
1735  unlock_futex(&rw->spin);
1736  return err;
1737}
1738#endif
1739
1740/*
1741  Sort of the same as above, only return EBUSY if we'd have to wait.
1742*/
1743#ifndef USE_FUTEX
1744int
1745rwlock_try_wlock(rwlock *rw, TCR *tcr)
1746{
1747  int ret = EBUSY;
1748
1749  LOCK_SPINLOCK(rw->spin,tcr);
1750  if (rw->writer == tcr) {
1751    rw->state++;
1752    ret = 0;
1753  } else {
1754    if (rw->state == 0) {
1755      rw->writer = tcr;
1756      rw->state = 1;
1757      ret = 0;
1758    }
1759  }
1760  RELEASE_SPINLOCK(rw->spin);
1761  return ret;
1762}
1763#else
1764int
1765rwlock_try_wlock(rwlock *rw, TCR *tcr)
1766{
1767  int ret = EBUSY;
1768
1769  lock_futex(&rw->spin);
1770  if (rw->writer == tcr) {
1771    rw->state++;
1772    ret = 0;
1773  } else {
1774    if (rw->state == 0) {
1775      rw->writer = tcr;
1776      rw->state = 1;
1777      ret = 0;
1778    }
1779  }
1780  unlock_futex(&rw->spin);
1781  return ret;
1782}
1783#endif
1784
1785#ifndef USE_FUTEX
1786int
1787rwlock_try_rlock(rwlock *rw, TCR *tcr)
1788{
1789  int ret = EBUSY;
1790
1791  LOCK_SPINLOCK(rw->spin,tcr);
1792  if (rw->state <= 0) {
1793    --rw->state;
1794    ret = 0;
1795  }
1796  RELEASE_SPINLOCK(rw->spin);
1797  return ret;
1798}
1799#else
1800int
1801rwlock_try_rlock(rwlock *rw, TCR *tcr)
1802{
1803  int ret = EBUSY;
1804
1805  lock_futex(&rw->spin);
1806  if (rw->state <= 0) {
1807    --rw->state;
1808    ret = 0;
1809  }
1810  unlock_futex(&rw->spin);
1811  return ret;
1812}
1813#endif
1814
1815
1816
1817#ifndef USE_FUTEX
1818int
1819rwlock_unlock(rwlock *rw, TCR *tcr)
1820{
1821
1822  int err = 0;
1823  natural blocked_readers = 0;
1824
1825  LOCK_SPINLOCK(rw->spin,tcr);
1826  if (rw->state > 0) {
1827    if (rw->writer != tcr) {
1828      err = EINVAL;
1829    } else {
1830      --rw->state;
1831      if (rw->state == 0) {
1832        rw->writer = NULL;
1833      }
1834    }
1835  } else {
1836    if (rw->state < 0) {
1837      ++rw->state;
1838    } else {
1839      err = EINVAL;
1840    }
1841  }
1842  if (err) {
1843    RELEASE_SPINLOCK(rw->spin);
1844    return err;
1845  }
1846 
1847  if (rw->state == 0) {
1848    if (rw->blocked_writers) {
1849      SEM_RAISE(rw->writer_signal);
1850    } else {
1851      blocked_readers = rw->blocked_readers;
1852      if (blocked_readers) {
1853        SEM_BROADCAST(rw->reader_signal, blocked_readers);
1854      }
1855    }
1856  }
1857  RELEASE_SPINLOCK(rw->spin);
1858  return 0;
1859}
1860#else
1861int
1862rwlock_unlock(rwlock *rw, TCR *tcr)
1863{
1864
1865  int err = 0;
1866
1867  lock_futex(&rw->spin);
1868  if (rw->state > 0) {
1869    if (rw->writer != tcr) {
1870      err = EINVAL;
1871    } else {
1872      --rw->state;
1873      if (rw->state == 0) {
1874        rw->writer = NULL;
1875      }
1876    }
1877  } else {
1878    if (rw->state < 0) {
1879      ++rw->state;
1880    } else {
1881      err = EINVAL;
1882    }
1883  }
1884  if (err) {
1885    unlock_futex(&rw->spin);
1886    return err;
1887  }
1888 
1889  if (rw->state == 0) {
1890    if (rw->blocked_writers) {
1891      ++rw->writer_signal;
1892      unlock_futex(&rw->spin);
1893      futex_wake(&rw->writer_signal,1);
1894      return 0;
1895    }
1896    if (rw->blocked_readers) {
1897      ++rw->reader_signal;
1898      unlock_futex(&rw->spin);
1899      futex_wake(&rw->reader_signal, INT_MAX);
1900      return 0;
1901    }
1902  }
1903  unlock_futex(&rw->spin);
1904  return 0;
1905}
1906#endif
1907
1908       
1909void
1910rwlock_destroy(rwlock *rw)
1911{
1912#ifndef USE_FUTEX
1913  destroy_semaphore((void **)&rw->reader_signal);
1914  destroy_semaphore((void **)&rw->writer_signal);
1915#endif
1916  postGCfree((void *)(rw->malloced_ptr));
1917}
1918
1919
1920