source: branches/win64/lisp-kernel/thread_manager.c @ 9297

Last change on this file since 9297 was 9297, checked in by gb, 13 years ago

Call tcr_cleanup() with the tcr as argument.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.5 KB
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
20/*
21   If we suspend via signals - and if the "suspend" signal is masked
22   in the handler for that signal - then it's not possible to suspend
23   a thread that's still waiting to be resumed (which is what
24   WAIT_FOR_RESUME_ACK is all about).
25*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
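
/*
  Illustrative, non-compiled sketch of how another thread is expected to
  drive the suspend/resume handshake described above.  The wrapper
  function and its 'target' argument are hypothetical; lisp_suspend_tcr()
  and lisp_resume_tcr(), defined later in this file, are the actual
  entry points (they also take the TCR area lock).
*/
#if 0
void
suspend_resume_example(TCR *target)
{
  if (lisp_suspend_tcr(target)) {   /* signal the target; wait for its handler to ack */
    /* ... examine or modify the suspended thread's state here ... */
    lisp_resume_tcr(target);        /* raise target->resume (or send the resume signal) */
  }
}
#endif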
29
30typedef struct {
31  TCR *tcr;
32  natural vsize, tsize;
33  void *created;
34} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46#ifdef USE_FUTEX
47#define futex_wait(futex,val) syscall(SYS_futex,futex,FUTEX_WAIT,val)
48#define futex_wake(futex,n) syscall(SYS_futex,futex,FUTEX_WAKE,n)
49#define FUTEX_AVAIL (0)
50#define FUTEX_LOCKED (1)
51#define FUTEX_CONTENDED (2)
52#endif
53
54#ifdef WINDOWS
55int
56raise_thread_interrupt(TCR *target)
57{
58}
59#else
60int
61raise_thread_interrupt(TCR *target)
62{
63#ifdef DARWIN_not_yet
64  if (use_mach_exception_handling) {
65    return mach_raise_thread_interrupt(target);
66  }
67#endif
68 return pthread_kill((pthread_t)target->osid, SIGNAL_FOR_PROCESS_INTERRUPT);
69}
70#endif
71
72signed_natural
73atomic_incf_by(signed_natural *ptr, signed_natural by)
74{
75  signed_natural old, new;
76  do {
77    old = *ptr;
78    new = old+by;
79  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
80           (natural) old);
81  return new;
82}
83
84signed_natural
85atomic_incf(signed_natural *ptr)
86{
87  return atomic_incf_by(ptr, 1);
88}
89
90signed_natural
91atomic_decf(signed_natural *ptr)
92{
93  signed_natural old, new;
94  do {
95    old = *ptr;
96    new = old == 0 ? old : old-1;
97  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
98           (natural) old);
99  return old-1;
100}
101
102
103#ifndef USE_FUTEX
104int spin_lock_tries = 1;
105
106void
107get_spin_lock(signed_natural *p, TCR *tcr)
108{
109  int i, n = spin_lock_tries;
110 
111  while (1) {
112    for (i = 0; i < n; i++) {
113      if (atomic_swap(p,(signed_natural)tcr) == 0) {
114        return;
115      }
116    }
117#ifndef WINDOWS
118    sched_yield();
119#else
120    SwitchToThread();
121#endif
122  }
123}
124#endif
125
126#ifndef USE_FUTEX
127int
128lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
129{
130
131  if (tcr == NULL) {
132    tcr = get_tcr(true);
133  }
134  if (m->owner == tcr) {
135    m->count++;
136    return 0;
137  }
138  while (1) {
139    LOCK_SPINLOCK(m->spinlock,tcr);
140    ++m->avail;
141    if (m->avail == 1) {
142      m->owner = tcr;
143      m->count = 1;
144      RELEASE_SPINLOCK(m->spinlock);
145      break;
146    }
147    RELEASE_SPINLOCK(m->spinlock);
148    SEM_WAIT_FOREVER(m->signal);
149  }
150  return 0;
151}
152
153#else /* USE_FUTEX */
154
155static void inline
156lock_futex(natural *p)
157{
158 
159  while (1) {
160    if (store_conditional(p,FUTEX_AVAIL,FUTEX_LOCKED) == FUTEX_AVAIL) {
161      return;
162    }
163    while (1) {
164      if (atomic_swap(p,FUTEX_CONTENDED) == FUTEX_AVAIL) {
165        return;
166      }
167      futex_wait(p,FUTEX_CONTENDED);
168    }
169  }
170}
171
172static void inline
173unlock_futex(natural *p)
174{
175  if (atomic_decf(p) != FUTEX_AVAIL) {
176    *p = FUTEX_AVAIL;
177    futex_wake(p,INT_MAX);
178  }
179}
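
/*
  Illustrative, non-compiled sketch: lock_futex()/unlock_futex() above
  implement the usual three-state futex mutex (FUTEX_AVAIL, FUTEX_LOCKED,
  FUTEX_CONTENDED) over a single natural-sized word.  The static word and
  the wrapper function here are hypothetical.
*/
#if 0
static natural example_futex = FUTEX_AVAIL;

void
futex_example(void (*work)(void))
{
  lock_futex(&example_futex);    /* fast-path CAS, else mark contended and futex_wait() */
  work();
  unlock_futex(&example_futex);  /* futex_wake()s waiters only if the lock was contended */
}
#endif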
180   
181int
182lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
183{
184  natural val;
185  if (tcr == NULL) {
186    tcr = get_tcr(true);
187  }
188  if (m->owner == tcr) {
189    m->count++;
190    return 0;
191  }
192  lock_futex(&m->avail);
193  m->owner = tcr;
194  m->count = 1;
195  return 0;
196}
197#endif /* USE_FUTEX */
198
199
200#ifndef USE_FUTEX 
201int
202unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
203{
204  int ret = EPERM, pending;
205
206  if (tcr == NULL) {
207    tcr = get_tcr(true);
208  }
209
210  if (m->owner == tcr) {
211    --m->count;
212    if (m->count == 0) {
213      LOCK_SPINLOCK(m->spinlock,tcr);
214      m->owner = NULL;
215      pending = m->avail-1 + m->waiting;     /* Don't count us */
216      m->avail = 0;
217      --pending;
218      if (pending > 0) {
219        m->waiting = pending;
220      } else {
221        m->waiting = 0;
222      }
223      RELEASE_SPINLOCK(m->spinlock);
224      if (pending >= 0) {
225        SEM_RAISE(m->signal);
226      }
227    }
228    ret = 0;
229  }
230  return ret;
231}
232#else /* USE_FUTEX */
233int
234unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
235{
236  int ret = EPERM, pending;
237
238   if (tcr == NULL) {
239    tcr = get_tcr(true);
240  }
241
242  if (m->owner == tcr) {
243    --m->count;
244    if (m->count == 0) {
245      m->owner = NULL;
246      unlock_futex(&m->avail);
247    }
248    ret = 0;
249  }
250  return ret;
251}
252#endif /* USE_FUTEX */
253
254void
255destroy_recursive_lock(RECURSIVE_LOCK m)
256{
257#ifndef USE_FUTEX
258  destroy_semaphore((void **)&m->signal);
259#endif
260  postGCfree((void *)(m->malloced_ptr));
261}
262
263/*
264  If we're already the owner (or if the lock is free), lock it
265  and increment the lock count; otherwise, return EBUSY without
266  waiting.
267*/
268
269#ifndef USE_FUTEX
270int
271recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
272{
273  TCR *owner = m->owner;
274
275  LOCK_SPINLOCK(m->spinlock,tcr);
276  if (owner == tcr) {
277    m->count++;
278    if (was_free) {
279      *was_free = 0;
280    }
281    RELEASE_SPINLOCK(m->spinlock);
282    return 0;
283  }
284  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
285    m->owner = tcr;
286    m->count = 1;
287    if (was_free) {
288      *was_free = 1;
289    }
290    RELEASE_SPINLOCK(m->spinlock);
291    return 0;
292  }
293
294  RELEASE_SPINLOCK(m->spinlock);
295  return EBUSY;
296}
297#else
298int
299recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
300{
301  TCR *owner = m->owner;
302
303  if (owner == tcr) {
304    m->count++;
305    if (was_free) {
306      *was_free = 0;
307    }
308    return 0;
309  }
310  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
311    m->owner = tcr;
312    m->count = 1;
313    if (was_free) {
314      *was_free = 1;
315    }
316    return 0;
317  }
318
319  return EBUSY;
320}
321#endif
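
/*
  Illustrative, non-compiled sketch of how the recursive-lock operations
  above are meant to be used together.  The wrapper function and the
  'work' callback are hypothetical; everything else is defined in this
  file.
*/
#if 0
void
recursive_lock_example(TCR *tcr, void (*work)(void))
{
  RECURSIVE_LOCK m = new_recursive_lock();
  int was_free;

  if (m) {
    if (recursive_lock_trylock(m, tcr, &was_free) != 0) {
      /* EBUSY: somebody else holds it; wait for it instead */
      lock_recursive_lock(m, tcr);
    }
    work();
    unlock_recursive_lock(m, tcr);
    destroy_recursive_lock(m);
  }
}
#endif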
322
323void
324sem_wait_forever(SEMAPHORE s)
325{
326  int status;
327
328  do {
329#ifdef USE_MACH_SEMAPHORES
330    mach_timespec_t q = {1,0};
331    status = SEM_TIMEDWAIT(s,q);
332#endif
333#ifdef USE_POSIX_SEMAPHORES
334    struct timespec q;
335    gettimeofday((struct timeval *)&q, NULL);
336    q.tv_sec += 1;
337    status = SEM_TIMEDWAIT(s,&q);
338#endif
339#ifdef USE_WINDOWS_SEMAPHORES
340    status = (WaitForSingleObject(s,1000L) == WAIT_TIMEOUT) ? 1 : 0;
341#endif
342  } while (status != 0);
343}
344
345int
346wait_on_semaphore(void *s, int seconds, int millis)
347{
348#ifdef USE_POSIX_SEMAPHORES
349  int nanos = (millis % 1000) * 1000000;
350  int status;
351
352  struct timespec q;
353  gettimeofday((struct timeval *)&q, NULL);
354  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
355   
356  q.tv_nsec += nanos;
357  if (q.tv_nsec >= 1000000000L) {
358    q.tv_nsec -= 1000000000L;
359    seconds += 1;
360  }
361  q.tv_sec += seconds;
362  status = SEM_TIMEDWAIT(s, &q);
363  if (status < 0) {
364    return errno;
365  }
366  return status;
367#endif
368#ifdef USE_MACH_SEMAPHORES
369  int nanos = (millis % 1000) * 1000000;
370  mach_timespec_t q = {seconds, nanos};
371  int status = SEM_TIMEDWAIT(s, q);
372
373 
374  switch (status) {
375  case 0: return 0;
376  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
377  case KERN_ABORTED: return EINTR;
378  default: return EINVAL;
379  }
380#endif
381#ifdef USE_WINDOWS_SEMAPHORES
382  switch (WaitForSingleObject(s, seconds*1000L+(DWORD)millis)) {
383  case WAIT_OBJECT_0:
384    return 0;
385  case WAIT_TIMEOUT:
386    return WAIT_TIMEOUT;
387  default:
388    break;
389  }
390  return EINVAL;
391
392#endif
393}
394
395
396int
397semaphore_maybe_timedwait(void *s, struct timespec *t)
398{
399  if (t) {
400    return wait_on_semaphore(s, t->tv_sec, t->tv_nsec/1000000L);
401  }
402  SEM_WAIT_FOREVER(s);
403  return 0;
404}
405
406void
407signal_semaphore(SEMAPHORE s)
408{
409  SEM_RAISE(s);
410}
411
412 
413#ifdef WINDOWS
414LispObj
415current_thread_osid()
416{
417  TCR *tcr = get_tcr(false);
418  LispObj current = 0;
419
420  if (tcr) {
421    current = tcr->osid;
422  }
423  if (current == 0) {
424    DuplicateHandle(GetCurrentProcess(),
425                    GetCurrentThread(),
426                    GetCurrentProcess(),
427                    &current,
428                    0,
429                    FALSE,
430                    DUPLICATE_SAME_ACCESS);
431    if (tcr) {
432      tcr->osid = current;
433    }
434  }
435  return current;
436}
437#else
438LispObj
439current_thread_osid()
440{
441  return (LispObj)ptr_to_lispobj(pthread_self());
442}
443#endif
444
445
446int thread_suspend_signal = 0, thread_resume_signal = 0;
447
448
449
450void
451linux_exception_init(TCR *tcr)
452{
453}
454
455
456TCR *
457get_interrupt_tcr(Boolean create)
458{
459  return get_tcr(create);
460}
461 
462void
463suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
464{
465#ifdef DARWIN_GS_HACK
466  Boolean gs_was_tcr = ensure_gs_pthread();
467#endif
468  TCR *tcr = get_interrupt_tcr(false);
469
470  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
471    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
472  } else {
473    if (signo == thread_suspend_signal) {
474#if 0
475      sigset_t wait_for;
476#endif
477
478      tcr->suspend_context = context;
479#if 0
480      sigfillset(&wait_for);
481#endif
482      SEM_RAISE(tcr->suspend);
483#if 0
484      sigdelset(&wait_for, thread_resume_signal);
485#endif
486#if 1
487#if RESUME_VIA_RESUME_SEMAPHORE
488      SEM_WAIT_FOREVER(tcr->resume);
489#if SUSPEND_RESUME_VERBOSE
490      fprintf(stderr, "got  resume in 0x%x\n",tcr);
491#endif
492      tcr->suspend_context = NULL;
493#else
494      sigsuspend(&wait_for);
495#endif
496#else
497    do {
498      sigsuspend(&wait_for);
499    } while (tcr->suspend_context);
500#endif 
501    } else {
502      tcr->suspend_context = NULL;
503#if SUSPEND_RESUME_VERBOSE
504      fprintf(stderr,"got resume in 0x%x\n",tcr);
505#endif
506    }
507#if WAIT_FOR_RESUME_ACK
508    SEM_RAISE(tcr->suspend);
509#endif
510  }
511#ifdef DARWIN_GS_HACK
512  if (gs_was_tcr) {
513    set_gs_address(tcr);
514  }
515#endif
516#ifdef DARWIN
517  DarwinSigReturn(context);
518#endif
519#ifdef FREEBSD
520  freebsd_sigreturn(context);
521#endif
522}
523
524 
525
526/*
527  'base' should be set to the bottom (origin) of the stack, i.e., the
528  end from which it grows.
529*/
530 
531#ifdef WINDOWS
532void
533os_get_current_thread_stack_bounds(void **base, natural *size)
534{
535  natural natbase;
536  MEMORY_BASIC_INFORMATION info;
537 
538  VirtualQuery((void *)current_stack_pointer, &info, sizeof(info));
539  natbase = (natural)info.BaseAddress+info.RegionSize;
540  *size = natbase - (natural)(info.AllocationBase);
541  *base = (void *)natbase;
542}
543#else
544void
545os_get_current_thread_stack_bounds(void **base, natural *size)
546{
547  pthread_t p = pthread_self();
548#ifdef DARWIN
549  *base = pthread_get_stackaddr_np(p);
550  *size = pthread_get_stacksize_np(p);
551#endif
552#ifdef LINUX
553  pthread_attr_t attr;
554
555  pthread_getattr_np(p,&attr);
556  pthread_attr_getstack(&attr, base, size);
557  *(natural *)base += *size;
558#endif
559#ifdef FREEBSD
560  pthread_attr_t attr;
561  void * temp_base;
562  size_t temp_size;
563 
564
565  pthread_attr_init(&attr); 
566  pthread_attr_get_np(p, &attr);
567  pthread_attr_getstackaddr(&attr,&temp_base);
568  pthread_attr_getstacksize(&attr,&temp_size);
569  *base = (void *)((natural)temp_base + temp_size);
570  *size = temp_size;
571#endif
572
573}
574#endif
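
/*
  Illustrative, non-compiled sketch: consuming the convention described
  above.  'base' is the high end of the stack (its origin), so the usable
  range is [base - size, base).  The wrapper function and local names are
  hypothetical.
*/
#if 0
void
stack_bounds_example()
{
  void *base, *lowest;
  natural size;

  os_get_current_thread_stack_bounds(&base, &size);
  lowest = (void *)((natural)base - size);   /* limit toward which the stack grows */
  /* ... e.g., treat [lowest, base) as this thread's control-stack area ... */
}
#endif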
575
576void *
577new_semaphore(int count)
578{
579#ifdef USE_POSIX_SEMAPHORES
580  sem_t *s = malloc(sizeof(sem_t));
581  sem_init(s, 0, count);
582  return s;
583#endif
584#ifdef USE_MACH_SEMAPHORES
585  semaphore_t s = (semaphore_t)0;
586  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
587  return (void *)(natural)s;
588#endif
589#ifdef USE_WINDOWS_SEMAPHORES
590  return CreateSemaphore(NULL, count, 0x7fffL, NULL);
591#endif
592}
593
594RECURSIVE_LOCK
595new_recursive_lock()
596{
597  extern int cache_block_size;
598  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
599  RECURSIVE_LOCK m = NULL;
600#ifndef USE_FUTEX
601  void *signal = new_semaphore(0);
602#endif
603
604  if (p) {
605    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
606    m->malloced_ptr = p;
607  }
608
609#ifdef USE_FUTEX
610  if (m) {
611    return m;
612  }
613#else
614  if (m && signal) {
615    m->signal = signal;
616    return m;
617  }
618  if (m) {
619    free(p);
620  }
621  if (signal) {
622    destroy_semaphore(&signal);
623  }
624#endif
625  return NULL;
626}
627
628void
629destroy_semaphore(void **s)
630{
631  if (*s) {
632#ifdef USE_POSIX_SEMAPHORES
633    sem_destroy((sem_t *)*s);
634#endif
635#ifdef USE_MACH_SEMAPHORES
636    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
637#endif
638#ifdef USE_WINDOWS_SEMAPHORES
639    CloseHandle(*s);
640#endif
641    *s=NULL;
642  }
643}
644
645#ifdef WINDOWS
646void
647tsd_set(LispObj key, void *datum)
648{
649  TlsSetValue((DWORD)key, datum);
650}
651
652void *
653tsd_get(LispObj key)
654{
655  return TlsGetValue((DWORD)key);
656}
657#else
658void
659tsd_set(LispObj key, void *datum)
660{
661  pthread_setspecific((pthread_key_t)key, datum);
662}
663
664void *
665tsd_get(LispObj key)
666{
667  return pthread_getspecific((pthread_key_t)key);
668}
669#endif
670
671void
672dequeue_tcr(TCR *tcr)
673{
674  TCR *next, *prev;
675
676  next = tcr->next;
677  prev = tcr->prev;
678
679  prev->next = next;
680  next->prev = prev;
681  tcr->prev = tcr->next = NULL;
682#ifdef X8664
683  tcr->linear = NULL;
684#endif
685}
686 
687void
688enqueue_tcr(TCR *new)
689{
690  TCR *head, *tail;
691 
692  LOCK(lisp_global(TCR_AREA_LOCK),new);
693  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
694  tail = head->prev;
695  tail->next = new;
696  head->prev = new;
697  new->prev = tail;
698  new->next = head;
699  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
700}
701
702TCR *
703allocate_tcr()
704{
705  TCR *tcr, *chain = NULL, *next;
706#ifdef DARWIN
707  extern Boolean use_mach_exception_handling;
708  kern_return_t kret;
709  mach_port_t
710    thread_exception_port,
711    task_self = mach_task_self();
712#endif
713  for (;;) {
714    tcr = calloc(1, sizeof(TCR));
715#ifdef DARWIN
716#if WORD_SIZE == 64
717    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
718      tcr->next = chain;
719      chain = tcr;
720      continue;
721    }
722#endif
723    if (use_mach_exception_handling) {
724      thread_exception_port = (mach_port_t)((natural)tcr);
725      kret = mach_port_allocate_name(task_self,
726                                     MACH_PORT_RIGHT_RECEIVE,
727                                     thread_exception_port);
728    } else {
729      kret = KERN_SUCCESS;
730    }
731
732    if (kret != KERN_SUCCESS) {
733      tcr->next = chain;
734      chain = tcr;
735      continue;
736    }
737#endif
738    for (next = chain; next;) {
739      next = next->next;
740      free(chain);
741    }
742    return tcr;
743  }
744}
745
746#ifdef X8664
747#ifdef LINUX
748#include <asm/prctl.h>
749#include <sys/prctl.h>
750#endif
751#ifdef FREEBSD
752#include <machine/sysarch.h>
753#endif
754
755void
756setup_tcr_extra_segment(TCR *tcr)
757{
758#ifdef FREEBSD
759  amd64_set_gsbase(tcr);
760#endif
761#ifdef LINUX
762  arch_prctl(ARCH_SET_GS, (natural)tcr);
763#endif
764#ifdef DARWIN
765  /* There's no way to do this yet.  See DARWIN_GS_HACK */
766  /* darwin_set_x8664_fs_reg(tcr); */
767#endif
768}
769
770#endif
771
772
773
774/*
775  Caller must hold the area_lock.
776*/
777TCR *
778new_tcr(natural vstack_size, natural tstack_size)
779{
780  extern area
781    *allocate_vstack_holding_area_lock(unsigned),
782    *allocate_tstack_holding_area_lock(unsigned);
783  area *a;
784  int i;
785#ifndef WINDOWS
786  sigset_t sigmask;
787
788  sigemptyset(&sigmask);
789  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
790#endif
791
792#ifdef HAVE_TLS
793  TCR *tcr = &current_tcr;
794#else
795  TCR *tcr = allocate_tcr();
796#endif
797
798#ifdef X8664
799  setup_tcr_extra_segment(tcr);
800  tcr->linear = tcr;
801#endif
802
803#if (WORD_SIZE == 64)
804  tcr->single_float_convert.tag = subtag_single_float;
805#endif
806  lisp_global(TCR_COUNT) += (1<<fixnumshift);
807  tcr->suspend = new_semaphore(0);
808  tcr->resume = new_semaphore(0);
809  tcr->reset_completion = new_semaphore(0);
810  tcr->activate = new_semaphore(0);
811  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
812  a = allocate_vstack_holding_area_lock(vstack_size);
813  tcr->vs_area = a;
814  a->owner = tcr;
815  tcr->save_vsp = (LispObj *) a->active; 
816  a = allocate_tstack_holding_area_lock(tstack_size);
817  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
818  tcr->ts_area = a;
819  a->owner = tcr;
820  tcr->save_tsp = (LispObj *) a->active;
821#ifdef X86
822  tcr->next_tsp = tcr->save_tsp;
823#endif
824
825  tcr->valence = TCR_STATE_FOREIGN;
826#ifdef PPC
827  tcr->lisp_fpscr.words.l = 0xd0;
828#endif
829#ifdef X86
830  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
831#if 1                           /* Mask underflow; too hard to
832                                   deal with denorms if underflow is
833                                   enabled */
834    (1 << MXCSR_UM_BIT) | 
835#endif
836    (1 << MXCSR_PM_BIT);
837#endif
838  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
839  tcr->tlb_limit = 2048<<fixnumshift;
840  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
841  for (i = 0; i < 2048; i++) {
842    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
843  }
844  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
845#ifndef WINDOWS
846  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
847#endif
848  return tcr;
849}
850
851void
852shutdown_thread_tcr(void *arg)
853{
854  TCR *tcr = TCR_FROM_TSD(arg);
855
856  area *vs, *ts, *cs;
857  void *termination_semaphore;
858 
859  if (--(tcr->shutdown_count) == 0) {
860    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
861      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
862        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
863   
864      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
865      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
866      tsd_set(lisp_global(TCR_KEY), NULL);
867    }
868#ifdef DARWIN
869    darwin_exception_cleanup(tcr);
870#endif
871    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
872    vs = tcr->vs_area;
873    tcr->vs_area = NULL;
874    ts = tcr->ts_area;
875    tcr->ts_area = NULL;
876    cs = tcr->cs_area;
877    tcr->cs_area = NULL;
878    if (vs) {
879      condemn_area_holding_area_lock(vs);
880    }
881    if (ts) {
882      condemn_area_holding_area_lock(ts);
883    }
884    if (cs) {
885      condemn_area_holding_area_lock(cs);
886    }
887    destroy_semaphore(&tcr->suspend);
888    destroy_semaphore(&tcr->resume);
889    destroy_semaphore(&tcr->reset_completion);
890    destroy_semaphore(&tcr->activate);
891    free(tcr->tlb_pointer);
892    tcr->tlb_pointer = NULL;
893    tcr->tlb_limit = 0;
894    tcr->osid = 0;
895    tcr->interrupt_pending = 0;
896    termination_semaphore = tcr->termination_semaphore;
897    tcr->termination_semaphore = NULL;
898#ifdef HAVE_TLS
899    dequeue_tcr(tcr);
900#endif
901    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
902    if (termination_semaphore) {
903      SEM_RAISE(termination_semaphore);
904    }
905  } else {
906    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
907  }
908}
909
910void
911tcr_cleanup(void *arg)
912{
913  TCR *tcr = (TCR *)arg;
914  area *a;
915
916  a = tcr->vs_area;
917  if (a) {
918    a->active = a->high;
919  }
920  a = tcr->ts_area;
921  if (a) {
922    a->active = a->high;
923  }
924  a = tcr->cs_area;
925  if (a) {
926    a->active = a->high;
927  }
928  tcr->valence = TCR_STATE_FOREIGN;
929  tcr->shutdown_count = 1;
930  shutdown_thread_tcr(tcr);
931  tsd_set(lisp_global(TCR_KEY), NULL);
932}
933
934void *
935current_native_thread_id()
936{
937  return ((void *) (natural)
938#ifdef LINUX
939          getpid()
940#endif
941#ifdef DARWIN
942          mach_thread_self()
943#endif
944#ifdef FREEBSD
945          pthread_self()
946#endif
947#ifdef SOLARIS
948          pthread_self()
949#endif
950#ifdef WINDOWS
951          GetCurrentThreadId()
952#endif
953          );
954}
955
956
957void
958thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
959{
960  area *a, *register_cstack_holding_area_lock(BytePtr, natural);
961
962  tcr->osid = current_thread_osid();
963  tcr->native_thread_id = current_native_thread_id();
964  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
965  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
966  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
967  tcr->cs_area = a;
968  a->owner = tcr;
969  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
970    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
971  }
972#ifdef LINUX
973#ifdef PPC
974#ifndef PPC64
975  tcr->native_thread_info = current_r2;
976#endif
977#endif
978#endif
979  tcr->errno_loc = &errno;
980  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
981#ifdef DARWIN
982  extern Boolean use_mach_exception_handling;
983  if (use_mach_exception_handling) {
984    darwin_exception_init(tcr);
985  }
986#endif
987#ifdef LINUX
988  linux_exception_init(tcr);
989#endif
990  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
991}
992
993/*
994  Register the specified tcr as "belonging to" the current thread.
995  Under Darwin, setup Mach exception handling for the thread.
996  Install cleanup handlers for thread termination.
997*/
998void
999register_thread_tcr(TCR *tcr)
1000{
1001  void *stack_base = NULL;
1002  natural stack_size = 0;
1003
1004  os_get_current_thread_stack_bounds(&stack_base, &stack_size);
1005  thread_init_tcr(tcr, stack_base, stack_size);
1006  enqueue_tcr(tcr);
1007}
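
/*
  Illustrative, non-compiled sketch: a foreign (non-lisp-created) thread
  that wants to call into lisp doesn't call register_thread_tcr()
  directly; it calls get_tcr(true), defined later in this file, which
  creates a TCR, marks it foreign, and registers it as above.  The
  wrapper function is hypothetical.
*/
#if 0
void
foreign_thread_example()
{
  TCR *tcr = get_tcr(true);     /* create + register a TCR for this thread */

  if (tcr) {
    /* ... safe to call back into lisp from here ... */
  }
}
#endif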
1008
1009
1010 
1011 
1012#ifndef MAP_GROWSDOWN
1013#define MAP_GROWSDOWN 0
1014#endif
1015
1016Ptr
1017create_stack(int size)
1018{
1019  Ptr p;
1020  size=align_to_power_of_2(size, log2_page_size);
1021  p = (Ptr) MapMemoryForStack((size_t)size);
1022  if (p != (Ptr)(-1)) {
1023    *((size_t *)p) = size;
1024    return p;
1025  }
1026  allocation_failure(true, size);
1027
1028}
1029
1030void *
1031allocate_stack(unsigned size)
1032{
1033  return create_stack(size);
1034}
1035
1036void
1037free_stack(void *s)
1038{
1039  size_t size = *((size_t *)s);
1040  UnMapMemory(s, size);
1041}
1042
1043Boolean threads_initialized = false;
1044
1045#ifndef USE_FUTEX
1046#ifdef WINDOWS
1047void
1048count_cpus()
1049{
1050  SYSTEM_INFO si;
1051
1052  GetSystemInfo(&si);
1053  if (si.dwNumberOfProcessors > 1) {
1054    spin_lock_tries = 1024;
1055  }
1056}
1057#else
1058void
1059count_cpus()
1060{
1061#ifdef DARWIN
1062  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
1063#include <mach/host_info.h>
1064
1065  struct host_basic_info info;
1066  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
1067 
1068  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
1069    if (info.max_cpus > 1) {
1070      spin_lock_tries = 1024;
1071    }
1072  }
1073#else
1074  int n = sysconf(_SC_NPROCESSORS_ONLN);
1075 
1076  if (n > 1) {
1077    spin_lock_tries = 1024;
1078  }
1079#endif
1080}
1081#endif
1082#endif
1083
1084void
1085init_threads(void * stack_base, TCR *tcr)
1086{
1087  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
1088#ifdef WINDOWS
1089  lisp_global(TCR_KEY) = TlsAlloc();
1090#else
1091  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
1092  thread_signal_setup();
1093#endif
1094 
1095#ifndef USE_FUTEX
1096  count_cpus();
1097#endif
1098  threads_initialized = true;
1099}
1100
1101
1102void *
1103lisp_thread_entry(void *param)
1104{
1105  thread_activation *activation = (thread_activation *)param;
1106  TCR *tcr = new_tcr(activation->vsize, activation->tsize);
1107#ifndef WINDOWS
1108  sigset_t mask, old_mask;
1109
1110  sigemptyset(&mask);
1111  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
1112#endif
1113
1114  register_thread_tcr(tcr);
1115
1116#ifndef WINDOWS
1117  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
1118#endif
1119  tcr->vs_area->active -= node_size;
1120  *(--tcr->save_vsp) = lisp_nil;
1121  enable_fp_exceptions();
1122  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
1123  activation->tcr = tcr;
1124  SEM_RAISE(activation->created);
1125  do {
1126    SEM_RAISE(tcr->reset_completion);
1127    SEM_WAIT_FOREVER(tcr->activate);
1128    /* Now go run some lisp code */
1129    start_lisp(TCR_TO_TSD(tcr),0);
1130  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
1131#ifndef WINDOWS
1132  pthread_cleanup_pop(true);
1133#else
1134  tcr_cleanup(tcr);
1135#endif
1136
1137}
1138
1139void *
1140xNewThread(natural control_stack_size,
1141           natural value_stack_size,
1142           natural temp_stack_size)
1143
1144{
1145  thread_activation activation;
1146  TCR *current = get_tcr(false);
1147
1148
1149  activation.tsize = temp_stack_size;
1150  activation.vsize = value_stack_size;
1151  activation.tcr = 0;
1152  activation.created = new_semaphore(0);
1153  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
1154                           NULL, 
1155                           lisp_thread_entry,
1156                           (void *) &activation)) {
1157   
1158    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
1159  }
1160  destroy_semaphore(&activation.created); 
1161  return TCR_TO_TSD(activation.tcr);
1162}
1163
1164Boolean
1165active_tcr_p(TCR *q)
1166{
1167  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
1168 
1169  do {
1170    if (p == q) {
1171      return true;
1172    }
1173    p = p->next;
1174  } while (p != head);
1175  return false;
1176}
1177
1178#ifdef WINDOWS
1179OSErr
1180xDisposeThread(TCR *tcr)
1181{
1182}
1183#else
1184OSErr
1185xDisposeThread(TCR *tcr)
1186{
1187  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
1188    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
1189      pthread_cancel((pthread_t)(tcr->osid));
1190      return 0;
1191    }
1192  }
1193  return -50;
1194}
1195#endif
1196
1197OSErr
1198xYieldToThread(TCR *target)
1199{
1200  Bug(NULL, "xYieldToThread ?");
1201  return 0;
1202}
1203 
1204OSErr
1205xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
1206{
1207  Bug(NULL, "xThreadCurrentStackSpace ?");
1208  return 0;
1209}
1210
1211
1212#ifdef WINDOWS
1213LispObj
1214create_system_thread(size_t stack_size,
1215                     void* stackaddr,
1216                     void* (*start_routine)(void *),
1217                     void* param)
1218{
1219  HANDLE thread_handle;
1220
1221  thread_handle = (HANDLE)_beginthreadex(NULL, 
1222                                        stack_size,
1223                                        start_routine,
1224                                        param,
1225                                        0, 
1226                                        NULL);
1227
1228  if (thread_handle == NULL) {
1229    wperror("CreateThread");
1230  }
1231  return (LispObj) ptr_to_lispobj(thread_handle);
1232}
1233#else
1234LispObj
1235create_system_thread(size_t stack_size,
1236                     void* stackaddr,
1237                     void* (*start_routine)(void *),
1238                     void* param)
1239{
1240  pthread_attr_t attr;
1241  pthread_t returned_thread = (pthread_t) 0;
1242
1243  pthread_attr_init(&attr);
1244  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
1245
1246  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
1247    stack_size = PTHREAD_STACK_MIN;
1248  }
1249
1250  stack_size = ensure_stack_limit(stack_size);
1251  if (stackaddr != NULL) {
1252    /* Size must have been specified.  Sort of makes sense ... */
1253#ifdef DARWIN
1254    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
1255#else
1256    pthread_attr_setstack(&attr, stackaddr, stack_size);
1257#endif
1258  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
1259    pthread_attr_setstacksize(&attr,stack_size);
1260  }
1261
1262  /*
1263     I think that's just about enough ... create the thread.
1264  */
1265  pthread_create(&returned_thread, &attr, start_routine, param);
1266  return (LispObj) ptr_to_lispobj(returned_thread);
1267}
1268#endif
1269
1270TCR *
1271get_tcr(Boolean create)
1272{
1273#ifdef HAVE_TLS
1274  TCR *current = current_tcr.linear;
1275#else
1276  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
1277  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
1278#endif
1279
1280  if ((current == NULL) && create) {
1281    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
1282      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
1283    int i, nbindwords = 0;
1284    extern unsigned initial_stack_size;
1285   
1286    /* Make one. */
1287    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
1288    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
1289    register_thread_tcr(current);
1290#ifdef DEBUG_TCR_CREATION
1291#ifndef WINDOWS
1292    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
1293#endif
1294#endif
1295    current->vs_area->active -= node_size;
1296    *(--current->save_vsp) = lisp_nil;
1297#ifdef PPC
1298#define NSAVEREGS 8
1299#endif
1300#ifdef X8664
1301#define NSAVEREGS 4
1302#endif
1303    for (i = 0; i < NSAVEREGS; i++) {
1304      *(--current->save_vsp) = 0;
1305      current->vs_area->active -= node_size;
1306    }
1307    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
1308    for (i = 0; i < nbindwords; i++) {
1309      *(--current->save_vsp) = 0;
1310      current->vs_area->active -= node_size;
1311    }
1312    current->shutdown_count = 1;
1313    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
1314
1315  }
1316 
1317  return current;
1318}
1319
1320#ifdef WINDOWS
1321Boolean
1322suspend_tcr(TCR *tcr)
1323{
1324  int suspend_count = atomic_incf(&(tcr->suspend_count));
1325  DWORD rc;
1326  if (suspend_count == 1) {
1327#if SUSPEND_RESUME_VERBOSE
1328    fprintf(stderr,"Suspending 0x%x\n", tcr);
1329#endif
1330    rc = SuspendThread(tcr->osid);
1331    if (rc == -1) {
1332      wperror("SuspendThread");
1333      return false;
1334    }
1335    return true;
1336  }
1337  return false;
1338}
1339#else
1340Boolean
1341suspend_tcr(TCR *tcr)
1342{
1343  int suspend_count = atomic_incf(&(tcr->suspend_count));
1344  if (suspend_count == 1) {
1345#if SUSPEND_RESUME_VERBOSE
1346    fprintf(stderr,"Suspending 0x%x\n", tcr);
1347#endif
1348#ifdef DARWIN_nope
1349    if (mach_suspend_tcr(tcr)) {
1350      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
1351      return true;
1352    }
1353#endif
1354    if (pthread_kill((pthread_t)(tcr->osid), thread_suspend_signal) == 0) {
1355      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1356    } else {
1357      /* A problem using pthread_kill.  On Darwin, this can happen
1358         if the thread has had its signal mask surgically removed
1359         by pthread_exit.  If the native (Mach) thread can be suspended,
1360         do that and return true; otherwise, flag the tcr as belonging
1361         to a dead thread by setting tcr->osid to 0.
1362      */
1363      tcr->osid = 0;
1364      return false;
1365    }
1366    return true;
1367  }
1368  return false;
1369}
1370#endif
1371
1372#ifdef WINDOWS
1373Boolean
1374tcr_suspend_ack(TCR *tcr)
1375{
1376  return true;
1377}
1378#else
1379Boolean
1380tcr_suspend_ack(TCR *tcr)
1381{
1382  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
1383    SEM_WAIT_FOREVER(tcr->suspend);
1384    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
1385#if SUSPEND_RESUME_VERBOSE
1386    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
1387#endif
1388
1389  }
1390  return true;
1391}
1392#endif
1393     
1394
1395
1396Boolean
1397lisp_suspend_tcr(TCR *tcr)
1398{
1399  Boolean suspended;
1400  TCR *current = get_tcr(true);
1401 
1402  LOCK(lisp_global(TCR_AREA_LOCK),current);
1403#ifdef DARWIN
1404#if USE_MACH_EXCEPTION_LOCK
1405  if (use_mach_exception_handling) {
1406    pthread_mutex_lock(mach_exception_lock);
1407  }
1408#endif
1409#endif
1410  suspended = suspend_tcr(tcr);
1411  if (suspended) {
1412    while (!tcr_suspend_ack(tcr));
1413  }
1414#ifdef DARWIN
1415#if USE_MACH_EXCEPTION_LOCK
1416  if (use_mach_exception_handling) {
1417    pthread_mutex_unlock(mach_exception_lock);
1418  }
1419#endif
1420#endif
1421  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
1422  return suspended;
1423}
1424         
1425#ifdef WINDOWS
1426Boolean
1427resume_tcr(TCR *tcr)
1428{
1429  int suspend_count = atomic_decf(&(tcr->suspend_count)), err;
1430  DWORD rc;
1431  if (suspend_count == 0) {
1432    rc = ResumeThread(tcr->osid);
1433    if (rc == -1) {
1434      wperror("ResumeThread");
1435      return false;
1436    }
1437    return true;
1438  }
1439  return false;
1440}   
1441#else
1442Boolean
1443resume_tcr(TCR *tcr)
1444{
1445  int suspend_count = atomic_decf(&(tcr->suspend_count)), err;
1446  if (suspend_count == 0) {
1447#ifdef DARWIN
1448    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1449#if SUSPEND_RESUME_VERBOSE
1450    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
1451#endif
1452      mach_resume_tcr(tcr);
1453      return true;
1454    }
1455#endif
1456#if RESUME_VIA_RESUME_SEMAPHORE
1457    SEM_RAISE(tcr->resume);
1458#else
1459    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
1460      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
1461    }
1462#endif
1463#if SUSPEND_RESUME_VERBOSE
1464    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
1465#endif
1466    return true;
1467  }
1468  return false;
1469}
1470#endif
1471
1472void
1473wait_for_resumption(TCR *tcr)
1474{
1475  if (tcr->suspend_count == 0) {
1476#ifdef DARWIN
1477    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
1478      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
1479      return;
1480    }
1481#endif
1482#if WAIT_FOR_RESUME_ACK
1483#if SUSPEND_RESUME_VERBOSE
1484    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
1485#endif
1486    SEM_WAIT_FOREVER(tcr->suspend);
1487#endif
1488  }
1489}
1490   
1491
1492
1493Boolean
1494lisp_resume_tcr(TCR *tcr)
1495{
1496  Boolean resumed;
1497  TCR *current = get_tcr(true);
1498 
1499  LOCK(lisp_global(TCR_AREA_LOCK),current);
1500  resumed = resume_tcr(tcr);
1501#ifndef WINDOWS
1502  wait_for_resumption(tcr);
1503#endif
1504  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1505  return resumed;
1506}
1507
1508
1509TCR *freed_tcrs = NULL;
1510
1511void
1512enqueue_freed_tcr (TCR *tcr)
1513{
1514#ifndef HAVE_TLS
1515  tcr->next = freed_tcrs;
1516  freed_tcrs = tcr;
1517#endif
1518}
1519
1520/* It's not clear that we can safely condemn a dead tcr's areas, since
1521   we may not be able to call free() if a suspended thread owns a
1522   malloc lock. At least make the areas appear to be empty.
1523*/
1524   
1525
1526void
1527normalize_dead_tcr_areas(TCR *tcr)
1528{
1529  area *a;
1530
1531  a = tcr->vs_area;
1532  if (a) {
1533    a->active = a->high;
1534  }
1535
1536  a = tcr->ts_area;
1537  if (a) {
1538    a->active = a->high;
1539  }
1540
1541  a = tcr->cs_area;
1542  if (a) {
1543    a->active = a->high;
1544  }
1545}
1546   
1547void
1548free_freed_tcrs ()
1549{
1550  TCR *current, *next;
1551
1552  for (current = freed_tcrs; current; current = next) {
1553    next = current->next;
1554#ifndef HAVE_TLS
1555    free(current);
1556#endif
1557  }
1558  freed_tcrs = NULL;
1559}
1560
1561void
1562suspend_other_threads(Boolean for_gc)
1563{
1564  TCR *current = get_tcr(true), *other, *next;
1565  int dead_tcr_count = 0;
1566  Boolean all_acked;
1567
1568  LOCK(lisp_global(TCR_AREA_LOCK), current);
1569#ifdef DARWIN
1570#if USE_MACH_EXCEPTION_LOCK
1571  if (for_gc && use_mach_exception_handling) {
1572#if SUSPEND_RESUME_VERBOSE
1573    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
1574#endif
1575    pthread_mutex_lock(mach_exception_lock);
1576  }
1577#endif
1578#endif
1579  for (other = current->next; other != current; other = other->next) {
1580    if ((other->osid != 0)) {
1581      suspend_tcr(other);
1582      if (other->osid == 0) {
1583        dead_tcr_count++;
1584      }
1585    } else {
1586      dead_tcr_count++;
1587    }
1588  }
1589
1590  do {
1591    all_acked = true;
1592    for (other = current->next; other != current; other = other->next) {
1593      if ((other->osid != 0)) {
1594        if (!tcr_suspend_ack(other)) {
1595          all_acked = false;
1596        }
1597      }
1598    }
1599  } while(! all_acked);
1600
1601     
1602
1603  /* All other threads are suspended; can safely delete dead tcrs now */
1604  if (dead_tcr_count) {
1605    for (other = current->next; other != current; other = next) {
1606      next = other->next;
1607      if ((other->osid == 0))  {
1608        normalize_dead_tcr_areas(other);
1609        dequeue_tcr(other);
1610        enqueue_freed_tcr(other);
1611      }
1612    }
1613  }
1614}
1615
1616void
1617lisp_suspend_other_threads()
1618{
1619  suspend_other_threads(false);
1620}
1621
1622void
1623resume_other_threads(Boolean for_gc)
1624{
1625  TCR *current = get_tcr(true), *other;
1626  for (other = current->next; other != current; other = other->next) {
1627    if ((other->osid != 0)) {
1628      resume_tcr(other);
1629    }
1630  }
1631  for (other = current->next; other != current; other = other->next) {
1632    if ((other->osid != 0)) {
1633      wait_for_resumption(other);
1634    }
1635  }
1636  free_freed_tcrs();
1637#ifdef DARWIN
1638#if USE_MACH_EXCEPTION_LOCK
1639  if (for_gc && use_mach_exception_handling) {
1640#if SUSPEND_RESUME_VERBOSE
1641    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
1642#endif
1643    pthread_mutex_unlock(mach_exception_lock);
1644  }
1645#endif
1646#endif
1647
1648  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1649}
1650
1651void
1652lisp_resume_other_threads()
1653{
1654  resume_other_threads(false);
1655}
1656
1657
1658
1659rwlock *
1660rwlock_new()
1661{
1662  extern int cache_block_size;
1663
1664  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1665  rwlock *rw;
1666 
1667  if (p) {
1668    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1669    rw->malloced_ptr = p;
1670#ifndef USE_FUTEX
1671    rw->reader_signal = new_semaphore(0);
1672    rw->writer_signal = new_semaphore(0);
1673    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1674      if (rw->reader_signal) {
1675        destroy_semaphore(&(rw->reader_signal));
1676      } else {
1677        destroy_semaphore(&(rw->writer_signal));
1678      }
1679      free(rw);
1680      rw = NULL;
1681    }
1682#endif
1683  }
1684  return rw;
1685}
1686
1687     
1688/*
1689  Try to get read access to a multiple-readers/single-writer lock.  If
1690  we already have read access, return success (indicating that the
1691  lock is held another time).  If we already have write access to the
1692  lock ... that won't work; return EDEADLK.  Wait until no other
1693  thread has or is waiting for write access, then indicate that we
1694  hold read access once.
1695*/
1696#ifndef USE_FUTEX
1697int
1698rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1699{
1700  int err = 0;
1701 
1702  LOCK_SPINLOCK(rw->spin, tcr);
1703
1704  if (rw->writer == tcr) {
1705    RELEASE_SPINLOCK(rw->spin);
1706    return EDEADLK;
1707  }
1708
1709  while (rw->blocked_writers || (rw->state > 0)) {
1710    rw->blocked_readers++;
1711    RELEASE_SPINLOCK(rw->spin);
1712    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
1713    LOCK_SPINLOCK(rw->spin,tcr);
1714    rw->blocked_readers--;
1715    if (err == EINTR) {
1716      err = 0;
1717    }
1718    if (err) {
1719      RELEASE_SPINLOCK(rw->spin);
1720      return err;
1721    }
1722  }
1723  rw->state--;
1724  RELEASE_SPINLOCK(rw->spin);
1725  return err;
1726}
1727#else
1728int
1729rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1730{
1731  natural waitval;
1732
1733  lock_futex(&rw->spin);
1734
1735  if (rw->writer == tcr) {
1736    unlock_futex(&rw->spin);
1737    return EDEADLOCK;
1738  }
1739  while (1) {
1740    if (rw->writer == NULL) {
1741      --rw->state;
1742      unlock_futex(&rw->spin);
1743      return 0;
1744    }
1745    rw->blocked_readers++;
1746    waitval = rw->reader_signal;
1747    unlock_futex(&rw->spin);
1748    futex_wait(&rw->reader_signal,waitval);
1749    lock_futex(&rw->spin);
1750    rw->blocked_readers--;
1751  }
1752  return 0;
1753}
1754#endif   
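
/*
  Illustrative, non-compiled sketch: read-side use of the rwlock
  operations above.  A NULL timespec means "no timeout"; the wrapper
  function is hypothetical.
*/
#if 0
int
rwlock_reader_example(rwlock *rw, TCR *tcr)
{
  int err = rwlock_rlock(rw, tcr, NULL);   /* EDEADLK if we already hold write access */

  if (err == 0) {
    /* ... read the shared data ... */
    err = rwlock_unlock(rw, tcr);
  }
  return err;
}
#endif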
1755
1756
1757/*
1758  Try to obtain write access to the lock.
1759  It is an error if we already have read access, but it's hard to
1760  detect that.
1761  If we already have write access, increment the count that indicates
1762  that.
1763  Otherwise, wait until the lock is not held for reading or writing,
1764  then assert write access.
1765*/
1766
1767#ifndef USE_FUTEX
1768int
1769rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1770{
1771  int err = 0;
1772
1773  LOCK_SPINLOCK(rw->spin,tcr);
1774  if (rw->writer == tcr) {
1775    rw->state++;
1776    RELEASE_SPINLOCK(rw->spin);
1777    return 0;
1778  }
1779
1780  while (rw->state != 0) {
1781    rw->blocked_writers++;
1782    RELEASE_SPINLOCK(rw->spin);
1783    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
1784    LOCK_SPINLOCK(rw->spin,tcr);
1785    rw->blocked_writers--;
1786    if (err == EINTR) {
1787      err = 0;
1788    }
1789    if (err) {
1790      RELEASE_SPINLOCK(rw->spin);
1791      return err;
1792    }
1793  }
1794  rw->state = 1;
1795  rw->writer = tcr;
1796  RELEASE_SPINLOCK(rw->spin);
1797  return err;
1798}
1799
1800#else
1801int
1802rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1803{
1804  int err = 0;
1805  natural waitval;
1806
1807  lock_futex(&rw->spin);
1808  if (rw->writer == tcr) {
1809    rw->state++;
1810    unlock_futex(&rw->spin);
1811    return 0;
1812  }
1813
1814  while (rw->state != 0) {
1815    rw->blocked_writers++;
1816    waitval = rw->writer_signal;
1817    unlock_futex(&rw->spin);
1818    futex_wait(&rw->writer_signal,waitval);
1819    lock_futex(&rw->spin);
1820    rw->blocked_writers--;
1821  }
1822  rw->state = 1;
1823  rw->writer = tcr;
1824  unlock_futex(&rw->spin);
1825  return err;
1826}
1827#endif
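
/*
  Illustrative, non-compiled sketch: write-side use, including the
  recursive write access described above.  The wrapper function is
  hypothetical.
*/
#if 0
int
rwlock_writer_example(rwlock *rw, TCR *tcr)
{
  int err = rwlock_wlock(rw, tcr, NULL);   /* wait until no readers or writers */

  if (err == 0) {
    rwlock_wlock(rw, tcr, NULL);           /* already the writer: just bumps rw->state */
    rwlock_unlock(rw, tcr);                /* undoes the recursive acquisition */
    /* ... modify the shared data ... */
    err = rwlock_unlock(rw, tcr);          /* releases write access; wakes waiters */
  }
  return err;
}
#endif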
1828
1829/*
1830  Sort of the same as above, only return EBUSY if we'd have to wait.
1831*/
1832#ifndef USE_FUTEX
1833int
1834rwlock_try_wlock(rwlock *rw, TCR *tcr)
1835{
1836  int ret = EBUSY;
1837
1838  LOCK_SPINLOCK(rw->spin,tcr);
1839  if (rw->writer == tcr) {
1840    rw->state++;
1841    ret = 0;
1842  } else {
1843    if (rw->state == 0) {
1844      rw->writer = tcr;
1845      rw->state = 1;
1846      ret = 0;
1847    }
1848  }
1849  RELEASE_SPINLOCK(rw->spin);
1850  return ret;
1851}
1852#else
1853int
1854rwlock_try_wlock(rwlock *rw, TCR *tcr)
1855{
1856  int ret = EBUSY;
1857
1858  lock_futex(&rw->spin);
1859  if (rw->writer == tcr) {
1860    rw->state++;
1861    ret = 0;
1862  } else {
1863    if (rw->state == 0) {
1864      rw->writer = tcr;
1865      rw->state = 1;
1866      ret = 0;
1867    }
1868  }
1869  unlock_futex(&rw->spin);
1870  return ret;
1871}
1872#endif
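
/*
  Illustrative, non-compiled sketch: the non-blocking variants above are
  useful when the caller would rather do something else than wait.  The
  wrapper function is hypothetical.
*/
#if 0
Boolean
rwlock_try_example(rwlock *rw, TCR *tcr)
{
  if (rwlock_try_wlock(rw, tcr) == 0) {   /* EBUSY means we would have had to wait */
    /* ... brief critical section ... */
    rwlock_unlock(rw, tcr);
    return true;
  }
  return false;
}
#endif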
1873
1874#ifndef USE_FUTEX
1875int
1876rwlock_try_rlock(rwlock *rw, TCR *tcr)
1877{
1878  int ret = EBUSY;
1879
1880  LOCK_SPINLOCK(rw->spin,tcr);
1881  if (rw->state <= 0) {
1882    --rw->state;
1883    ret = 0;
1884  }
1885  RELEASE_SPINLOCK(rw->spin);
1886  return ret;
1887}
1888#else
1889int
1890rwlock_try_rlock(rwlock *rw, TCR *tcr)
1891{
1892  int ret = EBUSY;
1893
1894  lock_futex(&rw->spin);
1895  if (rw->state <= 0) {
1896    --rw->state;
1897    ret = 0;
1898  }
1899  unlock_futex(&rw->spin);
1900  return ret;
1901}
1902#endif
1903
1904
1905
1906#ifndef USE_FUTEX
1907int
1908rwlock_unlock(rwlock *rw, TCR *tcr)
1909{
1910
1911  int err = 0;
1912  natural blocked_readers = 0;
1913
1914  LOCK_SPINLOCK(rw->spin,tcr);
1915  if (rw->state > 0) {
1916    if (rw->writer != tcr) {
1917      err = EINVAL;
1918    } else {
1919      --rw->state;
1920      if (rw->state == 0) {
1921        rw->writer = NULL;
1922      }
1923    }
1924  } else {
1925    if (rw->state < 0) {
1926      ++rw->state;
1927    } else {
1928      err = EINVAL;
1929    }
1930  }
1931  if (err) {
1932    RELEASE_SPINLOCK(rw->spin);
1933    return err;
1934  }
1935 
1936  if (rw->state == 0) {
1937    if (rw->blocked_writers) {
1938      SEM_RAISE(rw->writer_signal);
1939    } else {
1940      blocked_readers = rw->blocked_readers;
1941      if (blocked_readers) {
1942        SEM_BROADCAST(rw->reader_signal, blocked_readers);
1943      }
1944    }
1945  }
1946  RELEASE_SPINLOCK(rw->spin);
1947  return 0;
1948}
1949#else
1950int
1951rwlock_unlock(rwlock *rw, TCR *tcr)
1952{
1953
1954  int err = 0;
1955
1956  lock_futex(&rw->spin);
1957  if (rw->state > 0) {
1958    if (rw->writer != tcr) {
1959      err = EINVAL;
1960    } else {
1961      --rw->state;
1962      if (rw->state == 0) {
1963        rw->writer = NULL;
1964      }
1965    }
1966  } else {
1967    if (rw->state < 0) {
1968      ++rw->state;
1969    } else {
1970      err = EINVAL;
1971    }
1972  }
1973  if (err) {
1974    unlock_futex(&rw->spin);
1975    return err;
1976  }
1977 
1978  if (rw->state == 0) {
1979    if (rw->blocked_writers) {
1980      ++rw->writer_signal;
1981      unlock_futex(&rw->spin);
1982      futex_wake(&rw->writer_signal,1);
1983      return 0;
1984    }
1985    if (rw->blocked_readers) {
1986      ++rw->reader_signal;
1987      unlock_futex(&rw->spin);
1988      futex_wake(&rw->reader_signal, INT_MAX);
1989      return 0;
1990    }
1991  }
1992  unlock_futex(&rw->spin);
1993  return 0;
1994}
1995#endif
1996
1997       
1998void
1999rwlock_destroy(rwlock *rw)
2000{
2001#ifndef USE_FUTEX
2002  destroy_semaphore((void **)&rw->reader_signal);
2003  destroy_semaphore((void **)&rw->writer_signal);
2004#endif
2005  postGCfree((void *)(rw->malloced_ptr));
2006}
2007
2008
2009