source: branches/working-0710/ccl/lisp-kernel/thread_manager.c @ 7418

Last change on this file since 7418 was 7418, checked in by gb, 13 years ago

Lots of changes to support new rwlocks, heap freeze, deferred GC.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 31.7 KB
Line 
1/*
2   Copyright (C) 1994-2001 Digitool, Inc
3   This file is part of OpenMCL. 
4
5   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
6   License , known as the LLGPL and distributed with OpenMCL as the
7   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
8   which is distributed with OpenMCL as the file "LGPL".  Where these
9   conflict, the preamble takes precedence. 
10
11   OpenMCL is referenced in the preamble as the "LIBRARY."
12
13   The LLGPL is also available online at
14   http://opensource.franz.com/preamble.html
15*/
16
17
18#include "Threads.h"
19
/*
   If we suspend via signals - and if the "suspend" signal is masked
   in the handler for that signal - then it's not possible to suspend
   a thread that's still waiting to be resumed (which is what
   WAIT_FOR_RESUME_ACK is all about.)
*/
26#define WAIT_FOR_RESUME_ACK 0
27#define RESUME_VIA_RESUME_SEMAPHORE 1
28#define SUSPEND_RESUME_VERBOSE 0
29
/* Bundle of arguments passed from xNewThread() to lisp_thread_entry()
   when creating a new lisp thread. */
typedef struct {
  TCR *tcr;                     /* filled in by the new thread itself */
  natural vsize, tsize;         /* requested value-stack / temp-stack sizes */
  void *created;                /* semaphore raised once the thread is set up */
} thread_activation;
35
36#ifdef HAVE_TLS
37__thread TCR current_tcr;
38#endif
39
40extern natural
41store_conditional(natural*, natural, natural);
42
43extern signed_natural
44atomic_swap(signed_natural*, signed_natural);
45
46
/* Deliver a "process interrupt" to the target thread, via
   SIGNAL_FOR_PROCESS_INTERRUPT.  Returns the pthread_kill() result
   (0 on success, an errno value on failure).  The Mach path is
   compiled out (DARWIN_not_yet). */
int
raise_thread_interrupt(TCR *target)
{
#ifdef DARWIN_not_yet
  if (use_mach_exception_handling) {
    return mach_raise_thread_interrupt(target);
  }
#endif
  return pthread_kill((pthread_t)target->osid, SIGNAL_FOR_PROCESS_INTERRUPT);
}
57
58signed_natural
59atomic_incf_by(signed_natural *ptr, signed_natural by)
60{
61  signed_natural old, new;
62  do {
63    old = *ptr;
64    new = old+by;
65  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
66           (natural) old);
67  return new;
68}
69
70signed_natural
71atomic_incf(signed_natural *ptr)
72{
73  return atomic_incf_by(ptr, 1);
74}
75
/* Atomically decrement *ptr, but never below zero: if *ptr is already
   0 the stored value stays 0.  Returns old-1 in all cases, so a return
   value of -1 signals that the count was already zero -- NOTE(review):
   presumably intentional (callers like resume_tcr test the result);
   confirm against callers. */
signed_natural
atomic_decf(signed_natural *ptr)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old == 0 ? old : old-1;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return old-1;
}
87
88
89int spin_lock_tries = 1;
90
91void
92get_spin_lock(signed_natural *p, TCR *tcr)
93{
94  int i, n = spin_lock_tries;
95 
96  while (1) {
97    for (i = 0; i < n; i++) {
98      if (atomic_swap(p,(signed_natural)tcr) == 0) {
99        return;
100      }
101    }
102    sched_yield();
103  }
104}
105
106
/* Acquire the recursive lock 'm' on behalf of 'tcr' (looked up if NULL),
   blocking until it is available.  If the caller already owns the lock,
   just bump the recursion count.  Always returns 0. */
int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    /* Recursive acquisition: no contention possible. */
    m->count++;
    return 0;
  }
  while (1) {
    LOCK_SPINLOCK(m->spinlock,tcr);
    ++m->avail;
    /* avail transitioning 0->1 means we got the lock; any higher value
       means someone else holds it and we must wait on the semaphore
       (raised by unlock_recursive_lock). */
    if (m->avail == 1) {
      m->owner = tcr;
      m->count = 1;
      RELEASE_SPINLOCK(m->spinlock);
      break;
    }
    RELEASE_SPINLOCK(m->spinlock);
    SEM_WAIT_FOREVER(m->signal);
  }
  return 0;
}
132
133 
/* Release one level of the recursive lock 'm'.  Returns 0 on success,
   EPERM if the calling thread is not the owner.  When the outermost
   level is released, wake one waiter (if any) via the semaphore. */
int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM, pending;

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      /* Outermost unlock: compute how many waiters are pending and
         hand the lock off.  NOTE(review): pending is decremented twice
         ("avail-1" and "--pending") -- presumably accounting for both
         this thread and the waiter about to be woken; confirm against
         lock_recursive_lock's avail/waiting protocol. */
      LOCK_SPINLOCK(m->spinlock,tcr);
      m->owner = NULL;
      pending = m->avail-1 + m->waiting;     /* Don't count us */
      m->avail = 0;
      --pending;
      if (pending > 0) {
        m->waiting = pending;
      } else {
        m->waiting = 0;
      }
      RELEASE_SPINLOCK(m->spinlock);
      if (pending >= 0) {
        SEM_RAISE(m->signal);
      }
    }
    ret = 0;
  }
  return ret;
}
165
/* Tear down a recursive lock: destroy its semaphore and release the
   (cache-line-aligned) allocation via postGCfree.  The lock must not
   be in use. */
void
destroy_recursive_lock(RECURSIVE_LOCK m)
{
  destroy_semaphore((void **)&m->signal);
  postGCfree((void *)(m->malloced_ptr));
}
172
173/*
174  If we're already the owner (or if the lock is free), lock it
175  and increment the lock count; otherwise, return EBUSY without
176  waiting.
177*/
178
179int
180recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
181{
182  TCR *owner = m->owner;
183
184  LOCK_SPINLOCK(m->spinlock,tcr);
185  if (owner == tcr) {
186    m->count++;
187    if (was_free) {
188      *was_free = 0;
189      RELEASE_SPINLOCK(m->spinlock);
190      return 0;
191    }
192  }
193  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
194    m->owner = tcr;
195    m->count = 1;
196    if (was_free) {
197      *was_free = 1;
198    }
199    RELEASE_SPINLOCK(m->spinlock);
200    return 0;
201  }
202
203  RELEASE_SPINLOCK(m->spinlock);
204  return EBUSY;
205}
206
207void
208sem_wait_forever(SEMAPHORE s)
209{
210  int status;
211
212  do {
213#ifdef USE_MACH_SEMAPHORES
214    mach_timespec_t q = {1,0};
215    status = SEM_TIMEDWAIT(s,q);
216#endif
217#ifdef USE_POSIX_SEMAPHORES
218    struct timespec q;
219    gettimeofday((struct timeval *)&q, NULL);
220    q.tv_sec += 1;
221    status = SEM_TIMEDWAIT(s,&q);
222#endif
223  } while (status != 0);
224}
225
/* Wait on semaphore 's' for at most 'seconds' + 'millis' milliseconds.
   Returns 0 on success, ETIMEDOUT/EINTR/other errno on failure.
   POSIX sem_timedwait takes an *absolute* CLOCK_REALTIME deadline,
   hence the gettimeofday() + offset computation. */
int
wait_on_semaphore(void *s, int seconds, int millis)
{
  int nanos = (millis % 1000) * 1000000;
#ifdef USE_POSIX_SEMAPHORES
  int status;

  struct timespec q;
  /* NOTE: relies on timeval/timespec field layout overlap; tv_usec
     lands in tv_nsec and is scaled to nanoseconds below. */
  gettimeofday((struct timeval *)&q, NULL);
  q.tv_nsec *= 1000L;  /* microseconds -> nanoseconds */
   
  q.tv_nsec += nanos;
  /* Normalize so tv_nsec stays in [0, 1e9). */
  if (q.tv_nsec >= 1000000000L) {
    q.tv_nsec -= 1000000000L;
    seconds += 1;
  }
  q.tv_sec += seconds;
  status = SEM_TIMEDWAIT(s, &q);
  if (status < 0) {
    return errno;
  }
  return status;
#endif
#ifdef USE_MACH_SEMAPHORES
  /* Mach timed waits take a *relative* timeout. */
  mach_timespec_t q = {seconds, nanos};
  int status = SEM_TIMEDWAIT(s, q);

 
  /* Map Mach status codes onto errno values. */
  switch (status) {
  case 0: return 0;
  case KERN_OPERATION_TIMED_OUT: return ETIMEDOUT;
  case KERN_ABORTED: return EINTR;
  default: return EINVAL;
  }

#endif
}
263
264
/* Wait on 's' with an optional timeout: a NULL timespec means wait
   forever (returns 0); otherwise delegate to wait_on_semaphore() and
   return its status. */
int
semaphore_maybe_timedwait(void *s, struct timespec *t)
{
  if (t == NULL) {
    SEM_WAIT_FOREVER(s);
    return 0;
  }
  return wait_on_semaphore(s, t->tv_sec, t->tv_nsec / 1000000L);
}
274
275void
276signal_semaphore(SEMAPHORE s)
277{
278  SEM_RAISE(s);
279}
280
281 
282LispObj
283current_thread_osid()
284{
285  return (LispObj)ptr_to_lispobj(pthread_self());
286}
287
288
289
290int thread_suspend_signal = 0, thread_resume_signal = 0;
291
292
293
/* Per-thread exception-handling setup on Linux: nothing to do
   (signal handlers are process-wide). */
void
linux_exception_init(TCR *tcr)
{
}
298
299
/* TCR lookup as used from signal/interrupt context; currently just
   the ordinary get_tcr(). */
TCR *
get_interrupt_tcr(Boolean create)
{
  return get_tcr(create);
}
305 
306  void
307suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
308{
309#ifdef DARWIN_GS_HACK
310  Boolean gs_was_tcr = ensure_gs_pthread();
311#endif
312  TCR *tcr = get_interrupt_tcr(false);
313
314  if (TCR_INTERRUPT_LEVEL(tcr) <= (-2<<fixnumshift)) {
315    SET_TCR_FLAG(tcr,TCR_FLAG_BIT_PENDING_SUSPEND);
316  } else {
317    if (signo == thread_suspend_signal) {
318#if 0
319      sigset_t wait_for;
320#endif
321
322      tcr->suspend_context = context;
323#if 0
324      sigfillset(&wait_for);
325#endif
326      SEM_RAISE(tcr->suspend);
327#if 0
328      sigdelset(&wait_for, thread_resume_signal);
329#endif
330#if 1
331#if RESUME_VIA_RESUME_SEMAPHORE
332      SEM_WAIT_FOREVER(tcr->resume);
333#if SUSPEND_RESUME_VERBOSE
334      fprintf(stderr, "got  resume in 0x%x\n",tcr);
335#endif
336      tcr->suspend_context = NULL;
337#else
338      sigsuspend(&wait_for);
339#endif
340#else
341    do {
342      sigsuspend(&wait_for);
343    } while (tcr->suspend_context);
344#endif 
345    } else {
346      tcr->suspend_context = NULL;
347#if SUSEPEND_RESUME_VERBOSE
348      fprintf(stderr,"got  resume in in 0x%x\n",tcr);
349#endif
350    }
351#if WAIT_FOR_RESUME_ACK
352    SEM_RAISE(tcr->suspend);
353#endif
354  }
355#ifdef DARWIN_GS_HACK
356  if (gs_was_tcr) {
357    set_gs_address(tcr);
358  }
359#endif
360#ifdef DARWIN
361  DarwinSigReturn(context);
362#endif
363#ifdef FREEBSD
364  freebsd_sigreturn(context);
365#endif
366}
367
368 
369
370/*
371  'base' should be set to the bottom (origin) of the stack, e.g., the
372  end from which it grows.
373*/
374 
/* Report the *origin* (bottom, highest address on these stacks-grow-down
   platforms) and size of thread q's C stack.  Each platform's pthread
   API reports the bounds differently, hence the per-OS adjustments. */
void
os_get_stack_bounds(LispObj q,void **base, natural *size)
{
  pthread_t p = (pthread_t)(q);
#ifdef DARWIN
  /* Darwin reports the stack origin directly. */
  *base = pthread_get_stackaddr_np(p);
  *size = pthread_get_stacksize_np(p);
#endif
#ifdef LINUX
  pthread_attr_t attr;

  pthread_getattr_np(p,&attr);
  /* pthread_attr_getstack gives the *lowest* address; convert to the
     origin by adding the size. */
  pthread_attr_getstack(&attr, base, size);
  *(natural *)base += *size;
#endif
#ifdef FREEBSD
  pthread_attr_t attr;
  void * temp_base;
  size_t temp_size;
 

  pthread_attr_init(&attr); 
  pthread_attr_get_np(p, &attr);
  pthread_attr_getstackaddr(&attr,&temp_base);
  pthread_attr_getstacksize(&attr,&temp_size);
  /* As on Linux: lowest address + size = origin. */
  *base = (void *)((natural)temp_base + temp_size);
  *size = temp_size;
#endif

}
405
/* Allocate a counting semaphore with initial value 'count'.
   POSIX: heap-allocated sem_t.  Mach: kernel semaphore port, returned
   cast through void*.  NOTE(review): sem_init/semaphore_create results
   are unchecked; a NULL/0 return here is treated as failure by callers
   such as new_recursive_lock. */
void *
new_semaphore(int count)
{
#ifdef USE_POSIX_SEMAPHORES
  sem_t *s = malloc(sizeof(sem_t));
  sem_init(s, 0, count);
  return s;
#endif
#ifdef USE_MACH_SEMAPHORES
  semaphore_t s = (semaphore_t)0;
  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
  return (void *)(natural)s;
#endif
}
420
/* Allocate and initialize a recursive lock, aligned to a cache-line
   boundary (the raw calloc pointer is stashed in malloced_ptr for
   later freeing).  Returns NULL if either the memory or the semaphore
   allocation fails, releasing whichever succeeded. */
RECURSIVE_LOCK
new_recursive_lock()
{
  extern int cache_block_size;
  /* Over-allocate so the lock itself can be rounded up to a
     cache_block_size boundary. */
  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
  RECURSIVE_LOCK m = NULL;
  void *signal = new_semaphore(0);

  if (p) {
    m = (RECURSIVE_LOCK) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
    m->malloced_ptr = p;
  }

  if (m && signal) {
    m->signal = signal;
    return m;
  }
  /* Partial failure: undo whichever allocation succeeded. */
  if (m) {
    free(p);
  }
  if (signal) {
    destroy_semaphore(&signal);
  }
  return NULL;
}
446
/* Destroy a semaphore created by new_semaphore() and clear the caller's
   pointer.  Safe to call with *s already NULL.

   BUG FIX (POSIX branch): new_semaphore() malloc's the sem_t, but the
   old code only called sem_destroy() and never freed it, leaking one
   sem_t per destroyed semaphore (e.g. four per thread shutdown). */
void
destroy_semaphore(void **s)
{
  if (*s) {
#ifdef USE_POSIX_SEMAPHORES
    sem_destroy((sem_t *)*s);
    free(*s);
#endif
#ifdef USE_MACH_SEMAPHORES
    semaphore_destroy(mach_task_self(),((semaphore_t)(natural) *s));
#endif
    *s=NULL;
  }
}
460
461void
462tsd_set(LispObj key, void *datum)
463{
464  pthread_setspecific((pthread_key_t)key, datum);
465}
466
467void *
468tsd_get(LispObj key)
469{
470  return pthread_getspecific((pthread_key_t)key);
471}
472
473void
474dequeue_tcr(TCR *tcr)
475{
476  TCR *next, *prev;
477
478  next = tcr->next;
479  prev = tcr->prev;
480
481  prev->next = next;
482  next->prev = prev;
483  tcr->prev = tcr->next = NULL;
484#ifdef X8664
485  tcr->linear = NULL;
486#endif
487}
488 
/* Insert 'new' at the tail of the circular ring of live TCRs (whose
   head is the initial thread's TCR), under TCR_AREA_LOCK. */
void
enqueue_tcr(TCR *new)
{
  TCR *head, *tail;
 
  LOCK(lisp_global(TCR_AREA_LOCK),new);
  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
  tail = head->prev;
  tail->next = new;
  head->prev = new;
  new->prev = tail;
  new->next = head;
  UNLOCK(lisp_global(TCR_AREA_LOCK),new);
}
503
504TCR *
505allocate_tcr()
506{
507  TCR *tcr, *chain = NULL, *next;
508#ifdef DARWIN
509  extern Boolean use_mach_exception_handling;
510  kern_return_t kret;
511  mach_port_t
512    thread_exception_port,
513    task_self = mach_task_self();
514#endif
515  for (;;) {
516    tcr = calloc(1, sizeof(TCR));
517#ifdef DARWIN
518#if WORD_SIZE == 64
519    if (((unsigned)((natural)tcr)) != ((natural)tcr)) {
520      tcr->next = chain;
521      chain = tcr;
522      continue;
523    }
524#endif
525    if (use_mach_exception_handling) {
526      thread_exception_port = (mach_port_t)((natural)tcr);
527      kret = mach_port_allocate_name(task_self,
528                                     MACH_PORT_RIGHT_RECEIVE,
529                                     thread_exception_port);
530    } else {
531      kret = KERN_SUCCESS;
532    }
533
534    if (kret != KERN_SUCCESS) {
535      tcr->next = chain;
536      chain = tcr;
537      continue;
538    }
539#endif
540    for (next = chain; next;) {
541      next = next->next;
542      free(chain);
543    }
544    return tcr;
545  }
546}
547
548#ifdef X8664
549#ifdef LINUX
550#include <asm/prctl.h>
551#include <sys/prctl.h>
552#endif
553#ifdef FREEBSD
554#include <machine/sysarch.h>
555#endif
556
/* x86-64 only: point the GS segment base at this thread's TCR so lisp
   code can address it via a segment override.  Darwin has no user API
   for this (see DARWIN_GS_HACK). */
void
setup_tcr_extra_segment(TCR *tcr)
{
#ifdef FREEBSD
  amd64_set_gsbase(tcr);
#endif
#ifdef LINUX
  arch_prctl(ARCH_SET_GS, (natural)tcr);
#endif
#ifdef DARWIN
  /* There's no way to do this yet.  See DARWIN_GS_HACK */
  /* darwin_set_x8664_fs_reg(tcr); */
#endif
}
571
572#endif
573
574
575
576/*
577  Caller must hold the area_lock.
578*/
/* Create and initialize a TCR for a new lisp thread: allocate its
   semaphores, value stack and temp stack, and set up FP control and
   thread-local-binding state.  (The comment above notes the caller
   must hold the area lock, though TCR_AREA_LOCK is in fact taken
   below around the stack allocations.) */
TCR *
new_tcr(natural vstack_size, natural tstack_size)
{
  extern area
    *allocate_vstack_holding_area_lock(unsigned),
    *allocate_tstack_holding_area_lock(unsigned);
  area *a;
  int i;
  sigset_t sigmask;

  /* Run with all signals unblocked. */
  sigemptyset(&sigmask);
  pthread_sigmask(SIG_SETMASK,&sigmask, NULL);
#ifdef HAVE_TLS
  TCR *tcr = &current_tcr;      /* TCR lives in __thread storage */
#ifdef X8664
  setup_tcr_extra_segment(tcr);
#endif
#else
  TCR *tcr = allocate_tcr();    /* heap-allocated */
#endif

#ifdef X8664
  tcr->linear = tcr;
#endif

#if (WORD_SIZE == 64)
  tcr->single_float_convert.tag = subtag_single_float;
#endif
  /* TCR_COUNT is kept as a fixnum. */
  lisp_global(TCR_COUNT) += (1<<fixnumshift);
  tcr->suspend = new_semaphore(0);
  tcr->resume = new_semaphore(0);
  tcr->reset_completion = new_semaphore(0);
  tcr->activate = new_semaphore(0);
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = allocate_vstack_holding_area_lock(vstack_size);
  tcr->vs_area = a;
  a->owner = tcr;
  tcr->save_vsp = (LispObj *) a->active; 
  a = allocate_tstack_holding_area_lock(tstack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->ts_area = a;
  a->owner = tcr;
  tcr->save_tsp = (LispObj *) a->active;
#ifdef X86
  tcr->next_tsp = tcr->save_tsp;
#endif

  /* Not running lisp code until activated. */
  tcr->valence = TCR_STATE_FOREIGN;
#ifdef PPC
  tcr->lisp_fpscr.words.l = 0xd0;
#endif
#ifdef X86
  tcr->lisp_mxcsr = (1 << MXCSR_DM_BIT) | 
#if 1                           /* Mask underflow; too hard to
                                   deal with denorms if underflow is
                                   enabled */
    (1 << MXCSR_UM_BIT) | 
#endif
    (1 << MXCSR_PM_BIT);
#endif
  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
  /* Thread-local binding vector: 2048 slots, all initially unbound. */
  tcr->tlb_limit = 2048<<fixnumshift;
  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
  for (i = 0; i < 2048; i++) {
    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
  }
  /* NOTE(review): "fixnum_shift" vs "fixnumshift" above -- presumably
     both expand to the same value; confirm in the headers. */
  TCR_INTERRUPT_LEVEL(tcr) = (LispObj) (-1<<fixnum_shift);
  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
  return tcr;
}
649
/* pthread TSD destructor for the TCR key: tear down a thread's lisp
   state when the thread exits.  shutdown_count lets the teardown be
   deferred across destructor iterations (and lets tcr_cleanup force it
   by setting the count to 1); until it reaches zero we just re-store
   the TSD value so the destructor runs again. */
void
shutdown_thread_tcr(void *arg)
{
  TCR *tcr = TCR_FROM_TSD(arg);

  area *vs, *ts, *cs;
  void *termination_semaphore;
 
  if (--(tcr->shutdown_count) == 0) {
    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
      /* Foreign thread: let lisp run its thread-exit callback, with
         the TCR temporarily re-installed in TSD. */
      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
   
      tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
      tsd_set(lisp_global(TCR_KEY), NULL);
    }
#ifdef DARWIN
    darwin_exception_cleanup(tcr);
#endif
    LOCK(lisp_global(TCR_AREA_LOCK),tcr);
    /* Detach and condemn the thread's stack areas under the lock. */
    vs = tcr->vs_area;
    tcr->vs_area = NULL;
    ts = tcr->ts_area;
    tcr->ts_area = NULL;
    cs = tcr->cs_area;
    tcr->cs_area = NULL;
    if (vs) {
      condemn_area_holding_area_lock(vs);
    }
    if (ts) {
      condemn_area_holding_area_lock(ts);
    }
    if (cs) {
      condemn_area_holding_area_lock(cs);
    }
    destroy_semaphore(&tcr->suspend);
    destroy_semaphore(&tcr->resume);
    destroy_semaphore(&tcr->reset_completion);
    destroy_semaphore(&tcr->activate);
    free(tcr->tlb_pointer);
    tcr->tlb_pointer = NULL;
    tcr->tlb_limit = 0;
    tcr->osid = 0;              /* marks the TCR as dead */
    tcr->interrupt_pending = 0;
    termination_semaphore = tcr->termination_semaphore;
    tcr->termination_semaphore = NULL;
#ifdef HAVE_TLS
    dequeue_tcr(tcr);
#endif
    UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
    /* Signal anyone waiting for this thread to die -- after the lock
       is released. */
    if (termination_semaphore) {
      SEM_RAISE(termination_semaphore);
    }
  } else {
    /* Not yet: re-arm the TSD destructor for another pass. */
    tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
  }
}
708
/* pthread cleanup handler installed by lisp_thread_entry: empty the
   thread's stack areas, mark it foreign, and force an immediate full
   teardown via shutdown_thread_tcr (shutdown_count = 1 means the next
   decrement reaches zero). */
void
tcr_cleanup(void *arg)
{
  TCR *tcr = (TCR *)arg;
  area *a;

  /* Make each area appear empty (active == high). */
  a = tcr->vs_area;
  if (a) {
    a->active = a->high;
  }
  a = tcr->ts_area;
  if (a) {
    a->active = a->high;
  }
  a = tcr->cs_area;
  if (a) {
    a->active = a->high;
  }
  tcr->valence = TCR_STATE_FOREIGN;
  tcr->shutdown_count = 1;
  shutdown_thread_tcr(tcr);
  tsd_set(lisp_global(TCR_KEY), NULL);
}
732
/* An OS-native identifier for the calling thread, cast to void*.
   NOTE(review): on Linux this is getpid() -- with NPTL that is the
   process id, not a per-thread id; presumably adequate for its use in
   thread_init_tcr.  Darwin uses the Mach thread port; the BSDs use the
   pthread handle. */
void *
current_native_thread_id()
{
  return ((void *) (natural)
#ifdef LINUX
          getpid()
#endif
#ifdef DARWIN
          mach_thread_self()
#endif
#ifdef FREEBSD
          pthread_self()
#endif
#ifdef SOLARIS
          pthread_self()
#endif
          );
}
751
752
/* Bind 'tcr' to the calling OS thread: record thread ids, register
   the C stack area described by stack_base/stack_size, install the
   TCR in thread-specific data, and do per-OS exception setup. */
void
thread_init_tcr(TCR *tcr, void *stack_base, natural stack_size)
{
  area *a, *register_cstack_holding_area_lock(BytePtr, natural);

  tcr->osid = current_thread_osid();
  tcr->native_thread_id = current_native_thread_id();
  LOCK(lisp_global(TCR_AREA_LOCK),tcr);
  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
  UNLOCK(lisp_global(TCR_AREA_LOCK),tcr);
  tcr->cs_area = a;
  a->owner = tcr;
  /* Foreign threads don't get a lisp-visible control-stack limit. */
  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
  }
#ifdef LINUX
#ifdef PPC
#ifndef PPC64
  /* Remember the TOC/GOT register for foreign callbacks. */
  tcr->native_thread_info = current_r2;
#endif
#endif
#endif
  /* errno is thread-local; cache its address for lisp's benefit. */
  tcr->errno_loc = &errno;
  tsd_set(lisp_global(TCR_KEY), TCR_TO_TSD(tcr));
#ifdef DARWIN
  extern Boolean use_mach_exception_handling;
  if (use_mach_exception_handling) {
    darwin_exception_init(tcr);
  }
#endif
#ifdef LINUX
  linux_exception_init(tcr);
#endif
  tcr->log2_allocation_quantum = unbox_fixnum(lisp_global(DEFAULT_ALLOCATION_QUANTUM));
}
788
789/*
790  Register the specified tcr as "belonging to" the current thread.
791  Under Darwin, setup Mach exception handling for the thread.
792  Install cleanup handlers for thread termination.
793*/
794void
795register_thread_tcr(TCR *tcr)
796{
797  void *stack_base = NULL;
798  natural stack_size = 0;
799
800  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
801  thread_init_tcr(tcr, stack_base, stack_size);
802  enqueue_tcr(tcr);
803}
804
805
806 
807 
808#ifndef MAP_GROWSDOWN
809#define MAP_GROWSDOWN 0
810#endif
811
812Ptr
813create_stack(int size)
814{
815  Ptr p;
816  size=align_to_power_of_2(size, log2_page_size);
817  p = (Ptr) mmap(NULL,
818                     (size_t)size,
819                     PROT_READ | PROT_WRITE | PROT_EXEC,
820                     MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
821                     -1,        /* Darwin insists on this when not mmap()ing
822                                 a real fd */
823                     0);
824  if (p != (Ptr)(-1)) {
825    *((size_t *)p) = size;
826    return p;
827  }
828  allocation_failure(true, size);
829
830}
831 
/* Thin wrapper over create_stack() with a void* return type. */
void *
allocate_stack(unsigned size)
{
  return create_stack(size);
}
837
/* Unmap a stack created by create_stack(); its total size was stored
   in the first word of the mapping. */
void
free_stack(void *s)
{
  size_t mapped_size = *((size_t *)s);

  munmap(s, mapped_size);
}
844
845Boolean threads_initialized = false;
846
/* Detect multiprocessor machines and raise spin_lock_tries so spin
   locks actually spin before yielding (spinning is pointless on a
   uniprocessor). */
void
count_cpus()
{
#ifdef DARWIN
  /* As of OSX 10.4, Darwin doesn't define _SC_NPROCESSORS_ONLN */
#include <mach/host_info.h>

  struct host_basic_info info;
  mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
 
  if (KERN_SUCCESS == host_info(mach_host_self(), HOST_BASIC_INFO,(host_info_t)(&info),&count)) {
    if (info.max_cpus > 1) {
      spin_lock_tries = 1024;
    }
  }
#else
  int n = sysconf(_SC_NPROCESSORS_ONLN);
 
  if (n > 1) {
    spin_lock_tries = 1024;
  }
#endif
}
870
/* One-time thread-system initialization for the initial thread:
   record its TCR, create the TSD key (with shutdown_thread_tcr as the
   destructor run at thread exit), install signal handlers, and size
   the spin-lock retry count. */
void
init_threads(void * stack_base, TCR *tcr)
{
  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
  thread_signal_setup();
  count_cpus();
  threads_initialized = true;
}
880
881
882void *
883lisp_thread_entry(void *param)
884{
885  thread_activation *activation = (thread_activation *)param;
886  TCR *tcr = new_tcr(activation->vsize, activation->vsize);
887  sigset_t mask, old_mask;
888
889  sigemptyset(&mask);
890  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);
891
892  register_thread_tcr(tcr);
893
894  pthread_cleanup_push(tcr_cleanup,(void *)tcr);
895  tcr->vs_area->active -= node_size;
896  *(--tcr->save_vsp) = lisp_nil;
897  enable_fp_exceptions();
898  SET_TCR_FLAG(tcr,TCR_FLAG_BIT_AWAITING_PRESET);
899  activation->tcr = tcr;
900  SEM_RAISE(activation->created);
901  do {
902    SEM_RAISE(tcr->reset_completion);
903    SEM_WAIT_FOREVER(tcr->activate);
904    /* Now go run some lisp code */
905    start_lisp(TCR_TO_TSD(tcr),0);
906  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
907  pthread_cleanup_pop(true);
908
909}
910
911
912void *
913xNewThread(natural control_stack_size,
914           natural value_stack_size,
915           natural temp_stack_size)
916
917{
918  thread_activation activation;
919  TCR *current = get_tcr(false);
920
921
922  activation.tsize = temp_stack_size;
923  activation.vsize = value_stack_size;
924  activation.tcr = 0;
925  activation.created = new_semaphore(0);
926  if (create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT), 
927                           NULL, 
928                           lisp_thread_entry,
929                           (void *) &activation)) {
930   
931    SEM_WAIT_FOREVER(activation.created);       /* Wait until thread's entered its initial function */
932  }
933  destroy_semaphore(&activation.created); 
934  return TCR_TO_TSD(activation.tcr);
935}
936
937Boolean
938active_tcr_p(TCR *q)
939{
940  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
941 
942  do {
943    if (p == q) {
944      return true;
945    }
946    p = p->next;
947  } while (p != head);
948  return false;
949}
950
951
/* Kill another lisp thread via pthread_cancel.  Refuses to dispose of
   the initial thread, the calling thread, or a thread that is no
   longer on the live ring.  NOTE(review): -50 is presumably the
   classic MacOS paramErr; confirm against the OSErr conventions this
   API emulates. */
OSErr
xDisposeThread(TCR *tcr)
{
  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
      pthread_cancel((pthread_t)(tcr->osid));
      return 0;
    }
  }
  return -50;
}
963
964OSErr
965xYieldToThread(TCR *target)
966{
967  Bug(NULL, "xYieldToThread ?");
968  return 0;
969}
970 
971OSErr
972xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
973{
974  Bug(NULL, "xThreadCurrentStackSpace ?");
975  return 0;
976}
977
978
979
/* Create a detached OS thread running start_routine(param).  Returns
   the pthread handle boxed as a LispObj (0 if pthread_create failed,
   since returned_thread is preinitialized).  stackaddr, if non-NULL,
   supplies a caller-allocated stack of stack_size bytes. */
LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
  pthread_attr_t attr;
  pthread_t returned_thread = (pthread_t) 0;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 

  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
    stack_size = PTHREAD_STACK_MIN;
  }

  stack_size = ensure_stack_limit(stack_size);
  if (stackaddr != NULL) {
    /* Size must have been specified.  Sort of makes sense ... */
#ifdef DARWIN
    Fatal("no pthread_attr_setsetstack. "," Which end of stack does address refer to?");
#else
    pthread_attr_setstack(&attr, stackaddr, stack_size);
#endif
  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
    pthread_attr_setstacksize(&attr,stack_size);
  }

  /*
     I think that's just about enough ... create the thread.
     NOTE(review): pthread_create's return value is unchecked; failure
     is only observable as a 0 handle.
  */
  pthread_create(&returned_thread, &attr, start_routine, param);
  return (LispObj) ptr_to_lispobj(returned_thread);
}
1014
/* Return the calling thread's TCR (from TLS or thread-specific data).
   If there is none and 'create' is true, build one for this foreign
   thread: allocate the TCR and stacks, register it, reserve space on
   the value stack for saved registers and lisp's thread-local
   bindings, and run the lisp-side foreign-thread setup callback. */
TCR *
get_tcr(Boolean create)
{
#ifdef HAVE_TLS
  TCR *current = current_tcr.linear;
#else
  void *tsd = (void *)tsd_get(lisp_global(TCR_KEY));
  TCR *current = (tsd == NULL) ? NULL : TCR_FROM_TSD(tsd);
#endif

  if ((current == NULL) && create) {
    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
    int i, nbindwords = 0;
    extern unsigned initial_stack_size;
   
    /* Make one. */
    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
    SET_TCR_FLAG(current,TCR_FLAG_BIT_FOREIGN);
    register_thread_tcr(current);
#ifdef DEBUG_TCR_CREATION
    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
#endif
    /* Push NIL... */
    current->vs_area->active -= node_size;
    *(--current->save_vsp) = lisp_nil;
#ifdef PPC
#define NSAVEREGS 8
#endif
#ifdef X8664
#define NSAVEREGS 4
#endif
    /* ...then zeroed slots for the callee-saved register set... */
    for (i = 0; i < NSAVEREGS; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    /* ...then as many binding words as the lisp callback asks for
       (callback with -1 queries the count). */
    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
    for (i = 0; i < nbindwords; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= node_size;
    }
    current->shutdown_count = 1;
    /* Callback with 0: finish foreign-thread initialization in lisp. */
    ((void (*)())ptr_from_lispobj(callback_ptr))(0);

  }
 
  return current;
}
1062
1063
/* Ask the thread owning 'tcr' to suspend itself by sending the
   suspend signal.  Only the 0->1 transition of suspend_count actually
   signals; nested suspends just count.  Returns true if this call
   initiated a suspension (the caller should then wait for the ack via
   tcr_suspend_ack). */
Boolean
suspend_tcr(TCR *tcr)
{
  int suspend_count = atomic_incf(&(tcr->suspend_count));
  if (suspend_count == 1) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Suspending 0x%x\n", tcr);
#endif
#ifdef DARWIN_nope
    if (mach_suspend_tcr(tcr)) {
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_ALT_SUSPEND);
      return true;
    }
#endif
    if (pthread_kill((pthread_t)(tcr->osid), thread_suspend_signal) == 0) {
      /* Signal sent; the handler will raise tcr->suspend when parked. */
      SET_TCR_FLAG(tcr,TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
    } else {
      /* A problem using pthread_kill.  On Darwin, this can happen
         if the thread has had its signal mask surgically removed
         by pthread_exit.  If the native (Mach) thread can be suspended,
         do that and return true; otherwise, flag the tcr as belonging
         to a dead thread by setting tcr->osid to 0.
      */
      tcr->osid = 0;
      return false;
    }
    return true;
  }
  return false;
}
1094
/* Wait for the suspend signal handler's acknowledgement (it raises
   tcr->suspend once the thread is parked), then clear the pending
   flag.  Always returns true; a no-op if no ack is pending. */
Boolean
tcr_suspend_ack(TCR *tcr)
{
  if (tcr->flags & (1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING)) {
    SEM_WAIT_FOREVER(tcr->suspend);
    tcr->flags &= ~(1<<TCR_FLAG_BIT_SUSPEND_ACK_PENDING);
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Suspend ack from 0x%x\n", tcr);
#endif

  }
  return true;
}
1108
1109     
1110
1111
/* Lisp-visible entry point: suspend one thread and wait until it is
   actually parked.  TCR_AREA_LOCK serializes this against other
   suspend/resume traffic; on Darwin the Mach exception lock is also
   held so the suspended thread isn't mid-exception. */
Boolean
lisp_suspend_tcr(TCR *tcr)
{
  Boolean suspended;
  TCR *current = get_tcr(true);
 
  LOCK(lisp_global(TCR_AREA_LOCK),current);
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (use_mach_exception_handling) {
    pthread_mutex_lock(mach_exception_lock);
  }
#endif
#endif
  suspended = suspend_tcr(tcr);
  if (suspended) {
    while (!tcr_suspend_ack(tcr));
  }
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (use_mach_exception_handling) {
    pthread_mutex_unlock(mach_exception_lock);
  }
#endif
#endif
  UNLOCK(lisp_global(TCR_AREA_LOCK),current);
  return suspended;
}
1140         
1141
/* Undo one level of suspension.  Only when the (clamped) suspend
   count drops to zero is the thread actually released, either by
   raising its 'resume' semaphore or by sending the resume signal,
   depending on RESUME_VIA_RESUME_SEMAPHORE.  Returns true if the
   thread was released by this call. */
Boolean
resume_tcr(TCR *tcr)
{
  int suspend_count = atomic_decf(&(tcr->suspend_count)), err;
  if (suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr,"Mach resume to 0x%x\n", tcr);
#endif
      mach_resume_tcr(tcr);
      return true;
    }
#endif
#if RESUME_VIA_RESUME_SEMAPHORE
    SEM_RAISE(tcr->resume);
#else
    if ((err = (pthread_kill((pthread_t)(tcr->osid), thread_resume_signal))) != 0) {
      Bug(NULL, "pthread_kill returned %d on thread #x%x", err, tcr->osid);
    }
#endif
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "Sent resume to 0x%x\n", tcr);
#endif
    return true;
  }
  return false;
}
1170
/* After resume_tcr(), optionally wait until the resumed thread has
   actually left its handler (only when WAIT_FOR_RESUME_ACK is
   configured; it is 0 by default, making this mostly a flag-clearing
   no-op).  A no-op if the thread is still suspended (count > 0). */
void
wait_for_resumption(TCR *tcr)
{
  if (tcr->suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
      return;
    }
#endif
#if WAIT_FOR_RESUME_ACK
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "waiting for resume in 0x%x\n",tcr);
#endif
    SEM_WAIT_FOREVER(tcr->suspend);
#endif
  }
}
1189   
1190
1191
1192Boolean
1193lisp_resume_tcr(TCR *tcr)
1194{
1195  Boolean resumed;
1196  TCR *current = get_tcr(true);
1197 
1198  LOCK(lisp_global(TCR_AREA_LOCK),current);
1199  resumed = resume_tcr(tcr);
1200  wait_for_resumption(tcr);
1201  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
1202  return resumed;
1203}
1204
1205
1206TCR *freed_tcrs = NULL;
1207
/* Remember a dead thread's TCR for later freeing (see
   free_freed_tcrs).  With HAVE_TLS the TCR lives in thread-local
   storage and must not be heap-freed, so this is a no-op. */
void
enqueue_freed_tcr (TCR *tcr)
{
#ifndef HAVE_TLS
  tcr->next = freed_tcrs;
  freed_tcrs = tcr;
#endif
}
1216
1217/* It's not clear that we can safely condemn a dead tcr's areas, since
1218   we may not be able to call free() if a suspended thread owns a
1219   malloc lock. At least make the areas appear to be empty.
1220*/
1221   
1222
1223void
1224normalize_dead_tcr_areas(TCR *tcr)
1225{
1226  area *a;
1227
1228  a = tcr->vs_area;
1229  if (a) {
1230    a->active = a->high;
1231  }
1232
1233  a = tcr->ts_area;
1234  if (a) {
1235    a->active = a->high;
1236  }
1237
1238  a = tcr->cs_area;
1239  if (a) {
1240    a->active = a->high;
1241  }
1242}
1243   
1244void
1245free_freed_tcrs ()
1246{
1247  TCR *current, *next;
1248
1249  for (current = freed_tcrs; current; current = next) {
1250    next = current->next;
1251#ifndef HAVE_TLS
1252    free(current);
1253#endif
1254  }
1255  freed_tcrs = NULL;
1256}
1257
/* Suspend every thread except the caller (e.g. before GC): send all
   the suspend signals first, then collect all the acks, then unlink
   any TCRs discovered to belong to dead threads.  TCR_AREA_LOCK is
   acquired here and released by resume_other_threads(). */
void
suspend_other_threads(Boolean for_gc)
{
  TCR *current = get_tcr(true), *other, *next;
  int dead_tcr_count = 0;
  Boolean all_acked;

  LOCK(lisp_global(TCR_AREA_LOCK), current);
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (for_gc && use_mach_exception_handling) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "obtaining Mach exception lock in GC thread 0x%x\n", current);
#endif
    pthread_mutex_lock(mach_exception_lock);
  }
#endif
#endif
  /* Pass 1: signal everyone (osid == 0 marks a dead thread). */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      suspend_tcr(other);
      if (other->osid == 0) {
        /* suspend_tcr discovered the thread is dead. */
        dead_tcr_count++;
      }
    } else {
      dead_tcr_count++;
    }
  }

  /* Pass 2: keep polling until every live thread has acked. */
  do {
    all_acked = true;
    for (other = current->next; other != current; other = other->next) {
      if ((other->osid != 0)) {
        if (!tcr_suspend_ack(other)) {
          all_acked = false;
        }
      }
    }
  } while(! all_acked);

     

  /* All other threads are suspended; can safely delete dead tcrs now */
  if (dead_tcr_count) {
    for (other = current->next; other != current; other = next) {
      next = other->next;
      if ((other->osid == 0))  {
        normalize_dead_tcr_areas(other);
        dequeue_tcr(other);
        enqueue_freed_tcr(other);
      }
    }
  }
}
1312
/* Entry point for lisp: suspend all other threads for a non-GC
   reason (so the Mach exception lock is not taken).  Paired with
   lisp_resume_other_threads(). */
void
lisp_suspend_other_threads()
{
  suspend_other_threads(false);
}
1318
/* Undo suspend_other_threads(): resume every live thread, wait for
   each to acknowledge resumption, free any tcrs queued by the
   suspend pass, release the Mach exception lock if it was taken for
   GC, and finally drop TCR_AREA_LOCK (acquired by
   suspend_other_threads). */
void
resume_other_threads(Boolean for_gc)
{
  TCR *current = get_tcr(true), *other;
  /* First pass: signal every live thread to resume ... */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      resume_tcr(other);
    }
  }
  /* ... second pass: wait until each has actually resumed. */
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      wait_for_resumption(other);
    }
  }
  /* Safe to free dead tcrs now that no thread can reference them. */
  free_freed_tcrs();
#ifdef DARWIN
#if USE_MACH_EXCEPTION_LOCK
  if (for_gc && use_mach_exception_handling) {
#if SUSPEND_RESUME_VERBOSE
    fprintf(stderr, "releasing Mach exception lock in GC thread 0x%x\n", current);
#endif
    pthread_mutex_unlock(mach_exception_lock);
  }
#endif
#endif

  UNLOCK(lisp_global(TCR_AREA_LOCK), current);
}
1347
/* Entry point for lisp: resume all threads suspended by
   lisp_suspend_other_threads() (non-GC case). */
void
lisp_resume_other_threads()
{
  resume_other_threads(false);
}
1353
1354
1355
1356rwlock *
1357rwlock_new()
1358{
1359  extern int cache_block_size;
1360
1361  void *p = calloc(1,sizeof(rwlock)+cache_block_size-1);
1362  rwlock *rw;
1363 
1364  if (p) {
1365    rw = (rwlock *) ((((natural)p)+cache_block_size-1) & (~(cache_block_size-1)));
1366    rw->malloced_ptr = p;
1367    rw->reader_signal = new_semaphore(0);
1368    rw->writer_signal = new_semaphore(0);
1369    if ((rw->reader_signal == NULL) || (rw->writer_signal == NULL)) {
1370      if (rw->reader_signal) {
1371        destroy_semaphore(&(rw->reader_signal));
1372      } else {
1373        destroy_semaphore(&(rw->writer_signal));
1374      }
1375      free(rw);
1376      rw = NULL;
1377    }
1378  }
1379  return rw;
1380}
1381
1382     
1383/*
1384  Try to get read access to a multiple-readers/single-writer lock.  If
1385  we already have read access, return success (indicating that the
1386  lock is held another time.  If we already have write access to the
1387  lock ... that won't work; return EDEADLK.  Wait until no other
1388  thread has or is waiting for write access, then indicate that we
1389  hold read access once.
1390*/
int
rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  int err = 0;
 
  LOCK_SPINLOCK(rw->spin, tcr);

  /* Read-while-holding-write deadlocks; fail fast. */
  if (rw->writer == tcr) {
    RELEASE_SPINLOCK(rw->spin);
    return EDEADLK;
  }

  /* state > 0 means a writer holds the lock; also defer to any
     blocked writers so readers can't starve them.  The spinlock is
     dropped around the semaphore wait and retaken afterwards. */
  while (rw->blocked_writers || (rw->state > 0)) {
    rw->blocked_readers++;
    RELEASE_SPINLOCK(rw->spin);
    err = semaphore_maybe_timedwait(rw->reader_signal,waitfor);
    LOCK_SPINLOCK(rw->spin,tcr);
    rw->blocked_readers--;
    /* A signal-interrupted wait just retries; any other error
       (e.g. timeout) is returned to the caller. */
    if (err == EINTR) {
      err = 0;
    }
    if (err) {
      RELEASE_SPINLOCK(rw->spin);
      return err;
    }
  }
  /* Negative state counts active readers (one unit per hold). */
  rw->state--;
  RELEASE_SPINLOCK(rw->spin);
  return err;
}
1421
1422
1423
1424/*
1425  Try to obtain write access to the lock.
1426  It is an error if we already have read access, but it's hard to
1427  detect that.
1428  If we already have write access, increment the count that indicates
1429  that.
1430  Otherwise, wait until the lock is not held for reading or writing,
1431  then assert write access.
1432*/
1433
1434int
1435rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
1436{
1437  int err = 0;
1438
1439  LOCK_SPINLOCK(rw->spin,tcr);
1440  if (rw->writer == tcr) {
1441    rw->state++;
1442    RELEASE_SPINLOCK(rw->spin);
1443    return 0;
1444  }
1445
1446  while (rw->state != 0) {
1447    rw->blocked_writers++;
1448    RELEASE_SPINLOCK(rw->spin);
1449    err = semaphore_maybe_timedwait(rw->writer_signal, waitfor);
1450    LOCK_SPINLOCK(rw->spin,tcr);
1451    rw->blocked_writers--;
1452    if (err = EINTR) {
1453      err = 0;
1454    }
1455    if (err) {
1456      RELEASE_SPINLOCK(rw->spin);
1457      return err;
1458    }
1459  }
1460  rw->state = 1;
1461  rw->writer = tcr;
1462  RELEASE_SPINLOCK(rw->spin);
1463  return err;
1464}
1465
1466/*
1467  Sort of the same as above, only return EBUSY if we'd have to wait.
1468*/
1469int
1470rwlock_try_wlock(rwlock *rw, TCR *tcr)
1471{
1472  int ret = EBUSY;
1473
1474  LOCK_SPINLOCK(rw->spin,tcr);
1475  if (rw->writer == tcr) {
1476    rw->state++;
1477    ret = 0;
1478  } else {
1479    if (rw->state == 0) {
1480      rw->writer = tcr;
1481      rw->state = 1;
1482      ret = 0;
1483    }
1484  }
1485  RELEASE_SPINLOCK(rw->spin);
1486  return ret;
1487}
1488
1489int
1490rwlock_try_rlock(rwlock *rw, TCR *tcr)
1491{
1492  int ret = EBUSY;
1493
1494  LOCK_SPINLOCK(rw->spin,tcr);
1495  if (rw->state <= 0) {
1496    --rw->state;
1497    ret = 0;
1498  }
1499  RELEASE_SPINLOCK(rw->spin);
1500  return ret;
1501}
1502
1503
1504
/* Release one hold on the lock for this tcr (read or write,
   determined by the sign of state).  Returns EINVAL if the caller
   doesn't hold the lock in the implied mode.  When the last hold is
   released, blocked writers are preferred over blocked readers:
   one writer is woken, or failing that all blocked readers. */
int
rwlock_unlock(rwlock *rw, TCR *tcr)
{

  int err = 0;
  natural blocked_readers = 0;

  LOCK_SPINLOCK(rw->spin,tcr);
  if (rw->state > 0) {
    /* Write-held: only the owning writer may release. */
    if (rw->writer != tcr) {
      err = EINVAL;
    } else {
      --rw->state;
    }
  } else {
    if (rw->state < 0) {
      /* Read-held: drop one reader hold. */
      ++rw->state;
    } else {
      /* Not held at all. */
      err = EINVAL;
    }
  }
  if (err) {
    RELEASE_SPINLOCK(rw->spin);
    return err;
  }
 
  /* Last hold released: wake a writer if any is blocked, otherwise
     broadcast to all blocked readers. */
  if (rw->state == 0) {
    if (rw->blocked_writers) {
      SEM_RAISE(rw->writer_signal);
    } else {
      blocked_readers = rw->blocked_readers;
      if (blocked_readers) {
        SEM_BROADCAST(rw->reader_signal, blocked_readers);
      }
    }
  }
  RELEASE_SPINLOCK(rw->spin);
  return 0;
}
1544
1545       
/* Destroy a rwlock created by rwlock_new(): release both semaphores
   and return the storage via postGCfree() on the original
   (unaligned) malloced_ptr.  NOTE(review): presumably assumes no
   thread holds or is waiting on the lock — confirm at call sites. */
void
rwlock_destroy(rwlock *rw)
{
  destroy_semaphore((void **)&rw->reader_signal);
  destroy_semaphore((void **)&rw->writer_signal);
  postGCfree((void *)(rw->malloced_ptr));
}
1553
1554
1555
Note: See TracBrowser for help on using the repository browser.