source: trunk/ccl/lisp-kernel/thread_manager.c @ 826

Last change on this file since 826 was 826, checked in by gb, 17 years ago

New post-gc freeing mechanism (suspended thread may own malloc lock.)
Similar mechanism for TCR cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 29.4 KB
/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License , known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/


#include "Threads.h"

typedef struct {
  TCR *tcr;
  void *created;
} thread_activation;


extern natural
store_conditional(natural*, natural, natural);

extern signed_natural
atomic_swap(signed_natural*, signed_natural);

signed_natural
atomic_incf_by(signed_natural *ptr, signed_natural by)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old+by;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return new;
}

signed_natural
atomic_incf(signed_natural *ptr)
{
  return atomic_incf_by(ptr, 1);
}

signed_natural
atomic_decf(signed_natural *ptr)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old == 0 ? old : old-1;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return old-1;
}
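
/* Illustrative sketch, not part of the original file: the semantics of
   the primitives above.  atomic_incf_by() is a compare-and-swap retry
   loop; atomic_decf() never moves the location below 0, but still
   returns (old - 1), so a negative result means "nothing to decrement".
   The function name below is hypothetical. */
#if 0
static void
atomic_primitive_examples(void)
{
  signed_natural counter = 0;

  atomic_incf(&counter);          /* counter == 1, returns 1 */
  atomic_incf_by(&counter, 5);    /* counter == 6, returns 6 */

  counter = 0;
  if (atomic_decf(&counter) < 0) {
    /* counter is still 0; the negative return value reports that
       no decrement took place */
  }
}
#endif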


int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr, struct timespec *waitfor)
{
  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;
    return 0;
  }
  while (1) {
    if (atomic_incf(&m->avail) == 1) {
      m->owner = tcr;
      m->count = 1;
      break;
    }
    SEM_WAIT(m->signal);
  }
  return 0;
}

int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM, pending;

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      m->owner = NULL;
      pending = atomic_swap(&m->avail, 0) - 1;
      atomic_incf_by(&m->waiting, pending);
      /* We're counting on atomic_decf not actually decrementing
         the location below 0, but returning a negative result
         in that case.
      */
      if (atomic_decf(&m->waiting) >= 0) {
        SEM_RAISE(m->signal);
      }
      ret = 0;
    }
  }
  return ret;
}
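
/* Illustrative sketch, not part of the original file: typical use of the
   recursive lock above.  A NULL tcr argument makes the lock functions
   look up the caller's TCR themselves; reacquisition by the owner just
   bumps the count, and only the final unlock wakes a waiter.  The
   function name is hypothetical. */
#if 0
static void
recursive_lock_example(RECURSIVE_LOCK m)
{
  lock_recursive_lock(m, NULL, NULL);   /* acquired, count == 1 */
  lock_recursive_lock(m, NULL, NULL);   /* same owner, count == 2 */
  unlock_recursive_lock(m, NULL);       /* count == 1, still held */
  unlock_recursive_lock(m, NULL);       /* count == 0, released */
}
#endif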

void
destroy_recursive_lock(RECURSIVE_LOCK m)
{
  destroy_semaphore((void **)&m->signal);
  postGCfree((void *)(m->malloced_ptr));
}

/*
  If we're already the owner (or if the lock is free), lock it
  and increment the lock count; otherwise, return EBUSY without
  waiting.
*/

int
recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
{
  TCR *owner = m->owner;

  if (owner == tcr) {
    m->count++;
    if (was_free) {
      *was_free = 0;
    }
    return 0;
  }
  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
    m->owner = tcr;
    m->count = 1;
    if (was_free) {
      *was_free = 1;
    }
    return 0;
  }

  return EBUSY;
}

int
wait_on_semaphore(SEMAPHORE s, int seconds, int nanos)
{
#ifdef LINUX
  struct timespec q;
  gettimeofday((struct timeval *)&q, NULL);
  /* tv_usec now occupies the tv_nsec slot; convert microseconds to
     nanoseconds before adding the caller's offset. */
  q.tv_nsec *= 1000;

  q.tv_nsec += nanos;
  if (q.tv_nsec >= 1000000000) {
    q.tv_nsec -= 1000000000;
    seconds += 1;
  }
  q.tv_sec += seconds;
  return SEM_TIMEDWAIT(s, &q);
#endif
#ifdef DARWIN
  mach_timespec_t q = {seconds, nanos};
  do {
    clock_t start = clock();

    int status = SEM_TIMEDWAIT(s, q);
    clock_t finish = clock();

    if (status == KERN_ABORTED) {
      clock_t elapsed = (finish - start);

      int elapsed_seconds = elapsed/CLOCKS_PER_SEC;
      int elapsed_nanos = (elapsed - (elapsed_seconds * CLOCKS_PER_SEC)) * 1000000000/CLOCKS_PER_SEC;

      seconds = seconds - elapsed_seconds - (elapsed_nanos/1000000000);
      if (nanos > 0) {
        nanos = 1000000000 - elapsed_nanos;
      }

      if ((seconds <= 0) && (nanos <= 0)) {
        return 0;
      }

      q.tv_sec = seconds;
      q.tv_nsec = nanos;
    } else {
      return status;
    }
  } while (1);
  /* Possible limit on number of retries? */
#endif
}
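
/* Illustrative sketch, not part of the original file: waiting on a
   semaphore with a relative timeout expressed as separate seconds and
   nanoseconds.  On Linux the pair is turned into an absolute deadline;
   on Darwin the Mach wait is retried with the remaining time if it is
   interrupted with KERN_ABORTED.  The function name is hypothetical. */
#if 0
static void
timed_wait_example(SEMAPHORE s)
{
  /* Wait up to 1.5 seconds for someone to signal_semaphore(s). */
  if (wait_on_semaphore(s, 1, 500000000) != 0) {
    /* timed out or failed */
  }
}
#endif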


void
signal_semaphore(SEMAPHORE s)
{
  SEM_RAISE(s);
}


LispObj
current_thread_osid()
{
  return (LispObj)ptr_to_lispobj(pthread_self());
}

#ifdef SIGRTMIN
#define SIG_SUSPEND_THREAD (SIGRTMIN+6)
#define SIG_RESUME_THREAD (SIG_SUSPEND_THREAD+1)
#else
#define SIG_SUSPEND_THREAD SIGUSR1
#define SIG_RESUME_THREAD SIGUSR2
#endif


int thread_suspend_signal, thread_resume_signal;



void
linux_exception_init(TCR *tcr)
{
}


TCR *
get_interrupt_tcr()
{
#ifndef LINUX
  return get_tcr(true);
#else
  void* callers_r2 = current_r2;

  if (callers_r2 == NULL) {     /* pre-glibc-2.3.2 Linux */
    return get_tcr(true);
  } else {
    TCR  *head = (TCR *)lisp_global(INITIAL_TCR), *current = head;

    /* We can fairly safely assume that r2 contains either the current
       tcr or the current (linux) pthread structure, but we don't know
       which.  We can't lock anything or call any pthreads function until
       we're sure that r2 contains the current pthread pointer.

       We can identify r2 as a TCR if we find it in the global tcr list.
       Entries are only ever removed from the list when all threads other
       than the GC thread are suspended; additions keep the forward
       link (through tcr->next) consistent, so this traversal is safe.
    */
    do {
      if (current == callers_r2) {
        /* r2 contained the tcr.  Set r2 to the native_thread */
        current_r2 = current->native_thread_info;
        return current;
      }
      current = current->next;
    } while (current != head);
    /* r2 is non-null and not any tcr.  Assume that r2 is a pthread
       struct pointer and that it's therefore safe to call get_tcr().
    */
    return get_tcr(true);
  }
#endif
}


void
suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(true);

  if (signo == thread_suspend_signal) {
    sigset_t wait_for;

    tcr->suspend_context = context;
    tcr->suspend_total++;
    sigfillset(&wait_for);
    SEM_RAISE(tcr->suspend);
    sigdelset(&wait_for, thread_resume_signal);
    do {
      sigsuspend(&wait_for);
    } while (tcr->suspend_context);

  } else {
    tcr->suspend_context = NULL;
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

void
thread_signal_setup()
{
  struct sigaction action;
  sigset_t mask, old_mask;

  sigemptyset(&mask);
  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);

  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_resume_signal = SIG_RESUME_THREAD;
  sigfillset(&action.sa_mask);
  sigdelset(&action.sa_mask,thread_suspend_signal);
  action.sa_flags = SA_RESTART | SA_SIGINFO;
  action.sa_sigaction = (void *) suspend_resume_handler;
  sigaction(thread_suspend_signal, &action, NULL);
  sigaction(thread_resume_signal, &action, NULL);
}


/*
  'base' should be set to the bottom (origin) of the stack, i.e., the
  end from which it grows.
*/

void
os_get_stack_bounds(LispObj q,void **base, unsigned *size)
{
  pthread_t p = (pthread_t)ptr_from_lispobj(q);
#ifdef DARWIN
  *base = pthread_get_stackaddr_np(p);
  *size = pthread_get_stacksize_np(p);
#endif
#ifdef LINUX
  pthread_attr_t attr;

  pthread_getattr_np(p,&attr);
  pthread_attr_getstack(&attr, base, size);
  /* pthread_attr_getstack returns the lowest address; convert it to
     the base (highest address) that the caller expects. */
  *(unsigned *)base += *size;
#endif
}

void *
new_semaphore(int count)
{
#ifdef LINUX
  sem_t *s = malloc(sizeof(sem_t));
  sem_init(s, 0, count);
  return s;
#endif
#ifdef DARWIN
  semaphore_t s = (semaphore_t)0;
  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
  return (void *)s;
#endif
}

RECURSIVE_LOCK
new_recursive_lock()
{
  extern int cache_block_size;
  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
  RECURSIVE_LOCK m = NULL;
  void *signal = new_semaphore(0);

  if (p) {
    m = (RECURSIVE_LOCK) ((((unsigned)p)+cache_block_size-1) & (~(cache_block_size-1)));
    m->malloced_ptr = p;
  }

  if (m && signal) {
    m->signal = signal;
    return m;
  }
  if (m) {
    free(p);
  }
  if (signal) {
    destroy_semaphore(&signal);
  }
  return NULL;
}
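
/* Illustrative sketch, not part of the original file: the alignment
   trick used in new_recursive_lock().  Over-allocating by
   (cache_block_size - 1) bytes guarantees that a cache-line-aligned
   address exists inside the block (cache_block_size is assumed to be a
   power of two).  With a 32-byte line and p == 0x1004:

     (0x1004 + 31) & ~31  ==  0x1023 & ~31  ==  0x1020

   The raw pointer is kept in malloced_ptr so the block can be freed
   later.  The helper below is hypothetical. */
#if 0
static void *
align_up(void *p, unsigned block_size)  /* block_size: a power of two */
{
  return (void *)((((unsigned)p) + block_size - 1) & ~(block_size - 1));
}
#endif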

void
destroy_semaphore(void **s)
{
  if (*s) {
#ifdef LINUX
    sem_destroy((sem_t *)*s);
#endif
#ifdef DARWIN
    semaphore_destroy(mach_task_self(),((semaphore_t) *s));
#endif
    *s=NULL;
  }
}

void
tsd_set(LispObj key, void *datum)
{
  pthread_setspecific((pthread_key_t)key, datum);
}

void *
tsd_get(LispObj key)
{
  return pthread_getspecific((pthread_key_t)key);
}

void
dequeue_tcr(TCR *tcr)
{
  TCR *next, *prev;

  next = tcr->next;
  prev = tcr->prev;

  prev->next = next;
  next->prev = prev;
  tcr->prev = tcr->next = NULL;
}

void
enqueue_tcr(TCR *new)
{
  TCR *head, *tail;

  LOCK(lisp_global(TCR_LOCK),new);
  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
  tail = head->prev;
  tail->next = new;
  head->prev = new;
  new->prev = tail;
  new->next = head;
  UNLOCK(lisp_global(TCR_LOCK),new);
}
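
/* Illustrative sketch, not part of the original file: the TCR list is a
   circular, doubly-linked ring anchored at the initial TCR, so every
   traversal uses the do/while idiom below and never needs an
   empty-list check.  The function name is hypothetical. */
#if 0
static int
count_tcrs(void)
{
  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;
  int n = 0;

  do {
    n++;
    p = p->next;
  } while (p != head);
  return n;
}
#endif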


/*
  Caller must hold the area_lock.
*/
TCR *
new_tcr(unsigned vstack_size, unsigned tstack_size)
{
  extern area
    *allocate_vstack_holding_area_lock(unsigned),
    *allocate_tstack_holding_area_lock(unsigned);
  area *a;
  TCR *tcr = calloc(1, sizeof(TCR));
  int i;

  lisp_global(TCR_COUNT) += (1<<fixnumshift);
  tcr->suspend = new_semaphore(0);
  tcr->resume = new_semaphore(0);
  tcr->reset_completion = new_semaphore(0);
  tcr->activate = new_semaphore(0);
  a = allocate_vstack_holding_area_lock(vstack_size);
  tcr->vs_area = a;
  tcr->save_vsp = (LispObj *) a->active;
  a = allocate_tstack_holding_area_lock(tstack_size);
  tcr->ts_area = a;
  tcr->save_tsp = (LispObj *) a->active;
  tcr->valence = TCR_STATE_FOREIGN;
  tcr->interrupt_level = (-1<<fixnum_shift);
  tcr->lisp_fpscr.words.l = 0xd0;
  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
  tcr->tlb_limit = 8192;
  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
  for (i = 0; i < (8192/sizeof(LispObj)); i++) {
    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
  }
  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
  return tcr;
}

void
shutdown_thread_tcr(void *arg)
{
  TCR *tcr = (void *)arg;

  area *vs, *ts, *cs;


  if (--(tcr->shutdown_count) == 0) {
    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;

      tsd_set(lisp_global(TCR_KEY), tcr);
      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
      tsd_set(lisp_global(TCR_KEY), NULL);
    }
#ifdef DARWIN
    darwin_exception_cleanup(tcr);
#endif
    LOCK(lisp_global(AREA_LOCK),tcr);
    vs = tcr->vs_area;
    tcr->vs_area = NULL;
    ts = tcr->ts_area;
    tcr->ts_area = NULL;
    cs = tcr->cs_area;
    tcr->cs_area = NULL;
    if (vs) {
      condemn_area_holding_area_lock(vs);
    }
    if (ts) {
      condemn_area_holding_area_lock(ts);
    }
    if (cs) {
      condemn_area_holding_area_lock(cs);
    }
    destroy_semaphore(&tcr->suspend);
    destroy_semaphore(&tcr->resume);
    destroy_semaphore(&tcr->reset_completion);
    destroy_semaphore(&tcr->activate);
    free(tcr->tlb_pointer);
    tcr->tlb_pointer = NULL;
    tcr->tlb_limit = 0;
    tcr->osid = 0;
    UNLOCK(lisp_global(AREA_LOCK),tcr);
  } else {
    tsd_set(lisp_global(TCR_KEY), tcr);
  }
}
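
/* Illustrative sketch, not part of the original file: why shutdown_count
   starts at PTHREAD_DESTRUCTOR_ITERATIONS.  shutdown_thread_tcr() is
   registered as the TCR key's destructor (see init_threads() below),
   and POSIX repeats a key destructor, up to PTHREAD_DESTRUCTOR_ITERATIONS
   rounds in all, while the destructor leaves a non-NULL value in the
   key, which is what the "else" branch above does.  Real cleanup is thus
   postponed until the last permitted iteration.  The names below are
   hypothetical. */
#if 0
static pthread_key_t demo_key;

static void
demo_destructor(void *value)
{
  long countdown = (long)value;

  if (--countdown > 0) {
    pthread_setspecific(demo_key, (void *)countdown);  /* run me again */
  } else {
    /* final iteration: release resources here */
  }
}
#endif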

void *
current_native_thread_id()
{
  return ((void *)
#ifdef LINUX
          getpid()
#endif
#ifdef DARWIN
          mach_thread_self()
#endif
          );
}

void
thread_init_tcr(TCR *tcr, void *stack_base, unsigned stack_size)
{
  area *a, *register_cstack_holding_area_lock(BytePtr, unsigned);

  tcr->osid = current_thread_osid();
  tcr->native_thread_id = current_native_thread_id();
  LOCK(lisp_global(AREA_LOCK),tcr);
  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
  UNLOCK(lisp_global(AREA_LOCK),tcr);
  tcr->cs_area = a;
  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
  }
#ifdef LINUX
  tcr->native_thread_info = current_r2;
#endif
  tcr->errno_loc = &errno;
  tsd_set(lisp_global(TCR_KEY), tcr);
#ifdef DARWIN
  darwin_exception_init(tcr);
#endif
#ifdef LINUX
  linux_exception_init(tcr);
#endif
}

/*
  Register the specified tcr as "belonging to" the current thread.
  Under Darwin, set up Mach exception handling for the thread.
  Install cleanup handlers for thread termination.
*/
void
register_thread_tcr(TCR *tcr)
{
  void *stack_base;
  unsigned stack_size;

  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
  thread_init_tcr(tcr, stack_base, stack_size);
  enqueue_tcr(tcr);
}




#ifndef MAP_GROWSDOWN
#define MAP_GROWSDOWN 0
#endif

Ptr
create_stack(int size)
{
  Ptr p;
  size=align_to_power_of_2(size, 12);
  p = (Ptr) mmap(NULL,
                     (size_t)size,
                     PROT_READ | PROT_WRITE | PROT_EXEC,
                     MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
                     -1,        /* Darwin insists on this when not mmap()ing
                                   a real fd */
                     0);
  if (p != (Ptr)(-1)) {
    *((size_t *)p) = size;
    return p;
  }
  allocation_failure(true, size);  /* assumed not to return */

}

void *
allocate_stack(unsigned size)
{
  return create_stack(size);
}

void
free_stack(void *s)
{
  size_t size = *((size_t *)s);
  munmap(s, size);
}
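
/* Illustrative sketch, not part of the original file: create_stack()
   stamps the mapping's size into its first word, which is how
   free_stack() knows how much to munmap() without any separate
   bookkeeping (at the cost of the first sizeof(size_t) bytes of the
   mapping).  The function name is hypothetical. */
#if 0
static void
stack_roundtrip_example(void)
{
  void *s = allocate_stack(64 * 1024);  /* rounded up to a page multiple */

  /* ... use the stack ... */
  free_stack(s);                        /* reads the stored size back */
}
#endif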

Boolean threads_initialized = false;

void
init_threads(void * stack_base, TCR *tcr)
{
  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
  thread_signal_setup();
  threads_initialized = true;
}


void *
lisp_thread_entry(void *param)
{
  thread_activation *activation = (thread_activation *)param;
  TCR *tcr = activation->tcr;
  sigset_t mask, old_mask;

  sigemptyset(&mask);
  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);

  register_thread_tcr(tcr);
  tcr->vs_area->active -= 4;
  *(--tcr->save_vsp) = lisp_nil;
  enable_fp_exceptions();
  tcr->flags |= (1<<TCR_FLAG_BIT_AWAITING_PRESET);
  SEM_RAISE(activation->created);
  do {
    SEM_RAISE(tcr->reset_completion);
    SEM_WAIT(tcr->activate);
    /* Now go run some lisp code */
    start_lisp(tcr,0);
  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
  return NULL;
}

TCR *
xNewThread(unsigned control_stack_size,
           unsigned value_stack_size,
           unsigned temp_stack_size)
{
  thread_activation activation;
  TCR *current = get_tcr(false);

  LOCK(lisp_global(AREA_LOCK),current);
  activation.tcr = new_tcr(value_stack_size, temp_stack_size);
  UNLOCK(lisp_global(AREA_LOCK),current);
  activation.created = new_semaphore(0);
  create_system_thread(control_stack_size + (CSTACK_HARDPROT+CSTACK_SOFTPROT),
                       NULL,
                       lisp_thread_entry,
                       (void *) &activation);

  SEM_WAIT(activation.created); /* Wait until thread's entered its initial function */
  destroy_semaphore(&activation.created);
  return activation.tcr;
}

Boolean
active_tcr_p(TCR *q)
{
  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;

  do {
    if (p == q) {
      return true;
    }
    p = p->next;
  } while (p != head);
  return false;
}


OSErr
xDisposeThread(TCR *tcr)
{
  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
      pthread_cancel((pthread_t)ptr_from_lispobj(tcr->osid));
      return 0;
    }
  }
  return -50;
}

OSErr
xYieldToThread(TCR *target)
{
  Bug(NULL, "xYieldToThread ?");
  return 0;
}

OSErr
xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
{
  Bug(NULL, "xThreadCurrentStackSpace ?");
  return 0;
}


LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
  pthread_attr_t attr;
  pthread_t returned_thread = (pthread_t) 0;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (stackaddr != NULL) {
    /* Size must have been specified.  Sort of makes sense ... */
#ifdef DARWIN
    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
#else
    pthread_attr_setstack(&attr, stackaddr, stack_size);
#endif
  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
    pthread_attr_setstacksize(&attr,stack_size);
  }

  /*
     I think that's just about enough ... create the thread.
  */
  pthread_create(&returned_thread, &attr, start_routine, param);
  return (LispObj) ptr_to_lispobj(returned_thread);
}
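
/* Illustrative sketch, not part of the original file: spawning a
   detached thread with an explicit stack size via the wrapper above.
   The entry point and its argument are hypothetical. */
#if 0
static void *
helper_entry(void *arg)
{
  /* ... thread body ... */
  return NULL;
}

static void
spawn_helper_example(void)
{
  create_system_thread(256 * 1024,  /* stack size in bytes */
                       NULL,        /* let pthreads place the stack */
                       helper_entry,
                       NULL);
}
#endif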

TCR *
get_tcr(Boolean create)
{
  TCR *current = (TCR *)tsd_get(lisp_global(TCR_KEY));

  if ((current == NULL) && create) {
    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
    int i, nbindwords = 0;
    extern unsigned initial_stack_size;

    /* Make one. */
    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
    current->flags |= (1<<TCR_FLAG_BIT_FOREIGN);
    register_thread_tcr(current);
#ifdef DEBUG_TCR_CREATION
    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
#endif
    current->vs_area->active -= 4;
    *(--current->save_vsp) = lisp_nil;
    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
    for (i = 0; i < nbindwords; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= 4;
    }
    current->shutdown_count = 1;
    ((void (*)())ptr_from_lispobj(callback_ptr))(0);

  }

  return current;
}


Boolean
suspend_tcr(TCR *tcr)
{
  int suspend_count = atomic_incf(&(tcr->suspend_count));
  if (suspend_count == 1) {
    if (pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), thread_suspend_signal) == 0) {
      SEM_WAIT(tcr->suspend);
    } else {
      /* A problem using pthread_kill.  On Darwin, this can happen
         if the thread has had its signal mask surgically removed
         by pthread_exit.  If the native (Mach) thread can be suspended,
         do that and return true; otherwise, flag the tcr as belonging
         to a dead thread by setting tcr->osid to 0.
      */
#ifdef DARWIN
      if (mach_suspend_tcr(tcr)) {
        tcr->flags |= (1<<TCR_FLAG_BIT_ALT_SUSPEND);
        return true;
      }
#endif
      tcr->osid = 0;
      return false;
    }
    return true;
  }
  return false;
}

Boolean
lisp_suspend_tcr(TCR *tcr)
{
  Boolean suspended;
  TCR *current = get_tcr(true);

  LOCK(lisp_global(TCR_LOCK),current);
  suspended = suspend_tcr(tcr);
  UNLOCK(lisp_global(TCR_LOCK),current);
  return suspended;
}


Boolean
resume_tcr(TCR *tcr)
{
  int suspend_count = atomic_decf(&(tcr->suspend_count));
  if (suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
      mach_resume_tcr(tcr);
      return true;
    }
#endif
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), thread_resume_signal);
    return true;
  }
  return false;
}

Boolean
lisp_resume_tcr(TCR *tcr)
{
  Boolean resumed;
  TCR *current = get_tcr(true);

  LOCK(lisp_global(TCR_LOCK),current);
  resumed = resume_tcr(tcr);
  UNLOCK(lisp_global(TCR_LOCK), current);
  return resumed;
}

#ifdef DARWIN
lock_set_t mach_exception_lock_set;
#endif

TCR *freed_tcrs = NULL;

void
enqueue_freed_tcr (TCR *tcr)
{
  tcr->next = freed_tcrs;
  freed_tcrs = tcr;
}

void
free_freed_tcrs ()
{
  TCR *current, *next;

  for (current = freed_tcrs; current; current = next) {
    next = current->next;
    free(current);
  }
  freed_tcrs = NULL;
}

void
suspend_other_threads()
{
  TCR *current = get_tcr(true), *other, *next;
  int dead_tcr_count = 0;

  LOCK(lisp_global(TCR_LOCK), current);
  LOCK(lisp_global(AREA_LOCK), current);
  for (other = current->next; other != current; other = other->next) {
    if ((other->osid != 0)) {
      suspend_tcr(other);
      if (other->osid == 0) {
        dead_tcr_count++;
      }
    } else {
      dead_tcr_count++;
    }
  }
  /* All other threads are suspended; can safely delete dead tcrs now */
  if (dead_tcr_count) {
    for (other = current->next; other != current; other = next) {
      next = other->next;
      if ((other->osid == 0))  {
        dequeue_tcr(other);
        enqueue_freed_tcr(other);
      }
    }
  }
}

void
resume_other_threads()
{
  TCR *current = get_tcr(true), *other;
  for (other = current->next; other != current; other = other->next) {
    resume_tcr(other);
  }
  free_freed_tcrs();
  UNLOCK(lisp_global(AREA_LOCK), current);
  UNLOCK(lisp_global(TCR_LOCK), current);
}
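
/* Illustrative sketch, not part of the original file: the stop-the-world
   pattern these two functions support.  Dead TCRs are unlinked only
   while every other thread is suspended, and their memory is released
   by free_freed_tcrs() in resume_other_threads() rather than at
   suspension time, since a suspended thread may own the malloc lock
   (the point of this revision's change).  The function name is
   hypothetical. */
#if 0
static void
world_stop_example(void)
{
  suspend_other_threads();
  /* ... exclusive access: run the GC, walk every thread's stacks ... */
  resume_other_threads();
}
#endif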

/*
  Try to take an rwquentry off the rwlock's freelist; failing that,
  malloc one.  The caller owns the lock on the rwlock itself, of course.
*/
rwquentry *
recover_rwquentry(rwlock *rw)
{
  rwquentry *freelist = &(rw->freelist),
    *p = freelist->next,
    *follow = p->next;

  if (p == freelist) {
    p = NULL;
  } else {
    follow->prev = freelist;
    freelist->next = follow;
    p->prev = p->next = NULL;
    p->tcr = NULL;
    p->count = 0;
  }
  return p;
}

rwquentry *
new_rwquentry(rwlock *rw)
{
  rwquentry *p = recover_rwquentry(rw);

  if (p == NULL) {
    p = calloc(1, sizeof(rwquentry));
  }
  return p;
}


void
free_rwquentry(rwquentry *p, rwlock *rw)
{
  rwquentry
    *prev = p->prev,
    *next = p->next,
    *freelist = &(rw->freelist),
    *follow = freelist->next;

  prev->next = next;
  next->prev = prev;
  p->prev = freelist;
  freelist->next = p;
  follow->prev = p;
  p->next = follow;
}

void
add_rwquentry(rwquentry *p, rwlock *rw)
{
  rwquentry
    *head = &(rw->head),
    *follow = head->next;

  head->next = p;
  follow->prev = p;
  p->next = follow;
  p->prev = head;
}

rwquentry *
find_enqueued_tcr(TCR *target, rwlock *rw)
{
  rwquentry
    *head = &(rw->head),
    *p = head->next;

  do {
    if (p->tcr == target) {
      return p;
    }
    p = p->next;
  } while (p != head);
  return NULL;
}

rwlock *
rwlock_new()
{
  rwlock *rw = calloc(1, sizeof(rwlock));

  if (rw) {
    pthread_mutex_t *lock = calloc(1, sizeof(pthread_mutex_t));
    if (lock == NULL) {
      free (rw);
      rw = NULL;
    } else {
      pthread_cond_t *reader_signal = calloc(1, sizeof(pthread_cond_t));
      pthread_cond_t *writer_signal = calloc(1, sizeof(pthread_cond_t));
      if ((reader_signal == NULL) || (writer_signal == NULL)) {
        if (reader_signal) {
          free(reader_signal);
        } else {
          free(writer_signal);
        }

        free(lock);
        free(rw);
        rw = NULL;
      } else {
        pthread_mutex_init(lock, NULL);
        pthread_cond_init(reader_signal, NULL);
        pthread_cond_init(writer_signal, NULL);
        rw->lock = lock;
        rw->reader_signal = reader_signal;
        rw->writer_signal = writer_signal;
        rw->head.prev = rw->head.next = &(rw->head);
        rw->freelist.prev = rw->freelist.next = &(rw->freelist);
      }
    }
  }
  return rw;
}

/*
  No thread should be waiting on the lock, and the caller should hold
  the mutex on the rwlock itself; it's unlocked (and freed) here.
*/
static void
rwlock_delete(rwlock *rw)
{
  pthread_mutex_t *lock = rw->lock;
  pthread_cond_t *cond;
  rwquentry *entry;

  rw->lock = NULL;
  cond = rw->reader_signal;
  rw->reader_signal = NULL;
  pthread_cond_destroy(cond);
  free(cond);
  cond = rw->writer_signal;
  rw->writer_signal = NULL;
  pthread_cond_destroy(cond);
  free(cond);
  while ((entry = recover_rwquentry(rw)) != NULL) {
    free(entry);
  }
  free(rw);
  pthread_mutex_unlock(lock);
  free(lock);
}

void
rwlock_rlock_cleanup(void *arg)
{
  pthread_mutex_unlock((pthread_mutex_t *)arg);
}

/*
  Try to get read access to a multiple-readers/single-writer lock.  If
  we already have read access, return success (indicating that the
  lock is held another time).  If we already have write access to the
  lock ... that won't work; return EDEADLK.  Wait until no other
  thread has or is waiting for write access, then indicate that we
  hold read access once.
*/
int
rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  pthread_mutex_t *lock = rw->lock;
  rwquentry *entry;
  int err = 0;


  pthread_mutex_lock(lock);

  if (RWLOCK_WRITER(rw) == tcr) {
    pthread_mutex_unlock(lock);
    return EDEADLK;
  }

  if (rw->state > 0) {
    /* already some readers, we may be one of them */
    entry = find_enqueued_tcr(tcr, rw);
    if (entry) {
      entry->count++;
      rw->state++;
      pthread_mutex_unlock(lock);
      return 0;
    }
  }
  entry = new_rwquentry(rw);
  entry->tcr = tcr;
  entry->count = 1;

  pthread_cleanup_push(rwlock_rlock_cleanup,lock);

  /* Wait for current and pending writers */
  while ((err == 0) && ((rw->state < 0) || (rw->write_wait_count > 0))) {
    if (waitfor) {
      if (pthread_cond_timedwait(rw->reader_signal, lock, waitfor)) {
        err = errno;
      }
    } else {
      pthread_cond_wait(rw->reader_signal, lock);
    }
  }

  if (err == 0) {
    add_rwquentry(entry, rw);
    rw->state++;
  }

  pthread_cleanup_pop(1);
  return err;
}


/*
   This is here to support cancelation.  Cancelation is evil.
*/

void
rwlock_wlock_cleanup(void *arg)
{
  rwlock *rw = (rwlock *)arg;

  /* If this thread was the only queued writer and the lock
     is now available for reading, tell any threads that're
     waiting for read access.
     This thread owns the lock on the rwlock itself.
  */
  if ((--(rw->write_wait_count) == 0) &&
      (rw->state >= 0)) {
    pthread_cond_broadcast(rw->reader_signal);
  }

  pthread_mutex_unlock(rw->lock);
}

/*
  Try to obtain write access to the lock.
  If we already have read access, fail with EDEADLK.
  If we already have write access, increment the count that indicates
  that.
  Otherwise, wait until the lock is not held for reading or writing,
  then assert write access.
*/

int
rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  pthread_mutex_t *lock = rw->lock;
  rwquentry *entry;
  int err = 0;


  pthread_mutex_lock(lock);
  if (RWLOCK_WRITER(rw) == tcr) {
    --RWLOCK_WRITE_COUNT(rw);
    --rw->state;
    pthread_mutex_unlock(lock);
    return 0;
  }

  if (rw->state > 0) {
    /* already some readers, we may be one of them */
    entry = find_enqueued_tcr(tcr, rw);
    if (entry) {
      pthread_mutex_unlock(lock);
      return EDEADLK;
    }
  }
  rw->write_wait_count++;
  pthread_cleanup_push(rwlock_wlock_cleanup,rw);

  while ((err == 0) && (rw->state) != 0) {
    if (waitfor) {
      if (pthread_cond_timedwait(rw->writer_signal, lock, waitfor)) {
        err = errno;
      }
    } else {
      pthread_cond_wait(rw->writer_signal, lock);
    }
  }
  if (err == 0) {
    RWLOCK_WRITER(rw) = tcr;
    RWLOCK_WRITE_COUNT(rw) = -1;
    rw->state = -1;
  }
  pthread_cleanup_pop(1);
  return err;
}

/*
  Sort of the same as above, only return EBUSY if we'd have to wait.
  In particular, distinguish between the cases of "some other readers"
  (EBUSY), "another writer/queued writer(s)" (EWOULDBLOCK), and "we
  hold a read lock" (EDEADLK.)
*/
int
rwlock_try_wlock(rwlock *rw, TCR *tcr)
{
  pthread_mutex_t *lock = rw->lock;
  rwquentry *entry;
  int ret = EBUSY;

  pthread_mutex_lock(lock);
  if ((RWLOCK_WRITER(rw) == tcr) ||
      ((rw->state == 0) && (rw->write_wait_count == 0))) {
    RWLOCK_WRITER(rw) = tcr;
    --RWLOCK_WRITE_COUNT(rw);
    --rw->state;
    pthread_mutex_unlock(lock);
    return 0;
  }

  if (rw->state > 0) {
    /* already some readers, we may be one of them */
    entry = find_enqueued_tcr(tcr, rw);
    if (entry) {
      ret = EDEADLK;
    }
  } else {
    /* another writer or queued writers */
    ret = EWOULDBLOCK;
  }
  pthread_mutex_unlock(rw->lock);
  return ret;
}

/*
  "Upgrade" a lock held once or more for reading to one held the same
  number of times for writing.
  Upgraders have higher priority than writers do.
*/

int
rwlock_read_to_write(rwlock *rw, TCR *tcr)
{
  /* Not yet implemented. */
  return ENOTSUP;
}


int
rwlock_unlock(rwlock *rw, TCR *tcr)
{
  rwquentry *entry;

  pthread_mutex_lock(rw->lock);
  if (rw->state < 0) {
    /* Locked for writing.  By us ? */
    if (RWLOCK_WRITER(rw) != tcr) {
      pthread_mutex_unlock(rw->lock);
      /* Can't unlock: locked for writing by another thread. */
      return EPERM;
    }
    if (++RWLOCK_WRITE_COUNT(rw) == 0) {
      rw->state = 0;
      RWLOCK_WRITER(rw) = NULL;
      if (rw->write_wait_count) {
        pthread_cond_signal(rw->writer_signal);
      } else {
        pthread_cond_broadcast(rw->reader_signal);
      }
    }
    pthread_mutex_unlock(rw->lock);
    return 0;
  }
  entry = find_enqueued_tcr(tcr, rw);
  if (entry == NULL) {
    /* Not locked for reading by us, so why are we unlocking it ? */
    pthread_mutex_unlock(rw->lock);
    return EPERM;
  }
  if (--entry->count == 0) {
    free_rwquentry(entry, rw);
  }
  if (--rw->state == 0) {
    pthread_cond_signal(rw->writer_signal);
  }
  pthread_mutex_unlock(rw->lock);
  return 0;
}


int
rwlock_destroy(rwlock *rw)
{
  return 0;                     /* for now. */
}
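
/* Illustrative sketch, not part of the original file: how the rwlock
   calls above fit together.  Read and write access are both recursive
   per TCR, a non-NULL waitfor would make the waits timed, and
   rwlock_unlock() releases one level of whichever access the caller
   holds.  The function name is hypothetical. */
#if 0
static void
rwlock_example(rwlock *rw, TCR *tcr)
{
  if (rwlock_rlock(rw, tcr, NULL) == 0) {
    /* ... read shared state; rwlock_wlock() here would return EDEADLK ... */
    rwlock_unlock(rw, tcr);
  }

  if (rwlock_try_wlock(rw, tcr) == 0) {
    /* ... modify shared state ... */
    rwlock_unlock(rw, tcr);
  }
}
#endif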

/*
  A binding subprim has just done "twlle limit_regno,idx_regno" and
  the trap's been taken.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and the registers in the xp and return true if this
  all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned limit_regno,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  xpGPR(xp, limit_regno) = new_limit;
  return true;
}
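
/* Illustrative note, not part of the original file: assuming
   align_to_power_of_2(n, 12) rounds n up to a multiple of 2^12 (as the
   page-boundary comment above suggests), a faulting index of 8200
   against an old limit of 8192 gives

     new_limit = align_to_power_of_2(8201, 12) = 12288
     new_bytes = 12288 - 8192 = 4096

   i.e. exactly one new 4K page, which is filled with
   no_thread_local_binding_marker before the limit register is bumped. */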