/*
   Copyright (C) 1994-2001 Digitool, Inc
   This file is part of OpenMCL.

   OpenMCL is licensed under the terms of the Lisp Lesser GNU Public
   License, known as the LLGPL and distributed with OpenMCL as the
   file "LICENSE".  The LLGPL consists of a preamble and the LGPL,
   which is distributed with OpenMCL as the file "LGPL".  Where these
   conflict, the preamble takes precedence.

   OpenMCL is referenced in the preamble as the "LIBRARY."

   The LLGPL is also available online at
   http://opensource.franz.com/preamble.html
*/


#include "Threads.h"

typedef struct {
  TCR *tcr;
  void *created;
} thread_activation;


extern natural
store_conditional(natural*, natural, natural);

extern signed_natural
atomic_swap(signed_natural*, signed_natural);

/* Atomically add BY to *PTR via the store_conditional primitive;
   returns the new value. */
signed_natural
atomic_incf_by(signed_natural *ptr, signed_natural by)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old+by;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return new;
}

signed_natural
atomic_incf(signed_natural *ptr)
{
  return atomic_incf_by(ptr, 1);
}

/* Atomically decrement *PTR, but never below 0; returns the
   decremented value, which is -1 if *PTR was already 0.  Callers
   such as unlock_recursive_lock() depend on that convention. */
signed_natural
atomic_decf(signed_natural *ptr)
{
  signed_natural old, new;
  do {
    old = *ptr;
    new = old == 0 ? old : old-1;
  } while (store_conditional((natural *)ptr, (natural) old, (natural) new) !=
           (natural) old);
  return old-1;
}
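
/*
  A minimal usage sketch (not part of the original source): the
  saturating behavior of atomic_decf() means a counter can be used
  as a "pending work" count without ever underflowing.

    signed_natural pending = 0;
    atomic_incf(&pending);        (pending == 1)
    atomic_decf(&pending);        (returns 0, pending == 0)
    atomic_decf(&pending);        (returns -1, pending stays 0)
*/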


int
lock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr, struct timespec *waitfor)
{
  if (tcr == NULL) {
    tcr = get_tcr(true);
  }
  if (m->owner == tcr) {
    m->count++;
    return 0;
  }
  while (1) {
    if (atomic_incf(&m->avail) == 1) {
      m->owner = tcr;
      m->count = 1;
      break;
    }
    SEM_WAIT(m->signal);
  }
  return 0;
}

int
unlock_recursive_lock(RECURSIVE_LOCK m, TCR *tcr)
{
  int ret = EPERM, pending;

  if (tcr == NULL) {
    tcr = get_tcr(true);
  }

  if (m->owner == tcr) {
    --m->count;
    if (m->count == 0) {
      m->owner = NULL;
      pending = atomic_swap(&m->avail, 0) - 1;
      atomic_incf_by(&m->waiting, pending);
      /* We're counting on atomic_decf not actually decrementing
         the location below 0, but returning a negative result
         in that case.
      */
      if (atomic_decf(&m->waiting) >= 0) {
        SEM_RAISE(m->signal);
      }
      ret = 0;
    }
  }
  return ret;
}

void
destroy_recursive_lock(RECURSIVE_LOCK m)
{
  destroy_semaphore((void **)&m->signal);
  postGCfree((void *)(m->malloced_ptr));
}

/*
  If we're already the owner (or if the lock is free), lock it
  and increment the lock count; otherwise, return EBUSY without
  waiting.
*/

int
recursive_lock_trylock(RECURSIVE_LOCK m, TCR *tcr, int *was_free)
{
  TCR *owner = m->owner;

  if (owner == tcr) {
    m->count++;
    if (was_free) {
      *was_free = 0;
    }
    return 0;
  }
  if (store_conditional((natural*)&(m->avail), 0, 1) == 0) {
    m->owner = tcr;
    m->count = 1;
    if (was_free) {
      *was_free = 1;
    }
    return 0;
  }

  return EBUSY;
}

int
wait_on_semaphore(SEMAPHORE s, int seconds, int nanos)
{
#ifdef LINUX
  struct timespec q;
  /* gettimeofday() stores microseconds where tv_nsec lives;
     scale to nanoseconds before adding the requested timeout. */
  gettimeofday((struct timeval *)&q, NULL);
  q.tv_nsec *= 1000;

  q.tv_nsec += nanos;
  if (q.tv_nsec >= 1000000000) {
    q.tv_nsec -= 1000000000;
    seconds += 1;
  }
  q.tv_sec += seconds;
  return SEM_TIMEDWAIT(s, &q);
#endif
#ifdef DARWIN
  mach_timespec_t q = {seconds, nanos};
  do {
    clock_t start = clock();
    int status = SEM_TIMEDWAIT(s, q);
    clock_t finish = clock();

    if (status == KERN_ABORTED) {
      /* The wait was interrupted; subtract the elapsed time from
         the timeout and retry.  Possible limit on number of retries? */
      clock_t elapsed = (finish - start);
      int elapsed_seconds = elapsed/CLOCKS_PER_SEC;
      int elapsed_nanos = (elapsed - (elapsed_seconds * CLOCKS_PER_SEC)) * 1000000000/CLOCKS_PER_SEC;

      seconds = seconds - elapsed_seconds - (elapsed_nanos/1000000000);
      if (nanos > 0) {
        nanos = 1000000000 - elapsed_nanos;
      }

      if ((seconds <= 0) && (nanos <= 0)) {
        return KERN_OPERATION_TIMED_OUT;
      }

      q.tv_sec = seconds;
      q.tv_nsec = nanos;
    } else {
      return status;
    }
  } while (1);
#endif
}


void
signal_semaphore(SEMAPHORE s)
{
  SEM_RAISE(s);
}
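
/*
  A minimal usage sketch (not in the original source) pairing the
  semaphore primitives above; 0 is assumed to indicate success on
  both platforms.  A waiter with a two-second timeout:

    SEMAPHORE s = new_semaphore(0);
    ...
    if (wait_on_semaphore(s, 2, 0) == 0) {
      ... someone called signal_semaphore(s) ...
    }
*/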


LispObj
current_thread_osid()
{
  return (LispObj)ptr_to_lispobj(pthread_self());
}

#ifdef SIGRTMIN
#define SIG_SUSPEND_THREAD (SIGRTMIN+6)
#define SIG_RESUME_THREAD (SIG_SUSPEND_THREAD+1)
#else
#define SIG_SUSPEND_THREAD SIGUSR1
#define SIG_RESUME_THREAD SIGUSR2
#endif

int thread_suspend_signal, thread_resume_signal;

void
linux_exception_init(TCR *tcr)
{
}


TCR *
get_interrupt_tcr()
{
#ifndef LINUX
  return get_tcr(true);
#else
  void* callers_r2 = current_r2;

  if (callers_r2 == NULL) {     /* pre-glibc-2.3.2 Linux */
    return get_tcr(true);
  } else {
    TCR *head = (TCR *)lisp_global(INITIAL_TCR), *current = head;

    /* We can fairly safely assume that r2 contains either the current
       tcr or the current (linux) pthread structure, but we don't know
       which.  We can't lock anything or call any pthreads function until
       we're sure that r2 contains the current pthread pointer.

       We can identify r2 as a TCR if we find it in the global tcr list.
       Entries are only ever removed from the list when all threads other
       than the GC thread are suspended; additions keep the forward
       link (through tcr->next) consistent, so this traversal is safe.
    */
    do {
      if (current == callers_r2) {
        /* r2 contained the tcr.  Set r2 to the native_thread */
        current_r2 = current->native_thread_info;
        return current;
      }
      current = current->next;
    } while (current != head);
    /* r2 is non-null and not any tcr.  Assume that r2 is a pthread
       struct pointer and that it's therefore safe to call get_tcr().
    */
    return get_tcr(true);
  }
#endif
}


void
suspend_resume_handler(int signo, siginfo_t *info, ExceptionInformation *context)
{
  TCR *tcr = get_interrupt_tcr(true);

  if (signo == thread_suspend_signal) {
    sigset_t wait_for;

    tcr->suspend_context = context;
    tcr->suspend_total++;
    sigfillset(&wait_for);
    SEM_RAISE(tcr->suspend);
    sigdelset(&wait_for, thread_resume_signal);
    do {
      sigsuspend(&wait_for);
    } while (tcr->suspend_context);
  } else {
    tcr->suspend_context = NULL;
  }
#ifdef DARWIN
  DarwinSigReturn(context);
#endif
}

void
thread_signal_setup()
{
  struct sigaction action;
  sigset_t mask, old_mask;

  sigemptyset(&mask);
  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);

  thread_suspend_signal = SIG_SUSPEND_THREAD;
  thread_resume_signal = SIG_RESUME_THREAD;
  sigfillset(&action.sa_mask);
  sigdelset(&action.sa_mask,thread_suspend_signal);
  action.sa_flags = SA_RESTART | SA_SIGINFO;
  action.sa_sigaction = (void *) suspend_resume_handler;
  sigaction(thread_suspend_signal, &action, NULL);
  sigaction(thread_resume_signal, &action, NULL);
}


/*
  'base' should be set to the bottom (origin) of the stack, i.e., the
  end from which it grows.
*/

void
os_get_stack_bounds(LispObj q,void **base, unsigned *size)
{
  pthread_t p = (pthread_t)ptr_from_lispobj(q);
#ifdef DARWIN
  *base = pthread_get_stackaddr_np(p);
  *size = pthread_get_stacksize_np(p);
#endif
#ifdef LINUX
  pthread_attr_t attr;

  pthread_getattr_np(p,&attr);
  /* pthread_attr_getstack() returns the lowest address of the stack;
     the stack grows down, so its origin is that address plus the size. */
  pthread_attr_getstack(&attr, base, size);
  *(unsigned *)base += *size;
#endif
}

void *
new_semaphore(int count)
{
#ifdef LINUX
  sem_t *s = malloc(sizeof(sem_t));
  sem_init(s, 0, count);
  return s;
#endif
#ifdef DARWIN
  semaphore_t s = (semaphore_t)0;
  semaphore_create(mach_task_self(),&s, SYNC_POLICY_FIFO, count);
  return (void *)s;
#endif
}

RECURSIVE_LOCK
new_recursive_lock()
{
  extern int cache_block_size;
  void *p = calloc(1,sizeof(_recursive_lock)+cache_block_size-1);
  RECURSIVE_LOCK m = NULL;
  void *signal = new_semaphore(0);

  if (p) {
    /* Align the lock on a cache-block boundary. */
    m = (RECURSIVE_LOCK) ((((unsigned)p)+cache_block_size-1) & (~(cache_block_size-1)));
    m->malloced_ptr = p;
  }

  if (m && signal) {
    m->signal = signal;
    return m;
  }
  if (m) {
    free(p);
  }
  if (signal) {
    destroy_semaphore(&signal);
  }
  return NULL;
}
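
/*
  A minimal usage sketch (not in the original source) of the recursive
  lock API defined in this file.  Passing NULL for the TCR makes the
  lock functions look up the current thread's TCR themselves:

    RECURSIVE_LOCK m = new_recursive_lock();
    lock_recursive_lock(m, NULL, NULL);
    lock_recursive_lock(m, NULL, NULL);   (recursive acquire: count == 2)
    unlock_recursive_lock(m, NULL);
    unlock_recursive_lock(m, NULL);       (count reaches 0; lock released)
    destroy_recursive_lock(m);
*/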

void
destroy_semaphore(void **s)
{
  if (*s) {
#ifdef LINUX
    sem_destroy((sem_t *)*s);
#endif
#ifdef DARWIN
    semaphore_destroy(mach_task_self(),((semaphore_t) *s));
#endif
    *s=NULL;
  }
}

void
tsd_set(LispObj key, void *datum)
{
  pthread_setspecific((pthread_key_t)key, datum);
}

void *
tsd_get(LispObj key)
{
  return pthread_getspecific((pthread_key_t)key);
}

void
dequeue_tcr(TCR *tcr)
{
  TCR *next, *prev;

  next = tcr->next;
  prev = tcr->prev;

  prev->next = next;
  next->prev = prev;
  tcr->prev = tcr->next = NULL;
}

void
enqueue_tcr(TCR *new)
{
  TCR *head, *tail;

  /* The tcr list is a doubly-linked ring headed by INITIAL_TCR;
     insert at the tail, under TCR_LOCK. */
  LOCK(lisp_global(TCR_LOCK),new);
  head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR));
  tail = head->prev;
  tail->next = new;
  head->prev = new;
  new->prev = tail;
  new->next = head;
  UNLOCK(lisp_global(TCR_LOCK),new);
}


/*
  Caller must hold the area_lock.
*/
TCR *
new_tcr(unsigned vstack_size, unsigned tstack_size)
{
  extern area
    *allocate_vstack_holding_area_lock(unsigned),
    *allocate_tstack_holding_area_lock(unsigned);
  area *a;
  TCR *tcr = calloc(1, sizeof(TCR));
  int i;

  lisp_global(TCR_COUNT) += (1<<fixnumshift);
  tcr->suspend = new_semaphore(0);
  tcr->resume = new_semaphore(0);
  tcr->reset_completion = new_semaphore(0);
  tcr->activate = new_semaphore(0);
  a = allocate_vstack_holding_area_lock(vstack_size);
  tcr->vs_area = a;
  tcr->save_vsp = (LispObj *) a->active;
  a = allocate_tstack_holding_area_lock(tstack_size);
  tcr->ts_area = a;
  tcr->save_tsp = (LispObj *) a->active;
  tcr->valence = TCR_STATE_FOREIGN;
  tcr->interrupt_level = (-1<<fixnum_shift);
  tcr->lisp_fpscr.words.l = 0xd0;
  tcr->save_allocbase = tcr->save_allocptr = (void *) VOID_ALLOCPTR;
  tcr->tlb_limit = 8192;
  tcr->tlb_pointer = (LispObj *)malloc(tcr->tlb_limit);
  for (i = 0; i < (8192/sizeof(LispObj)); i++) {
    tcr->tlb_pointer[i] = (LispObj) no_thread_local_binding_marker;
  }
  tcr->shutdown_count = PTHREAD_DESTRUCTOR_ITERATIONS;
  return tcr;
}

void
shutdown_thread_tcr(void *arg)
{
  TCR *tcr = (TCR *)arg;
  area *vs, *ts, *cs;

  if (--(tcr->shutdown_count) == 0) {
    if (tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN)) {
      LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
        callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;

      tsd_set(lisp_global(TCR_KEY), tcr);
      ((void (*)())ptr_from_lispobj(callback_ptr))(1);
      tsd_set(lisp_global(TCR_KEY), NULL);
    }
#ifdef DARWIN
    darwin_exception_cleanup(tcr);
#endif
    LOCK(lisp_global(AREA_LOCK),tcr);
    vs = tcr->vs_area;
    tcr->vs_area = NULL;
    ts = tcr->ts_area;
    tcr->ts_area = NULL;
    cs = tcr->cs_area;
    tcr->cs_area = NULL;
    if (vs) {
      condemn_area_holding_area_lock(vs);
    }
    if (ts) {
      condemn_area_holding_area_lock(ts);
    }
    if (cs) {
      condemn_area_holding_area_lock(cs);
    }
    destroy_semaphore(&tcr->suspend);
    destroy_semaphore(&tcr->resume);
    destroy_semaphore(&tcr->reset_completion);
    destroy_semaphore(&tcr->activate);
    free(tcr->tlb_pointer);
    tcr->tlb_pointer = NULL;
    tcr->tlb_limit = 0;
    tcr->osid = 0;
    UNLOCK(lisp_global(AREA_LOCK),tcr);
  } else {
    tsd_set(lisp_global(TCR_KEY), tcr);
  }
}

void *
current_native_thread_id()
{
  return ((void *)
#ifdef LINUX
          getpid()
#endif
#ifdef DARWIN
          mach_thread_self()
#endif
          );
}

void
thread_init_tcr(TCR *tcr, void *stack_base, unsigned stack_size)
{
  area *a, *register_cstack_holding_area_lock(BytePtr, unsigned);

  tcr->osid = current_thread_osid();
  tcr->native_thread_id = current_native_thread_id();
  LOCK(lisp_global(AREA_LOCK),tcr);
  a = register_cstack_holding_area_lock((BytePtr)stack_base, stack_size);
  UNLOCK(lisp_global(AREA_LOCK),tcr);
  tcr->cs_area = a;
  if (!(tcr->flags & (1<<TCR_FLAG_BIT_FOREIGN))) {
    tcr->cs_limit = (LispObj)ptr_to_lispobj(a->softlimit);
  }
#ifdef LINUX
  tcr->native_thread_info = current_r2;
#endif
  tcr->errno_loc = &errno;
  tsd_set(lisp_global(TCR_KEY), tcr);
#ifdef DARWIN
  darwin_exception_init(tcr);
#endif
#ifdef LINUX
  linux_exception_init(tcr);
#endif
}

/*
  Register the specified tcr as "belonging to" the current thread.
  Under Darwin, set up Mach exception handling for the thread.
  Install cleanup handlers for thread termination.
*/
void
register_thread_tcr(TCR *tcr)
{
  void *stack_base;
  unsigned stack_size;

  os_get_stack_bounds(current_thread_osid(),&stack_base, &stack_size);
  thread_init_tcr(tcr, stack_base, stack_size);
  enqueue_tcr(tcr);
}


#ifndef MAP_GROWSDOWN
#define MAP_GROWSDOWN 0
#endif

Ptr
create_stack(int size)
{
  Ptr p;
  size=align_to_power_of_2(size, 12);
  p = (Ptr) mmap(NULL,
                 (size_t)size,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN,
                 -1,            /* Darwin insists on this when not mmap()ing
                                   a real fd */
                 0);
  if (p != (Ptr)(-1)) {
    /* Record the mapping's size at its base so free_stack() can
       recover it. */
    *((size_t *)p) = size;
    return p;
  }
  allocation_failure(true, size);
  return NULL;                  /* not reached if allocation_failure() doesn't return */
}

void *
allocate_stack(unsigned size)
{
  return create_stack(size);
}

void
free_stack(void *s)
{
  size_t size = *((size_t *)s);
  munmap(s, size);
}

Boolean threads_initialized = false;

void
init_threads(void * stack_base, TCR *tcr)
{
  lisp_global(INITIAL_TCR) = (LispObj)ptr_to_lispobj(tcr);
  pthread_key_create((pthread_key_t *)&(lisp_global(TCR_KEY)), shutdown_thread_tcr);
  thread_signal_setup();
  threads_initialized = true;
}


void *
lisp_thread_entry(void *param)
{
  thread_activation *activation = (thread_activation *)param;
  TCR *tcr = activation->tcr;
  sigset_t mask, old_mask;

  sigemptyset(&mask);
  pthread_sigmask(SIG_SETMASK, &mask, &old_mask);

  register_thread_tcr(tcr);
  tcr->vs_area->active -= 4;
  *(--tcr->save_vsp) = lisp_nil;
  enable_fp_exceptions();
  tcr->flags |= (1<<TCR_FLAG_BIT_AWAITING_PRESET);
  SEM_RAISE(activation->created);
  do {
    SEM_RAISE(tcr->reset_completion);
    SEM_WAIT(tcr->activate);
    /* Now go run some lisp code */
    start_lisp(tcr,0);
  } while (tcr->flags & (1<<TCR_FLAG_BIT_AWAITING_PRESET));
  return NULL;
}

TCR *
xNewThread(unsigned control_stack_size,
           unsigned value_stack_size,
           unsigned temp_stack_size)
{
  thread_activation activation;
  TCR *current = get_tcr(false);

  LOCK(lisp_global(AREA_LOCK),current);
  activation.tcr = new_tcr(value_stack_size, temp_stack_size);
  UNLOCK(lisp_global(AREA_LOCK),current);
  activation.created = new_semaphore(0);
  create_system_thread(control_stack_size +(CSTACK_HARDPROT+CSTACK_SOFTPROT),
                       NULL,
                       lisp_thread_entry,
                       (void *) &activation);

  SEM_WAIT(activation.created); /* Wait until thread's entered its initial function */
  destroy_semaphore(&activation.created);
  return activation.tcr;
}

Boolean
active_tcr_p(TCR *q)
{
  TCR *head = (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR)), *p = head;

  do {
    if (p == q) {
      return true;
    }
    p = p->next;
  } while (p != head);
  return false;
}


OSErr
xDisposeThread(TCR *tcr)
{
  if (tcr != (TCR *)ptr_from_lispobj(lisp_global(INITIAL_TCR))) {
    if (active_tcr_p(tcr) && (tcr != get_tcr(false))) {
      pthread_cancel((pthread_t)ptr_from_lispobj(tcr->osid));
      return 0;
    }
  }
  return -50;                   /* paramErr */
}

OSErr
xYieldToThread(TCR *target)
{
  Bug(NULL, "xYieldToThread ?");
  return 0;
}

OSErr
xThreadCurrentStackSpace(TCR *tcr, unsigned *resultP)
{
  Bug(NULL, "xThreadCurrentStackSpace ?");
  return 0;
}

LispObj
create_system_thread(size_t stack_size,
                     void* stackaddr,
                     void* (*start_routine)(void *),
                     void* param)
{
  pthread_attr_t attr;
  pthread_t returned_thread = (pthread_t) 0;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  if (stack_size == MINIMAL_THREAD_STACK_SIZE) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (stackaddr != NULL) {
    /* Size must have been specified.  Sort of makes sense ... */
#ifdef DARWIN
    Fatal("no pthread_attr_setstack. "," Which end of stack does address refer to?");
#else
    pthread_attr_setstack(&attr, stackaddr, stack_size);
#endif
  } else if (stack_size != DEFAULT_THREAD_STACK_SIZE) {
    pthread_attr_setstacksize(&attr,stack_size);
  }

  /*
    I think that's just about enough ... create the thread.
  */
  pthread_create(&returned_thread, &attr, start_routine, param);
  return (LispObj) ptr_to_lispobj(returned_thread);
}

TCR *
get_tcr(Boolean create)
{
  TCR *current = (TCR *)tsd_get(lisp_global(TCR_KEY));

  if ((current == NULL) && create) {
    LispObj callback_macptr = nrs_FOREIGN_THREAD_CONTROL.vcell,
      callback_ptr = ((macptr *)ptr_from_lispobj(untag(callback_macptr)))->address;
    int i, nbindwords = 0;
    extern unsigned initial_stack_size;

    /* Make one. */
    current = new_tcr(initial_stack_size, MIN_TSTACK_SIZE);
    current->flags |= (1<<TCR_FLAG_BIT_FOREIGN);
    register_thread_tcr(current);
#ifdef DEBUG_TCR_CREATION
    fprintf(stderr, "\ncreating TCR for pthread 0x%x", pthread_self());
#endif
    current->vs_area->active -= 4;
    *(--current->save_vsp) = lisp_nil;
    nbindwords = ((int (*)())ptr_from_lispobj(callback_ptr))(-1);
    for (i = 0; i < nbindwords; i++) {
      *(--current->save_vsp) = 0;
      current->vs_area->active -= 4;
    }
    current->shutdown_count = 1;
    ((void (*)())ptr_from_lispobj(callback_ptr))(0);
  }

  return current;
}


Boolean
suspend_tcr(TCR *tcr)
{
  int suspend_count = atomic_incf(&(tcr->suspend_count));
  if (suspend_count == 1) {
#ifdef DARWIN
    if (mach_suspend_tcr(tcr)) {
      tcr->flags |= (1<<TCR_FLAG_BIT_ALT_SUSPEND);
      return true;
    }
#endif
    if (pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), thread_suspend_signal) == 0) {
      SEM_WAIT(tcr->suspend);
    } else {
      /* A problem using pthread_kill.  On Darwin, this can happen
         if the thread has had its signal mask surgically removed
         by pthread_exit.  If the native (Mach) thread can be suspended,
         do that and return true; otherwise, flag the tcr as belonging
         to a dead thread by setting tcr->osid to 0.
      */
#ifdef DARWIN
      if (mach_suspend_tcr(tcr)) {
        tcr->flags |= (1<<TCR_FLAG_BIT_ALT_SUSPEND);
        return true;
      }
#endif
      tcr->osid = 0;
      return false;
    }
    return true;
  }
  return false;
}

Boolean
lisp_suspend_tcr(TCR *tcr)
{
  Boolean suspended;
  TCR *current = get_tcr(true);

  LOCK(lisp_global(TCR_LOCK),current);
  suspended = suspend_tcr(tcr);
  UNLOCK(lisp_global(TCR_LOCK),current);
  return suspended;
}


Boolean
resume_tcr(TCR *tcr)
{
  int suspend_count = atomic_decf(&(tcr->suspend_count));
  if (suspend_count == 0) {
#ifdef DARWIN
    if (tcr->flags & (1<<TCR_FLAG_BIT_ALT_SUSPEND)) {
      tcr->flags &= ~(1<<TCR_FLAG_BIT_ALT_SUSPEND);
      mach_resume_tcr(tcr);
      return true;
    }
#endif
    pthread_kill((pthread_t)ptr_from_lispobj(tcr->osid), thread_resume_signal);
    return true;
  }
  return false;
}

Boolean
lisp_resume_tcr(TCR *tcr)
{
  Boolean resumed;
  TCR *current = get_tcr(true);

  LOCK(lisp_global(TCR_LOCK),current);
  resumed = resume_tcr(tcr);
  UNLOCK(lisp_global(TCR_LOCK), current);
  return resumed;
}

#ifdef DARWIN
lock_set_t mach_exception_lock_set;
#endif

TCR *freed_tcrs = NULL;

void
enqueue_freed_tcr (TCR *tcr)
{
  tcr->next = freed_tcrs;
  freed_tcrs = tcr;
}

void
free_freed_tcrs ()
{
  TCR *current, *next;

  for (current = freed_tcrs; current; current = next) {
    next = current->next;
    free(current);
  }
  freed_tcrs = NULL;
}

void
suspend_other_threads()
{
  TCR *current = get_tcr(true), *other, *next;
  int dead_tcr_count = 0;

  LOCK(lisp_global(TCR_LOCK), current);
  LOCK(lisp_global(AREA_LOCK), current);
  for (other = current->next; other != current; other = other->next) {
    if (other->osid != 0) {
      suspend_tcr(other);
      if (other->osid == 0) {
        dead_tcr_count++;
      }
    } else {
      dead_tcr_count++;
    }
  }
  /* All other threads are suspended; can safely delete dead tcrs now */
  if (dead_tcr_count) {
    for (other = current->next; other != current; other = next) {
      next = other->next;
      if (other->osid == 0) {
        dequeue_tcr(other);
        enqueue_freed_tcr(other);
      }
    }
  }
}

void
resume_other_threads()
{
  TCR *current = get_tcr(true), *other;

  for (other = current->next; other != current; other = other->next) {
    resume_tcr(other);
  }
  free_freed_tcrs();
  UNLOCK(lisp_global(AREA_LOCK), current);
  UNLOCK(lisp_global(TCR_LOCK), current);
}

/*
  Try to take an rwquentry off the rwlock's freelist; failing that,
  malloc one.  The caller owns the lock on the rwlock itself, of course.
*/
rwquentry *
recover_rwquentry(rwlock *rw)
{
  rwquentry *freelist = &(rw->freelist),
    *p = freelist->next,
    *follow = p->next;

  if (p == freelist) {
    p = NULL;
  } else {
    follow->prev = freelist;
    freelist->next = follow;
    p->prev = p->next = NULL;
    p->tcr = NULL;
    p->count = 0;
  }
  return p;
}

rwquentry *
new_rwquentry(rwlock *rw)
{
  rwquentry *p = recover_rwquentry(rw);

  if (p == NULL) {
    p = calloc(1, sizeof(rwquentry));
  }
  return p;
}


void
free_rwquentry(rwquentry *p, rwlock *rw)
{
  rwquentry
    *prev = p->prev,
    *next = p->next,
    *freelist = &(rw->freelist),
    *follow = freelist->next;

  /* Unlink p from the queue, then push it onto the freelist. */
  prev->next = next;
  next->prev = prev;
  freelist->next = p;
  follow->prev = p;
  p->next = follow;
  p->prev = freelist;
}

void
add_rwquentry(rwquentry *p, rwlock *rw)
{
  rwquentry
    *head = &(rw->head),
    *follow = head->next;

  head->next = p;
  follow->prev = p;
  p->next = follow;
  p->prev = head;
}

rwquentry *
find_enqueued_tcr(TCR *target, rwlock *rw)
{
  rwquentry
    *head = &(rw->head),
    *p = head->next;

  do {
    if (p->tcr == target) {
      return p;
    }
    p = p->next;
  } while (p != head);
  return NULL;
}

rwlock *
rwlock_new()
{
  rwlock *rw = calloc(1, sizeof(rwlock));

  if (rw) {
    pthread_mutex_t *lock = calloc(1, sizeof(pthread_mutex_t));
    if (lock == NULL) {
      free(rw);
      rw = NULL;
    } else {
      pthread_cond_t *reader_signal = calloc(1, sizeof(pthread_cond_t));
      pthread_cond_t *writer_signal = calloc(1, sizeof(pthread_cond_t));
      if ((reader_signal == NULL) || (writer_signal == NULL)) {
        if (reader_signal) {
          free(reader_signal);
        } else {
          free(writer_signal);
        }
        free(lock);
        free(rw);
        rw = NULL;
      } else {
        pthread_mutex_init(lock, NULL);
        pthread_cond_init(reader_signal, NULL);
        pthread_cond_init(writer_signal, NULL);
        rw->lock = lock;
        rw->reader_signal = reader_signal;
        rw->writer_signal = writer_signal;
        rw->head.prev = rw->head.next = &(rw->head);
        rw->freelist.prev = rw->freelist.next = &(rw->freelist);
      }
    }
  }
  return rw;
}
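
/*
  A minimal usage sketch (not in the original source) of the rwlock
  API below.  Waits are untimed here (waitfor == NULL):

    rwlock *rw = rwlock_new();
    TCR *tcr = get_tcr(true);

    rwlock_rlock(rw, tcr, NULL);      (shared, read access)
    rwlock_unlock(rw, tcr);
    rwlock_wlock(rw, tcr, NULL);      (exclusive, write access)
    rwlock_unlock(rw, tcr);
*/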

/*
  No thread should be waiting on the lock, and the caller must hold
  it; it's unlocked (and freed) here.
*/
static void
rwlock_delete(rwlock *rw)
{
  pthread_mutex_t *lock = rw->lock;
  pthread_cond_t *cond;
  rwquentry *entry;

  rw->lock = NULL;
  cond = rw->reader_signal;
  rw->reader_signal = NULL;
  pthread_cond_destroy(cond);
  free(cond);
  cond = rw->writer_signal;
  rw->writer_signal = NULL;
  pthread_cond_destroy(cond);
  free(cond);
  while ((entry = recover_rwquentry(rw)) != NULL) {
    free(entry);
  }
  free(rw);
  pthread_mutex_unlock(lock);
  free(lock);
}

void
rwlock_rlock_cleanup(void *arg)
{
  pthread_mutex_unlock((pthread_mutex_t *)arg);
}

/*
  Try to get read access to a multiple-readers/single-writer lock.  If
  we already have read access, return success (indicating that the
  lock is held another time).  If we already have write access to the
  lock ... that won't work; return EDEADLK.  Otherwise, wait until no
  other thread has or is waiting for write access, then indicate that
  we hold read access once.
*/
int
rwlock_rlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  pthread_mutex_t *lock = rw->lock;
  rwquentry *entry;
  int err = 0;

  pthread_mutex_lock(lock);

  if (RWLOCK_WRITER(rw) == tcr) {
    pthread_mutex_unlock(lock);
    return EDEADLK;
  }

  if (rw->state > 0) {
    /* already some readers, we may be one of them */
    entry = find_enqueued_tcr(tcr, rw);
    if (entry) {
      entry->count++;
      rw->state++;
      pthread_mutex_unlock(lock);
      return 0;
    }
  }
  entry = new_rwquentry(rw);
  entry->tcr = tcr;
  entry->count = 1;

  pthread_cleanup_push(rwlock_rlock_cleanup,lock);

  /* Wait for current and pending writers.  (pthread_cond_timedwait
     returns its error code directly; it doesn't set errno.) */
  while ((err == 0) && ((rw->state < 0) || (rw->write_wait_count > 0))) {
    if (waitfor) {
      err = pthread_cond_timedwait(rw->reader_signal, lock, waitfor);
    } else {
      pthread_cond_wait(rw->reader_signal, lock);
    }
  }

  if (err == 0) {
    add_rwquentry(entry, rw);
    rw->state++;
  }

  pthread_cleanup_pop(1);
  return err;
}


/*
  This is here to support cancelation.  Cancelation is evil.
*/

void
rwlock_wlock_cleanup(void *arg)
{
  rwlock *rw = (rwlock *)arg;

  /* If this thread was the only queued writer and the lock
     is now available for reading, tell any threads that're
     waiting for read access.
     This thread owns the lock on the rwlock itself.
  */
  if ((--(rw->write_wait_count) == 0) &&
      (rw->state >= 0)) {
    pthread_cond_broadcast(rw->reader_signal);
  }

  pthread_mutex_unlock(rw->lock);
}

/*
  Try to obtain write access to the lock.
  If we already have read access, fail with EDEADLK.
  If we already have write access, increment the count that indicates
  that.
  Otherwise, wait until the lock is not held for reading or writing,
  then assert write access.
*/

int
rwlock_wlock(rwlock *rw, TCR *tcr, struct timespec *waitfor)
{
  pthread_mutex_t *lock = rw->lock;
  rwquentry *entry;
  int err = 0;

  pthread_mutex_lock(lock);
  if (RWLOCK_WRITER(rw) == tcr) {
    /* Recursive write lock: the write count and state grow more
       negative with each acquisition. */
    --RWLOCK_WRITE_COUNT(rw);
    --rw->state;
    pthread_mutex_unlock(lock);
    return 0;
  }

  if (rw->state > 0) {
    /* already some readers, we may be one of them */
    entry = find_enqueued_tcr(tcr, rw);
    if (entry) {
      pthread_mutex_unlock(lock);
      return EDEADLK;
    }
  }
  rw->write_wait_count++;
  pthread_cleanup_push(rwlock_wlock_cleanup,rw);

  /* pthread_cond_timedwait returns its error code directly. */
  while ((err == 0) && (rw->state != 0)) {
    if (waitfor) {
      err = pthread_cond_timedwait(rw->writer_signal, lock, waitfor);
    } else {
      pthread_cond_wait(rw->writer_signal, lock);
    }
  }
  if (err == 0) {
    RWLOCK_WRITER(rw) = tcr;
    RWLOCK_WRITE_COUNT(rw) = -1;
    rw->state = -1;
  }
  pthread_cleanup_pop(1);
  return err;
}

/*
  Sort of the same as above, only return EBUSY if we'd have to wait.
  In particular, distinguish between the cases of "some other readers"
  (EBUSY), "another writer/queued writer(s)" (EWOULDBLOCK), and "we
  hold a read lock" (EDEADLK).
*/
int
rwlock_try_wlock(rwlock *rw, TCR *tcr)
{
  pthread_mutex_t *lock = rw->lock;
  rwquentry *entry;
  int ret = EBUSY;

  pthread_mutex_lock(lock);
  if ((RWLOCK_WRITER(rw) == tcr) ||
      ((rw->state == 0) && (rw->write_wait_count == 0))) {
    RWLOCK_WRITER(rw) = tcr;
    --RWLOCK_WRITE_COUNT(rw);
    --rw->state;
    pthread_mutex_unlock(lock);
    return 0;
  }

  if (rw->state > 0) {
    /* already some readers, we may be one of them */
    entry = find_enqueued_tcr(tcr, rw);
    if (entry) {
      ret = EDEADLK;
    }
  } else {
    /* another writer or queued writers */
    ret = EWOULDBLOCK;
  }
  pthread_mutex_unlock(rw->lock);
  return ret;
}

/*
  "Upgrade" a lock held once or more for reading to one held the same
  number of times for writing.
  Upgraders have higher priority than writers do.
*/

int
rwlock_read_to_write(rwlock *rw, TCR *tcr)
{
  /* Not yet implemented.  Falling off the end of a non-void function
     is undefined behavior, so return an explicit error for now. */
  return ENOTSUP;
}

int
rwlock_unlock(rwlock *rw, TCR *tcr)
{
  rwquentry *entry;

  pthread_mutex_lock(rw->lock);
  if (rw->state < 0) {
    /* Locked for writing.  By us ? */
    if (RWLOCK_WRITER(rw) != tcr) {
      pthread_mutex_unlock(rw->lock);
      /* Can't unlock: locked for writing by another thread. */
      return EPERM;
    }
    if (++RWLOCK_WRITE_COUNT(rw) == 0) {
      rw->state = 0;
      RWLOCK_WRITER(rw) = NULL;
      if (rw->write_wait_count) {
        pthread_cond_signal(rw->writer_signal);
      } else {
        pthread_cond_broadcast(rw->reader_signal);
      }
    }
    pthread_mutex_unlock(rw->lock);
    return 0;
  }
  entry = find_enqueued_tcr(tcr, rw);
  if (entry == NULL) {
    /* Not locked for reading by us, so why are we unlocking it ? */
    pthread_mutex_unlock(rw->lock);
    return EPERM;
  }
  if (--entry->count == 0) {
    free_rwquentry(entry, rw);
  }
  if (--rw->state == 0) {
    pthread_cond_signal(rw->writer_signal);
  }
  pthread_mutex_unlock(rw->lock);
  return 0;
}


int
rwlock_destroy(rwlock *rw)
{
  return 0;                     /* for now. */
}

/*
  A binding subprim has just done "twlle limit_regno,idx_regno" and
  the trap's been taken.  Extend the tcr's tlb so that the index will
  be in bounds and the new limit will be on a page boundary, filling
  in the new page(s) with 'no_thread_local_binding_marker'.  Update
  the tcr fields and the registers in the xp and return true if this
  all works, false otherwise.

  Note that the tlb was allocated via malloc, so realloc can do some
  of the hard work.
*/
Boolean
extend_tcr_tlb(TCR *tcr,
               ExceptionInformation *xp,
               unsigned limit_regno,
               unsigned idx_regno)
{
  unsigned
    index = (unsigned) (xpGPR(xp,idx_regno)),
    old_limit = tcr->tlb_limit,
    new_limit = align_to_power_of_2(index+1,12),
    new_bytes = new_limit-old_limit;
  LispObj
    *old_tlb = tcr->tlb_pointer,
    *new_tlb = realloc(old_tlb, new_limit),
    *work;

  if (new_tlb == NULL) {
    return false;
  }

  work = (LispObj *) ((BytePtr)new_tlb+old_limit);

  while (new_bytes) {
    *work++ = no_thread_local_binding_marker;
    new_bytes -= sizeof(LispObj);
  }
  tcr->tlb_pointer = new_tlb;
  tcr->tlb_limit = new_limit;
  xpGPR(xp, limit_regno) = new_limit;
  return true;
}