Ruby 3.4.7p58 (2025-10-08 revision 7a5688e2a27668e48f8d6ff4af5b2208b98a2f5e)
thread_sync.c
1/* included by thread.c */
2#include "ccan/list/list.h"
3#include "builtin.h"
4
5static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
6static VALUE rb_eClosedQueueError;
7
8/* Mutex */
9typedef struct rb_mutex_struct {
10 rb_fiber_t *fiber;
11 VALUE thread; // even if the fiber is collected, we might need access to the thread in mutex_free
12 struct rb_mutex_struct *next_mutex;
13 struct ccan_list_head waitq; /* protected by GVL */
14} rb_mutex_t;
15
16/* sync_waiter is always on-stack */
17struct sync_waiter {
18 VALUE self;
19 rb_thread_t *th;
20 rb_fiber_t *fiber;
21 struct ccan_list_node node;
22};
23
24static inline rb_fiber_t*
25nonblocking_fiber(rb_fiber_t *fiber)
26{
27 if (rb_fiberptr_blocking(fiber)) {
28 return NULL;
29 }
30
31 return fiber;
32}
33
34struct queue_sleep_arg {
35 VALUE self;
36 VALUE timeout;
37 rb_hrtime_t end;
38};
39
40#define MUTEX_ALLOW_TRAP FL_USER1
41
42static void
43sync_wakeup(struct ccan_list_head *head, long max)
44{
45 RUBY_DEBUG_LOG("max:%ld", max);
46
47 struct sync_waiter *cur = 0, *next;
48
49 ccan_list_for_each_safe(head, cur, next, node) {
50 ccan_list_del_init(&cur->node);
51
52 if (cur->th->status != THREAD_KILLED) {
53 if (cur->th->scheduler != Qnil && cur->fiber) {
54 rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
55 }
56 else {
57 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(cur->th));
58 rb_threadptr_interrupt(cur->th);
59 cur->th->status = THREAD_RUNNABLE;
60 }
61
62 if (--max == 0) return;
63 }
64 }
65}
66
67static void
68wakeup_one(struct ccan_list_head *head)
69{
70 sync_wakeup(head, 1);
71}
72
73static void
74wakeup_all(struct ccan_list_head *head)
75{
76 sync_wakeup(head, LONG_MAX);
77}
78
79#if defined(HAVE_WORKING_FORK)
80static void rb_mutex_abandon_all(rb_mutex_t *mutexes);
81static void rb_mutex_abandon_keeping_mutexes(rb_thread_t *th);
82static void rb_mutex_abandon_locking_mutex(rb_thread_t *th);
83#endif
84static const char* rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber);
85
86/*
87 * Document-class: Thread::Mutex
88 *
89 * Thread::Mutex implements a simple semaphore that can be used to
90 * coordinate access to shared data from multiple concurrent threads.
91 *
92 * Example:
93 *
94 * semaphore = Thread::Mutex.new
95 *
96 * a = Thread.new {
97 * semaphore.synchronize {
98 * # access shared resource
99 * }
100 * }
101 *
102 * b = Thread.new {
103 * semaphore.synchronize {
104 * # access shared resource
105 * }
106 * }
107 *
108 */
109
110static size_t
111rb_mutex_num_waiting(rb_mutex_t *mutex)
112{
113 struct sync_waiter *w = 0;
114 size_t n = 0;
115
116 ccan_list_for_each(&mutex->waitq, w, node) {
117 n++;
118 }
119
120 return n;
121}
122
123rb_thread_t* rb_fiber_threadptr(const rb_fiber_t *fiber);
124
125static bool
126locked_p(rb_mutex_t *mutex)
127{
128 return mutex->fiber != 0;
129}
130
131static void
132mutex_mark(void *ptr)
133{
134 rb_mutex_t *mutex = ptr;
135 VALUE fiber;
136 if (locked_p(mutex)) {
137 fiber = rb_fiberptr_self(mutex->fiber); // rb_fiber_t* doesn't move along with fiber object
138 if (fiber) rb_gc_mark_movable(fiber);
139 rb_gc_mark_movable(mutex->thread);
140 }
141}
142
143static void
144mutex_compact(void *ptr)
145{
146 rb_mutex_t *mutex = ptr;
147 if (locked_p(mutex)) {
148 mutex->thread = rb_gc_location(mutex->thread);
149 }
150}
151
152static void
153mutex_free(void *ptr)
154{
155 rb_mutex_t *mutex = ptr;
156 if (locked_p(mutex)) {
157 const char *err = rb_mutex_unlock_th(mutex, rb_thread_ptr(mutex->thread), mutex->fiber);
158 if (err) rb_bug("%s", err);
159 }
160 ruby_xfree(ptr);
161}
162
163static size_t
164mutex_memsize(const void *ptr)
165{
166 return sizeof(rb_mutex_t);
167}
168
169static const rb_data_type_t mutex_data_type = {
170 "mutex",
171 {mutex_mark, mutex_free, mutex_memsize, mutex_compact,},
172 0, 0, RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
173};
174
175static rb_mutex_t *
176mutex_ptr(VALUE obj)
177{
178 rb_mutex_t *mutex;
179
180 TypedData_Get_Struct(obj, rb_mutex_t, &mutex_data_type, mutex);
181
182 return mutex;
183}
184
185VALUE
186rb_obj_is_mutex(VALUE obj)
187{
188 return RBOOL(rb_typeddata_is_kind_of(obj, &mutex_data_type));
189}
190
191static VALUE
192mutex_alloc(VALUE klass)
193{
194 VALUE obj;
195 rb_mutex_t *mutex;
196
197 obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
198
199 ccan_list_head_init(&mutex->waitq);
200 return obj;
201}
202
203/*
204 * call-seq:
205 * Thread::Mutex.new -> mutex
206 *
207 * Creates a new Mutex.
208 */
209static VALUE
210mutex_initialize(VALUE self)
211{
212 return self;
213}
214
215VALUE
216rb_mutex_new(void)
217{
218 return mutex_alloc(rb_cMutex);
219}
220
221/*
222 * call-seq:
223 * mutex.locked? -> true or false
224 *
225 * Returns +true+ if this lock is currently held by some thread.
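 *
 * For example, a minimal sketch (illustrative only):
 *
 *    m = Thread::Mutex.new
 *    m.locked? #=> false
 *    m.lock
 *    m.locked? #=> true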
226 */
227VALUE
228rb_mutex_locked_p(VALUE self)
229{
230 rb_mutex_t *mutex = mutex_ptr(self);
231
232 return RBOOL(locked_p(mutex));
233}
234
235static void
236thread_mutex_insert(rb_thread_t *thread, rb_mutex_t *mutex)
237{
238 RUBY_ASSERT(!mutex->next_mutex);
239 if (thread->keeping_mutexes) {
240 mutex->next_mutex = thread->keeping_mutexes;
241 }
242
243 thread->keeping_mutexes = mutex;
244}
245
246static void
247thread_mutex_remove(rb_thread_t *thread, rb_mutex_t *mutex)
248{
249 rb_mutex_t **keeping_mutexes = &thread->keeping_mutexes;
250
251 while (*keeping_mutexes && *keeping_mutexes != mutex) {
252 // Move to the next mutex in the list:
253 keeping_mutexes = &(*keeping_mutexes)->next_mutex;
254 }
255
256 if (*keeping_mutexes) {
257 *keeping_mutexes = mutex->next_mutex;
258 mutex->next_mutex = NULL;
259 }
260}
261
262static void
263mutex_set_owner(VALUE self, rb_thread_t *th, rb_fiber_t *fiber)
264{
265 rb_mutex_t *mutex = mutex_ptr(self);
266
267 mutex->thread = th->self;
268 mutex->fiber = fiber;
269 RB_OBJ_WRITTEN(self, Qundef, th->self);
270 if (fiber) {
271 RB_OBJ_WRITTEN(self, Qundef, rb_fiberptr_self(fiber));
272 }
273}
274
275static void
276mutex_locked(rb_thread_t *th, rb_fiber_t *fiber, VALUE self)
277{
278 rb_mutex_t *mutex = mutex_ptr(self);
279
280 mutex_set_owner(self, th, fiber);
281 thread_mutex_insert(th, mutex);
282}
283
284/*
285 * call-seq:
286 * mutex.try_lock -> true or false
287 *
288 * Attempts to obtain the lock and returns immediately. Returns +true+ if the
289 * lock was granted.
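 *
 * A minimal sketch of the usual pattern (illustrative only):
 *
 *    m = Thread::Mutex.new
 *    if m.try_lock
 *      begin
 *        # critical section
 *      ensure
 *        m.unlock
 *      end
 *    else
 *      # lock is busy; do something else
 *    end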
290 */
291VALUE
292rb_mutex_trylock(VALUE self)
293{
294 rb_mutex_t *mutex = mutex_ptr(self);
295
296 if (mutex->fiber == 0) {
297 RUBY_DEBUG_LOG("%p ok", mutex);
298
299 rb_fiber_t *fiber = GET_EC()->fiber_ptr;
300 rb_thread_t *th = GET_THREAD();
301
302 mutex_locked(th, fiber, self);
303 return Qtrue;
304 }
305 else {
306 RUBY_DEBUG_LOG("%p ng", mutex);
307 return Qfalse;
308 }
309}
310
311static VALUE
312mutex_owned_p(rb_fiber_t *fiber, rb_mutex_t *mutex)
313{
314 return RBOOL(mutex->fiber == fiber);
315}
316
317static VALUE
318call_rb_fiber_scheduler_block(VALUE mutex)
319{
320    return rb_fiber_scheduler_block(rb_fiber_scheduler_current(), mutex, Qnil);
321}
322
323static VALUE
324delete_from_waitq(VALUE value)
325{
326 struct sync_waiter *sync_waiter = (void *)value;
327 ccan_list_del(&sync_waiter->node);
328
329 return Qnil;
330}
331
332static inline rb_atomic_t threadptr_get_interrupts(rb_thread_t *th);
333
334static VALUE
335do_mutex_lock(VALUE self, int interruptible_p)
336{
337 rb_execution_context_t *ec = GET_EC();
338 rb_thread_t *th = ec->thread_ptr;
339 rb_fiber_t *fiber = ec->fiber_ptr;
340 rb_mutex_t *mutex = mutex_ptr(self);
341 rb_atomic_t saved_ints = 0;
342
343 /* When running trap handler */
344 if (!FL_TEST_RAW(self, MUTEX_ALLOW_TRAP) &&
345 th->ec->interrupt_mask & TRAP_INTERRUPT_MASK) {
346 rb_raise(rb_eThreadError, "can't be called from trap context");
347 }
348
349 if (rb_mutex_trylock(self) == Qfalse) {
350 if (mutex->fiber == fiber) {
351 rb_raise(rb_eThreadError, "deadlock; recursive locking");
352 }
353
354 while (mutex->fiber != fiber) {
355 VM_ASSERT(mutex->fiber != NULL);
356
357 VALUE scheduler = rb_fiber_scheduler_current();
358 if (scheduler != Qnil) {
359 struct sync_waiter sync_waiter = {
360 .self = self,
361 .th = th,
362 .fiber = nonblocking_fiber(fiber)
363 };
364
365 ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
366
367 rb_ensure(call_rb_fiber_scheduler_block, self, delete_from_waitq, (VALUE)&sync_waiter);
368
369 if (!mutex->fiber) {
370 mutex_set_owner(self, th, fiber);
371 }
372 }
373 else {
374 if (!th->vm->thread_ignore_deadlock && rb_fiber_threadptr(mutex->fiber) == th) {
375 rb_raise(rb_eThreadError, "deadlock; lock already owned by another fiber belonging to the same thread");
376 }
377
378 struct sync_waiter sync_waiter = {
379 .self = self,
380 .th = th,
381 .fiber = nonblocking_fiber(fiber),
382 };
383
384 RUBY_DEBUG_LOG("%p wait", mutex);
385
386            // Similar code to `sleep_forever`, but
387            // sleep_forever(SLEEP_DEADLOCKABLE) raises an exception.
388            // An ensure clause would be needed, as below, but `rb_ensure` is a bit slow:
389 //
390 // begin
391 // sleep_forever(th, SLEEP_DEADLOCKABLE);
392 // ensure
393 // ccan_list_del(&sync_waiter.node);
394 // end
395 enum rb_thread_status prev_status = th->status;
396 th->status = THREAD_STOPPED_FOREVER;
397 rb_ractor_sleeper_threads_inc(th->ractor);
398 rb_check_deadlock(th->ractor);
399
400 RUBY_ASSERT(!th->locking_mutex);
401 th->locking_mutex = self;
402
403 ccan_list_add_tail(&mutex->waitq, &sync_waiter.node);
404 {
405 native_sleep(th, NULL);
406 }
407 ccan_list_del(&sync_waiter.node);
408
409 // unlocked by another thread while sleeping
410 if (!mutex->fiber) {
411 mutex_set_owner(self, th, fiber);
412 }
413
414 rb_ractor_sleeper_threads_dec(th->ractor);
415 th->status = prev_status;
416 th->locking_mutex = Qfalse;
418
419 RUBY_DEBUG_LOG("%p wakeup", mutex);
420 }
421
422 if (interruptible_p) {
423 /* release mutex before checking for interrupts...as interrupt checking
424 * code might call rb_raise() */
425 if (mutex->fiber == fiber) {
426 mutex->thread = Qfalse;
427 mutex->fiber = NULL;
428 }
429 RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may release mutex */
430 if (!mutex->fiber) {
431 mutex_set_owner(self, th, fiber);
432 }
433 }
434 else {
435 // clear interrupt information
436 if (RUBY_VM_INTERRUPTED(th->ec)) {
437 // reset interrupts
438 if (saved_ints == 0) {
439 saved_ints = threadptr_get_interrupts(th);
440 }
441 else {
442 // ignore additional interrupts
443 threadptr_get_interrupts(th);
444 }
445 }
446 }
447 }
448
449 if (saved_ints) th->ec->interrupt_flag = saved_ints;
450 if (mutex->fiber == fiber) mutex_locked(th, fiber, self);
451 }
452
453 RUBY_DEBUG_LOG("%p locked", mutex);
454
455 // assertion
456 if (mutex_owned_p(fiber, mutex) == Qfalse) rb_bug("do_mutex_lock: mutex is not owned.");
457
458 return self;
459}
460
461static VALUE
462mutex_lock_uninterruptible(VALUE self)
463{
464 return do_mutex_lock(self, 0);
465}
466
467/*
468 * call-seq:
469 * mutex.lock -> self
470 *
471 * Attempts to grab the lock and waits if it isn't available.
472 * Raises +ThreadError+ if +mutex+ was locked by the current thread.
473 */
474VALUE
475rb_mutex_lock(VALUE self)
476{
477 return do_mutex_lock(self, 1);
478}
479
480/*
481 * call-seq:
482 * mutex.owned? -> true or false
483 *
484 * Returns +true+ if this lock is currently held by the current thread.
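 *
 * For example, a minimal sketch (illustrative only):
 *
 *    m = Thread::Mutex.new
 *    m.owned? #=> false
 *    m.synchronize do
 *      m.owned? #=> true
 *    end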
485 */
486VALUE
487rb_mutex_owned_p(VALUE self)
488{
489 rb_fiber_t *fiber = GET_EC()->fiber_ptr;
490 rb_mutex_t *mutex = mutex_ptr(self);
491
492 return mutex_owned_p(fiber, mutex);
493}
494
495static const char *
496rb_mutex_unlock_th(rb_mutex_t *mutex, rb_thread_t *th, rb_fiber_t *fiber)
497{
498 RUBY_DEBUG_LOG("%p", mutex);
499
500 if (mutex->fiber == 0) {
501 return "Attempt to unlock a mutex which is not locked";
502 }
503 else if (mutex->fiber != fiber) {
504 return "Attempt to unlock a mutex which is locked by another thread/fiber";
505 }
506
507 struct sync_waiter *cur = 0, *next;
508
509 mutex->fiber = 0;
510 thread_mutex_remove(th, mutex);
511
512 ccan_list_for_each_safe(&mutex->waitq, cur, next, node) {
513 ccan_list_del_init(&cur->node);
514
515 if (cur->th->scheduler != Qnil && cur->fiber) {
516 rb_fiber_scheduler_unblock(cur->th->scheduler, cur->self, rb_fiberptr_self(cur->fiber));
517 return NULL;
518 }
519 else {
520 switch (cur->th->status) {
521 case THREAD_RUNNABLE: /* from someone else calling Thread#run */
522 case THREAD_STOPPED_FOREVER: /* likely (rb_mutex_lock) */
523 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(cur->th));
524 rb_threadptr_interrupt(cur->th);
525 return NULL;
526 case THREAD_STOPPED: /* probably impossible */
527 rb_bug("unexpected THREAD_STOPPED");
528 case THREAD_KILLED:
529 /* not sure about this, possible in exit GC? */
530 rb_bug("unexpected THREAD_KILLED");
531 continue;
532 }
533 }
534 }
535
536 // We did not find any threads to wake up, so we can just return with no error:
537 return NULL;
538}
539
540/*
541 * call-seq:
542 * mutex.unlock -> self
543 *
544 * Releases the lock.
545 * Raises +ThreadError+ if +mutex+ wasn't locked by the current thread.
546 */
547VALUE
548rb_mutex_unlock(VALUE self)
549{
550 const char *err;
551 rb_mutex_t *mutex = mutex_ptr(self);
552 rb_thread_t *th = GET_THREAD();
553
554 err = rb_mutex_unlock_th(mutex, th, GET_EC()->fiber_ptr);
555 if (err) rb_raise(rb_eThreadError, "%s", err);
556
557 return self;
558}
559
560#if defined(HAVE_WORKING_FORK)
561static void
562rb_mutex_abandon_keeping_mutexes(rb_thread_t *th)
563{
564 rb_mutex_abandon_all(th->keeping_mutexes);
565 th->keeping_mutexes = NULL;
566}
567
568static void
569rb_mutex_abandon_locking_mutex(rb_thread_t *th)
570{
571 if (th->locking_mutex) {
572 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
573
574 ccan_list_head_init(&mutex->waitq);
575 th->locking_mutex = Qfalse;
576 }
577}
578
579static void
580rb_mutex_abandon_all(rb_mutex_t *mutexes)
581{
582 rb_mutex_t *mutex;
583
584 while (mutexes) {
585 mutex = mutexes;
586 mutexes = mutex->next_mutex;
587 mutex->fiber = 0;
588 mutex->next_mutex = 0;
589 ccan_list_head_init(&mutex->waitq);
590 }
591}
592#endif
593
594struct rb_mutex_sleep_arguments {
595 VALUE self;
596 VALUE timeout;
597};
598
599static VALUE
600mutex_sleep_begin(VALUE _arguments)
601{
602 struct rb_mutex_sleep_arguments *arguments = (struct rb_mutex_sleep_arguments *)_arguments;
603 VALUE timeout = arguments->timeout;
604 VALUE woken = Qtrue;
605
606 VALUE scheduler = rb_fiber_scheduler_current();
607 if (scheduler != Qnil) {
608 rb_fiber_scheduler_kernel_sleep(scheduler, timeout);
609 }
610 else {
611 if (NIL_P(timeout)) {
612 rb_thread_sleep_deadly_allow_spurious_wakeup(arguments->self, Qnil, 0);
613 }
614 else {
615 struct timeval timeout_value = rb_time_interval(timeout);
616 rb_hrtime_t relative_timeout = rb_timeval2hrtime(&timeout_value);
617 /* permit spurious check */
618 woken = RBOOL(sleep_hrtime(GET_THREAD(), relative_timeout, 0));
619 }
620 }
621
622 return woken;
623}
624
625VALUE
626rb_mutex_sleep(VALUE self, VALUE timeout)
627{
628 if (!NIL_P(timeout)) {
629 // Validate the argument:
630 rb_time_interval(timeout);
631 }
632
633 rb_mutex_unlock(self);
634 time_t beg = time(0);
635
636 struct rb_mutex_sleep_arguments arguments = {
637 .self = self,
638 .timeout = timeout,
639 };
640
641 VALUE woken = rb_ensure(mutex_sleep_begin, (VALUE)&arguments, mutex_lock_uninterruptible, self);
642
643 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
644 if (!woken) return Qnil;
645 time_t end = time(0) - beg;
646 return TIMET2NUM(end);
647}
648
649/*
650 * call-seq:
651 * mutex.sleep(timeout = nil) -> number or nil
652 *
653 * Releases the lock and sleeps for +timeout+ seconds if +timeout+ is given
654 * and non-nil, or sleeps forever otherwise. Raises +ThreadError+ if +mutex+
655 * wasn't locked by the current thread.
656 *
657 * When the thread is next woken up, it will attempt to reacquire
658 * the lock.
659 *
660 * Note that this method can wake up without an explicit Thread#wakeup
661 * call; for example, the thread may be woken by a received signal.
662 *
663 * Returns the slept time in seconds if woken up, or +nil+ if timed out.
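 *
 * For example, a minimal sketch (illustrative only; the timeout value is
 * arbitrary):
 *
 *    m = Thread::Mutex.new
 *    m.lock
 *    m.sleep(0.1) # releases the lock, sleeps ~0.1s, then reacquires it
 *    m.unlock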
664 */
665static VALUE
666mutex_sleep(int argc, VALUE *argv, VALUE self)
667{
668 VALUE timeout;
669
670 timeout = rb_check_arity(argc, 0, 1) ? argv[0] : Qnil;
671 return rb_mutex_sleep(self, timeout);
672}
673
674/*
675 * call-seq:
676 * mutex.synchronize { ... } -> result of the block
677 *
678 * Obtains a lock, runs the block, and releases the lock when the block
679 * completes. See the example under Thread::Mutex.
680 */
681
682VALUE
683rb_mutex_synchronize(VALUE mutex, VALUE (*func)(VALUE arg), VALUE arg)
684{
685 rb_mutex_lock(mutex);
686 return rb_ensure(func, arg, rb_mutex_unlock, mutex);
687}
688
689/*
690 * call-seq:
691 * mutex.synchronize { ... } -> result of the block
692 *
693 * Obtains a lock, runs the block, and releases the lock when the block
694 * completes. See the example under Thread::Mutex.
695 */
696static VALUE
697rb_mutex_synchronize_m(VALUE self)
698{
699 if (!rb_block_given_p()) {
700 rb_raise(rb_eThreadError, "must be called with a block");
701 }
702
703 return rb_mutex_synchronize(self, rb_yield, Qundef);
704}
705
706void
707rb_mutex_allow_trap(VALUE self, int val)
708{
709 Check_TypedStruct(self, &mutex_data_type);
710
711 if (val)
712 FL_SET_RAW(self, MUTEX_ALLOW_TRAP);
713 else
714 FL_UNSET_RAW(self, MUTEX_ALLOW_TRAP);
715}
716
717/* Queue */
718
719#define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
720#define queue_list(q) UNALIGNED_MEMBER_PTR(q, que)
721RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
722struct rb_queue {
723 struct ccan_list_head waitq;
724 rb_serial_t fork_gen;
725 const VALUE que;
726 int num_waiting;
727} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();
728
729#define szqueue_waitq(sq) UNALIGNED_MEMBER_PTR(sq, q.waitq)
730#define szqueue_list(sq) UNALIGNED_MEMBER_PTR(sq, q.que)
731#define szqueue_pushq(sq) UNALIGNED_MEMBER_PTR(sq, pushq)
732RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_BEGIN()
733struct rb_szqueue {
734 struct rb_queue q;
735 int num_waiting_push;
736 struct ccan_list_head pushq;
737 long max;
738} RBIMPL_ATTR_PACKED_STRUCT_UNALIGNED_END();
739
740static void
741queue_mark(void *ptr)
742{
743 struct rb_queue *q = ptr;
744
745 /* no need to mark threads in waitq, they are on stack */
746 rb_gc_mark(q->que);
747}
748
749static size_t
750queue_memsize(const void *ptr)
751{
752 return sizeof(struct rb_queue);
753}
754
755static const rb_data_type_t queue_data_type = {
756 "queue",
757 {queue_mark, RUBY_TYPED_DEFAULT_FREE, queue_memsize,},
758 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
759};
760
761static VALUE
762queue_alloc(VALUE klass)
763{
764 VALUE obj;
765 struct rb_queue *q;
766
767 obj = TypedData_Make_Struct(klass, struct rb_queue, &queue_data_type, q);
768 ccan_list_head_init(queue_waitq(q));
769 return obj;
770}
771
772static int
773queue_fork_check(struct rb_queue *q)
774{
775 rb_serial_t fork_gen = GET_VM()->fork_gen;
776
777 if (q->fork_gen == fork_gen) {
778 return 0;
779 }
780 /* forked children can't reach into parent thread stacks */
781 q->fork_gen = fork_gen;
782 ccan_list_head_init(queue_waitq(q));
783 q->num_waiting = 0;
784 return 1;
785}
786
787static struct rb_queue *
788queue_ptr(VALUE obj)
789{
790 struct rb_queue *q;
791
792 TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
793 queue_fork_check(q);
794
795 return q;
796}
797
798#define QUEUE_CLOSED FL_USER5
799
800static rb_hrtime_t
801queue_timeout2hrtime(VALUE timeout)
802{
803 if (NIL_P(timeout)) {
804 return (rb_hrtime_t)0;
805 }
806 rb_hrtime_t rel = 0;
807 if (FIXNUM_P(timeout)) {
808 rel = rb_sec2hrtime(NUM2TIMET(timeout));
809 }
810 else {
811 double2hrtime(&rel, rb_num2dbl(timeout));
812 }
813 return rb_hrtime_add(rel, rb_hrtime_now());
814}
815
816static void
817szqueue_mark(void *ptr)
818{
819 struct rb_szqueue *sq = ptr;
820
821 queue_mark(&sq->q);
822}
823
824static size_t
825szqueue_memsize(const void *ptr)
826{
827 return sizeof(struct rb_szqueue);
828}
829
830static const rb_data_type_t szqueue_data_type = {
831 "sized_queue",
832 {szqueue_mark, RUBY_TYPED_DEFAULT_FREE, szqueue_memsize,},
833 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
834};
835
836static VALUE
837szqueue_alloc(VALUE klass)
838{
839 struct rb_szqueue *sq;
840 VALUE obj = TypedData_Make_Struct(klass, struct rb_szqueue,
841 &szqueue_data_type, sq);
842 ccan_list_head_init(szqueue_waitq(sq));
843 ccan_list_head_init(szqueue_pushq(sq));
844 return obj;
845}
846
847static struct rb_szqueue *
848szqueue_ptr(VALUE obj)
849{
850 struct rb_szqueue *sq;
851
852 TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
853 if (queue_fork_check(&sq->q)) {
854 ccan_list_head_init(szqueue_pushq(sq));
855 sq->num_waiting_push = 0;
856 }
857
858 return sq;
859}
860
861static VALUE
862ary_buf_new(void)
863{
864 return rb_ary_hidden_new(1);
865}
866
867static VALUE
868check_array(VALUE obj, VALUE ary)
869{
870 if (!RB_TYPE_P(ary, T_ARRAY)) {
871 rb_raise(rb_eTypeError, "%+"PRIsVALUE" not initialized", obj);
872 }
873 return ary;
874}
875
876static long
877queue_length(VALUE self, struct rb_queue *q)
878{
879 return RARRAY_LEN(check_array(self, q->que));
880}
881
882static int
883queue_closed_p(VALUE self)
884{
885 return FL_TEST_RAW(self, QUEUE_CLOSED) != 0;
886}
887
888/*
889 * Document-class: ClosedQueueError
890 *
891 * The exception class which will be raised when pushing into a closed
892 * Queue. See Thread::Queue#close and Thread::SizedQueue#close.
893 */
894
895NORETURN(static void raise_closed_queue_error(VALUE self));
896
897static void
898raise_closed_queue_error(VALUE self)
899{
900 rb_raise(rb_eClosedQueueError, "queue closed");
901}
902
903static VALUE
904queue_closed_result(VALUE self, struct rb_queue *q)
905{
906 RUBY_ASSERT(queue_length(self, q) == 0);
907 return Qnil;
908}
909
910/*
911 * Document-class: Thread::Queue
912 *
913 * The Thread::Queue class implements multi-producer, multi-consumer
914 * queues. It is especially useful in threaded programming when
915 * information must be exchanged safely between multiple threads. The
916 * Thread::Queue class implements all the required locking semantics.
917 *
918 * The class implements a FIFO (first in, first out) queue: the first
919 * tasks added are the first retrieved.
920 *
921 * Example:
922 *
923 * queue = Thread::Queue.new
924 *
925 * producer = Thread.new do
926 * 5.times do |i|
927 * sleep rand(i) # simulate expense
928 * queue << i
929 * puts "#{i} produced"
930 * end
931 * end
932 *
933 * consumer = Thread.new do
934 * 5.times do |i|
935 * value = queue.pop
936 * sleep rand(i/2) # simulate expense
937 * puts "consumed #{value}"
938 * end
939 * end
940 *
941 * consumer.join
942 *
943 */
944
945/*
946 * Document-method: Queue::new
947 *
948 * call-seq:
949 * Thread::Queue.new -> empty_queue
950 * Thread::Queue.new(enumerable) -> queue
951 *
952 * Creates a new queue instance, optionally using the contents of an +enumerable+
953 * for its initial state.
954 *
955 * Example:
956 *
957 * q = Thread::Queue.new
958 * #=> #<Thread::Queue:0x00007ff7501110d0>
959 * q.empty?
960 * #=> true
961 *
962 * q = Thread::Queue.new([1, 2, 3])
963 * #=> #<Thread::Queue:0x00007ff7500ec500>
964 * q.empty?
965 * #=> false
966 * q.pop
967 * #=> 1
968 */
969
970static VALUE
971rb_queue_initialize(int argc, VALUE *argv, VALUE self)
972{
973 VALUE initial;
974 struct rb_queue *q = queue_ptr(self);
975 if ((argc = rb_scan_args(argc, argv, "01", &initial)) == 1) {
976 initial = rb_to_array(initial);
977 }
978 RB_OBJ_WRITE(self, queue_list(q), ary_buf_new());
979 ccan_list_head_init(queue_waitq(q));
980 if (argc == 1) {
981 rb_ary_concat(q->que, initial);
982 }
983 return self;
984}
985
986static VALUE
987queue_do_push(VALUE self, struct rb_queue *q, VALUE obj)
988{
989 if (queue_closed_p(self)) {
990 raise_closed_queue_error(self);
991 }
992 rb_ary_push(check_array(self, q->que), obj);
993 wakeup_one(queue_waitq(q));
994 return self;
995}
996
997/*
998 * Document-method: Thread::Queue#close
999 * call-seq:
1000 * close
1001 *
1002 * Closes the queue. A closed queue cannot be re-opened.
1003 *
1004 * After the call to close completes, the following are true:
1005 *
1006 * - +closed?+ will return true
1007 *
1008 * - +close+ will be ignored.
1009 *
1010 * - calling enq/push/<< will raise a +ClosedQueueError+.
1011 *
1012 * - when +empty?+ is false, calling deq/pop/shift will return an object
1013 * from the queue as usual.
1014 * - when +empty?+ is true, deq(false) will not suspend the thread and will return nil.
1015 * deq(true) will raise a +ThreadError+.
1016 *
1017 * ClosedQueueError inherits from StopIteration, so you can use it to break out of a loop block.
1018 *
1019 * Example:
1020 *
1021 * q = Thread::Queue.new
1022 * Thread.new{
1023 * while e = q.deq # wait for nil to break loop
1024 * # ...
1025 * end
1026 * }
1027 * q.close
1028 */
1029
1030static VALUE
1031rb_queue_close(VALUE self)
1032{
1033 struct rb_queue *q = queue_ptr(self);
1034
1035 if (!queue_closed_p(self)) {
1036 FL_SET(self, QUEUE_CLOSED);
1037
1038 wakeup_all(queue_waitq(q));
1039 }
1040
1041 return self;
1042}
1043
1044/*
1045 * Document-method: Thread::Queue#closed?
1046 * call-seq: closed?
1047 *
1048 * Returns +true+ if the queue is closed.
1049 */
1050
1051static VALUE
1052rb_queue_closed_p(VALUE self)
1053{
1054 return RBOOL(queue_closed_p(self));
1055}
1056
1057/*
1058 * Document-method: Thread::Queue#push
1059 * call-seq:
1060 * push(object)
1061 * enq(object)
1062 * <<(object)
1063 *
1064 * Pushes the given +object+ to the queue.
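 *
 * +enq+ and +<<+ are aliases of +push+; a minimal sketch (illustrative only):
 *
 *    q = Thread::Queue.new
 *    q.push(1)
 *    q << 2
 *    q.enq(3)
 *    q.pop #=> 1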
1065 */
1066
1067static VALUE
1068rb_queue_push(VALUE self, VALUE obj)
1069{
1070 return queue_do_push(self, queue_ptr(self), obj);
1071}
1072
1073static VALUE
1074queue_sleep(VALUE _args)
1075{
1076 struct queue_sleep_arg *args = (struct queue_sleep_arg *)_args;
1077 rb_thread_sleep_deadly_allow_spurious_wakeup(args->self, args->timeout, args->end);
1078 return Qnil;
1079}
1080
1081struct queue_waiter {
1082 struct sync_waiter w;
1083 union {
1084 struct rb_queue *q;
1085 struct rb_szqueue *sq;
1086 } as;
1087};
1088
1089static VALUE
1090queue_sleep_done(VALUE p)
1091{
1092 struct queue_waiter *qw = (struct queue_waiter *)p;
1093
1094 ccan_list_del(&qw->w.node);
1095 qw->as.q->num_waiting--;
1096
1097 return Qfalse;
1098}
1099
1100static VALUE
1101szqueue_sleep_done(VALUE p)
1102{
1103 struct queue_waiter *qw = (struct queue_waiter *)p;
1104
1105 ccan_list_del(&qw->w.node);
1106 qw->as.sq->num_waiting_push--;
1107
1108 return Qfalse;
1109}
1110
1111static VALUE
1112queue_do_pop(VALUE self, struct rb_queue *q, int should_block, VALUE timeout)
1113{
1114 check_array(self, q->que);
1115 if (RARRAY_LEN(q->que) == 0) {
1116 if (!should_block) {
1117 rb_raise(rb_eThreadError, "queue empty");
1118 }
1119
1120 if (RTEST(rb_equal(INT2FIX(0), timeout))) {
1121 return Qnil;
1122 }
1123 }
1124
1125 rb_hrtime_t end = queue_timeout2hrtime(timeout);
1126 while (RARRAY_LEN(q->que) == 0) {
1127 if (queue_closed_p(self)) {
1128 return queue_closed_result(self, q);
1129 }
1130 else {
1131 rb_execution_context_t *ec = GET_EC();
1132
1133 RUBY_ASSERT(RARRAY_LEN(q->que) == 0);
1134 RUBY_ASSERT(queue_closed_p(self) == 0);
1135
1136 struct queue_waiter queue_waiter = {
1137 .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
1138 .as = {.q = q}
1139 };
1140
1141 struct ccan_list_head *waitq = queue_waitq(q);
1142
1143 ccan_list_add_tail(waitq, &queue_waiter.w.node);
1144 queue_waiter.as.q->num_waiting++;
1145
1146            struct queue_sleep_arg queue_sleep_arg = {
1147 .self = self,
1148 .timeout = timeout,
1149 .end = end
1150 };
1151
1152 rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, queue_sleep_done, (VALUE)&queue_waiter);
1153 if (!NIL_P(timeout) && (rb_hrtime_now() >= end))
1154 break;
1155 }
1156 }
1157
1158 return rb_ary_shift(q->que);
1159}
1160
1161static VALUE
1162rb_queue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
1163{
1164 return queue_do_pop(self, queue_ptr(self), !RTEST(non_block), timeout);
1165}
1166
1167/*
1168 * Document-method: Thread::Queue#empty?
1169 * call-seq: empty?
1170 *
1171 * Returns +true+ if the queue is empty.
1172 */
1173
1174static VALUE
1175rb_queue_empty_p(VALUE self)
1176{
1177 return RBOOL(queue_length(self, queue_ptr(self)) == 0);
1178}
1179
1180/*
1181 * Document-method: Thread::Queue#clear
1182 *
1183 * Removes all objects from the queue.
1184 */
1185
1186static VALUE
1187rb_queue_clear(VALUE self)
1188{
1189 struct rb_queue *q = queue_ptr(self);
1190
1191 rb_ary_clear(check_array(self, q->que));
1192 return self;
1193}
1194
1195/*
1196 * Document-method: Thread::Queue#length
1197 * call-seq:
1198 * length
1199 * size
1200 *
1201 * Returns the length of the queue.
1202 */
1203
1204static VALUE
1205rb_queue_length(VALUE self)
1206{
1207 return LONG2NUM(queue_length(self, queue_ptr(self)));
1208}
1209
1210NORETURN(static VALUE rb_queue_freeze(VALUE self));
1211/*
1212 * call-seq:
1213 * freeze
1214 *
1215 * The queue can't be frozen, so this method raises an exception:
1216 * Thread::Queue.new.freeze # Raises TypeError (cannot freeze #<Thread::Queue:0x...>)
1217 *
1218 */
1219static VALUE
1220rb_queue_freeze(VALUE self)
1221{
1222 rb_raise(rb_eTypeError, "cannot freeze " "%+"PRIsVALUE, self);
1223 UNREACHABLE_RETURN(self);
1224}
1225
1226/*
1227 * Document-method: Thread::Queue#num_waiting
1228 *
1229 * Returns the number of threads waiting on the queue.
1230 */
1231
1232static VALUE
1233rb_queue_num_waiting(VALUE self)
1234{
1235 struct rb_queue *q = queue_ptr(self);
1236
1237 return INT2NUM(q->num_waiting);
1238}
1239
1240/*
1241 * Document-class: Thread::SizedQueue
1242 *
1243 * This class represents queues with a specified maximum capacity. The push
1244 * operation blocks while the queue is full.
1245 *
1246 * See Thread::Queue for an example of how a Thread::SizedQueue works.
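 *
 * A small sketch of the blocking push behavior (illustrative only):
 *
 *    q = Thread::SizedQueue.new(2)
 *    q << 1
 *    q << 2
 *    t = Thread.new { q << 3 } # blocks: the queue is full
 *    q.pop                     # frees a slot; the producer can finish
 *    t.join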
1247 */
1248
1249/*
1250 * Document-method: SizedQueue::new
1251 * call-seq: new(max)
1252 *
1253 * Creates a fixed-length queue with a maximum size of +max+.
1254 */
1255
1256static VALUE
1257rb_szqueue_initialize(VALUE self, VALUE vmax)
1258{
1259 long max;
1260 struct rb_szqueue *sq = szqueue_ptr(self);
1261
1262 max = NUM2LONG(vmax);
1263 if (max <= 0) {
1264 rb_raise(rb_eArgError, "queue size must be positive");
1265 }
1266
1267 RB_OBJ_WRITE(self, szqueue_list(sq), ary_buf_new());
1268 ccan_list_head_init(szqueue_waitq(sq));
1269 ccan_list_head_init(szqueue_pushq(sq));
1270 sq->max = max;
1271
1272 return self;
1273}
1274
1275/*
1276 * Document-method: Thread::SizedQueue#close
1277 * call-seq:
1278 * close
1279 *
1280 * Similar to Thread::Queue#close.
1281 *
1282 * The difference is the behavior toward waiting enqueuing threads.
1283 *
1284 * If there are threads blocked waiting to push, they are interrupted by
1285 * raising ClosedQueueError('queue closed').
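 *
 * For example, a minimal sketch (illustrative only):
 *
 *    q = Thread::SizedQueue.new(1)
 *    q << 1
 *    t = Thread.new { q << 2 } # blocks: the queue is full
 *    q.close                   # the blocked producer raises ClosedQueueError
 *    begin
 *      t.join
 *    rescue ClosedQueueError
 *      # the producer was interrupted by close
 *    end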
1286 */
1287static VALUE
1288rb_szqueue_close(VALUE self)
1289{
1290 if (!queue_closed_p(self)) {
1291 struct rb_szqueue *sq = szqueue_ptr(self);
1292
1293 FL_SET(self, QUEUE_CLOSED);
1294 wakeup_all(szqueue_waitq(sq));
1295 wakeup_all(szqueue_pushq(sq));
1296 }
1297 return self;
1298}
1299
1300/*
1301 * Document-method: Thread::SizedQueue#max
1302 *
1303 * Returns the maximum size of the queue.
1304 */
1305
1306static VALUE
1307rb_szqueue_max_get(VALUE self)
1308{
1309 return LONG2NUM(szqueue_ptr(self)->max);
1310}
1311
1312/*
1313 * Document-method: Thread::SizedQueue#max=
1314 * call-seq: max=(number)
1315 *
1316 * Sets the maximum size of the queue to the given +number+.
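 *
 * Raising the maximum also wakes up blocked producers; a minimal sketch
 * (illustrative only):
 *
 *    q = Thread::SizedQueue.new(1)
 *    q << 1
 *    t = Thread.new { q << 2 } # blocks: the queue is full
 *    q.max = 2                 # makes room, so the producer is woken
 *    t.join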
1317 */
1318
1319static VALUE
1320rb_szqueue_max_set(VALUE self, VALUE vmax)
1321{
1322 long max = NUM2LONG(vmax);
1323 long diff = 0;
1324 struct rb_szqueue *sq = szqueue_ptr(self);
1325
1326 if (max <= 0) {
1327 rb_raise(rb_eArgError, "queue size must be positive");
1328 }
1329 if (max > sq->max) {
1330 diff = max - sq->max;
1331 }
1332 sq->max = max;
1333 sync_wakeup(szqueue_pushq(sq), diff);
1334 return vmax;
1335}
1336
1337static VALUE
1338rb_szqueue_push(rb_execution_context_t *ec, VALUE self, VALUE object, VALUE non_block, VALUE timeout)
1339{
1340 struct rb_szqueue *sq = szqueue_ptr(self);
1341
1342 if (queue_length(self, &sq->q) >= sq->max) {
1343 if (RTEST(non_block)) {
1344 rb_raise(rb_eThreadError, "queue full");
1345 }
1346
1347 if (RTEST(rb_equal(INT2FIX(0), timeout))) {
1348 return Qnil;
1349 }
1350 }
1351
1352 rb_hrtime_t end = queue_timeout2hrtime(timeout);
1353 while (queue_length(self, &sq->q) >= sq->max) {
1354 if (queue_closed_p(self)) {
1355 raise_closed_queue_error(self);
1356 }
1357 else {
1358 rb_execution_context_t *ec = GET_EC();
1359 struct queue_waiter queue_waiter = {
1360 .w = {.self = self, .th = ec->thread_ptr, .fiber = nonblocking_fiber(ec->fiber_ptr)},
1361 .as = {.sq = sq}
1362 };
1363
1364 struct ccan_list_head *pushq = szqueue_pushq(sq);
1365
1366 ccan_list_add_tail(pushq, &queue_waiter.w.node);
1367 sq->num_waiting_push++;
1368
1369            struct queue_sleep_arg queue_sleep_arg = {
1370 .self = self,
1371 .timeout = timeout,
1372 .end = end
1373 };
1374 rb_ensure(queue_sleep, (VALUE)&queue_sleep_arg, szqueue_sleep_done, (VALUE)&queue_waiter);
1375 if (!NIL_P(timeout) && rb_hrtime_now() >= end) {
1376 return Qnil;
1377 }
1378 }
1379 }
1380
1381 return queue_do_push(self, &sq->q, object);
1382}
1383
1384static VALUE
1385szqueue_do_pop(VALUE self, int should_block, VALUE timeout)
1386{
1387 struct rb_szqueue *sq = szqueue_ptr(self);
1388 VALUE retval = queue_do_pop(self, &sq->q, should_block, timeout);
1389
1390 if (queue_length(self, &sq->q) < sq->max) {
1391 wakeup_one(szqueue_pushq(sq));
1392 }
1393
1394 return retval;
1395}
1396static VALUE
1397rb_szqueue_pop(rb_execution_context_t *ec, VALUE self, VALUE non_block, VALUE timeout)
1398{
1399 return szqueue_do_pop(self, !RTEST(non_block), timeout);
1400}
1401
1402/*
1403 * Document-method: Thread::SizedQueue#clear
1404 *
1405 * Removes all objects from the queue.
1406 */
1407
1408static VALUE
1409rb_szqueue_clear(VALUE self)
1410{
1411 struct rb_szqueue *sq = szqueue_ptr(self);
1412
1413 rb_ary_clear(check_array(self, sq->q.que));
1414 wakeup_all(szqueue_pushq(sq));
1415 return self;
1416}
1417
1418/*
1419 * Document-method: Thread::SizedQueue#length
1420 * call-seq:
1421 * length
1422 * size
1423 *
1424 * Returns the length of the queue.
1425 */
1426
1427static VALUE
1428rb_szqueue_length(VALUE self)
1429{
1430 struct rb_szqueue *sq = szqueue_ptr(self);
1431
1432 return LONG2NUM(queue_length(self, &sq->q));
1433}
1434
1435/*
1436 * Document-method: Thread::SizedQueue#num_waiting
1437 *
1438 * Returns the number of threads waiting on the queue.
1439 */
1440
1441static VALUE
1442rb_szqueue_num_waiting(VALUE self)
1443{
1444 struct rb_szqueue *sq = szqueue_ptr(self);
1445
1446 return INT2NUM(sq->q.num_waiting + sq->num_waiting_push);
1447}
1448
1449/*
1450 * Document-method: Thread::SizedQueue#empty?
1451 * call-seq: empty?
1452 *
1453 * Returns +true+ if the queue is empty.
1454 */
1455
1456static VALUE
1457rb_szqueue_empty_p(VALUE self)
1458{
1459 struct rb_szqueue *sq = szqueue_ptr(self);
1460
1461 return RBOOL(queue_length(self, &sq->q) == 0);
1462}
1463
1464
1465/* ConditionVariable */
1466struct rb_condvar {
1467 struct ccan_list_head waitq;
1468 rb_serial_t fork_gen;
1469};
1470
1471/*
1472 * Document-class: Thread::ConditionVariable
1473 *
1474 * ConditionVariable objects augment class Mutex. Using condition variables,
1475 * it is possible to suspend while in the middle of a critical section until a
1476 * resource becomes available.
1477 *
1478 * Example:
1479 *
1480 * mutex = Thread::Mutex.new
1481 * resource = Thread::ConditionVariable.new
1482 *
1483 * a = Thread.new {
1484 * mutex.synchronize {
1485 * # Thread 'a' now needs the resource
1486 * resource.wait(mutex)
1487 * # 'a' can now have the resource
1488 * }
1489 * }
1490 *
1491 * b = Thread.new {
1492 * mutex.synchronize {
1493 * # Thread 'b' has finished using the resource
1494 * resource.signal
1495 * }
1496 * }
1497 */
1498
1499static size_t
1500condvar_memsize(const void *ptr)
1501{
1502 return sizeof(struct rb_condvar);
1503}
1504
1505static const rb_data_type_t cv_data_type = {
1506 "condvar",
1507 {0, RUBY_TYPED_DEFAULT_FREE, condvar_memsize,},
1508 0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
1509};
1510
1511static struct rb_condvar *
1512condvar_ptr(VALUE self)
1513{
1514 struct rb_condvar *cv;
1515 rb_serial_t fork_gen = GET_VM()->fork_gen;
1516
1517 TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
1518
1519 /* forked children can't reach into parent thread stacks */
1520 if (cv->fork_gen != fork_gen) {
1521 cv->fork_gen = fork_gen;
1522 ccan_list_head_init(&cv->waitq);
1523 }
1524
1525 return cv;
1526}
1527
1528static VALUE
1529condvar_alloc(VALUE klass)
1530{
1531 struct rb_condvar *cv;
1532 VALUE obj;
1533
1534 obj = TypedData_Make_Struct(klass, struct rb_condvar, &cv_data_type, cv);
1535 ccan_list_head_init(&cv->waitq);
1536
1537 return obj;
1538}
1539
1540/*
1541 * Document-method: ConditionVariable::new
1542 *
1543 * Creates a new condition variable instance.
1544 */
1545
1546static VALUE
1547rb_condvar_initialize(VALUE self)
1548{
1549 struct rb_condvar *cv = condvar_ptr(self);
1550 ccan_list_head_init(&cv->waitq);
1551 return self;
1552}
1553
1554struct sleep_call {
1555 VALUE mutex;
1556 VALUE timeout;
1557};
1558
1559static ID id_sleep;
1560
1561static VALUE
1562do_sleep(VALUE args)
1563{
1564 struct sleep_call *p = (struct sleep_call *)args;
1565 return rb_funcallv(p->mutex, id_sleep, 1, &p->timeout);
1566}
1567
1568/*
1569 * Document-method: Thread::ConditionVariable#wait
1570 * call-seq: wait(mutex, timeout=nil)
1571 *
1572 * Releases the lock held in +mutex+ and waits; reacquires the lock on wakeup.
1573 *
1574 * If +timeout+ is given, this method returns after +timeout+ seconds have
1575 * passed, even if no other thread has signaled.
1576 *
1577 * Returns the result of calling #sleep on +mutex+.
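 *
 * For example, waiting with a timeout; a minimal sketch (illustrative only):
 *
 *    mutex = Thread::Mutex.new
 *    cond  = Thread::ConditionVariable.new
 *
 *    mutex.synchronize do
 *      cond.wait(mutex, 1.0) # gives up after about one second
 *    end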
1578 */
1579
1580static VALUE
1581rb_condvar_wait(int argc, VALUE *argv, VALUE self)
1582{
1583 rb_execution_context_t *ec = GET_EC();
1584
1585 struct rb_condvar *cv = condvar_ptr(self);
1586 struct sleep_call args;
1587
1588 rb_scan_args(argc, argv, "11", &args.mutex, &args.timeout);
1589
1590 struct sync_waiter sync_waiter = {
1591 .self = args.mutex,
1592 .th = ec->thread_ptr,
1593 .fiber = nonblocking_fiber(ec->fiber_ptr)
1594 };
1595
1596 ccan_list_add_tail(&cv->waitq, &sync_waiter.node);
1597 return rb_ensure(do_sleep, (VALUE)&args, delete_from_waitq, (VALUE)&sync_waiter);
1598}
1599
1600/*
1601 * Document-method: Thread::ConditionVariable#signal
1602 *
1603 * Wakes up the first thread in line waiting on this condition variable.
1604 */
1605
1606static VALUE
1607rb_condvar_signal(VALUE self)
1608{
1609 struct rb_condvar *cv = condvar_ptr(self);
1610 wakeup_one(&cv->waitq);
1611 return self;
1612}
1613
1614/*
1615 * Document-method: Thread::ConditionVariable#broadcast
1616 *
1617 * Wakes up all threads waiting on this condition variable.
1618 */
1619
1620static VALUE
1621rb_condvar_broadcast(VALUE self)
1622{
1623 struct rb_condvar *cv = condvar_ptr(self);
1624 wakeup_all(&cv->waitq);
1625 return self;
1626}
1627
1628NORETURN(static VALUE undumpable(VALUE obj));
1629/* :nodoc: */
1630static VALUE
1631undumpable(VALUE obj)
1632{
1633    rb_raise(rb_eTypeError, "can't dump %"PRIsVALUE, rb_obj_class(obj));
1634    UNREACHABLE_RETURN(Qnil);
1635}
1636
1637static VALUE
1638define_thread_class(VALUE outer, const ID name, VALUE super)
1639{
1640 VALUE klass = rb_define_class_id_under(outer, name, super);
1641 rb_const_set(rb_cObject, name, klass);
1642 return klass;
1643}
1644
1645static void
1646Init_thread_sync(void)
1647{
1648#undef rb_intern
1649#if defined(TEACH_RDOC) && TEACH_RDOC == 42
1650 rb_cMutex = rb_define_class_under(rb_cThread, "Mutex", rb_cObject);
1651 rb_cConditionVariable = rb_define_class_under(rb_cThread, "ConditionVariable", rb_cObject);
1652 rb_cQueue = rb_define_class_under(rb_cThread, "Queue", rb_cObject);
1653 rb_cSizedQueue = rb_define_class_under(rb_cThread, "SizedQueue", rb_cObject);
1654#endif
1655
1656#define DEFINE_CLASS(name, super) \
1657 rb_c##name = define_thread_class(rb_cThread, rb_intern(#name), rb_c##super)
1658
1659 /* Mutex */
1660 DEFINE_CLASS(Mutex, Object);
1661 rb_define_alloc_func(rb_cMutex, mutex_alloc);
1662 rb_define_method(rb_cMutex, "initialize", mutex_initialize, 0);
1663 rb_define_method(rb_cMutex, "locked?", rb_mutex_locked_p, 0);
1664 rb_define_method(rb_cMutex, "try_lock", rb_mutex_trylock, 0);
1665 rb_define_method(rb_cMutex, "lock", rb_mutex_lock, 0);
1666 rb_define_method(rb_cMutex, "unlock", rb_mutex_unlock, 0);
1667 rb_define_method(rb_cMutex, "sleep", mutex_sleep, -1);
1668 rb_define_method(rb_cMutex, "synchronize", rb_mutex_synchronize_m, 0);
1669 rb_define_method(rb_cMutex, "owned?", rb_mutex_owned_p, 0);
1670
1671 /* Queue */
1672 DEFINE_CLASS(Queue, Object);
1673 rb_define_alloc_func(rb_cQueue, queue_alloc);
1674
1675 rb_eClosedQueueError = rb_define_class("ClosedQueueError", rb_eStopIteration);
1676
1677 rb_define_method(rb_cQueue, "initialize", rb_queue_initialize, -1);
1678 rb_undef_method(rb_cQueue, "initialize_copy");
1679 rb_define_method(rb_cQueue, "marshal_dump", undumpable, 0);
1680 rb_define_method(rb_cQueue, "close", rb_queue_close, 0);
1681 rb_define_method(rb_cQueue, "closed?", rb_queue_closed_p, 0);
1682 rb_define_method(rb_cQueue, "push", rb_queue_push, 1);
1683 rb_define_method(rb_cQueue, "empty?", rb_queue_empty_p, 0);
1684 rb_define_method(rb_cQueue, "clear", rb_queue_clear, 0);
1685 rb_define_method(rb_cQueue, "length", rb_queue_length, 0);
1686 rb_define_method(rb_cQueue, "num_waiting", rb_queue_num_waiting, 0);
1687 rb_define_method(rb_cQueue, "freeze", rb_queue_freeze, 0);
1688
1689 rb_define_alias(rb_cQueue, "enq", "push");
1690 rb_define_alias(rb_cQueue, "<<", "push");
1691 rb_define_alias(rb_cQueue, "size", "length");
1692
1693 DEFINE_CLASS(SizedQueue, Queue);
1694 rb_define_alloc_func(rb_cSizedQueue, szqueue_alloc);
1695
1696 rb_define_method(rb_cSizedQueue, "initialize", rb_szqueue_initialize, 1);
1697 rb_define_method(rb_cSizedQueue, "close", rb_szqueue_close, 0);
1698 rb_define_method(rb_cSizedQueue, "max", rb_szqueue_max_get, 0);
1699 rb_define_method(rb_cSizedQueue, "max=", rb_szqueue_max_set, 1);
1700 rb_define_method(rb_cSizedQueue, "empty?", rb_szqueue_empty_p, 0);
1701 rb_define_method(rb_cSizedQueue, "clear", rb_szqueue_clear, 0);
1702 rb_define_method(rb_cSizedQueue, "length", rb_szqueue_length, 0);
1703 rb_define_method(rb_cSizedQueue, "num_waiting", rb_szqueue_num_waiting, 0);
1704 rb_define_alias(rb_cSizedQueue, "size", "length");
1705
1706 /* CVar */
1707 DEFINE_CLASS(ConditionVariable, Object);
1708 rb_define_alloc_func(rb_cConditionVariable, condvar_alloc);
1709
1710 id_sleep = rb_intern("sleep");
1711
1712 rb_define_method(rb_cConditionVariable, "initialize", rb_condvar_initialize, 0);
1713 rb_undef_method(rb_cConditionVariable, "initialize_copy");
1714 rb_define_method(rb_cConditionVariable, "marshal_dump", undumpable, 0);
1715 rb_define_method(rb_cConditionVariable, "wait", rb_condvar_wait, -1);
1716 rb_define_method(rb_cConditionVariable, "signal", rb_condvar_signal, 0);
1717 rb_define_method(rb_cConditionVariable, "broadcast", rb_condvar_broadcast, 0);
1718
1719 rb_provide("thread.rb");
1720}
1721
1722#include "thread_sync.rbinc"