Ruby 3.4.8p72 (2025-12-17 revision 995b59f66677d44767ce9faac6957e5543617ff9)
thread_pthread.c
1/* -*-c-*- */
2/**********************************************************************
3
4 thread_pthread.c -
5
6 $Author$
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include "internal/gc.h"
15#include "internal/sanitizers.h"
16#include "rjit.h"
17
18#ifdef HAVE_SYS_RESOURCE_H
19#include <sys/resource.h>
20#endif
21#ifdef HAVE_THR_STKSEGMENT
22#include <thread.h>
23#endif
24#if defined(HAVE_FCNTL_H)
25#include <fcntl.h>
26#elif defined(HAVE_SYS_FCNTL_H)
27#include <sys/fcntl.h>
28#endif
29#ifdef HAVE_SYS_PRCTL_H
30#include <sys/prctl.h>
31#endif
32#if defined(HAVE_SYS_TIME_H)
33#include <sys/time.h>
34#endif
35#if defined(__HAIKU__)
36#include <kernel/OS.h>
37#endif
38#ifdef __linux__
39#include <sys/syscall.h> /* for SYS_gettid */
40#endif
41#include <time.h>
42#include <signal.h>
43
44#if defined __APPLE__
45# include <AvailabilityMacros.h>
46#endif
47
48#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
49# define USE_EVENTFD (1)
50# include <sys/eventfd.h>
51#else
52# define USE_EVENTFD (0)
53#endif
54
55#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
56 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
57 defined(HAVE_CLOCK_GETTIME)
58static pthread_condattr_t condattr_mono;
59static pthread_condattr_t *condattr_monotonic = &condattr_mono;
60#else
61static const void *const condattr_monotonic = NULL;
62#endif
63
64#include COROUTINE_H
65
66#ifndef HAVE_SYS_EVENT_H
67#define HAVE_SYS_EVENT_H 0
68#endif
69
70#ifndef HAVE_SYS_EPOLL_H
71#define HAVE_SYS_EPOLL_H 0
72#else
73// force setting for debug
74// #undef HAVE_SYS_EPOLL_H
75// #define HAVE_SYS_EPOLL_H 0
76#endif
77
78#ifndef USE_MN_THREADS
79 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
80 // on __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
81 // on COROUTINE_PTHREAD_CONTEXT, it doesn't worth to use it.
82 #define USE_MN_THREADS 0
83 #elif HAVE_SYS_EPOLL_H
84 #include <sys/epoll.h>
85 #define USE_MN_THREADS 1
86 #elif HAVE_SYS_EVENT_H
87 #include <sys/event.h>
88 #define USE_MN_THREADS 1
89 #else
90 #define USE_MN_THREADS 0
91 #endif
92#endif
93
94// native thread wrappers
95
96#define NATIVE_MUTEX_LOCK_DEBUG 0
97
98static void
99mutex_debug(const char *msg, void *lock)
100{
101 if (NATIVE_MUTEX_LOCK_DEBUG) {
102 int r;
103 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
104
105 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
106 fprintf(stdout, "%s: %p\n", msg, lock);
107 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
108 }
109}
110
/* Acquire `lock`; any pthread error is fatal (rb_bug). */
void
rb_native_mutex_lock(pthread_mutex_t *lock)
{
    int err;

    mutex_debug("lock", lock);
    err = pthread_mutex_lock(lock);
    if (err != 0) {
        rb_bug_errno("pthread_mutex_lock", err);
    }
}
120
/* Release `lock`; any pthread error is fatal (rb_bug). */
void
rb_native_mutex_unlock(pthread_mutex_t *lock)
{
    int err;

    mutex_debug("unlock", lock);
    err = pthread_mutex_unlock(lock);
    if (err != 0) {
        rb_bug_errno("pthread_mutex_unlock", err);
    }
}
130
/* Try to acquire `lock` without blocking.
 * Returns 0 on success, EBUSY if already held; any other error is fatal. */
int
rb_native_mutex_trylock(pthread_mutex_t *lock)
{
    int err;

    mutex_debug("trylock", lock);
    err = pthread_mutex_trylock(lock);
    if (err == 0) {
        return 0;
    }
    if (err != EBUSY) {
        rb_bug_errno("pthread_mutex_trylock", err);
    }
    return EBUSY;
}
146
/* Initialize `lock` with default attributes; failure is fatal. */
void
rb_native_mutex_initialize(pthread_mutex_t *lock)
{
    int err = pthread_mutex_init(lock, NULL);

    mutex_debug("init", lock);
    if (err != 0) {
        rb_bug_errno("pthread_mutex_init", err);
    }
}
156
/* Destroy `lock`; failure is fatal. */
void
rb_native_mutex_destroy(pthread_mutex_t *lock)
{
    int err = pthread_mutex_destroy(lock);

    mutex_debug("destroy", lock);
    if (err != 0) {
        rb_bug_errno("pthread_mutex_destroy", err);
    }
}
166
167void
168rb_native_cond_initialize(rb_nativethread_cond_t *cond)
169{
170 int r = pthread_cond_init(cond, condattr_monotonic);
171 if (r != 0) {
172 rb_bug_errno("pthread_cond_init", r);
173 }
174}
175
176void
177rb_native_cond_destroy(rb_nativethread_cond_t *cond)
178{
179 int r = pthread_cond_destroy(cond);
180 if (r != 0) {
181 rb_bug_errno("pthread_cond_destroy", r);
182 }
183}
184
185/*
186 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
187 * EAGAIN after retrying 8192 times. You can see them in the following page:
188 *
189 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
190 *
191 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
192 * need to retrying until pthread functions don't return EAGAIN.
193 */
194
195void
196rb_native_cond_signal(rb_nativethread_cond_t *cond)
197{
198 int r;
199 do {
200 r = pthread_cond_signal(cond);
201 } while (r == EAGAIN);
202 if (r != 0) {
203 rb_bug_errno("pthread_cond_signal", r);
204 }
205}
206
207void
208rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
209{
210 int r;
211 do {
212 r = pthread_cond_broadcast(cond);
213 } while (r == EAGAIN);
214 if (r != 0) {
215 rb_bug_errno("rb_native_cond_broadcast", r);
216 }
217}
218
219void
220rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
221{
222 int r = pthread_cond_wait(cond, mutex);
223 if (r != 0) {
224 rb_bug_errno("pthread_cond_wait", r);
225 }
226}
227
228static int
229native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
230{
231 int r;
232 struct timespec ts;
233
234 /*
235 * An old Linux may return EINTR. Even though POSIX says
236 * "These functions shall not return an error code of [EINTR]".
237 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
238 * Let's hide it from arch generic code.
239 */
240 do {
241 rb_hrtime2timespec(&ts, abs);
242 r = pthread_cond_timedwait(cond, mutex, &ts);
243 } while (r == EINTR);
244
245 if (r != 0 && r != ETIMEDOUT) {
246 rb_bug_errno("pthread_cond_timedwait", r);
247 }
248
249 return r;
250}
251
252static rb_hrtime_t
253native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
254{
255 if (condattr_monotonic) {
256 return rb_hrtime_add(rb_hrtime_now(), rel);
257 }
258 else {
259 struct timespec ts;
260
261 rb_timespec_now(&ts);
262 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
263 }
264}
265
266void
267rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
268{
269 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
270 native_cond_timedwait(cond, mutex, &hrmsec);
271}
272
273// thread scheduling
274
275static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
276static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
277
278#if 0
279static const char *
280event_name(rb_event_flag_t event)
281{
282 switch (event) {
284 return "STARTED";
286 return "READY";
288 return "RESUMED";
290 return "SUSPENDED";
292 return "EXITED";
293 }
294 return "no-event";
295}
296
297#define RB_INTERNAL_THREAD_HOOK(event, th) \
298 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
299 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
300 rb_thread_execute_hooks(event, th); \
301 }
302#else
303#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
304#endif
305
306static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */
307
308#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
309# define USE_UBF_LIST 1
310#endif
311
312static void threadptr_trap_interrupt(rb_thread_t *);
313
314#ifdef HAVE_SCHED_YIELD
315#define native_thread_yield() (void)sched_yield()
316#else
317#define native_thread_yield() ((void)0)
318#endif
319
320static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
321static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
322static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);
323
324static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
325static void timer_thread_wakeup(void);
326static void timer_thread_wakeup_locked(rb_vm_t *vm);
327static void timer_thread_wakeup_force(void);
328static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
329static void coroutine_transfer0(struct coroutine_context *transfer_from,
330 struct coroutine_context *transfer_to, bool to_dead);
331
332#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
333
334static bool
335th_has_dedicated_nt(const rb_thread_t *th)
336{
337 // TODO: th->has_dedicated_nt
338 return th->nt->dedicated > 0;
339}
340
342static void
343thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
344{
345 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
346 rb_thread_t *th;
347 int i = 0;
348 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
349 i++; if (i>10) rb_bug("too many");
350 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
351 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
352 th->nt ? (int)th->nt->serial : -1);
353 }
354}
355
356#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
357
359static void
360ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
361{
362 rb_ractor_t *r;
363
364 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
365
366 int i = 0;
367 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
368 i++;
369 if (i>10) rb_bug("!!");
370 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
371 }
372}
373
374#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
375#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
376
// Acquire the per-scheduler lock and, under VM_CHECK_MODE, record `th`
// as the owner so ASSERT_thread_sched_locked() can verify it later.
// Callers use the thread_sched_lock() macro, which supplies file/line.
static void
thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    rb_native_mutex_lock(&sched->lock_);

#if VM_CHECK_MODE
    // must log before overwriting lock_owner; owner must be NULL here
    RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
    VM_ASSERT(sched->lock_owner == NULL);
    sched->lock_owner = th;
#else
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
#endif
}
390
// Release the per-scheduler lock; under VM_CHECK_MODE, assert `th` is
// the recorded owner and clear the ownership before unlocking.
static void
thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));

#if VM_CHECK_MODE
    VM_ASSERT(sched->lock_owner == th);
    sched->lock_owner = NULL;
#endif

    rb_native_mutex_unlock(&sched->lock_);
}
403
// Re-assign the recorded lock owner without touching the mutex itself.
// Used around blocking points (cond-wait, coroutine switch) where the
// lock is handed over while formally still held.
static void
thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

#if VM_CHECK_MODE > 0
    sched->lock_owner = th;
#endif
}
413
// Assert that the scheduler lock is currently held (trylock must fail
// with EBUSY) and, when `th` is given, that `th` is the recorded owner;
// with th == NULL only require that *some* thread owns it.
static void
ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
{
    VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);

#if VM_CHECK_MODE
    if (th) {
        VM_ASSERT(sched->lock_owner == th);
    }
    else {
        VM_ASSERT(sched->lock_owner != NULL);
    }
#endif
}
428
429#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
430#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
431
433static unsigned int
434rb_ractor_serial(const rb_ractor_t *r) {
435 if (r) {
436 return rb_ractor_id(r);
437 }
438 else {
439 return 0;
440 }
441}
442
// Bookkeeping (VM_CHECK_MODE only): mark the VM-wide ractor-sched lock
// as held by ractor `cr`. The mutex itself is taken by the caller.
static void
ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
    VM_ASSERT(vm->ractor.sched.locked == false);

    vm->ractor.sched.lock_owner = cr;
    vm->ractor.sched.locked = true;
#endif
}
454
// Bookkeeping (VM_CHECK_MODE only): clear ractor-sched lock ownership;
// asserts `cr` was the recorded owner. Mutex is released by the caller.
static void
ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
{
#if VM_CHECK_MODE > 0
    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(vm->ractor.sched.lock_owner == cr);

    vm->ractor.sched.locked = false;
    vm->ractor.sched.lock_owner = NULL;
#endif
}
466
// Acquire the VM-wide ractor scheduling lock on behalf of ractor `cr`.
// Callers use the ractor_sched_lock() macro, which supplies file/line.
static void
ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    rb_native_mutex_lock(&vm->ractor.sched.lock);

#if VM_CHECK_MODE
    // log the previous owner before taking ownership
    RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
#else
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
#endif

    ractor_sched_set_locked(vm, cr);
}
480
// Release the VM-wide ractor scheduling lock held by ractor `cr`.
static void
ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
{
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));

    // clear ownership while still holding the mutex
    ractor_sched_set_unlocked(vm, cr);
    rb_native_mutex_unlock(&vm->ractor.sched.lock);
}
489
// Assert the ractor-sched lock is held; if `cr` is non-NULL, also that
// `cr` is the recorded owner.
static void
ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
}
497
499static bool
500ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
501{
502 rb_thread_t *rth;
503 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
504 if (rth == th) return true;
505 }
506 return false;
507}
508
510static unsigned int
511ractor_sched_running_threads_size(rb_vm_t *vm)
512{
513 rb_thread_t *th;
514 unsigned int i = 0;
515 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
516 i++;
517 }
518 return i;
519}
520
522static unsigned int
523ractor_sched_timeslice_threads_size(rb_vm_t *vm)
524{
525 rb_thread_t *th;
526 unsigned int i = 0;
527 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
528 i++;
529 }
530 return i;
531}
532
534static bool
535ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
536{
537 rb_thread_t *rth;
538 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
539 if (rth == th) return true;
540 }
541 return false;
542}
543
544static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
545static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
546
547// setup timeslice signals by the timer thread.
// Update the VM-wide running/timeslice thread sets under the ractor
// scheduling lock. `add_th`/`del_th` join/leave running_threads;
// `add_timeslice_th` joins timeslice_threads (the set the timer thread
// signals). A thread removed from running while timeslicing is also
// removed from the timeslice set. May block on the ractor barrier when
// adding a thread while a barrier is in progress.
static void
thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
                                   rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
{
#if USE_RUBY_DEBUG_LOG
    unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
#endif

    rb_thread_t *del_timeslice_th;

    // leaving the running set also leaves the timeslice set, if joined
    if (del_th && sched->is_running_timeslice) {
        del_timeslice_th = del_th;
        sched->is_running_timeslice = false;
    }
    else {
        del_timeslice_th = NULL;
    }

    RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
                   rb_th_serial(add_th), rb_th_serial(del_th),
                   rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));

    ractor_sched_lock(vm, cr);
    {
        // update running_threads
        if (del_th) {
            VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
            VM_ASSERT(del_timeslice_th != NULL ||
                      !ractor_sched_timeslice_threads_contain_p(vm, del_th));

            ccan_list_del_init(&del_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt--;

            // a barrier may be waiting for all running threads to stop
            if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                ractor_sched_barrier_join_signal_locked(vm);
            }
            sched->is_running = false;
        }

        if (add_th) {
            // cannot join the running set while a barrier is active;
            // wait until the barrier releases
            while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                RUBY_DEBUG_LOG("barrier-wait");

                ractor_sched_barrier_join_signal_locked(vm);
                ractor_sched_barrier_join_wait_locked(vm, add_th);
            }

            VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));

            ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt++;
            sched->is_running = true;
            VM_ASSERT(!vm->ractor.sched.barrier_waiting);
        }

        if (add_timeslice_th) {
            // update timeslice threads
            int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
            ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
            sched->is_running_timeslice = true;
            // first timeslice thread: wake the timer thread to start ticking
            if (was_empty) {
                timer_thread_wakeup_locked(vm);
            }
        }

        if (del_timeslice_th) {
            VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
            ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
        }

        VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
        VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
    }
    ractor_sched_unlock(vm, cr);

    if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
        // it can be after barrier synchronization by another ractor
        rb_thread_t *lock_owner = NULL;
#if VM_CHECK_MODE
        lock_owner = sched->lock_owner;
#endif
        // temporarily drop the sched lock to let the VM lock cycle through
        thread_sched_unlock(sched, lock_owner);
        {
            RB_VM_LOCK_ENTER();
            RB_VM_LOCK_LEAVE();
        }
        thread_sched_lock(sched, lock_owner);
    }

    //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
    //               rb_th_serial(add_th), rb_th_serial(del_th),
    //               rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
    RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
}
644
// Add `th` (which must already be sched->running) to the VM running set.
// It also joins the timeslice set when other threads are waiting in the
// ready queue, so the timer thread will preempt it.
static void
thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(sched->running == th);

    rb_vm_t *vm = th->vm;
    thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
}
654
// Remove `th` from the VM running set (and timeslice set, if joined).
static void
thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
{
    ASSERT_thread_sched_locked(sched, th);

    rb_vm_t *vm = th->vm;
    thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
}
663
// Public wrapper: add `th` to the running set under its scheduler lock.
void
rb_add_running_thread(rb_thread_t *th)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        thread_sched_add_running_thread(sched, th);
    }
    thread_sched_unlock(sched, th);
}
675
// Public wrapper: remove `th` from the running set under its scheduler lock.
void
rb_del_running_thread(rb_thread_t *th)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        thread_sched_del_running_thread(sched, th);
    }
    thread_sched_unlock(sched, th);
}
687
688// setup current or next running thread
689// sched->running should be set only on this function.
690//
691// if th is NULL, there is no running threads.
// Record `th` as the scheduler's (next) running thread. This is the only
// place sched->running should be assigned. `th` may be NULL (idle).
static void
thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
    VM_ASSERT(sched->running != th);

    sched->running = th;
}
700
702static bool
703thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
704{
705 rb_thread_t *rth;
706 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
707 if (rth == th) return true;
708 }
709 return false;
710}
711
712// deque thread from the ready queue.
713// if the ready queue is empty, return NULL.
714//
715// return deque'ed running thread (or NULL).
716static rb_thread_t *
717thread_sched_deq(struct rb_thread_sched *sched)
718{
719 ASSERT_thread_sched_locked(sched, NULL);
720 rb_thread_t *next_th;
721
722 VM_ASSERT(sched->running != NULL);
723
724 if (ccan_list_empty(&sched->readyq)) {
725 next_th = NULL;
726 }
727 else {
728 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
729
730 VM_ASSERT(sched->readyq_cnt > 0);
731 sched->readyq_cnt--;
732 ccan_list_node_init(&next_th->sched.node.readyq);
733 }
734
735 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
736
737 return next_th;
738}
739
740// enqueue ready thread to the ready queue.
// Append `ready_th` to the scheduler's ready queue.
// If this is the first waiter while the ractor is running, the current
// sched->running joins the timeslice set so it will be preempted.
static void
thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
{
    ASSERT_thread_sched_locked(sched, NULL);
    RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));

    if (sched->is_running) {
        if (ccan_list_empty(&sched->readyq)) {
            // add sched->running to timeslice
            thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
        }
    }
    else {
        VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
    }

    ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
    sched->readyq_cnt++;
}
763
764// DNT: kick condvar
765// SNT: TODO
// Wake up `next_th`, which must already be sched->running.
// DNT (dedicated native thread): signal its readyq condvar.
// No native thread yet: enqueue its ractor on the global run queue,
// unless the caller is about to switch to it directly (`will_switch`).
static void
thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
{
    ASSERT_thread_sched_locked(sched, NULL);
    VM_ASSERT(sched->running == next_th);

    if (next_th) {
        if (next_th->nt) {
            if (th_has_dedicated_nt(next_th)) {
                RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
                rb_native_cond_signal(&next_th->nt->cond.readyq);
            }
            else {
                // TODO
                RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
            }
        }
        else {
            if (will_switch) {
                RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
            }
            else {
                RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
                ractor_sched_enq(next_th->vm, next_th->ractor);
            }
        }
    }
    else {
        RUBY_DEBUG_LOG("no waiting threads%s", "");
    }
}
797
798// waiting -> ready (locked)
// waiting -> ready (locked)
// If no thread is running, make `th` the running thread immediately
// (optionally waking it); otherwise append it to the ready queue.
static void
thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
{
    RUBY_DEBUG_LOG("th:%u running:%u redyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);

    VM_ASSERT(sched->running != th);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);

    if (sched->running == NULL) {
        thread_sched_set_running(sched, th);
        if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
    }
    else {
        thread_sched_enq(sched, th);
    }
}
816
817// waiting -> ready
818//
819// `th` had became "waiting" state by `thread_sched_to_waiting`
820// and `thread_sched_to_ready` enqueue `th` to the thread ready queue.
// Locked wrapper for thread_sched_to_ready_common (wakeup=true).
static void
thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
    {
        thread_sched_to_ready_common(sched, th, true, false);
    }
    thread_sched_unlock(sched, th);
}
833
834// wait until sched->running is `th`.
// Block until sched->running == th (i.e. until it is `th`'s turn).
// DNT threads sleep on their native condvar; M:N threads either
// coroutine-switch directly to the next ready thread (when
// `can_direct_transfer` and the target has no native thread) or
// transfer back to the native thread's scheduler coroutine.
// Must be called with the scheduler lock held by `th`.
static void
thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));

    if (th != sched->running) {
        // already deleted from running threads
        // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking

        // wait for execution right
        rb_thread_t *next_th;
        while((next_th = sched->running) != th) {
            if (th_has_dedicated_nt(th)) {
                RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));

                // lock ownership is handed off for the duration of the wait
                thread_sched_set_lock_owner(sched, NULL);
                {
                    RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
                    rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
                }
                thread_sched_set_lock_owner(sched, th);

                RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
                if (th == sched->running) {
                    rb_ractor_thread_switch(th->ractor, th);
                }
            }
            else {
                // search another ready thread
                if (can_direct_transfer &&
                    (next_th = sched->running) != NULL &&
                    !next_th->nt // next_th is running or has dedicated nt
                    ) {

                    RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    {
                        rb_ractor_set_current_ec(th->ractor, NULL);
                        thread_sched_switch(th, next_th);
                    }
                    thread_sched_set_lock_owner(sched, th);
                }
                else {
                    // search another ready ractor
                    struct rb_native_thread *nt = th->nt;
                    // give up the native thread so it can run other ractors
                    native_thread_assign(NULL, th);

                    RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    {
                        rb_ractor_set_current_ec(th->ractor, NULL);
                        coroutine_transfer0(th->sched.context, nt->nt_context, false);
                    }
                    thread_sched_set_lock_owner(sched, th);
                }

                VM_ASSERT(rb_current_ec_noinline() == th->ec);
            }
        }

        VM_ASSERT(th->nt != NULL);
        VM_ASSERT(rb_current_ec_noinline() == th->ec);
        VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

        // add th to running threads
        thread_sched_add_running_thread(sched, th);
    }

    // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
}
911
912// waiting -> ready -> running (locked)
// waiting -> ready -> running (locked)
// Re-join scheduling after a dedicated task: drop the dedicated-NT
// count, become ready, then wait for the execution right.
static void
thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));

    VM_ASSERT(sched->running != th);
    VM_ASSERT(th_has_dedicated_nt(th));
    VM_ASSERT(GET_THREAD() == th);

    native_thread_dedicated_dec(th->vm, th->ractor, th->nt);

    // waiting -> ready
    thread_sched_to_ready_common(sched, th, false, false);

    // became the running thread right away: register in the running set
    if (sched->running == th) {
        thread_sched_add_running_thread(sched, th);
    }

    // TODO: check SNT number
    thread_sched_wait_running_turn(sched, th, false);
}
934
935// waiting -> ready -> running
936//
937// `th` had been waiting by `thread_sched_to_waiting()`
938// and run a dedicated task (like waitpid and so on).
939// After the dedicated task, this function is called
940// to join a normal thread-scheduling.
// Locked wrapper for thread_sched_to_running_common.
static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_running_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}
950
951// resume a next thread in the thread ready queue.
952//
953// deque next running thread from the ready thread queue and
954// resume this thread if available.
955//
956// If the next therad has a dedicated native thraed, simply signal to resume.
957// Otherwise, make the ractor ready and other nt will run the ractor and the thread.
// Hand the execution right from `th` (the current running thread) to
// the next thread dequeued from the ready queue (may be NULL), wake it,
// and remove `th` from the running set.
static void
thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
{
    ASSERT_thread_sched_locked(sched, th);

    VM_ASSERT(sched->running == th);
    VM_ASSERT(sched->running->nt != NULL);

    rb_thread_t *next_th = thread_sched_deq(sched);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
    VM_ASSERT(th != next_th);

    thread_sched_set_running(sched, next_th);
    VM_ASSERT(next_th == sched->running);
    thread_sched_wakeup_running_thread(sched, next_th, will_switch);

    // always true here (th != next_th asserted above); kept as a guard
    if (th != next_th) {
        thread_sched_del_running_thread(sched, th);
    }
}
979
980// running -> waiting
981//
982// to_dead: false
983// th will run dedicated task.
984// run another ready thread.
985// to_dead: true
986// th will be dead.
987// run another ready thread.
// running -> waiting (shared tail).
// to_dead=false: th will run a dedicated task (pin its native thread).
// to_dead=true:  th is exiting; allow direct switch when it has no
//                dedicated native thread.
static void
thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
{
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

    if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);

    RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));

    bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
    thread_sched_wakeup_next_thread(sched, th, can_switch);
}
1000
1001// running -> dead (locked)
// running -> dead (locked): release the execution right and fire EXITED.
static void
thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, true);
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
}
1009
1010// running -> dead
1011static void
1012thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
1013{
1014 thread_sched_lock(sched, th);
1015 {
1016 thread_sched_to_dead_common(sched, th);
1017 }
1018 thread_sched_unlock(sched, th);
1019}
1020
1021// running -> waiting (locked)
1022//
1023// This thread will run dedicated task (th->nt->dedicated++).
// running -> waiting (locked): th will run a dedicated task
// (th->nt->dedicated is incremented by the common tail).
static void
thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, false);
}
1030
1031// running -> waiting
1032//
1033// This thread will run a dedicated task.
// Locked wrapper for thread_sched_to_waiting_common.
static void
thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_lock(sched, th);
    {
        thread_sched_to_waiting_common(sched, th);
    }
    thread_sched_unlock(sched, th);
}
1043
1044// mini utility func
1045static void
1046setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
1047{
1048 rb_native_mutex_lock(&th->interrupt_lock);
1049 {
1050 th->unblock.func = func;
1051 th->unblock.arg = arg;
1052 }
1053 rb_native_mutex_unlock(&th->interrupt_lock);
1054}
1055
// Unblock function for threads sleeping in
// thread_sched_to_waiting_until_wakeup: clears itself and moves the
// thread back to the ready queue unless it is still the running thread.
static void
ubf_waiting(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    struct rb_thread_sched *sched = TH_SCHED(th);

    // only once. it is safe because th->interrupt_lock is already acquired.
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
    {
        if (sched->running == th) {
            // not sleeping yet.
        }
        else {
            thread_sched_to_ready_common(sched, th, true, false);
        }
    }
    thread_sched_unlock(sched, th);
}
1079
1080// running -> waiting
1081//
1082// This thread will sleep until other thread wakeup the thread.
// running -> waiting: sleep until another thread wakes `th` up
// (via ubf_waiting). Skips sleeping when an interrupt is already pending.
static void
thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    RB_VM_SAVE_MACHINE_CONTEXT(th);
    setup_ubf(th, ubf_waiting, (void *)th);

    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

    thread_sched_lock(sched, th);
    {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
        }
        else {
            RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
        }
    }
    thread_sched_unlock(sched, th);

    // clear the unblock function installed above
    setup_ubf(th, NULL, NULL);
}
1108
1109// run another thread in the ready queue.
1110// continue to run if there are no ready threads.
// Yield the execution right to another ready thread, if any;
// keep running when the ready queue is empty.
static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);

    thread_sched_lock(sched, th);
    {
        if (!ccan_list_empty(&sched->readyq)) {
            RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
            thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            // re-queue ourselves (no wakeup), then wait for our next turn
            thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
        }
        else {
            VM_ASSERT(sched->readyq_cnt == 0);
        }
    }
    thread_sched_unlock(sched, th);
}
1131
1132void
1133rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
1134{
1135 rb_native_mutex_initialize(&sched->lock_);
1136
1137#if VM_CHECK_MODE
1138 sched->lock_owner = NULL;
1139#endif
1140
1141 ccan_list_head_init(&sched->readyq);
1142 sched->readyq_cnt = 0;
1143
1144#if USE_MN_THREADS
1145 if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
1146#endif
1147}
1148
// Transfer control from `transfer_from` to `transfer_to`, with the ASAN
// fake-stack bookkeeping required around a fiber/coroutine switch.
// `to_dead` means transfer_from is finished and will never be resumed.
static void
coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
{
#ifdef RUBY_ASAN_ENABLED
    // Passing NULL as fake_stack tells ASAN the departing coroutine's fake
    // stack can be destroyed (it will never return here).
    void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
    __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
#endif

    struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);

    /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
     * never be resumed! */
    VM_ASSERT(!to_dead);
#ifdef RUBY_ASAN_ENABLED
    // We resumed here; tell ASAN which coroutine we returned from.
    __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
                                    (const void**)&returning_from->stack_base, &returning_from->stack_size);
#endif

}
1169
// Hand the (shared) native thread `nt` over to `next_th` and transfer
// control into next_th's coroutine context. Must not be used with a
// dedicated NT, and next_th must not already be bound to an NT.
static void
thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
{
    VM_ASSERT(!nt->dedicated);
    VM_ASSERT(next_th->nt == NULL);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));

    // Update TLS and the nt<->th binding BEFORE transferring, so next_th
    // observes consistent state when it starts running.
    ruby_thread_set_native(next_th);
    native_thread_assign(nt, next_th);

    coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
}
1183
1184static void
1185thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
1186{
1187 struct rb_native_thread *nt = cth->nt;
1188 native_thread_assign(NULL, cth);
1189 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1190 thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
1191}
1192
#if VM_CHECK_MODE > 0
// Debug helper: count the entries on the global run queue (GRQ).
// Also asserts no node is immediately repeated (a cheap corruption check).
// Caller must hold the ractor-sched lock.
static unsigned int
grq_size(rb_vm_t *vm, rb_ractor_t *cr)
{
    ASSERT_ractor_sched_locked(vm, cr);

    unsigned int count = 0;
    rb_ractor_t *curr, *last = NULL;

    ccan_list_for_each(&vm->ractor.sched.grq, curr, threads.sched.grq_node) {
        count++;
        VM_ASSERT(curr != last);
        last = curr;
    }
    return count;
}
#endif
1212
// Enqueue ractor `r` on the VM-global run queue (GRQ) so that a shared
// native thread (SNT) will pick it up, then signal one waiting SNT.
// r's scheduler must already have a running thread that has no NT yet.
static void
ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
{
    struct rb_thread_sched *sched = &r->threads.sched;
    rb_ractor_t *cr = NULL; // timer thread can call this function

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(sched->running->nt == NULL);

    ractor_sched_lock(vm, cr);
    {
#if VM_CHECK_MODE > 0
        // check if grq contains r
        rb_ractor_t *tr;
        ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
            VM_ASSERT(r != tr);
        }
#endif

        ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
        vm->ractor.sched.grq_cnt++;
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);

        // Wake one SNT parked in ractor_sched_deq().
        rb_native_cond_signal(&vm->ractor.sched.cond);

        // ractor_sched_dump(vm);
    }
    ractor_sched_unlock(vm, cr);
}
1244
1245
1246#ifndef SNT_KEEP_SECONDS
1247#define SNT_KEEP_SECONDS 0
1248#endif
1249
1250#ifndef MINIMUM_SNT
// create at least MINIMUM_SNT shared native threads (SNTs), for debugging.
1252#define MINIMUM_SNT 0
1253#endif
1254
// Dequeue the next ractor from the global run queue, blocking until one
// is available. Returns NULL only when SNT_KEEP_SECONDS > 0 and the wait
// timed out, in which case the calling SNT should exit.
// `cr` may be NULL (the caller is not a Ruby thread).
static rb_ractor_t *
ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
{
    rb_ractor_t *r;

    ractor_sched_lock(vm, cr);
    {
        RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
        // ractor_sched_dump(vm);

        VM_ASSERT(rb_current_execution_context(false) == NULL);
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
            RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);

#if SNT_KEEP_SECONDS > 0
            // Bounded wait: if nothing arrives for SNT_KEEP_SECONDS, give up
            // and let this SNT terminate (counters adjusted here).
            rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
            if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
                RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
                VM_ASSERT(r == NULL);
                vm->ractor.sched.snt_cnt--;
                vm->ractor.sched.running_cnt--;
                break;
            }
            else {
                RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
            }
#else
            // Unbounded wait; the lock-owner bookkeeping brackets the
            // condvar wait because rb_native_cond_wait releases the lock.
            ractor_sched_set_unlocked(vm, cr);
            rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
            ractor_sched_set_locked(vm, cr);

            RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
#endif
        }

        VM_ASSERT(rb_current_execution_context(false) == NULL);

        if (r) {
            VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
            vm->ractor.sched.grq_cnt--;
            RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
        }
        else {
            VM_ASSERT(SNT_KEEP_SECONDS > 0);
            // timeout
        }
    }
    ractor_sched_unlock(vm, cr);

    return r;
}
1308
1309void rb_ractor_lock_self(rb_ractor_t *r);
1310void rb_ractor_unlock_self(rb_ractor_t *r);
1311
// Put the current thread of ractor `cr` to sleep until rb_ractor_sched_wakeup()
// (or an interrupt) wakes it. Caller holds cr's ractor lock; `ubf` is the
// unblock function used to interrupt the sleep.
void
rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
{
    // ractor lock of cr is acquired
    // r is sleeping status
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
    struct rb_thread_sched *sched = TH_SCHED(th);
    cr->sync.wait.waiting_thread = th; // TODO: multi-thread

    setup_ubf(th, ubf, (void *)cr);

    // Lock ordering: take the sched lock, THEN drop the ractor lock, so the
    // wakeup side (which takes the sched lock) cannot miss us.
    thread_sched_lock(sched, th);
    {
        rb_ractor_unlock_self(cr);
        {
            if (RUBY_VM_INTERRUPTED(th->ec)) {
                RUBY_DEBUG_LOG("interrupted");
            }
            else if (cr->sync.wait.wakeup_status != wakeup_none) {
                // A wakeup already arrived before we slept; don't block.
                RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
            }
            else {
                // sleep
                RB_VM_SAVE_MACHINE_CONTEXT(th);
                th->status = THREAD_STOPPED_FOREVER;

                RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);

                bool can_direct_transfer = !th_has_dedicated_nt(th);
                thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
                thread_sched_wait_running_turn(sched, th, can_direct_transfer);
                th->status = THREAD_RUNNABLE;
                // wakeup
            }
        }
    }
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);

    // Re-acquire the ractor lock before clearing the waiting thread.
    rb_ractor_lock_self(cr);
    cr->sync.wait.waiting_thread = NULL;
}
1355
// Wake the thread of ractor `r` that is sleeping in rb_ractor_sched_sleep().
// Caller holds r's ractor lock and must have set a wakeup status first.
void
rb_ractor_sched_wakeup(rb_ractor_t *r)
{
    rb_thread_t *r_th = r->sync.wait.waiting_thread;
    // ractor lock of r is acquired
    struct rb_thread_sched *sched = TH_SCHED(r_th);

    VM_ASSERT(r->sync.wait.wakeup_status != 0);

    thread_sched_lock(sched, r_th);
    {
        // Only wake it if it actually reached the sleeping state; otherwise
        // the sleeper will see wakeup_status before blocking and skip sleep.
        if (r_th->status == THREAD_STOPPED_FOREVER) {
            thread_sched_to_ready_common(sched, r_th, true, false);
        }
    }
    thread_sched_unlock(sched, r_th);
}
1373
1374static bool
1375ractor_sched_barrier_completed_p(rb_vm_t *vm)
1376{
1377 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1378 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1379 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
1380}
1381
1382void
1383rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
1384{
1385 VM_ASSERT(cr == GET_RACTOR());
1386 VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
1387 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1388 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1389
1390 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1391
1392 unsigned int lock_rec;
1393
1394 ractor_sched_lock(vm, cr);
1395 {
1396 vm->ractor.sched.barrier_waiting = true;
1397
1398 // release VM lock
1399 lock_rec = vm->ractor.sync.lock_rec;
1400 vm->ractor.sync.lock_rec = 0;
1401 vm->ractor.sync.lock_owner = NULL;
1402 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1403 {
1404 // interrupts all running threads
1405 rb_thread_t *ith;
1406 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1407 if (ith->ractor != cr) {
1408 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1409 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1410 }
1411 }
1412
1413 // wait for other ractors
1414 while (!ractor_sched_barrier_completed_p(vm)) {
1415 ractor_sched_set_unlocked(vm, cr);
1416 rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
1417 ractor_sched_set_locked(vm, cr);
1418 }
1419 }
1420 }
1421 ractor_sched_unlock(vm, cr);
1422
1423 // acquire VM lock
1424 rb_native_mutex_lock(&vm->ractor.sync.lock);
1425 vm->ractor.sync.lock_rec = lock_rec;
1426 vm->ractor.sync.lock_owner = cr;
1427
1428 RUBY_DEBUG_LOG("completed seirial:%u", vm->ractor.sched.barrier_serial);
1429
1430 ractor_sched_lock(vm, cr);
1431 {
1432 vm->ractor.sched.barrier_waiting = false;
1433 vm->ractor.sched.barrier_serial++;
1434 vm->ractor.sched.barrier_waiting_cnt = 0;
1435 rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
1436 }
1437 ractor_sched_unlock(vm, cr);
1438}
1439
1440static void
1441ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1442{
1443 if (ractor_sched_barrier_completed_p(vm)) {
1444 rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
1445 }
1446}
1447
// Block `th` until the barrier initiator releases the current barrier
// (detected by the barrier serial changing). Caller holds the
// ractor-sched lock.
static void
ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
{
    VM_ASSERT(vm->ractor.sched.barrier_waiting);

    // Snapshot the serial: the initiator increments it on release, which is
    // what terminates this wait loop (robust against spurious wakeups).
    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;

    while (vm->ractor.sched.barrier_serial == barrier_serial) {
        RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
        RB_VM_SAVE_MACHINE_CONTEXT(th);

        rb_ractor_t *cr = th->ractor;
        ractor_sched_set_unlocked(vm, cr);
        rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
        ractor_sched_set_locked(vm, cr);

        RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
    }
}
1467
// Join an in-progress VM barrier from ractor `cr`: count ourselves in,
// possibly signal the initiator, and wait for the release broadcast.
// Entered with the VM mutex held but lock_owner == NULL; same on return.
void
rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
{
    VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
    VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync

#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
#endif

    RUBY_DEBUG_LOG("join");

    // Drop the VM mutex while blocking so the initiator can re-take it.
    rb_native_mutex_unlock(&vm->ractor.sync.lock);
    {
        VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
        VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);

        ractor_sched_lock(vm, cr);
        {
            // running_cnt
            vm->ractor.sched.barrier_waiting_cnt++;
            RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);

            ractor_sched_barrier_join_signal_locked(vm);
            ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
        }
        ractor_sched_unlock(vm, cr);
    }

    rb_native_mutex_lock(&vm->ractor.sync.lock);
    // VM locked here
}
1502
#if 0
// TODO
// NOTE(review): dead code kept for reference; it refers to `sched->lock`
// while the live struct uses `lock_` — update if ever re-enabled.

static void clear_thread_cache_altstack(void);

static void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    /*
     * only called once at VM shutdown (not atfork), another thread
     * may still grab vm->gvl.lock when calling gvl_release at
     * the end of thread_start_func_2
     */
    if (0) {
        rb_native_mutex_destroy(&sched->lock);
    }
    clear_thread_cache_altstack();
}
#endif
1522
#ifdef RB_THREAD_T_HAS_NATIVE_ID
// Return the kernel-level thread id of the calling thread.
// NOTE(review): there is no #else branch, so on any platform where
// RB_THREAD_T_HAS_NATIVE_ID is defined but neither __linux__ nor
// __FreeBSD__ holds, control would fall off the end — presumably the
// macro is only defined on these two platforms; confirm at its definition.
static int
get_native_thread_id(void)
{
#ifdef __linux__
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();
#endif
}
#endif
1534
#if defined(HAVE_WORKING_FORK)
// Re-initialize the scheduler state in a forked child, where only the
// forking thread survives: reset counters, re-create locks/conds (their
// pre-fork state is undefined in the child), and re-register this thread
// as the sole running thread.
static void
thread_sched_atfork(struct rb_thread_sched *sched)
{
    current_fork_gen++;
    rb_thread_sched_init(sched, true);
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = GET_VM();

    // The surviving thread is the only NT left; classify it as DNT or SNT.
    if (th_has_dedicated_nt(th)) {
        vm->ractor.sched.snt_cnt = 0;
    }
    else {
        vm->ractor.sched.snt_cnt = 1;
    }
    vm->ractor.sched.running_cnt = 0;

    rb_native_mutex_initialize(&vm->ractor.sched.lock);
    // rb_native_cond_destroy(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);

    // Drop all stale queue entries inherited from the parent.
    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    VM_ASSERT(sched->is_running);
    sched->is_running_timeslice = false;

    if (sched->running != th) {
        thread_sched_to_running(sched, th);
    }
    else {
        thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
    }

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    // The child process has a new kernel thread id.
    if (th->nt) {
        th->nt->tid = get_native_thread_id();
    }
#endif
}

#endif
1580
1581#ifdef RB_THREAD_LOCAL_SPECIFIER
1582static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1583#else
1584static pthread_key_t ruby_native_thread_key;
1585#endif
1586
// No-op SIGVTALRM handler installed by Init_native_thread(); the signal's
// only purpose is to interrupt blocking syscalls (EINTR).
static void
null_func(int i)
{
    (void)i; // explicitly unused; silences -Wunused-parameter
    /* null */
    // This function can be called from signal handler
    // RUBY_DEBUG_LOG("i:%d", i);
}
1594
// Return the rb_thread_t currently bound to this native thread via TLS
// (or pthread-specific data when no TLS specifier is available).
// NULL if no Ruby thread is bound.
rb_thread_t *
ruby_thread_from_native(void)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
#else
    return pthread_getspecific(ruby_native_thread_key);
#endif
}
1604
// Bind `th` (may be NULL to unbind) to the calling native thread's TLS,
// and make th->ec the current execution context of its ractor.
// Returns nonzero on success.
int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th) {
#ifdef USE_UBF_LIST
        // (Re)initialize the UBF list node so th can be linked safely later.
        ccan_list_node_init(&th->sched.node.ubf);
#endif
    }

    // setup TLS

    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return 1;
#else
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
#endif
}
1626
1627static void native_thread_setup(struct rb_native_thread *nt);
1628static void native_thread_setup_on_thread(struct rb_native_thread *nt);
1629
// One-time native-thread subsystem initialization for the main thread:
// condattr/TLS keys, the SIGVTALRM handler, VM-global scheduler state,
// and the main thread's dedicated native thread (serial 1).
void
Init_native_thread(rb_thread_t *main_th)
{
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    // Prefer CLOCK_MONOTONIC for condvar timed waits; fall back to the
    // default clock (condattr_monotonic = NULL) if setup fails.
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
        if (r == 0) {
            r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
        }
        if (r) condattr_monotonic = NULL;
    }
#endif

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    }
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");
    }
#endif
    // SIGVTALRM is used only to interrupt blocking syscalls; handler is a no-op.
    ruby_posix_signal(SIGVTALRM, null_func);

    // setup vm
    rb_vm_t *vm = main_th->vm;
    rb_native_mutex_initialize(&vm->ractor.sched.lock);
    rb_native_cond_initialize(&vm->ractor.sched.cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
    rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    // setup main thread
    main_th->nt->thread_id = pthread_self();
    main_th->nt->serial = 1;
#ifdef RUBY_NT_SERIAL
    ruby_nt_serial = 1;
#endif
    ruby_thread_set_native(main_th);
    native_thread_setup(main_th->nt);
    native_thread_setup_on_thread(main_th->nt);

    TH_SCHED(main_th)->running = main_th;
    main_th->has_dedicated_nt = 1;

    thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);

    // setup main NT
    main_th->nt->dedicated = 1;
    main_th->nt->vm = vm;

    // setup mn
    vm->ractor.sched.dnt_cnt = 1;
}
1686
1687extern int ruby_mn_threads_enabled;
1688
1689void
1690ruby_mn_threads_params(void)
1691{
1692 rb_vm_t *vm = GET_VM();
1693 rb_ractor_t *main_ractor = GET_RACTOR();
1694
1695 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1696 bool enable_mn_threads = false;
1697
1698 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1699 // enabled
1700 ruby_mn_threads_enabled = 1;
1701 }
1702 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1703
1704 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1705 const int default_max_cpu = 8; // TODO: CPU num?
1706 int max_cpu = default_max_cpu;
1707
1708 if (USE_MN_THREADS && max_cpu_cstr) {
1709 int given_max_cpu = atoi(max_cpu_cstr);
1710 if (given_max_cpu > 0) {
1711 max_cpu = given_max_cpu;
1712 }
1713 }
1714
1715 vm->ractor.sched.max_cpu = max_cpu;
1716}
1717
// Increment nt's dedicated count. On the 0 -> 1 transition the NT is
// reclassified from shared (SNT) to dedicated (DNT) in the VM counters.
static void
native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
{
    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
        {
            vm->ractor.sched.snt_cnt--;
            vm->ractor.sched.dnt_cnt++;
        }
        ractor_sched_unlock(vm, cr);
    }

    nt->dedicated++;
}
1734
1735static void
1736native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1737{
1738 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1739 VM_ASSERT(nt->dedicated > 0);
1740 nt->dedicated--;
1741
1742 if (nt->dedicated == 0) {
1743 ractor_sched_lock(vm, cr);
1744 {
1745 nt->vm->ractor.sched.snt_cnt++;
1746 nt->vm->ractor.sched.dnt_cnt--;
1747 }
1748 ractor_sched_unlock(vm, cr);
1749 }
1750}
1751
1752static void
1753native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
1754{
1755#if USE_RUBY_DEBUG_LOG
1756 if (nt) {
1757 if (th->nt) {
1758 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1759 }
1760 else {
1761 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1762 }
1763 }
1764 else {
1765 if (th->nt) {
1766 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1767 }
1768 else {
1769 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1770 }
1771 }
1772#endif
1773
1774 th->nt = nt;
1775}
1776
1777static void
1778native_thread_destroy(struct rb_native_thread *nt)
1779{
1780 if (nt) {
1781 rb_native_cond_destroy(&nt->cond.readyq);
1782
1783 if (&nt->cond.readyq != &nt->cond.intr) {
1784 rb_native_cond_destroy(&nt->cond.intr);
1785 }
1786
1787 RB_ALTSTACK_FREE(nt->altstack);
1788 ruby_xfree(nt->nt_context);
1789 ruby_xfree(nt);
1790 }
1791}
1792
1793#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1794#define STACKADDR_AVAILABLE 1
1795#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1796#define STACKADDR_AVAILABLE 1
1797#undef MAINSTACKADDR_AVAILABLE
1798#define MAINSTACKADDR_AVAILABLE 1
1799void *pthread_get_stackaddr_np(pthread_t);
1800size_t pthread_get_stacksize_np(pthread_t);
1801#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1802#define STACKADDR_AVAILABLE 1
1803#elif defined HAVE_PTHREAD_GETTHRDS_NP
1804#define STACKADDR_AVAILABLE 1
1805#elif defined __HAIKU__
1806#define STACKADDR_AVAILABLE 1
1807#endif
1808
1809#ifndef MAINSTACKADDR_AVAILABLE
1810# ifdef STACKADDR_AVAILABLE
1811# define MAINSTACKADDR_AVAILABLE 1
1812# else
1813# define MAINSTACKADDR_AVAILABLE 0
1814# endif
1815#endif
1816#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1817# define get_main_stack(addr, size) get_stack(addr, size)
1818#endif
1819
#ifdef STACKADDR_AVAILABLE
/*
 * Get the initial address and size of current thread's stack
 * (one platform branch per OS family; returns 0 on success,
 * a pthread/OS error code otherwise).
 */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr)				\
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
# else
    guard = getpagesize();
# endif
    // Exclude the guard page(s) from the usable size.
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT /* Solaris */
    CHECK_ERR(thr_stksegment(&stk));
# else /* OpenBSD */
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz=sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    /* Must not use thinfo.__pi_stacksize for size.
       It is around 3KB smaller than the correct size
       calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    thread_info info;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif
1901
1902static struct {
1903 rb_nativethread_id_t id;
1904 size_t stack_maxsize;
1905 VALUE *stack_start;
1906} native_main_thread;
1907
1908#ifdef STACK_END_ADDRESS
1909extern void *STACK_END_ADDRESS;
1910#endif
1911
enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
    RUBY_STACK_SPACE_RATIO = 5
};

// Amount of stack reserved as a safety margin: one fifth of the stack,
// capped at RUBY_STACK_SPACE_LIMIT bytes.
static size_t
space_size(size_t stack_size)
{
    const size_t candidate = stack_size / RUBY_STACK_SPACE_RATIO;
    return candidate > RUBY_STACK_SPACE_LIMIT ? (size_t)RUBY_STACK_SPACE_LIMIT : candidate;
}
1928
// Record the main thread's stack start address and maximum size in
// native_main_thread, using (in order of preference) the pthread API,
// STACK_END_ADDRESS, the caller's stack address, and RLIMIT_STACK.
// `addr` is the address of a local variable in the caller's frame.
static void
native_thread_init_main_thread_stack(void *addr)
{
    native_main_thread.id = pthread_self();
#ifdef RUBY_ASAN_ENABLED
    // Under ASAN the incoming address may be on a fake stack; translate it.
    addr = asan_get_real_stack_addr((void *)addr);
#endif

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
    {
        void* stackaddr;
        size_t size;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            goto bound_check;
        }
    }
#endif
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    // Extend the recorded stack start if `addr` lies beyond it (direction-
    // dependent): keep the outermost address ever seen.
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > (VALUE *)addr,
                    native_main_thread.stack_start < (VALUE *)addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
    {
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space;
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        // Round the far end of the stack to a page boundary and compute the
        // usable span from the recorded start.
        addr = native_main_thread.stack_start;
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }

#if MAINSTACKADDR_AVAILABLE
  bound_check:
#endif
    /* If addr is out of range of main-thread stack range estimation,  */
    /* it should be on co-routine (alternative stack). [Feature #2294] */
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            /* out of range */
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0; /* unknown */
        }
    }
}
2012
2013#define CHECK_ERR(expr) \
2014 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2015
// Initialize th->ec's machine stack bounds. For the main thread this uses
// (and lazily initializes) native_main_thread; for other threads with a
// dedicated NT it queries the pthread stack. `local_in_parent_frame` is the
// address of a local variable near the top of the new thread's stack.
static int
native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    rb_nativethread_id_t curr = pthread_self();
#ifdef RUBY_ASAN_ENABLED
    local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
    th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
#endif

    if (!native_main_thread.id) {
        /* This thread is the first thread, must be the main thread -
         * configure the native_main_thread object */
        native_thread_init_main_thread_stack(local_in_parent_frame);
    }

    if (pthread_equal(curr, native_main_thread.id)) {
        th->ec->machine.stack_start = native_main_thread.stack_start;
        th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        if (th_has_dedicated_nt(th)) {
            void *start;
            size_t size;

            if (get_stack(&start, &size) == 0) {
                // Start from the caller's local, not the raw stack base:
                // data below it (e.g. glibc's) must stay untouched. The
                // usable size shrinks by the distance already consumed.
                uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
                th->ec->machine.stack_start = local_in_parent_frame;
                th->ec->machine.stack_maxsize = size - diff;
            }
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }

    return 0;
}
2054
2055struct nt_param {
2056 rb_vm_t *vm;
2057 struct rb_native_thread *nt;
2058};
2059
2060static void *
2061nt_start(void *ptr);
2062
// Spawn the OS-level thread for `nt` (detached, with the configured
// machine stack size), running nt_start(). Returns the pthread_create
// error code (0 on success); attribute failures abort via CHECK_ERR.
static int
native_thread_create0(struct rb_native_thread *nt)
{
    int err = 0;
    pthread_attr_t attr;

    const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
    const size_t space = space_size(stack_size);

    // Reserve `space` bytes as the stack-overflow safety margin.
    nt->machine_stack_maxsize = stack_size - space;

#ifdef USE_SIGALTSTACK
    nt->altstack = rb_allocate_sigaltstack();
#endif

    CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
    RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
    CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
    CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
    // Detached: the thread is never joined; it cleans up on exit.
    CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

    err = pthread_create(&nt->thread_id, &attr, nt_start, nt);

    RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);

    CHECK_ERR(pthread_attr_destroy(&attr));

    return err;
}
2098
2099static void
2100native_thread_setup(struct rb_native_thread *nt)
2101{
2102 // init cond
2103 rb_native_cond_initialize(&nt->cond.readyq);
2104
2105 if (&nt->cond.readyq != &nt->cond.intr) {
2106 rb_native_cond_initialize(&nt->cond.intr);
2107 }
2108}
2109
// Per-thread setup that must run ON the native thread itself:
// record the kernel thread id and install the alternate signal stack.
static void
native_thread_setup_on_thread(struct rb_native_thread *nt)
{
    // init tid
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();
#endif

    // init signal handler
    RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
}
2121
// Allocate and partially initialize a native thread object. Serial numbers
// start at 2 because serial 1 is reserved for the main NT (Init_native_thread).
static struct rb_native_thread *
native_thread_alloc(void)
{
    struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
    native_thread_setup(nt);

#if USE_MN_THREADS
    // Coroutine context used when this NT runs as a shared (SNT) thread.
    nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
#endif

#if USE_RUBY_DEBUG_LOG
    static rb_atomic_t nt_serial = 2;
    nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
#endif
    return nt;
}
2138
// Create a dedicated (1:1) native thread for `th`: allocate its NT and VM
// stack, spawn the OS thread, and enqueue th as ready on success.
// Returns the pthread_create error code (0 on success).
static int
native_thread_create_dedicated(rb_thread_t *th)
{
    th->nt = native_thread_alloc();
    th->nt->vm = th->vm;
    th->nt->running_thread = th;
    th->nt->dedicated = 1;

    // vm stack
    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.malloc_stack = true;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
    th->sched.context_stack = vm_stack;


    int err = native_thread_create0(th->nt);
    if (!err) {
        // setup
        thread_sched_to_ready(TH_SCHED(th), th);
    }
    return err;
}
2162
// Entry shim for a new Ruby thread: establish the machine stack bounds
// from this frame, then run the portable thread body.
static void
call_thread_start_func_2(rb_thread_t *th)
{
    /* Capture the address of a local in this stack frame to mark the beginning of the
       machine stack for this thread. This is required even if we can tell the real
       stack beginning from the pthread API in native_thread_init_stack, because
       glibc stores some of its own data on the stack before calling into user code
       on a new thread, and replacing that data on fiber-switch would break it (see
       bug #13887) */
    VALUE stack_start = 0;
    VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);

    native_thread_init_stack(th, stack_start_addr);
    thread_start_func_2(th, th->ec->machine.stack_start);
}
2178
// pthread start routine for every NT created by this file.
// A dedicated NT (DNT) waits for its Ruby thread's running turn and then runs
// it to completion; a shared NT (SNT, MN threads) repeatedly dequeues ractors
// from the global run queue and switches onto their next runnable thread.
static void *
nt_start(void *ptr)
{
    struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
    rb_vm_t *vm = nt->vm;

    native_thread_setup_on_thread(nt);

    // init tid
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();
#endif

#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
    ruby_nt_serial = nt->serial;
#endif

    RUBY_DEBUG_LOG("nt:%u", nt->serial);

    if (!nt->dedicated) {
        // SNTs need a "main" coroutine context to switch back to
        coroutine_initialize_main(nt->nt_context);
    }

    while (1) {
        if (nt->dedicated) {
            // wait running turn
            rb_thread_t *th = nt->running_thread;
            struct rb_thread_sched *sched = TH_SCHED(th);

            RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
            ruby_thread_set_native(th);

            thread_sched_lock(sched, th);
            {
                if (sched->running == th) {
                    thread_sched_add_running_thread(sched, th);
                }
                thread_sched_wait_running_turn(sched, th, false);
            }
            thread_sched_unlock(sched, th);

            // start threads
            call_thread_start_func_2(th);
            break; // TODO: allow to change to the SNT
        }
        else {
            RUBY_DEBUG_LOG("check next");
            // blocks until a ractor with runnable work is available (or timeout)
            rb_ractor_t *r = ractor_sched_deq(vm, NULL);

            if (r) {
                struct rb_thread_sched *sched = &r->threads.sched;

                thread_sched_lock(sched, NULL);
                {
                    rb_thread_t *next_th = sched->running;

                    // only pick up a thread that has no NT attached yet
                    if (next_th && next_th->nt == NULL) {
                        RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
                        thread_sched_switch0(nt->nt_context, next_th, nt, false);
                    }
                    else {
                        RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
                    }
                }
                thread_sched_unlock(sched, NULL);
            }
            else {
                // timeout -> deleted.
                break;
            }

            if (nt->dedicated) {
                // SNT becomes DNT while running
                break;
            }
        }
    }

    return NULL;
}
2259
2260static int native_thread_create_shared(rb_thread_t *th);
2261
2262#if USE_MN_THREADS
2263static void nt_free_stack(void *mstack);
2264#endif
2265
// Detach `th` from scheduling bookkeeping at thread removal.
// Threads on a malloc'ed (dedicated) stack need nothing extra; MN threads on
// pooled stacks are parked on the VM's zombie list until their stack can be
// reclaimed (see rb_thread_sched_mark_zombies).
void
rb_threadptr_remove(rb_thread_t *th)
{
#if USE_MN_THREADS
    if (th->sched.malloc_stack) {
        // dedicated
        return;
    }
    else {
        rb_vm_t *vm = th->vm;
        th->sched.finished = false;

        RB_VM_LOCK_ENTER();
        {
            ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
        }
        RB_VM_LOCK_LEAVE();
    }
#endif
}
2286
// Free the scheduling resources owned by `th` (stacks, NT, context).
// Dedicated stacks were malloc'ed and are freed directly; MN pooled stacks
// are returned to the stack pool. Always clears th->nt afterwards.
void
rb_threadptr_sched_free(rb_thread_t *th)
{
#if USE_MN_THREADS
    if (th->sched.malloc_stack) {
        // has dedicated
        ruby_xfree(th->sched.context_stack);
        native_thread_destroy(th->nt);
    }
    else {
        nt_free_stack(th->sched.context_stack);
        // TODO: how to free nt and nt->altstack?
    }

    ruby_xfree(th->sched.context);
    th->sched.context = NULL;
    // VM_ASSERT(th->sched.context == NULL);
#else
    ruby_xfree(th->sched.context_stack);
    native_thread_destroy(th->nt);
#endif

    th->nt = NULL;
}
2311
// GC hook: keep not-yet-finished zombie threads alive by marking them, and
// drop entries whose thread has finished (their resources are reclaimable).
void
rb_thread_sched_mark_zombies(rb_vm_t *vm)
{
    if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
        rb_thread_t *zombie_th, *next_zombie_th;
        ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
            if (zombie_th->sched.finished) {
                // finished: unlink so GC can collect the thread object
                ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
            }
            else {
                rb_gc_mark(zombie_th->self);
            }
        }
    }
}
2327
// Create the native-thread backing for a new Ruby thread.
// Falls back to a dedicated NT when the ractor has MN threads disabled;
// otherwise the thread shares the SNT pool. Returns 0 on success.
static int
native_thread_create(rb_thread_t *th)
{
    VM_ASSERT(th->nt == 0);
    RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
    RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);

    if (!th->ractor->threads.sched.enable_mn_threads) {
        th->has_dedicated_nt = 1;
    }

    if (th->has_dedicated_nt) {
        return native_thread_create_dedicated(th);
    }
    else {
        return native_thread_create_shared(th);
    }
}
2346
2347#if USE_NATIVE_THREAD_PRIORITY
2348
2349static void
2350native_thread_apply_priority(rb_thread_t *th)
2351{
2352#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2353 struct sched_param sp;
2354 int policy;
2355 int priority = 0 - th->priority;
2356 int max, min;
2357 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2358 max = sched_get_priority_max(policy);
2359 min = sched_get_priority_min(policy);
2360
2361 if (min > priority) {
2362 priority = min;
2363 }
2364 else if (max < priority) {
2365 priority = max;
2366 }
2367
2368 sp.sched_priority = priority;
2369 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2370#else
2371 /* not touched */
2372#endif
2373}
2374
2375#endif /* USE_NATIVE_THREAD_PRIORITY */
2376
// Thin wrapper over rb_fd_select(); `th` is accepted for interface symmetry
// but unused in this implementation.
static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}
2382
// Unblocking function (UBF) for threads sleeping in native_cond_sleep():
// signals the per-NT interrupt condvar to wake the sleeper.
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
    rb_native_cond_signal(&th->nt->cond.intr);
}
2390
2391static void
2392native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2393{
2394 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2395 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2396
2397 /* Solaris cond_timedwait() return EINVAL if an argument is greater than
2398 * current_time + 100,000,000. So cut up to 100,000,000. This is
2399 * considered as a kind of spurious wakeup. The caller to native_sleep
2400 * should care about spurious wakeup.
2401 *
2402 * See also [Bug #1341] [ruby-core:29702]
2403 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2404 */
2405 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2406
2407 THREAD_BLOCKING_BEGIN(th);
2408 {
2410 th->unblock.func = ubf_pthread_cond_signal;
2411 th->unblock.arg = th;
2412
2413 if (RUBY_VM_INTERRUPTED(th->ec)) {
2414 /* interrupted. return immediate */
2415 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2416 }
2417 else {
2418 if (!rel) {
2419 rb_native_cond_wait(cond, lock);
2420 }
2421 else {
2422 rb_hrtime_t end;
2423
2424 if (*rel > max) {
2425 *rel = max;
2426 }
2427
2428 end = native_cond_timeout(cond, *rel);
2429 native_cond_timedwait(cond, lock, &end);
2430 }
2431 }
2432 th->unblock.func = 0;
2433
2435 }
2436 THREAD_BLOCKING_END(th);
2437
2438 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
2439}
2440
#ifdef USE_UBF_LIST
// Threads currently registered for unblocking (see register_ubf_list),
// protected by ubf_list_lock.
static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;

// Reinitialize the UBF list state in a forked child: the child has exactly
// one thread, so the list and its lock are reset from scratch.
static void
ubf_list_atfork(void)
{
    ccan_list_head_init(&ubf_list_head);
    rb_native_mutex_initialize(&ubf_list_lock);
}
2451
2453static bool
2454ubf_list_contain_p(rb_thread_t *th)
2455{
2456 rb_thread_t *list_th;
2457 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2458 if (list_th == th) return true;
2459 }
2460 return false;
2461}
2462
/* The thread 'th' is registered to be trying unblock. */
// Adds `th` to the UBF list (if not already present) so the timer thread
// periodically re-sends the wakeup signal, then kicks the timer thread.
static void
register_ubf_list(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    VM_ASSERT(th->unblock.func != NULL);

    rb_native_mutex_lock(&ubf_list_lock);
    {
        // check not connected yet: an unlinked node points at itself,
        // which ccan_list_empty() detects
        if (ccan_list_empty((struct ccan_list_head*)node)) {
            VM_ASSERT(!ubf_list_contain_p(th));
            ccan_list_add(&ubf_list_head, node);
        }
    }
    rb_native_mutex_unlock(&ubf_list_lock);

    timer_thread_wakeup();
}
2484
/* The thread 'th' is unblocked. It no longer need to be registered. */
// Removes `th` from the UBF list if it is linked; no-op otherwise.
static void
unregister_ubf_list(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    /* we can't allow re-entry into ubf_list_head */
    VM_ASSERT(th->unblock.func == NULL);

    // linked nodes do not point at themselves; cheap unlocked pre-check
    if (!ccan_list_empty((struct ccan_list_head*)node)) {
        rb_native_mutex_lock(&ubf_list_lock);
        {
            VM_ASSERT(ubf_list_contain_p(th));
            ccan_list_del_init(node);
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}
2504
/*
 * send a signal to intent that a target thread return from blocking syscall.
 * Maybe any signal is ok, but we chose SIGVTALRM.
 */
// The signal interrupts a blocking syscall (EINTR) on the target NT;
// the signal handler itself does no work.
static void
ubf_wakeup_thread(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);

    pthread_kill(th->nt->thread_id, SIGVTALRM);
}
2516
// UBF for threads blocked in select()-like syscalls: signal once now, and
// register on the UBF list so the timer thread keeps re-signaling until the
// thread actually unblocks (the first signal can race the syscall entry).
static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
    ubf_wakeup_thread(th);
    register_ubf_list(th);
}
2525
2526static bool
2527ubf_threads_empty(void)
2528{
2529 return ccan_list_empty(&ubf_list_head) != 0;
2530}
2531
// Re-signal every thread on the UBF list. Called periodically from the
// timer thread so that a wakeup signal lost to a race is eventually re-sent.
static void
ubf_wakeup_all_threads(void)
{
    if (!ubf_threads_empty()) { // unlocked fast path; re-checked implicitly under the lock
        rb_thread_t *th;
        rb_native_mutex_lock(&ubf_list_lock);
        {
            ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
                ubf_wakeup_thread(th);
            }
        }
        rb_native_mutex_unlock(&ubf_list_lock);
    }
}
2546
2547#else /* USE_UBF_LIST */
2548#define register_ubf_list(th) (void)(th)
2549#define unregister_ubf_list(th) (void)(th)
2550#define ubf_select 0
2551static void ubf_wakeup_all_threads(void) { return; }
2552static bool ubf_threads_empty(void) { return true; }
2553#define ubf_list_atfork() do {} while (0)
2554#endif /* USE_UBF_LIST */
2555
2556#define TT_DEBUG 0
2557#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2558
// Wake the timer thread and interrupt the main thread, if available.
// `sig` is unused here (kept for the platform-independent interface).
void
rb_thread_wakeup_timer_thread(int sig)
{
    // This function can be called from signal handlers so that
    // pthread_mutex_lock() should not be used.

    // wakeup timer thread
    timer_thread_wakeup_force();

    // interrupt main thread if main thread is available
    if (system_working) {
        rb_vm_t *vm = GET_VM();
        rb_thread_t *main_th = vm->ractor.main_thread;

        if (main_th) {
            // ec may be swapped concurrently; read it exactly once
            volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);

            if (main_th_ec) {
                RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);

                // only call the UBF from a signal context when the VM has
                // declared it async-signal-safe
                if (vm->ubf_async_safe && main_th->unblock.func) {
                    (main_th->unblock.func)(main_th->unblock.arg);
                }
            }
        }
    }
}
2586
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)
// Close *fdp and overwrite it with -1 so the slot can't be reused by
// mistake; report a failed close via async_bug_fd (async-signal-safe).
static void
close_invalidate(int *fdp, const char *msg)
{
    const int fd_to_close = *fdp;

    *fdp = -1; // invalidate before closing
    if (close(fd_to_close) < 0) {
        async_bug_fd(msg, errno, fd_to_close);
    }
}
2599
2600static void
2601close_invalidate_pair(int fds[2], const char *msg)
2602{
2603 if (USE_EVENTFD && fds[0] == fds[1]) {
2604 fds[1] = -1; // disable write port first
2605 close_invalidate(&fds[0], msg);
2606 }
2607 else {
2608 close_invalidate(&fds[1], msg);
2609 close_invalidate(&fds[0], msg);
2610 }
2611}
2612
// Put `fd` into non-blocking mode, raising a SystemCallError on failure.
static void
set_nonblock(int fd)
{
    const int flags = fcntl(fd, F_GETFL);
    if (flags == -1) {
        rb_sys_fail(0);
    }

    if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
        rb_sys_fail(0);
    }
}
2627
/* communication pipe with timer thread and signal handler */
// Lazily create the wakeup channel: an eventfd when available, otherwise a
// non-blocking CLOEXEC pipe. Idempotent: returns early if already open.
static void
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] > 0 || pipes[1] > 0) {
        // already set up; both ends must be valid together
        VM_ASSERT(pipes[0] > 0);
        VM_ASSERT(pipes[1] > 0);
        return;
    }

    /*
     * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26 which were
     * missing EFD_* flags, they can fall back to pipe
     */
#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);

    if (pipes[0] >= 0) {
        rb_update_max_fd(pipes[0]);
        return;
    }
    // eventfd failed: fall through to the pipe-based channel
#endif

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_bug("can not create communication pipe");
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}
2662
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#endif

// Platform limit on thread-name length, including the terminating NUL.
enum {
    THREAD_NAME_MAX =
#if defined(__linux__)
    16
#elif defined(__APPLE__)
/* Undocumented, and main thread seems unlimited */
    64
#else
    16
#endif
};
2678
2679static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2680
// Name the current native thread after `th`: use Thread#name when set,
// otherwise "<basename>:<lineno>" of the thread's block location, truncated
// to THREAD_NAME_MAX with a trailing '*' marker.
static void
native_set_thread_name(rb_thread_t *th)
{
#ifdef SET_CURRENT_THREAD_NAME
    VALUE loc;
    if (!NIL_P(loc = th->name)) {
        SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
    }
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char *name, *p;
        char buf[THREAD_NAME_MAX];
        size_t len;
        int n;

        name = RSTRING_PTR(RARRAY_AREF(loc, 0));
        p = strrchr(name, '/'); /* show only the basename of the path. */
        if (p && p[1])
            name = p + 1;

        n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
        RB_GC_GUARD(loc);

        len = (size_t)n;
        if (len >= sizeof(buf)) {
            // snprintf truncated: mark with '*' (buf is already NUL-terminated)
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        }
        SET_CURRENT_THREAD_NAME(buf);
    }
#endif
}
2712
// Set the OS-level name of an arbitrary native thread. Platforms without
// SET_ANOTHER_THREAD_NAME can only rename the calling thread, so other
// threads are silently skipped there. The name is truncated to fit.
static void
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
{
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
    const char *s = "";
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
# endif
    if (!NIL_P(name)) {
        long n;
        RSTRING_GETMEM(name, s, n);
        if (n >= (int)sizeof(buf)) {
            // too long: copy what fits and NUL-terminate
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
            s = buf;
        }
    }
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
# endif
#endif
}
2738
#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
// Return the OS-level thread id of `target_th` as a Ruby Integer, or Qnil
// when no native thread is attached (or the id is not yet assigned).
// On macOS, pthread_threadid_np may be unavailable on old systems; fall
// back to pthread_mach_thread_np in that case.
static VALUE
native_thread_native_thread_id(rb_thread_t *target_th)
{
    if (!target_th->nt) return Qnil;

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->nt->tid;
    if (tid == 0) return Qnil; // not assigned yet (thread not started)
    return INT2FIX(tid);
#elif defined(__APPLE__)
    uint64_t tid;
/* The first condition is needed because MAC_OS_X_VERSION_10_6
    is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
    i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
    so it has C++17 and everything needed to build modern Ruby. */
# if (!defined(MAC_OS_X_VERSION_10_6) || \
      (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
      defined(__POWERPC__) /* never defined for PowerPC platforms */)
    const bool no_pthread_threadid_np = true;
#   define NO_PTHREAD_MACH_THREAD_NP 1
# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
    const bool no_pthread_threadid_np = false;
# else
#   if !(defined(__has_attribute) && __has_attribute(availability))
    /* __API_AVAILABLE macro does nothing on gcc */
    __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
#   endif
    /* Check weakly linked symbol */
    const bool no_pthread_threadid_np = !&pthread_threadid_np;
# endif
    if (no_pthread_threadid_np) {
        return ULL2NUM(pthread_mach_thread_np(pthread_self()));
    }
# ifndef NO_PTHREAD_MACH_THREAD_NP
    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
    if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
    return ULL2NUM((unsigned long long)tid);
# endif
#endif
}
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
#else
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
#endif
2784
// Global state of the (single) timer thread. A fork generation counter
// records which process created it so a forked child can detect stale state.
static struct {
    rb_serial_t created_fork_gen;  // fork generation in which the thread was created
    pthread_t pthread_id;          // the timer thread itself

    int comm_fds[2]; // r, w  (same fd twice when backed by eventfd)

#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
    int event_fd; // kernel event queue fd (epoll/kqueue)
#endif
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
#define EPOLL_EVENTS_MAX 0x10
    struct epoll_event finished_events[EPOLL_EVENTS_MAX];
#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
#define KQUEUE_EVENTS_MAX 0x10
    struct kevent finished_events[KQUEUE_EVENTS_MAX];
#endif

    // waiting threads list
    struct ccan_list_head waiting; // waiting threads in ractors
    pthread_mutex_t waiting_lock;  // guards `waiting`
} timer_th = {
    .created_fork_gen = 0,
};
2808
2809#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
2810
2811static void timer_thread_check_timeslice(rb_vm_t *vm);
2812static int timer_thread_set_timeout(rb_vm_t *vm);
2813static void timer_thread_wakeup_thread(rb_thread_t *th);
2814
2815#include "thread_pthread_mn.c"
2816
2817static rb_thread_t *
2818thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
2819{
2820 if (w) {
2821 return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
2822 }
2823 else {
2824 return NULL;
2825 }
2826}
2827
// Compute the poll timeout (in ms) for the timer thread's next wait.
// Returns -1 for "wait indefinitely". A 10ms tick is used whenever there is
// periodic work (time slices, UBF re-signaling, lazy GRQ dequeue); the
// earliest thread sleep deadline further shortens it.
static int
timer_thread_set_timeout(rb_vm_t *vm)
{
#if 0
    return 10; // ms
#else
    int timeout = -1;

    ractor_sched_lock(vm, NULL);
    {
        if (   !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
            || !ubf_threads_empty()                                  // (1-3) Periodic UBF
            || vm->ractor.sched.grq_cnt > 0                          // (1-4) Lazy GRQ deq start
            ) {

            RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
                           !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
                           !ubf_threads_empty(),
                           (vm->ractor.sched.grq_cnt > 0));

            timeout = 10; // ms
            vm->ractor.sched.timeslice_wait_inf = false;
        }
        else {
            vm->ractor.sched.timeslice_wait_inf = true;
        }
    }
    ractor_sched_unlock(vm, NULL);

    // Always check waiting threads to find minimum timeout
    // even when scheduler has work (grq_cnt > 0)
    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
        // list head is the earliest deadline (list is kept sorted — TODO confirm)
        struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
        rb_thread_t *th = thread_sched_waiting_thread(w);

        if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
            rb_hrtime_t now = rb_hrtime_now();
            rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);

            RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);

            // TODO: overflow?
            int thread_timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms, rounded up

            // Use minimum of scheduler timeout and thread sleep timeout
            if (timeout < 0 || thread_timeout < timeout) {
                timeout = thread_timeout;
            }
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);

    RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);

    // fprintf(stderr, "timeout:%d\n", timeout);
    return timeout;
#endif
}
2887
// Deliver buffered POSIX signals to the main thread as a trap interrupt.
static void
timer_thread_check_signal(rb_vm_t *vm)
{
    // ruby_sigchld_handler(vm); TODO

    int signum = rb_signal_buff_size();
    if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
        RUBY_DEBUG_LOG("signum:%d", signum);
        threadptr_trap_interrupt(vm->ractor.main_thread);
    }
}
2899
2900static bool
2901timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2902{
2903 if (abs < now) {
2904 return true;
2905 }
2906 else if (abs - now < RB_HRTIME_PER_MSEC) {
2907 return true; // too short time
2908 }
2909 else {
2910 return false;
2911 }
2912}
2913
// Pop the earliest waiting thread whose sleep deadline has expired.
// Returns NULL when the head of the list (earliest deadline) is not yet
// due. Caller must hold timer_th.waiting_lock.
static rb_thread_t *
timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
{
    struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);

    if (w != NULL &&
        (w->flags & thread_sched_waiting_timeout) &&
        timer_thread_check_exceed(w->data.timeout, now)) {

        RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));

        // delete from waiting list
        ccan_list_del_init(&w->node);

        // setup result: 0 == woken by timeout, not by I/O readiness
        w->flags = thread_sched_waiting_none;
        w->data.result = 0;

        return thread_sched_waiting_thread(w);
    }

    return NULL;
}
2937
// Make a timed-out thread runnable again. If it is already the running
// thread of its scheduler, nothing is needed — it will notice on its own.
static void
timer_thread_wakeup_thread(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct rb_thread_sched *sched = TH_SCHED(th);

    thread_sched_lock(sched, th);
    {
        if (sched->running != th) {
            thread_sched_to_ready_common(sched, th, true, false);
        }
        else {
            // will be release the execution right
        }
    }
    thread_sched_unlock(sched, th);
}
2955
// Wake up every waiting thread whose deadline has passed.
static void
timer_thread_check_timeout(rb_vm_t *vm)
{
    rb_hrtime_t now = rb_hrtime_now();
    rb_thread_t *th;

    rb_native_mutex_lock(&timer_th.waiting_lock);
    {
        // drain all expired entries (list head is the earliest deadline)
        while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
            timer_thread_wakeup_thread(th);
        }
    }
    rb_native_mutex_unlock(&timer_th.waiting_lock);
}
2970
// Request a timer interrupt on every thread participating in time slicing,
// so long-running threads eventually yield the execution right.
static void
timer_thread_check_timeslice(rb_vm_t *vm)
{
    // TODO: check time
    rb_thread_t *th;
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
        RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
        RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
    }
}
2981
2982void
2983rb_assert_sig(void)
2984{
2985 sigset_t oldmask;
2986 pthread_sigmask(0, NULL, &oldmask);
2987 if (sigismember(&oldmask, SIGVTALRM)) {
2988 rb_bug("!!!");
2989 }
2990 else {
2991 RUBY_DEBUG_LOG("ok");
2992 }
2993}
2994
// Main loop of the timer thread: handle buffered signals, wake timed-out
// sleepers, re-signal UBF-registered threads, then block in the polling
// primitive until the next deadline or an explicit wakeup.
static void *
timer_thread_func(void *ptr)
{
    rb_vm_t *vm = (rb_vm_t *)ptr;
#if defined(RUBY_NT_SERIAL)
    ruby_nt_serial = (rb_atomic_t)-1; // sentinel serial for the timer thread
#endif

    RUBY_DEBUG_LOG("started%s", "");

    while (system_working) {
        timer_thread_check_signal(vm);
        timer_thread_check_timeout(vm);
        ubf_wakeup_all_threads();

        RUBY_DEBUG_LOG("system_working:%d", system_working);
        timer_thread_polling(vm);
    }

    RUBY_DEBUG_LOG("terminated");
    return NULL;
}
3017
/* only use signal-safe system calls here */
// Write one token to the wakeup channel. An eventfd requires a 64-bit
// counter value; a pipe gets a single byte. EINTR is retried; EAGAIN means
// the channel is already "full" (a wakeup is pending) and is fine to drop.
static void
signal_communication_pipe(int fd)
{
#if USE_EVENTFD
    const uint64_t buff = 1;
#else
    const char buff = '!';
#endif
    ssize_t result;

    /* already opened */
    if (fd >= 0) {
      retry:
        if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
            int e = errno;
            switch (e) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
        // ignore wakeup: channel not created yet (or already invalidated)
    }
}
3051
// Unconditionally poke the timer thread. Async-signal-safe (write(2) only).
static void
timer_thread_wakeup_force(void)
{
    // should not use RUBY_DEBUG_LOG() because it can be called within signal handlers.
    signal_communication_pipe(timer_th.comm_fds[1]);
}
3058
// Wake the timer thread if it might be blocked indefinitely; skip the write
// when it is already polling on a finite timeout (it will wake on its own).
// Caller must hold the ractor scheduler lock.
static void
timer_thread_wakeup_locked(rb_vm_t *vm)
{
    // should be locked before.
    ASSERT_ractor_sched_locked(vm, NULL);

    if (timer_th.created_fork_gen == current_fork_gen) {
        if (vm->ractor.sched.timeslice_wait_inf) {
            RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
            timer_thread_wakeup_force();
        }
        else {
            RUBY_DEBUG_LOG("will be wakeup...");
        }
    }
}
3075
// Locking wrapper around timer_thread_wakeup_locked() for callers that do
// not already hold the ractor scheduler lock.
static void
timer_thread_wakeup(void)
{
    rb_vm_t *vm = GET_VM();

    ractor_sched_lock(vm, NULL);
    {
        timer_thread_wakeup_locked(vm);
    }
    ractor_sched_unlock(vm, NULL);
}
3087
3088static void
3089rb_thread_create_timer_thread(void)
3090{
3091 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3092
3093 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3094
3095 timer_th.created_fork_gen = current_fork_gen;
3096
3097 if (created_fork_gen != current_fork_gen) {
3098 if (created_fork_gen != 0) {
3099 RUBY_DEBUG_LOG("forked child process");
3100
3101 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3102#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3103 close_invalidate(&timer_th.event_fd, "close event_fd");
3104#endif
3105 rb_native_mutex_destroy(&timer_th.waiting_lock);
3106 }
3107
3108 ccan_list_head_init(&timer_th.waiting);
3109 rb_native_mutex_initialize(&timer_th.waiting_lock);
3110
3111 // open communication channel
3112 setup_communication_pipe_internal(timer_th.comm_fds);
3113
3114 // open event fd
3115 timer_thread_setup_mn();
3116 }
3117
3118 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3119}
3120
// Decrement the system_working counter; when it reaches zero, wake the
// timer thread so its loop exits, and join it. Returns nonzero iff the
// timer thread was actually stopped.
static int
native_stop_timer_thread(void)
{
    int stopped;
    stopped = --system_working <= 0;

    if (stopped) {
        RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
        timer_thread_wakeup_force();
        RUBY_DEBUG_LOG("wakeup sent");
        pthread_join(timer_th.pthread_id, NULL);
    }

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    return stopped;
}
3137
// Reset hook for the timer thread; nothing is needed on pthreads.
static void
native_reset_timer_thread(void)
{
    /* intentionally empty */
}
3143
3144#ifdef HAVE_SIGALTSTACK
// Return 1 if `addr` falls inside the guard zone near the end of the
// machine stack of `th` (i.e. the fault looks like stack overflow).
// The guard zone is stack_size / RUBY_STACK_SPACE_RATIO, capped at 1MiB.
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024; // guard-zone cap: 1MiB
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
# ifdef __APPLE__
        // macOS reports a smaller size for the main thread than the rlimit allows
        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
            struct rlimit rlim;
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
            }
        }
# endif
        // normalize `base` to the stack end (the side the stack grows toward)
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
    }
    else
#endif
    if (th) {
        // fall back to the bounds recorded at thread start
        size = th->ec->machine.stack_maxsize;
        base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
    }
    else {
        return 0;
    }
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        // clamp so base+size cannot wrap the address space
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        // clamp so base-size cannot underflow
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
3186#endif
3187
3188int
3189rb_reserved_fd_p(int fd)
3190{
3191 /* no false-positive if out-of-FD at startup */
3192 if (fd < 0) return 0;
3193
3194 if (fd == timer_th.comm_fds[0] ||
3195 fd == timer_th.comm_fds[1]
3196#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3197 || fd == timer_th.event_fd
3198#endif
3199 ) {
3200 goto check_fork_gen;
3201 }
3202 return 0;
3203
3204 check_fork_gen:
3205 if (timer_th.created_fork_gen == current_fork_gen) {
3206 /* async-signal-safe */
3207 return 1;
3208 }
3209 else {
3210 return 0;
3211 }
3212}
3213
3214rb_nativethread_id_t
3216{
3217 return pthread_self();
3218}
3219
3220#if defined(USE_POLL) && !defined(HAVE_PPOLL)
3221/* TODO: don't ignore sigmask */
3222static int
3223ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3224 const struct timespec *ts, const sigset_t *sigmask)
3225{
3226 int timeout_ms;
3227
3228 if (ts) {
3229 int tmp, tmp2;
3230
3231 if (ts->tv_sec > INT_MAX/1000)
3232 timeout_ms = INT_MAX;
3233 else {
3234 tmp = (int)(ts->tv_sec * 1000);
3235 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3236 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3237 if (INT_MAX - tmp < tmp2)
3238 timeout_ms = INT_MAX;
3239 else
3240 timeout_ms = (int)(tmp + tmp2);
3241 }
3242 }
3243 else
3244 timeout_ms = -1;
3245
3246 return poll(fds, nfds, timeout_ms);
3247}
3248# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3249#endif
3250
3251/*
3252 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3253 * since threads may be too starved to enter the GVL waitqueue for
3254 * us to detect contention. Instead, we want to kick other threads
3255 * so they can run and possibly prevent us from entering slow paths
3256 * in ppoll() or similar syscalls.
3257 *
3258 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3259 * [ruby-core:90417] [Bug #15398]
3260 */
3261#define THREAD_BLOCKING_YIELD(th) do { \
3262 const rb_thread_t *next_th; \
3263 struct rb_thread_sched *sched = TH_SCHED(th); \
3264 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3265 thread_sched_to_waiting(sched, (th)); \
3266 next_th = sched->running; \
3267 rb_native_mutex_unlock(&sched->lock_); \
3268 native_thread_yield(); /* TODO: needed? */ \
3269 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3270 native_thread_yield(); \
3271 }
3272
// Put `th` to sleep, optionally for at most `*rel` (hrtime units; NULL
// means sleep until explicitly woken). Dedicated NTs block on their own
// condvar; MN threads park in the scheduler's event wait.
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    struct rb_thread_sched *sched = TH_SCHED(th);

    RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
    if (rel) {
        if (th_has_dedicated_nt(th)) {
            native_cond_sleep(th, rel);
        }
        else {
            thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
        }
    }
    else {
        thread_sched_to_waiting_until_wakeup(sched, th);
    }

    RUBY_DEBUG_LOG("wakeup");
}
3293
// fork read-write lock (only for pthread)
// Readers are operations that must not be interleaved with fork(); the
// forking thread takes the write side (rb_thread_acquire_fork_lock).
static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

// Release the fork lock (either side); aborts on unexpected errors.
void
rb_thread_release_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}
3305
3306void
3307rb_thread_reset_fork_lock(void)
3308{
3309 int r;
3310 if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
3311 rb_bug_errno("pthread_rwlock_destroy", r);
3312 }
3313
3314 if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
3315 rb_bug_errno("pthread_rwlock_init", r);
3316 }
3317}
3318
// Run `func(data)` while holding the read side of the fork lock, so no
// fork() can happen during the call. Returns func's result.
void *
rb_thread_prevent_fork(void *(*func)(void *), void *data)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }
    void *result = func(data);
    rb_thread_release_fork_lock();
    return result;
}
3330
// Take the write side of the fork lock; called by the forking thread to
// exclude all rb_thread_prevent_fork() readers. Aborts on errors.
void
rb_thread_acquire_fork_lock(void)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }
}
3339
// thread internal event hooks (only for pthread)

// A registered hook: singly-linked list node, newest first.
struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;
    rb_event_flag_t event;  // event mask this hook subscribes to
    void *user_data;

    struct rb_internal_thread_event_hook *next;
};

// Guards the hook list: writers (add/remove) take the write lock,
// hook execution takes the read lock.
static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3351
// Register a thread-event hook. The new hook is prepended to the list under
// the write lock. Returns the hook handle (pass to ..._remove_event_hook).
rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
    hook->callback = callback;
    hook->user_data = user_data;
    hook->event = internal_event;

    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    hook->next = rb_internal_thread_event_hooks;
    ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
    return hook;
}
3373
// Unregister and free a previously added hook. Returns TRUE if the hook was
// found and removed, FALSE otherwise (the hook is NOT freed in that case).
bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
{
    int r;
    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_wrlock", r);
    }

    bool success = FALSE;

    if (rb_internal_thread_event_hooks == hook) {
        // head of the list: swap in the successor
        ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
        success = TRUE;
    }
    else {
        // interior: walk until the predecessor is found
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;

        do {
            if (h->next == hook) {
                h->next = hook->next;
                success = TRUE;
                break;
            }
        } while ((h = h->next));
    }

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }

    if (success) {
        ruby_xfree(hook);
    }
    return success;
}
3409
// Invoke every registered hook whose mask includes `event`, for thread `th`.
// Runs under the read lock, so hooks may fire concurrently on several
// threads but never concurrently with add/remove.
static void
rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
{
    int r;
    if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_rdlock", r);
    }

    if (rb_internal_thread_event_hooks) {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
        do {
            if (h->event & event) {
                rb_internal_thread_event_data_t event_data = {
                    .thread = th->self,
                };
                (*h->callback)(event, &event_data, h->user_data);
            }
        } while((h = h->next));
    }
    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
        rb_bug_errno("pthread_rwlock_unlock", r);
    }
}
3433
3434// return true if the current thread acquires DNT.
3435// return false if the current thread already acquires DNT.
3436bool
3438{
3439 rb_thread_t *th = GET_THREAD();
3440 bool is_snt = th->nt->dedicated == 0;
3441 native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
3442
3443 return is_snt;
3444}
3445
3446#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_FETCH_ADD(var, val)
Atomically replaces the value pointed by var with the result of addition of val to the old value of v...
Definition atomic.h:93
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define ZALLOC
Old name of RB_ZALLOC.
Definition memory.h:402
#define ALLOC_N
Old name of RB_ALLOC_N.
Definition memory.h:399
#define ULL2NUM
Old name of RB_ULL2NUM.
Definition long_long.h:31
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define Qnil
Old name of RUBY_Qnil.
#define NIL_P
Old name of RB_NIL_P.
VALUE rb_eNotImpError
NotImplementedError exception.
Definition error.c:1440
void rb_syserr_fail(int e, const char *mesg)
Raises appropriate exception that represents a C errno.
Definition error.c:3877
void rb_bug_errno(const char *mesg, int errno_arg)
This is a wrapper of rb_bug() which automatically constructs appropriate message from the passed errn...
Definition error.c:1140
int rb_cloexec_pipe(int fildes[2])
Opens a pipe with closing on exec.
Definition io.c:427
void rb_update_max_fd(int fd)
Informs the interpreter that the passed fd can be the max.
Definition io.c:248
int rb_reserved_fd_p(int fd)
Queries if the given FD is reserved or not.
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1992
int len
Length of the buffer.
Definition io.h:8
#define RUBY_INTERNAL_THREAD_EVENT_RESUMED
Triggered when a thread successfully acquired the GVL.
Definition thread.h:238
rb_internal_thread_event_hook_t * rb_internal_thread_add_event_hook(rb_internal_thread_event_callback func, rb_event_flag_t events, void *data)
Registers a thread event hook function.
#define RUBY_INTERNAL_THREAD_EVENT_EXITED
Triggered when a thread exits.
Definition thread.h:252
#define RUBY_INTERNAL_THREAD_EVENT_SUSPENDED
Triggered when a thread released the GVL.
Definition thread.h:245
bool rb_thread_lock_native_thread(void)
Declare the current Ruby thread should acquire a dedicated native thread on M:N thread scheduler.
#define RUBY_INTERNAL_THREAD_EVENT_STARTED
Triggered when a new thread is started.
Definition thread.h:224
bool rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t *hook)
Unregister the passed hook.
#define RUBY_INTERNAL_THREAD_EVENT_READY
Triggered when a thread attempt to acquire the GVL.
Definition thread.h:231
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]]
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RSTRING_GETMEM(str, ptrvar, lenvar)
Convenient macro to obtain the contents and length at once.
Definition rstring.h:450
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
Definition string.c:8274
rb_nativethread_id_t rb_nativethread_self(void)
Queries the ID of the native thread that is calling this function.
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
Fills the passed condition variable with an initial value.
int rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
Identical to rb_native_mutex_lock(), except it doesn't block in case rb_native_mutex_lock() would.
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
Signals a condition variable.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_native_cond_destroy(rb_nativethread_cond_t *cond)
Destroys the passed condition variable.
void rb_native_cond_signal(rb_nativethread_cond_t *cond)
Signals a condition variable.
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except it additionally takes timeout in msec resolution.
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
void ruby_xfree(void *ptr)
Deallocates a storage instance.
Definition gc.c:4705
void * ruby_xmalloc(size_t size)
Allocates a storage instance.
Definition gc.c:4583