LLVM OpenMP* Runtime Library
z_Linux_util.cpp
1/*
2 * z_Linux_util.cpp -- platform specific routines.
3 */
4
5//===----------------------------------------------------------------------===//
6//
7// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8// See https://llvm.org/LICENSE.txt for license information.
9// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10//
11//===----------------------------------------------------------------------===//
12
13#include "kmp.h"
14#include "kmp_affinity.h"
15#include "kmp_i18n.h"
16#include "kmp_io.h"
17#include "kmp_itt.h"
18#include "kmp_lock.h"
19#include "kmp_stats.h"
20#include "kmp_str.h"
21#include "kmp_wait_release.h"
22#include "kmp_wrapper_getpid.h"
23
24#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
25#include <alloca.h>
26#endif
27#include <math.h> // HUGE_VAL.
28#if KMP_OS_LINUX
29#include <semaphore.h>
30#endif // KMP_OS_LINUX
31#include <sys/resource.h>
32#include <sys/syscall.h>
33#include <sys/time.h>
34#include <sys/times.h>
35#include <unistd.h>
36
37#if KMP_OS_LINUX
38#include <sys/sysinfo.h>
39#if KMP_USE_FUTEX
40// We should really include <futex.h>, but that causes compatibility problems
41// on different Linux* OS distributions, which either require that you include
42// <pci/types.h> or break when you try to include it. Since all we need is the
43// two macros below (which are part of the kernel ABI, so can't change), we
44// just define the constants here and don't include <futex.h>.
45#ifndef FUTEX_WAIT
46#define FUTEX_WAIT 0
47#endif
48#ifndef FUTEX_WAKE
49#define FUTEX_WAKE 1
50#endif
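// These two constants are enough to express the wait/wake pattern the runtime
// relies on, roughly (sketch only, error handling omitted):
//   syscall(__NR_futex, &addr, FUTEX_WAIT, expected_value, NULL, NULL, 0);
//   syscall(__NR_futex, &addr, FUTEX_WAKE, 1, NULL, NULL, 0);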
51#endif
52#elif KMP_OS_DARWIN
53#include <mach/mach.h>
54#include <sys/sysctl.h>
55#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
56#include <sys/types.h>
57#include <sys/sysctl.h>
58#include <sys/user.h>
59#include <pthread_np.h>
60#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
61#include <sys/types.h>
62#include <sys/sysctl.h>
63#endif
64
65#include <ctype.h>
66#include <dirent.h>
67#include <fcntl.h>
68
69struct kmp_sys_timer {
70 struct timespec start;
71};
72
73// Convert timespec to nanoseconds.
74#define TS2NS(timespec) \
75 (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
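// Example: a timespec of {2, 500000000} (2.5 s) converts to 2500000000 ns.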
76
77static struct kmp_sys_timer __kmp_sys_timer_data;
78
79#if KMP_HANDLE_SIGNALS
80typedef void (*sig_func_t)(int);
81STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
82static sigset_t __kmp_sigset;
83#endif
84
85static int __kmp_init_runtime = FALSE;
86
87static int __kmp_fork_count = 0;
88
89static pthread_condattr_t __kmp_suspend_cond_attr;
90static pthread_mutexattr_t __kmp_suspend_mutex_attr;
91
92static kmp_cond_align_t __kmp_wait_cv;
93static kmp_mutex_align_t __kmp_wait_mx;
94
95kmp_uint64 __kmp_ticks_per_msec = 1000000;
96
97#ifdef DEBUG_SUSPEND
98static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
99 KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
100 cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
101 cond->c_cond.__c_waiting);
102}
103#endif
104
105#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)
106
107/* Affinity support */
108
109void __kmp_affinity_bind_thread(int which) {
110 KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
111 "Illegal set affinity operation when not capable");
112
113 kmp_affin_mask_t *mask;
114 KMP_CPU_ALLOC_ON_STACK(mask);
115 KMP_CPU_ZERO(mask);
116 KMP_CPU_SET(which, mask);
117 __kmp_set_system_affinity(mask, TRUE);
118 KMP_CPU_FREE_FROM_STACK(mask);
119}
120
121/* Determine if we can access affinity functionality on this version of
122 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
123 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
124void __kmp_affinity_determine_capable(const char *env_var) {
125 // Check and see if the OS supports thread affinity.
126
127#if KMP_OS_LINUX
128#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
129#define KMP_CPU_SET_TRY_SIZE CACHE_LINE
130#elif KMP_OS_FREEBSD
131#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
132#endif
133
134#if KMP_OS_LINUX
135 long gCode;
136 unsigned char *buf;
137 buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
138
139 // If the syscall returns a suggestion for the size,
140 // then we don't have to search for an appropriate size.
141 gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
142 KA_TRACE(30, ("__kmp_affinity_determine_capable: "
143 "initial getaffinity call returned %ld errno = %d\n",
144 gCode, errno));
145
146 if (gCode < 0 && errno != EINVAL) {
147 // System call not supported
148 if (__kmp_affinity_verbose ||
149 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
150 (__kmp_affinity_type != affinity_default) &&
151 (__kmp_affinity_type != affinity_disabled))) {
152 int error = errno;
153 kmp_msg_t err_code = KMP_ERR(error);
154 __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
155 err_code, __kmp_msg_null);
156 if (__kmp_generate_warnings == kmp_warnings_off) {
157 __kmp_str_free(&err_code.str);
158 }
159 }
160 KMP_AFFINITY_DISABLE();
161 KMP_INTERNAL_FREE(buf);
162 return;
163 } else if (gCode > 0) {
164 // The optimal situation: the OS returns the size of the buffer it expects.
165 KMP_AFFINITY_ENABLE(gCode);
166 KA_TRACE(10, ("__kmp_affinity_determine_capable: "
167 "affinity supported (mask size %d)\n",
168 (int)__kmp_affin_mask_size));
169 KMP_INTERNAL_FREE(buf);
170 return;
171 }
172
173 // Call the getaffinity system call repeatedly with increasing set sizes
174 // until we succeed, or reach an upper bound on the search.
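// The sizes tried are 1, 2, 4, ... bytes up to KMP_CPU_SET_SIZE_LIMIT; the
// first call that succeeds enables affinity with the size the kernel returned.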
175 KA_TRACE(30, ("__kmp_affinity_determine_capable: "
176 "searching for proper set size\n"));
177 int size;
178 for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
179 gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
180 KA_TRACE(30, ("__kmp_affinity_determine_capable: "
181 "getaffinity for mask size %ld returned %ld errno = %d\n",
182 size, gCode, errno));
183
184 if (gCode < 0) {
185 if (errno == ENOSYS) {
186 // We shouldn't get here
187 KA_TRACE(30, ("__kmp_affinity_determine_capable: "
188 "inconsistent OS call behavior: errno == ENOSYS for mask "
189 "size %d\n",
190 size));
191 if (__kmp_affinity_verbose ||
192 (__kmp_affinity_warnings &&
193 (__kmp_affinity_type != affinity_none) &&
194 (__kmp_affinity_type != affinity_default) &&
195 (__kmp_affinity_type != affinity_disabled))) {
196 int error = errno;
197 kmp_msg_t err_code = KMP_ERR(error);
198 __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
199 err_code, __kmp_msg_null);
200 if (__kmp_generate_warnings == kmp_warnings_off) {
201 __kmp_str_free(&err_code.str);
202 }
203 }
204 KMP_AFFINITY_DISABLE();
205 KMP_INTERNAL_FREE(buf);
206 return;
207 }
208 continue;
209 }
210
211 KMP_AFFINITY_ENABLE(gCode);
212 KA_TRACE(10, ("__kmp_affinity_determine_capable: "
213 "affinity supported (mask size %d)\n",
214 (int)__kmp_affin_mask_size));
215 KMP_INTERNAL_FREE(buf);
216 return;
217 }
218#elif KMP_OS_FREEBSD
219 long gCode;
220 unsigned char *buf;
221 buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
222 gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
223 reinterpret_cast<cpuset_t *>(buf));
224 KA_TRACE(30, ("__kmp_affinity_determine_capable: "
225 "initial getaffinity call returned %d errno = %d\n",
226 gCode, errno));
227 if (gCode == 0) {
228 KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
229 KA_TRACE(10, ("__kmp_affinity_determine_capable: "
230 "affinity supported (mask size %d)\n",
231 (int)__kmp_affin_mask_size));
232 KMP_INTERNAL_FREE(buf);
233 return;
234 }
235#endif
236 KMP_INTERNAL_FREE(buf);
237
238 // Affinity is not supported
239 KMP_AFFINITY_DISABLE();
240 KA_TRACE(10, ("__kmp_affinity_determine_capable: "
241 "cannot determine mask size - affinity not supported\n"));
242 if (__kmp_affinity_verbose ||
243 (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
244 (__kmp_affinity_type != affinity_default) &&
245 (__kmp_affinity_type != affinity_disabled))) {
246 KMP_WARNING(AffCantGetMaskSize, env_var);
247 }
248}
249
250#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
251
252#if KMP_USE_FUTEX
253
254int __kmp_futex_determine_capable() {
255 int loc = 0;
256 long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
257 int retval = (rc == 0) || (errno != ENOSYS);
258
259 KA_TRACE(10,
260 ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
261 KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
262 retval ? "" : " not"));
263
264 return retval;
265}
266
267#endif // KMP_USE_FUTEX
268
269#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
270/* The IA-32 architecture provides only a 32-bit "add-exchange" instruction,
271 so we use compare_and_store loops for these routines */
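// Each routine below follows the same compare-and-store retry pattern,
// roughly:
//   do { old = *p; } while (!CAS(p, old, old OP d));
//   return old;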
272
273kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
274 kmp_int8 old_value, new_value;
275
276 old_value = TCR_1(*p);
277 new_value = old_value | d;
278
279 while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
280 KMP_CPU_PAUSE();
281 old_value = TCR_1(*p);
282 new_value = old_value | d;
283 }
284 return old_value;
285}
286
287kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
288 kmp_int8 old_value, new_value;
289
290 old_value = TCR_1(*p);
291 new_value = old_value & d;
292
293 while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
294 KMP_CPU_PAUSE();
295 old_value = TCR_1(*p);
296 new_value = old_value & d;
297 }
298 return old_value;
299}
300
301kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
302 kmp_uint32 old_value, new_value;
303
304 old_value = TCR_4(*p);
305 new_value = old_value | d;
306
307 while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
308 KMP_CPU_PAUSE();
309 old_value = TCR_4(*p);
310 new_value = old_value | d;
311 }
312 return old_value;
313}
314
315kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
316 kmp_uint32 old_value, new_value;
317
318 old_value = TCR_4(*p);
319 new_value = old_value & d;
320
321 while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
322 KMP_CPU_PAUSE();
323 old_value = TCR_4(*p);
324 new_value = old_value & d;
325 }
326 return old_value;
327}
328
329#if KMP_ARCH_X86
330kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
331 kmp_int8 old_value, new_value;
332
333 old_value = TCR_1(*p);
334 new_value = old_value + d;
335
336 while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
337 KMP_CPU_PAUSE();
338 old_value = TCR_1(*p);
339 new_value = old_value + d;
340 }
341 return old_value;
342}
343
344kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
345 kmp_int64 old_value, new_value;
346
347 old_value = TCR_8(*p);
348 new_value = old_value + d;
349
350 while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
351 KMP_CPU_PAUSE();
352 old_value = TCR_8(*p);
353 new_value = old_value + d;
354 }
355 return old_value;
356}
357#endif /* KMP_ARCH_X86 */
358
359kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
360 kmp_uint64 old_value, new_value;
361
362 old_value = TCR_8(*p);
363 new_value = old_value | d;
364 while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
365 KMP_CPU_PAUSE();
366 old_value = TCR_8(*p);
367 new_value = old_value | d;
368 }
369 return old_value;
370}
371
372kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
373 kmp_uint64 old_value, new_value;
374
375 old_value = TCR_8(*p);
376 new_value = old_value & d;
377 while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
378 KMP_CPU_PAUSE();
379 old_value = TCR_8(*p);
380 new_value = old_value & d;
381 }
382 return old_value;
383}
384
385#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
386
387void __kmp_terminate_thread(int gtid) {
388 int status;
389 kmp_info_t *th = __kmp_threads[gtid];
390
391 if (!th)
392 return;
393
394#ifdef KMP_CANCEL_THREADS
395 KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
396 status = pthread_cancel(th->th.th_info.ds.ds_thread);
397 if (status != 0 && status != ESRCH) {
398 __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
399 __kmp_msg_null);
400 }
401#endif
402 KMP_YIELD(TRUE);
403} // __kmp_terminate_thread
404
405/* Set thread stack info according to values returned by pthread_getattr_np().
406 If values are unreasonable, assume call failed and use incremental stack
407 refinement method instead. Returns TRUE if the stack parameters could be
408 determined exactly, FALSE if incremental refinement is necessary. */
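/* Note: on success the stored ds_stackbase is the high end of the stack
   (addr + size), since the stack grows downward on these targets. */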
409static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
410 int stack_data;
411#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
412 KMP_OS_HURD
413 pthread_attr_t attr;
414 int status;
415 size_t size = 0;
416 void *addr = 0;
417
418 /* Always do incremental stack refinement for ubermaster threads since the
419 initial thread stack range can be reduced by sibling thread creation so
420 pthread_attr_getstack may cause thread gtid aliasing */
421 if (!KMP_UBER_GTID(gtid)) {
422
423 /* Fetch the real thread attributes */
424 status = pthread_attr_init(&attr);
425 KMP_CHECK_SYSFAIL("pthread_attr_init", status);
426#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
427 status = pthread_attr_get_np(pthread_self(), &attr);
428 KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
429#else
430 status = pthread_getattr_np(pthread_self(), &attr);
431 KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
432#endif
433 status = pthread_attr_getstack(&attr, &addr, &size);
434 KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
435 KA_TRACE(60,
436 ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
437 " %lu, low addr: %p\n",
438 gtid, size, addr));
439 status = pthread_attr_destroy(&attr);
440 KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
441 }
442
443 if (size != 0 && addr != 0) { // was stack parameter determination successful?
444 /* Store the correct base and size */
445 TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
446 TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
447 TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
448 return TRUE;
449 }
450#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD \
451 || KMP_OS_HURD */
452 /* Use incremental refinement starting from initial conservative estimate */
453 TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
454 TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
455 TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
456 return FALSE;
457}
458
459static void *__kmp_launch_worker(void *thr) {
460 int status, old_type, old_state;
461#ifdef KMP_BLOCK_SIGNALS
462 sigset_t new_set, old_set;
463#endif /* KMP_BLOCK_SIGNALS */
464 void *exit_val;
465#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
466 KMP_OS_OPENBSD || KMP_OS_HURD
467 void *volatile padding = 0;
468#endif
469 int gtid;
470
471 gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
472 __kmp_gtid_set_specific(gtid);
473#ifdef KMP_TDATA_GTID
474 __kmp_gtid = gtid;
475#endif
476#if KMP_STATS_ENABLED
477 // set thread local index to point to thread-specific stats
478 __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
479 __kmp_stats_thread_ptr->startLife();
480 KMP_SET_THREAD_STATE(IDLE);
482#endif
483
484#if USE_ITT_BUILD
485 __kmp_itt_thread_name(gtid);
486#endif /* USE_ITT_BUILD */
487
488#if KMP_AFFINITY_SUPPORTED
489 __kmp_affinity_set_init_mask(gtid, FALSE);
490#endif
491
492#ifdef KMP_CANCEL_THREADS
493 status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
494 KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
495 // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
496 status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
497 KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
498#endif
499
500#if KMP_ARCH_X86 || KMP_ARCH_X86_64
501 // Set FP control regs to be a copy of the parallel initialization thread's.
502 __kmp_clear_x87_fpu_status_word();
503 __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
504 __kmp_load_mxcsr(&__kmp_init_mxcsr);
505#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
506
507#ifdef KMP_BLOCK_SIGNALS
508 status = sigfillset(&new_set);
509 KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
510 status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
511 KMP_CHECK_SYSFAIL("pthread_sigmask", status);
512#endif /* KMP_BLOCK_SIGNALS */
513
514#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
515 KMP_OS_OPENBSD
516 if (__kmp_stkoffset > 0 && gtid > 0) {
517 padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
518 (void)padding;
519 }
520#endif
521
522 KMP_MB();
523 __kmp_set_stack_info(gtid, (kmp_info_t *)thr);
524
525 __kmp_check_stack_overlap((kmp_info_t *)thr);
526
527 exit_val = __kmp_launch_thread((kmp_info_t *)thr);
528
529#ifdef KMP_BLOCK_SIGNALS
530 status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
531 KMP_CHECK_SYSFAIL("pthread_sigmask", status);
532#endif /* KMP_BLOCK_SIGNALS */
533
534 return exit_val;
535}
536
537#if KMP_USE_MONITOR
538/* The monitor thread controls all of the threads in the complex */
539
540static void *__kmp_launch_monitor(void *thr) {
541 int status, old_type, old_state;
542#ifdef KMP_BLOCK_SIGNALS
543 sigset_t new_set;
544#endif /* KMP_BLOCK_SIGNALS */
545 struct timespec interval;
546
547 KMP_MB(); /* Flush all pending memory write invalidates. */
548
549 KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));
550
551 /* register us as the monitor thread */
552 __kmp_gtid_set_specific(KMP_GTID_MONITOR);
553#ifdef KMP_TDATA_GTID
554 __kmp_gtid = KMP_GTID_MONITOR;
555#endif
556
557 KMP_MB();
558
559#if USE_ITT_BUILD
560 // Instruct Intel(R) Threading Tools to ignore monitor thread.
561 __kmp_itt_thread_ignore();
562#endif /* USE_ITT_BUILD */
563
564 __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
565 (kmp_info_t *)thr);
566
567 __kmp_check_stack_overlap((kmp_info_t *)thr);
568
569#ifdef KMP_CANCEL_THREADS
570 status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
571 KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
572 // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
573 status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
574 KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
575#endif
576
577#if KMP_REAL_TIME_FIX
578 // This is a potential fix which allows applications with a real-time scheduling
579 // policy to work. However, the decision about the fix has not been made yet, so
580 // it is disabled by default.
581 { // Was the program started with a real-time scheduling policy?
582 int sched = sched_getscheduler(0);
583 if (sched == SCHED_FIFO || sched == SCHED_RR) {
584 // Yes, we are part of a real-time application. Try to increase the
585 // priority of the monitor.
586 struct sched_param param;
587 int max_priority = sched_get_priority_max(sched);
588 int rc;
589 KMP_WARNING(RealTimeSchedNotSupported);
590 sched_getparam(0, &param);
591 if (param.sched_priority < max_priority) {
592 param.sched_priority += 1;
593 rc = sched_setscheduler(0, sched, &param);
594 if (rc != 0) {
595 int error = errno;
596 kmp_msg_t err_code = KMP_ERR(error);
597 __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
598 err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
599 if (__kmp_generate_warnings == kmp_warnings_off) {
600 __kmp_str_free(&err_code.str);
601 }
602 }
603 } else {
604 // We cannot abort here, because the number of CPUs may be enough for all
605 // the threads, including the monitor thread, so the application could
606 // potentially work...
607 __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
608 KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
609 __kmp_msg_null);
610 }
611 }
612 // AC: release the thread that is waiting for the monitor to start
613 TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
614 }
615#endif // KMP_REAL_TIME_FIX
616
617 KMP_MB(); /* Flush all pending memory write invalidates. */
618
619 if (__kmp_monitor_wakeups == 1) {
620 interval.tv_sec = 1;
621 interval.tv_nsec = 0;
622 } else {
623 interval.tv_sec = 0;
624 interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
625 }
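  // e.g. __kmp_monitor_wakeups == 4 gives an interval of 250000000 ns (0.25 s).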
626
627 KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));
628
629 while (!TCR_4(__kmp_global.g.g_done)) {
630 struct timespec now;
631 struct timeval tval;
632
633 /* This thread monitors the state of the system */
634
635 KA_TRACE(15, ("__kmp_launch_monitor: update\n"));
636
637 status = gettimeofday(&tval, NULL);
638 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
639 TIMEVAL_TO_TIMESPEC(&tval, &now);
640
641 now.tv_sec += interval.tv_sec;
642 now.tv_nsec += interval.tv_nsec;
643
644 if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
645 now.tv_sec += 1;
646 now.tv_nsec -= KMP_NSEC_PER_SEC;
647 }
648
649 status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
650 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
651 // AC: the monitor should not fall asleep if g_done has been set
652 if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
653 status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
654 &__kmp_wait_mx.m_mutex, &now);
655 if (status != 0) {
656 if (status != ETIMEDOUT && status != EINTR) {
657 KMP_SYSFAIL("pthread_cond_timedwait", status);
658 }
659 }
660 }
661 status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
662 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
663
664 TCW_4(__kmp_global.g.g_time.dt.t_value,
665 TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
666
667 KMP_MB(); /* Flush all pending memory write invalidates. */
668 }
669
670 KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));
671
672#ifdef KMP_BLOCK_SIGNALS
673 status = sigfillset(&new_set);
674 KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
675 status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
676 KMP_CHECK_SYSFAIL("pthread_sigmask", status);
677#endif /* KMP_BLOCK_SIGNALS */
678
679 KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));
680
681 if (__kmp_global.g.g_abort != 0) {
682 /* now we need to terminate the worker threads */
683 /* the value of t_abort is the signal we caught */
684
685 int gtid;
686
687 KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
688 __kmp_global.g.g_abort));
689
690 /* terminate the OpenMP worker threads */
691 /* TODO this is not valid for sibling threads!!
692 * the uber master might not be 0 anymore.. */
693 for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
694 __kmp_terminate_thread(gtid);
695
696 __kmp_cleanup();
697
698 KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
699 __kmp_global.g.g_abort));
700
701 if (__kmp_global.g.g_abort > 0)
702 raise(__kmp_global.g.g_abort);
703 }
704
705 KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));
706
707 return thr;
708}
709#endif // KMP_USE_MONITOR
710
711void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
712 pthread_t handle;
713 pthread_attr_t thread_attr;
714 int status;
715
716 th->th.th_info.ds.ds_gtid = gtid;
717
718#if KMP_STATS_ENABLED
719 // sets up worker thread stats
720 __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);
721
722 // th->th.th_stats is used to transfer thread-specific stats-pointer to
723 // __kmp_launch_worker. So when the thread is created (goes into
724 // __kmp_launch_worker) it will set its thread-local pointer to
725 // th->th.th_stats
726 if (!KMP_UBER_GTID(gtid)) {
727 th->th.th_stats = __kmp_stats_list->push_back(gtid);
728 } else {
729 // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
730 // so set the th->th.th_stats field to it.
731 th->th.th_stats = __kmp_stats_thread_ptr;
732 }
733 __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
734
735#endif // KMP_STATS_ENABLED
736
737 if (KMP_UBER_GTID(gtid)) {
738 KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
739 th->th.th_info.ds.ds_thread = pthread_self();
740 __kmp_set_stack_info(gtid, th);
741 __kmp_check_stack_overlap(th);
742 return;
743 }
744
745 KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));
746
747 KMP_MB(); /* Flush all pending memory write invalidates. */
748
749#ifdef KMP_THREAD_ATTR
750 status = pthread_attr_init(&thread_attr);
751 if (status != 0) {
752 __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
753 }
754 status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
755 if (status != 0) {
756 __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
757 }
758
759 /* Set stack size for this thread now.
760 The multiple of 2 is there because on some machines, requesting an unusual
761 stacksize causes the thread to have an offset before the dummy alloca()
762 takes place to create the offset. Since we want the user to have a
763 sufficient stacksize AND support a stack offset, we alloca() twice the
764 offset so that the upcoming alloca() does not eliminate any premade offset,
765 and also gives the user the stack space they requested for all threads */
766 stack_size += gtid * __kmp_stkoffset * 2;
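  // e.g. with a 16 KB __kmp_stkoffset, the thread with gtid 3 reserves an extra
  // 96 KB (3 * 16 KB * 2) on top of the requested stack size.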
767
768#if defined(__ANDROID__) && __ANDROID_API__ < 19
769 // Round the stack size to a multiple of the page size. Older versions of
770 // Android (until KitKat) would fail pthread_attr_setstacksize with EINVAL
771 // if the stack size was not a multiple of the page size.
772 stack_size = (stack_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
773#endif
774
775 KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
776 "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
777 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
778
779#ifdef _POSIX_THREAD_ATTR_STACKSIZE
780 status = pthread_attr_setstacksize(&thread_attr, stack_size);
781#ifdef KMP_BACKUP_STKSIZE
782 if (status != 0) {
783 if (!__kmp_env_stksize) {
784 stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
785 __kmp_stksize = KMP_BACKUP_STKSIZE;
786 KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
787 "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
788 "bytes\n",
789 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
790 status = pthread_attr_setstacksize(&thread_attr, stack_size);
791 }
792 }
793#endif /* KMP_BACKUP_STKSIZE */
794 if (status != 0) {
795 __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
796 KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
797 }
798#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
799
800#endif /* KMP_THREAD_ATTR */
801
802 status =
803 pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
804 if (status != 0 || !handle) { // ??? Why do we check handle??
805#ifdef _POSIX_THREAD_ATTR_STACKSIZE
806 if (status == EINVAL) {
807 __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
808 KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
809 }
810 if (status == ENOMEM) {
811 __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
812 KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
813 }
814#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
815 if (status == EAGAIN) {
816 __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
817 KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
818 }
819 KMP_SYSFAIL("pthread_create", status);
820 }
821
822 th->th.th_info.ds.ds_thread = handle;
823
824#ifdef KMP_THREAD_ATTR
825 status = pthread_attr_destroy(&thread_attr);
826 if (status) {
827 kmp_msg_t err_code = KMP_ERR(status);
828 __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
829 __kmp_msg_null);
830 if (__kmp_generate_warnings == kmp_warnings_off) {
831 __kmp_str_free(&err_code.str);
832 }
833 }
834#endif /* KMP_THREAD_ATTR */
835
836 KMP_MB(); /* Flush all pending memory write invalidates. */
837
838 KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
839
840} // __kmp_create_worker
841
842#if KMP_USE_MONITOR
843void __kmp_create_monitor(kmp_info_t *th) {
844 pthread_t handle;
845 pthread_attr_t thread_attr;
846 size_t size;
847 int status;
848 int auto_adj_size = FALSE;
849
850 if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
851 // We don't need monitor thread in case of MAX_BLOCKTIME
852 KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
853 "MAX blocktime\n"));
854 th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
855 th->th.th_info.ds.ds_gtid = 0;
856 return;
857 }
858 KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));
859
860 KMP_MB(); /* Flush all pending memory write invalidates. */
861
862 th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
863 th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
864#if KMP_REAL_TIME_FIX
865 TCW_4(__kmp_global.g.g_time.dt.t_value,
866 -1); // Will use it for synchronization a bit later.
867#else
868 TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
869#endif // KMP_REAL_TIME_FIX
870
871#ifdef KMP_THREAD_ATTR
872 if (__kmp_monitor_stksize == 0) {
873 __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
874 auto_adj_size = TRUE;
875 }
876 status = pthread_attr_init(&thread_attr);
877 if (status != 0) {
878 __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
879 }
880 status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
881 if (status != 0) {
882 __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
883 }
884
885#ifdef _POSIX_THREAD_ATTR_STACKSIZE
886 status = pthread_attr_getstacksize(&thread_attr, &size);
887 KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
888#else
889 size = __kmp_sys_min_stksize;
890#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
891#endif /* KMP_THREAD_ATTR */
892
893 if (__kmp_monitor_stksize == 0) {
894 __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
895 }
896 if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
897 __kmp_monitor_stksize = __kmp_sys_min_stksize;
898 }
899
900 KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
901 "requested stacksize = %lu bytes\n",
902 size, __kmp_monitor_stksize));
903
904retry:
905
906/* Set stack size for this thread now. */
907#ifdef _POSIX_THREAD_ATTR_STACKSIZE
908 KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
909 __kmp_monitor_stksize));
910 status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
911 if (status != 0) {
912 if (auto_adj_size) {
913 __kmp_monitor_stksize *= 2;
914 goto retry;
915 }
916 kmp_msg_t err_code = KMP_ERR(status);
917 __kmp_msg(kmp_ms_warning, // should this be fatal? BB
918 KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
919 err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
920 if (__kmp_generate_warnings == kmp_warnings_off) {
921 __kmp_str_free(&err_code.str);
922 }
923 }
924#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
925
926 status =
927 pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);
928
929 if (status != 0) {
930#ifdef _POSIX_THREAD_ATTR_STACKSIZE
931 if (status == EINVAL) {
932 if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
933 __kmp_monitor_stksize *= 2;
934 goto retry;
935 }
936 __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
937 KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
938 __kmp_msg_null);
939 }
940 if (status == ENOMEM) {
941 __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
942 KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
943 __kmp_msg_null);
944 }
945#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
946 if (status == EAGAIN) {
947 __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
948 KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
949 }
950 KMP_SYSFAIL("pthread_create", status);
951 }
952
953 th->th.th_info.ds.ds_thread = handle;
954
955#if KMP_REAL_TIME_FIX
956 // Wait until the monitor thread has really started and has set its *priority*.
957 KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
958 sizeof(__kmp_global.g.g_time.dt.t_value));
959 __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
960 &__kmp_neq_4, NULL);
961#endif // KMP_REAL_TIME_FIX
962
963#ifdef KMP_THREAD_ATTR
964 status = pthread_attr_destroy(&thread_attr);
965 if (status != 0) {
966 kmp_msg_t err_code = KMP_ERR(status);
967 __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
968 __kmp_msg_null);
969 if (__kmp_generate_warnings == kmp_warnings_off) {
970 __kmp_str_free(&err_code.str);
971 }
972 }
973#endif
974
975 KMP_MB(); /* Flush all pending memory write invalidates. */
976
977 KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
978 th->th.th_info.ds.ds_thread));
979
980} // __kmp_create_monitor
981#endif // KMP_USE_MONITOR
982
983void __kmp_exit_thread(int exit_status) {
984 pthread_exit((void *)(intptr_t)exit_status);
985} // __kmp_exit_thread
986
987#if KMP_USE_MONITOR
988void __kmp_resume_monitor();
989
990void __kmp_reap_monitor(kmp_info_t *th) {
991 int status;
992 void *exit_val;
993
994 KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
995 " %#.8lx\n",
996 th->th.th_info.ds.ds_thread));
997
998 // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
999 // If both tid and gtid are 0, it means the monitor did not ever start.
1000 // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1001 KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
1002 if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
1003 KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
1004 return;
1005 }
1006
1007 KMP_MB(); /* Flush all pending memory write invalidates. */
1008
1009 /* First, check whether the monitor thread still exists, and wake it up if it
1010 does. This is to avoid a performance problem when the monitor sleeps for a
1011 blocktime-sized interval */
1012
1013 status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
1014 if (status != ESRCH) {
1015 __kmp_resume_monitor(); // Wake up the monitor thread
1016 }
1017 KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
1018 status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1019 if (exit_val != th) {
1020 __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
1021 }
1022
1023 th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1024 th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1025
1026 KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
1027 " %#.8lx\n",
1028 th->th.th_info.ds.ds_thread));
1029
1030 KMP_MB(); /* Flush all pending memory write invalidates. */
1031}
1032#endif // KMP_USE_MONITOR
1033
1034void __kmp_reap_worker(kmp_info_t *th) {
1035 int status;
1036 void *exit_val;
1037
1038 KMP_MB(); /* Flush all pending memory write invalidates. */
1039
1040 KA_TRACE(
1041 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));
1042
1043 status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1044#ifdef KMP_DEBUG
1045 /* Don't expose these to the user until we understand when they trigger */
1046 if (status != 0) {
1047 __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
1048 }
1049 if (exit_val != th) {
1050 KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
1051 "exit_val = %p\n",
1052 th->th.th_info.ds.ds_gtid, exit_val));
1053 }
1054#else
1055 (void)status; // unused variable
1056#endif /* KMP_DEBUG */
1057
1058 KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
1059 th->th.th_info.ds.ds_gtid));
1060
1061 KMP_MB(); /* Flush all pending memory write invalidates. */
1062}
1063
1064#if KMP_HANDLE_SIGNALS
1065
1066static void __kmp_null_handler(int signo) {
1067 // Do nothing, for doing SIG_IGN-type actions.
1068} // __kmp_null_handler
1069
1070static void __kmp_team_handler(int signo) {
1071 if (__kmp_global.g.g_abort == 0) {
1072/* Stage 1 signal handler, let's shut down all of the threads */
1073#ifdef KMP_DEBUG
1074 __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
1075#endif
1076 switch (signo) {
1077 case SIGHUP:
1078 case SIGINT:
1079 case SIGQUIT:
1080 case SIGILL:
1081 case SIGABRT:
1082 case SIGFPE:
1083 case SIGBUS:
1084 case SIGSEGV:
1085#ifdef SIGSYS
1086 case SIGSYS:
1087#endif
1088 case SIGTERM:
1089 if (__kmp_debug_buf) {
1090 __kmp_dump_debug_buffer();
1091 }
1092 __kmp_unregister_library(); // cleanup shared memory
1093 KMP_MB(); // Flush all pending memory write invalidates.
1094 TCW_4(__kmp_global.g.g_abort, signo);
1095 KMP_MB(); // Flush all pending memory write invalidates.
1096 TCW_4(__kmp_global.g.g_done, TRUE);
1097 KMP_MB(); // Flush all pending memory write invalidates.
1098 break;
1099 default:
1100#ifdef KMP_DEBUG
1101 __kmp_debug_printf("__kmp_team_handler: unknown signal type");
1102#endif
1103 break;
1104 }
1105 }
1106} // __kmp_team_handler
1107
1108static void __kmp_sigaction(int signum, const struct sigaction *act,
1109 struct sigaction *oldact) {
1110 int rc = sigaction(signum, act, oldact);
1111 KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
1112}
1113
1114static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
1115 int parallel_init) {
1116 KMP_MB(); // Flush all pending memory write invalidates.
1117 KB_TRACE(60,
1118 ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
1119 if (parallel_init) {
1120 struct sigaction new_action;
1121 struct sigaction old_action;
1122 new_action.sa_handler = handler_func;
1123 new_action.sa_flags = 0;
1124 sigfillset(&new_action.sa_mask);
1125 __kmp_sigaction(sig, &new_action, &old_action);
1126 if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
1127 sigaddset(&__kmp_sigset, sig);
1128 } else {
1129 // Restore/keep user's handler if one previously installed.
1130 __kmp_sigaction(sig, &old_action, NULL);
1131 }
1132 } else {
1133 // Save initial/system signal handlers to see if user handlers installed.
1134 __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
1135 }
1136 KMP_MB(); // Flush all pending memory write invalidates.
1137} // __kmp_install_one_handler
1138
1139static void __kmp_remove_one_handler(int sig) {
1140 KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
1141 if (sigismember(&__kmp_sigset, sig)) {
1142 struct sigaction old;
1143 KMP_MB(); // Flush all pending memory write invalidates.
1144 __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
1145 if ((old.sa_handler != __kmp_team_handler) &&
1146 (old.sa_handler != __kmp_null_handler)) {
1147 // Restore the user's signal handler.
1148 KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
1149 "restoring: sig=%d\n",
1150 sig));
1151 __kmp_sigaction(sig, &old, NULL);
1152 }
1153 sigdelset(&__kmp_sigset, sig);
1154 KMP_MB(); // Flush all pending memory write invalidates.
1155 }
1156} // __kmp_remove_one_handler
1157
1158void __kmp_install_signals(int parallel_init) {
1159 KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
1160 if (__kmp_handle_signals || !parallel_init) {
1161 // If !parallel_init, we do not install handlers, we just save the original
1162 // handlers. Let us do it even if __kmp_handle_signals is 0.
1163 sigemptyset(&__kmp_sigset);
1164 __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
1165 __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
1166 __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
1167 __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
1168 __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
1169 __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
1170 __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
1171 __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
1172#ifdef SIGSYS
1173 __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
1174#endif // SIGSYS
1175 __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
1176#ifdef SIGPIPE
1177 __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
1178#endif // SIGPIPE
1179 }
1180} // __kmp_install_signals
1181
1182void __kmp_remove_signals(void) {
1183 int sig;
1184 KB_TRACE(10, ("__kmp_remove_signals()\n"));
1185 for (sig = 1; sig < NSIG; ++sig) {
1186 __kmp_remove_one_handler(sig);
1187 }
1188} // __kmp_remove_signals
1189
1190#endif // KMP_HANDLE_SIGNALS
1191
1192void __kmp_enable(int new_state) {
1193#ifdef KMP_CANCEL_THREADS
1194 int status, old_state;
1195 status = pthread_setcancelstate(new_state, &old_state);
1196 KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1197 KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
1198#endif
1199}
1200
1201void __kmp_disable(int *old_state) {
1202#ifdef KMP_CANCEL_THREADS
1203 int status;
1204 status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
1205 KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1206#endif
1207}
1208
1209static void __kmp_atfork_prepare(void) {
1210 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
1211 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1212}
1213
1214static void __kmp_atfork_parent(void) {
1215 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1216 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1217}
1218
1219/* Reset the library so execution in the child starts "all over again" with
1220 clean data structures in initial states. Don't worry about freeing memory
1221 allocated by parent, just abandon it to be safe. */
1222static void __kmp_atfork_child(void) {
1223 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1224 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1225 /* TODO make sure this is done right for nested/sibling */
1226 // ATT: Memory leaks are here? TODO: Check it and fix.
1227 /* KMP_ASSERT( 0 ); */
1228
1229 ++__kmp_fork_count;
1230
1231#if KMP_AFFINITY_SUPPORTED
1232#if KMP_OS_LINUX || KMP_OS_FREEBSD
1233 // reset the affinity in the child to the initial thread
1234 // affinity in the parent
1235 kmp_set_thread_affinity_mask_initial();
1236#endif
1237 // Set default not to bind threads tightly in the child (we're expecting
1238 // over-subscription after the fork and this can improve things for
1239 // scripting languages that use OpenMP inside process-parallel code).
1240 __kmp_affinity_type = affinity_none;
1241 if (__kmp_nested_proc_bind.bind_types != NULL) {
1242 __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
1243 }
1244 __kmp_affinity_masks = NULL;
1245 __kmp_affinity_num_masks = 0;
1246#endif // KMP_AFFINITY_SUPPORTED
1247
1248#if KMP_USE_MONITOR
1249 __kmp_init_monitor = 0;
1250#endif
1251 __kmp_init_parallel = FALSE;
1252 __kmp_init_middle = FALSE;
1253 __kmp_init_serial = FALSE;
1254 TCW_4(__kmp_init_gtid, FALSE);
1255 __kmp_init_common = FALSE;
1256
1257 TCW_4(__kmp_init_user_locks, FALSE);
1258#if !KMP_USE_DYNAMIC_LOCK
1259 __kmp_user_lock_table.used = 1;
1260 __kmp_user_lock_table.allocated = 0;
1261 __kmp_user_lock_table.table = NULL;
1262 __kmp_lock_blocks = NULL;
1263#endif
1264
1265 __kmp_all_nth = 0;
1266 TCW_4(__kmp_nth, 0);
1267
1268 __kmp_thread_pool = NULL;
1269 __kmp_thread_pool_insert_pt = NULL;
1270 __kmp_team_pool = NULL;
1271
1272 /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
1273 here so threadprivate doesn't use stale data */
1274 KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
1275 __kmp_threadpriv_cache_list));
1276
1277 while (__kmp_threadpriv_cache_list != NULL) {
1278
1279 if (*__kmp_threadpriv_cache_list->addr != NULL) {
1280 KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
1281 &(*__kmp_threadpriv_cache_list->addr)));
1282
1283 *__kmp_threadpriv_cache_list->addr = NULL;
1284 }
1285 __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
1286 }
1287
1288 __kmp_init_runtime = FALSE;
1289
1290 /* reset statically initialized locks */
1291 __kmp_init_bootstrap_lock(&__kmp_initz_lock);
1292 __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
1293 __kmp_init_bootstrap_lock(&__kmp_console_lock);
1294 __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
1295
1296#if USE_ITT_BUILD
1297 __kmp_itt_reset(); // reset ITT's global state
1298#endif /* USE_ITT_BUILD */
1299
1300 {
1301 // Child processes often get terminated without any use of OpenMP. That might
1302 // cause the mapped shared memory file to be left unattended. Thus we postpone
1303 // library registration until middle initialization in the child process.
1304 __kmp_need_register_serial = FALSE;
1305 __kmp_serial_initialize();
1306 }
1307
1308 /* This is necessary to make sure no stale data is left around */
1309 /* AC: customers complain that we use unsafe routines in the atfork
1310 handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
1311 in dynamic_link when checking for the presence of the shared tbbmalloc library.
1312 The suggestion is to make the library initialization lazier, similar
1313 to what is done for __kmpc_begin(). */
1314 // TODO: synchronize all static initializations with regular library
1315 // startup; look at kmp_global.cpp and etc.
1316 //__kmp_internal_begin ();
1317}
1318
1319void __kmp_register_atfork(void) {
1320 if (__kmp_need_register_atfork) {
1321 int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
1322 __kmp_atfork_child);
1323 KMP_CHECK_SYSFAIL("pthread_atfork", status);
1324 __kmp_need_register_atfork = FALSE;
1325 }
1326}
1327
1328void __kmp_suspend_initialize(void) {
1329 int status;
1330 status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
1331 KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1332 status = pthread_condattr_init(&__kmp_suspend_cond_attr);
1333 KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1334}
1335
1336void __kmp_suspend_initialize_thread(kmp_info_t *th) {
1337 int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
1338 int new_value = __kmp_fork_count + 1;
1339 // Return if already initialized
1340 if (old_value == new_value)
1341 return;
1342 // Wait, then return if being initialized
1343 if (old_value == -1 || !__kmp_atomic_compare_store(
1344 &th->th.th_suspend_init_count, old_value, -1)) {
1345 while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
1346 KMP_CPU_PAUSE();
1347 }
1348 } else {
1349 // Claim to be the initializer and do initializations
1350 int status;
1351 status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
1352 &__kmp_suspend_cond_attr);
1353 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1354 status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
1355 &__kmp_suspend_mutex_attr);
1356 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1357 KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
1358 }
1359}
1360
1361void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
1362 if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
1363 /* this means we have initialized the suspension pthread objects for this
1364 thread in this instance of the process */
1365 int status;
1366
1367 status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
1368 if (status != 0 && status != EBUSY) {
1369 KMP_SYSFAIL("pthread_cond_destroy", status);
1370 }
1371 status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
1372 if (status != 0 && status != EBUSY) {
1373 KMP_SYSFAIL("pthread_mutex_destroy", status);
1374 }
1375 --th->th.th_suspend_init_count;
1376 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
1377 __kmp_fork_count);
1378 }
1379}
1380
1381// return true if lock obtained, false otherwise
1382int __kmp_try_suspend_mx(kmp_info_t *th) {
1383 return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
1384}
1385
1386void __kmp_lock_suspend_mx(kmp_info_t *th) {
1387 int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
1388 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1389}
1390
1391void __kmp_unlock_suspend_mx(kmp_info_t *th) {
1392 int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
1393 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1394}
1395
1396/* This routine puts the calling thread to sleep after setting the
1397 sleep bit for the indicated flag variable to true. */
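/* Protocol sketch: the waiter sets the flag's sleep bit and records the flag in
   th_sleep_loc while holding th_suspend_mx; __kmp_resume_template() clears the
   bit and signals th_suspend_cv under the same mutex, so a wakeup between the
   check and the wait cannot be lost. */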
1398template <class C>
1399static inline void __kmp_suspend_template(int th_gtid, C *flag) {
1400 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
1401 kmp_info_t *th = __kmp_threads[th_gtid];
1402 int status;
1403 typename C::flag_t old_spin;
1404
1405 KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
1406 flag->get()));
1407
1408 __kmp_suspend_initialize_thread(th);
1409
1410 __kmp_lock_suspend_mx(th);
1411
1412 KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1413 th_gtid, flag->get()));
1414
1415 /* TODO: shouldn't this use release semantics to ensure that
1416 __kmp_suspend_initialize_thread gets called first? */
1417 old_spin = flag->set_sleeping();
1418 TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1419 th->th.th_sleep_loc_type = flag->get_type();
1420 if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
1421 __kmp_pause_status != kmp_soft_paused) {
1422 flag->unset_sleeping();
1423 TCW_PTR(th->th.th_sleep_loc, NULL);
1424 th->th.th_sleep_loc_type = flag_unset;
1425 __kmp_unlock_suspend_mx(th);
1426 return;
1427 }
1428 KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
1429 " was %x\n",
1430 th_gtid, flag->get(), flag->load(), old_spin));
1431
1432 if (flag->done_check_val(old_spin) || flag->done_check()) {
1433 flag->unset_sleeping();
1434 TCW_PTR(th->th.th_sleep_loc, NULL);
1435 th->th.th_sleep_loc_type = flag_unset;
1436 KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
1437 "for spin(%p)\n",
1438 th_gtid, flag->get()));
1439 } else {
1440 /* Encapsulate in a loop as the documentation states that this may
1441 "with low probability" return when the condition variable has
1442 not been signaled or broadcast */
1443 int deactivated = FALSE;
1444
1445 while (flag->is_sleeping()) {
1446#ifdef DEBUG_SUSPEND
1447 char buffer[128];
1448 __kmp_suspend_count++;
1449 __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1450 __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
1451 buffer);
1452#endif
1453 // Mark the thread as no longer active (only in the first iteration of the
1454 // loop).
1455 if (!deactivated) {
1456 th->th.th_active = FALSE;
1457 if (th->th.th_active_in_pool) {
1458 th->th.th_active_in_pool = FALSE;
1459 KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
1460 KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
1461 }
1462 deactivated = TRUE;
1463 }
1464
1465 KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
1466 KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);
1467
1468#if USE_SUSPEND_TIMEOUT
1469 struct timespec now;
1470 struct timeval tval;
1471 int msecs;
1472
1473 status = gettimeofday(&tval, NULL);
1474 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1475 TIMEVAL_TO_TIMESPEC(&tval, &now);
1476
1477 msecs = (4 * __kmp_dflt_blocktime) + 200;
1478 now.tv_sec += msecs / 1000;
1479 now.tv_nsec += (msecs % 1000) * 1000;
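    // e.g. with the default 200 ms blocktime, msecs = 4 * 200 + 200 = 1000,
    // so the timed wait gives up after roughly one second.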
1480
1481 KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
1482 "pthread_cond_timedwait\n",
1483 th_gtid));
1484 status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
1485 &th->th.th_suspend_mx.m_mutex, &now);
1486#else
1487 KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
1488 " pthread_cond_wait\n",
1489 th_gtid));
1490 status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
1491 &th->th.th_suspend_mx.m_mutex);
1492#endif // USE_SUSPEND_TIMEOUT
1493
1494 if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
1495 KMP_SYSFAIL("pthread_cond_wait", status);
1496 }
1497
1498 KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());
1499
1500 if (!flag->is_sleeping() &&
1501 ((status == EINTR) || (status == ETIMEDOUT))) {
1502 // if interrupt or timeout, and thread is no longer sleeping, we need to
1503 // make sure sleep_loc gets reset; however, this shouldn't be needed if
1504 // we woke up with resume
1505 flag->unset_sleeping();
1506 TCW_PTR(th->th.th_sleep_loc, NULL);
1507 th->th.th_sleep_loc_type = flag_unset;
1508 }
1509#ifdef KMP_DEBUG
1510 if (status == ETIMEDOUT) {
1511 if (flag->is_sleeping()) {
1512 KF_TRACE(100,
1513 ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
1514 } else {
1515 KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
1516 "not set!\n",
1517 th_gtid));
1518 TCW_PTR(th->th.th_sleep_loc, NULL);
1519 th->th.th_sleep_loc_type = flag_unset;
1520 }
1521 } else if (flag->is_sleeping()) {
1522 KF_TRACE(100,
1523 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
1524 }
1525#endif
1526 } // while
1527
1528 // Mark the thread as active again (if it was previously marked as inactive)
1529 if (deactivated) {
1530 th->th.th_active = TRUE;
1531 if (TCR_4(th->th.th_in_pool)) {
1532 KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
1533 th->th.th_active_in_pool = TRUE;
1534 }
1535 }
1536 }
1537 // We may have had the loop variable set before entering the loop body;
1538 // so we need to reset sleep_loc.
1539 TCW_PTR(th->th.th_sleep_loc, NULL);
1540 th->th.th_sleep_loc_type = flag_unset;
1541
1542 KMP_DEBUG_ASSERT(!flag->is_sleeping());
1543 KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
1544#ifdef DEBUG_SUSPEND
1545 {
1546 char buffer[128];
1547 __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1548 __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
1549 buffer);
1550 }
1551#endif
1552
1553 __kmp_unlock_suspend_mx(th);
1554 KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
1555}
1556
1557template <bool C, bool S>
1558void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
1559 __kmp_suspend_template(th_gtid, flag);
1560}
1561template <bool C, bool S>
1562void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
1563 __kmp_suspend_template(th_gtid, flag);
1564}
1565template <bool C, bool S>
1566void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
1567 __kmp_suspend_template(th_gtid, flag);
1568}
1569void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1570 __kmp_suspend_template(th_gtid, flag);
1571}
1572
1573template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
1574template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
1575template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
1576template void
1577__kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1578template void
1579__kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
1580
1581/* This routine signals the thread specified by target_gtid to wake up
1582 after setting the sleep bit indicated by the flag argument to FALSE.
1583 The target thread must already have called __kmp_suspend_template() */
1584template <class C>
1585static inline void __kmp_resume_template(int target_gtid, C *flag) {
1586 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1587 kmp_info_t *th = __kmp_threads[target_gtid];
1588 int status;
1589
1590#ifdef KMP_DEBUG
1591 int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1592#endif
1593
1594 KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1595 gtid, target_gtid));
1596 KMP_DEBUG_ASSERT(gtid != target_gtid);
1597
1598 __kmp_suspend_initialize_thread(th);
1599
1600 __kmp_lock_suspend_mx(th);
1601
1602 if (!flag || flag != th->th.th_sleep_loc) {
1603 // coming from __kmp_null_resume_wrapper, or the thread is now sleeping on a
1604 // different location; wake it up at the new location
1605 flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1606 }
1607
1608 // First, check if the flag is null or its type has changed. If so, someone
1609 // else woke it up.
1610 if (!flag) { // Thread doesn't appear to be sleeping on anything
1611 KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1612 "awake: flag(%p)\n",
1613 gtid, target_gtid, (void *)NULL));
1614 __kmp_unlock_suspend_mx(th);
1615 return;
1616 } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1617 // Flag type does not appear to match this function template; possibly the
1618 // thread is sleeping on something else. Try null resume again.
1619 KF_TRACE(
1620 5,
1621 ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1622 "spin(%p) type=%d ptr_type=%d\n",
1623 gtid, target_gtid, flag, flag->get(), flag->get_type(),
1624 th->th.th_sleep_loc_type));
1625 __kmp_unlock_suspend_mx(th);
1626 __kmp_null_resume_wrapper(th);
1627 return;
1628 } else { // if multiple threads are sleeping, flag should be internally
1629 // referring to a specific thread here
1630 if (!flag->is_sleeping()) {
1631 KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1632 "awake: flag(%p): %u\n",
1633 gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1634 __kmp_unlock_suspend_mx(th);
1635 return;
1636 }
1637 }
1638 KMP_DEBUG_ASSERT(flag);
1639 flag->unset_sleeping();
1640 TCW_PTR(th->th.th_sleep_loc, NULL);
1641 th->th.th_sleep_loc_type = flag_unset;
1642
1643 KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1644 "sleep bit for flag's loc(%p): %u\n",
1645 gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1646
1647#ifdef DEBUG_SUSPEND
1648 {
1649 char buffer[128];
1650 __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1651 __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1652 target_gtid, buffer);
1653 }
1654#endif
1655 status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1656 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1657 __kmp_unlock_suspend_mx(th);
1658 KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1659 " for T#%d\n",
1660 gtid, target_gtid));
1661}
1662
1663template <bool C, bool S>
1664void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1665 __kmp_resume_template(target_gtid, flag);
1666}
1667template <bool C, bool S>
1668void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1669 __kmp_resume_template(target_gtid, flag);
1670}
1671template <bool C, bool S>
1672void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1673 __kmp_resume_template(target_gtid, flag);
1674}
1675void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1676 __kmp_resume_template(target_gtid, flag);
1677}
1678
1679template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1680template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1681template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1682template void
1683__kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1684
1685#if KMP_USE_MONITOR
1686void __kmp_resume_monitor() {
1687 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1688 int status;
1689#ifdef KMP_DEBUG
1690 int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1691 KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1692 KMP_GTID_MONITOR));
1693 KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1694#endif
1695 status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1696 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1697#ifdef DEBUG_SUSPEND
1698 {
1699 char buffer[128];
1700 __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1701 __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1702 KMP_GTID_MONITOR, buffer);
1703 }
1704#endif
1705 status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1706 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1707 status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1708 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1709 KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1710 " for T#%d\n",
1711 gtid, KMP_GTID_MONITOR));
1712}
1713#endif // KMP_USE_MONITOR
1714
1715void __kmp_yield() { sched_yield(); }
1716
1717void __kmp_gtid_set_specific(int gtid) {
1718 if (__kmp_init_gtid) {
1719 int status;
1720 status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1721 (void *)(intptr_t)(gtid + 1));
1722 KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1723 } else {
1724 KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1725 }
1726}
1727
1728int __kmp_gtid_get_specific() {
1729 int gtid;
1730 if (!__kmp_init_gtid) {
1731 KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1732 "KMP_GTID_SHUTDOWN\n"));
1733 return KMP_GTID_SHUTDOWN;
1734 }
1735 gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
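 // Note: __kmp_gtid_set_specific() stores the gtid biased by +1, so a value of
 // 0 here means pthread_getspecific() returned NULL (no value was set) and can
 // be distinguished from a legitimate gtid of 0.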
1736 if (gtid == 0) {
1737 gtid = KMP_GTID_DNE;
1738 } else {
1739 gtid--;
1740 }
1741 KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1742 __kmp_gtid_threadprivate_key, gtid));
1743 return gtid;
1744}
1745
1746double __kmp_read_cpu_time(void) {
1747 /*clock_t t;*/
1748 struct tms buffer;
1749
1750 /*t =*/times(&buffer);
1751
1752 return (double)(buffer.tms_utime + buffer.tms_cutime) /
1753 (double)CLOCKS_PER_SEC;
1754}
1755
1756int __kmp_read_system_info(struct kmp_sys_info *info) {
1757 int status;
1758 struct rusage r_usage;
1759
1760 memset(info, 0, sizeof(*info));
1761
1762 status = getrusage(RUSAGE_SELF, &r_usage);
1763 KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1764
1765 // The maximum resident set size utilized (in kilobytes)
1766 info->maxrss = r_usage.ru_maxrss;
1767 // The number of page faults serviced without any I/O
1768 info->minflt = r_usage.ru_minflt;
1769 // The number of page faults serviced that required I/O
1770 info->majflt = r_usage.ru_majflt;
1771 // The number of times a process was "swapped" out of memory
1772 info->nswap = r_usage.ru_nswap;
1773 // The number of times the file system had to perform input
1774 info->inblock = r_usage.ru_inblock;
1775 // The number of times the file system had to perform output
1776 info->oublock = r_usage.ru_oublock;
1777 // The number of times a context switch was voluntarily performed
1778 info->nvcsw = r_usage.ru_nvcsw;
1779 // The number of times a context switch was forced
1780 info->nivcsw = r_usage.ru_nivcsw;
1781
1782 return (status != 0);
1783}
1784
1785void __kmp_read_system_time(double *delta) {
1786 double t_ns;
1787 struct timeval tval;
1788 struct timespec stop;
1789 int status;
1790
1791 status = gettimeofday(&tval, NULL);
1792 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1793 TIMEVAL_TO_TIMESPEC(&tval, &stop);
1794 t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1795 *delta = (t_ns * 1e-9);
1796}
1797
1798void __kmp_clear_system_time(void) {
1799 struct timeval tval;
1800 int status;
1801 status = gettimeofday(&tval, NULL);
1802 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1803 TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1804}
1805
1806static int __kmp_get_xproc(void) {
1807
1808 int r = 0;
1809
1810#if KMP_OS_LINUX
1811
1812 __kmp_type_convert(sysconf(_SC_NPROCESSORS_CONF), &(r));
1813
1814#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_OPENBSD || \
1815 KMP_OS_HURD
1816
1817 __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1818
1819#elif KMP_OS_DARWIN
1820
1821 // Bug C77011 High "OpenMP Threads and number of active cores".
1822
1823 // Find the number of available CPUs.
1824 kern_return_t rc;
1825 host_basic_info_data_t info;
1826 mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1827 rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1828 if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1829 // Cannot use KA_TRACE() here because this code works before trace support
1830 // is initialized.
1831 r = info.avail_cpus;
1832 } else {
1833 KMP_WARNING(CantGetNumAvailCPU);
1834 KMP_INFORM(AssumedNumCPU);
1835 }
1836
1837#else
1838
1839#error "Unknown or unsupported OS."
1840
1841#endif
1842
1843 return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1844
1845} // __kmp_get_xproc
1846
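// Opens `path` and scans it with vfscanf() using `format`; returns the number
// of items assigned, or 0 if the file cannot be opened. Hypothetical usage:
//   int n; __kmp_read_from_file("/proc/sys/kernel/threads-max", "%d", &n);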
1847int __kmp_read_from_file(char const *path, char const *format, ...) {
1848 int result;
1849 va_list args;
1850
1851 va_start(args, format);
1852 FILE *f = fopen(path, "rb");
1853 if (f == NULL)
1854 return 0;
1855 result = vfscanf(f, format, args);
1856 fclose(f);
1857
1858 return result;
1859}
1860
1861void __kmp_runtime_initialize(void) {
1862 int status;
1863 pthread_mutexattr_t mutex_attr;
1864 pthread_condattr_t cond_attr;
1865
1866 if (__kmp_init_runtime) {
1867 return;
1868 }
1869
1870#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1871 if (!__kmp_cpuinfo.initialized) {
1872 __kmp_query_cpuid(&__kmp_cpuinfo);
1873 }
1874#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1875
1876 __kmp_xproc = __kmp_get_xproc();
1877
1878#if !KMP_32_BIT_ARCH
1879 struct rlimit rlim;
1880 // read stack size of calling thread, save it as default for worker threads;
1881 // this should be done before reading environment variables
1882 status = getrlimit(RLIMIT_STACK, &rlim);
1883 if (status == 0) { // success?
1884 __kmp_stksize = rlim.rlim_cur;
1885 __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1886 }
1887#endif /* !KMP_32_BIT_ARCH */
1888
1889 if (sysconf(_SC_THREADS)) {
1890
1891 /* Query the maximum number of threads */
1892 __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1893 if (__kmp_sys_max_nth == -1) {
1894 /* Unlimited threads for NPTL */
1895 __kmp_sys_max_nth = INT_MAX;
1896 } else if (__kmp_sys_max_nth <= 1) {
1897 /* Can't tell, just use PTHREAD_THREADS_MAX */
1898 __kmp_sys_max_nth = KMP_MAX_NTH;
1899 }
1900
1901 /* Query the minimum stack size */
1902 __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1903 if (__kmp_sys_min_stksize <= 1) {
1904 __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1905 }
1906 }
1907
1908 /* Set up minimum number of threads to switch to TLS gtid */
1909 __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1910
1911 status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1912 __kmp_internal_end_dest);
1913 KMP_CHECK_SYSFAIL("pthread_key_create", status);
1914 status = pthread_mutexattr_init(&mutex_attr);
1915 KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1916 status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1917 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1918 status = pthread_mutexattr_destroy(&mutex_attr);
1919 KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1920 status = pthread_condattr_init(&cond_attr);
1921 KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1922 status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1923 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1924 status = pthread_condattr_destroy(&cond_attr);
1925 KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1926#if USE_ITT_BUILD
1927 __kmp_itt_initialize();
1928#endif /* USE_ITT_BUILD */
1929
1930 __kmp_init_runtime = TRUE;
1931}
1932
1933void __kmp_runtime_destroy(void) {
1934 int status;
1935
1936 if (!__kmp_init_runtime) {
1937 return; // Nothing to do.
1938 }
1939
1940#if USE_ITT_BUILD
1941 __kmp_itt_destroy();
1942#endif /* USE_ITT_BUILD */
1943
1944 status = pthread_key_delete(__kmp_gtid_threadprivate_key);
1945 KMP_CHECK_SYSFAIL("pthread_key_delete", status);
1946
1947 status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
1948 if (status != 0 && status != EBUSY) {
1949 KMP_SYSFAIL("pthread_mutex_destroy", status);
1950 }
1951 status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
1952 if (status != 0 && status != EBUSY) {
1953 KMP_SYSFAIL("pthread_cond_destroy", status);
1954 }
1955#if KMP_AFFINITY_SUPPORTED
1956 __kmp_affinity_uninitialize();
1957#endif
1958
1959 __kmp_init_runtime = FALSE;
1960}
1961
1962/* Put the thread to sleep for a time period */
1963/* NOTE: not currently used anywhere */
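/* sleep() only has whole-second resolution, so the millisecond argument is
   rounded to the nearest second. */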
1964void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
1965
1966/* Calculate the elapsed wall clock time for the user */
1967void __kmp_elapsed(double *t) {
1968 int status;
1969#ifdef FIX_SGI_CLOCK
1970 struct timespec ts;
1971
1972 status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
1973 KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
1974 *t =
1975 (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
1976#else
1977 struct timeval tv;
1978
1979 status = gettimeofday(&tv, NULL);
1980 KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1981 *t =
1982 (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
1983#endif
1984}
1985
1986/* Calculate the elapsed wall clock tick for the user */
1987void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
1988
1989/* Return the current time stamp in nsec */
1990kmp_uint64 __kmp_now_nsec() {
1991 struct timeval t;
1992 gettimeofday(&t, NULL);
1993 kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
1994 (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
1995 return nsec;
1996}
1997
1998#if KMP_ARCH_X86 || KMP_ARCH_X86_64
1999/* Measure clock ticks per millisecond */
2000void __kmp_initialize_system_tick() {
2001 kmp_uint64 now, nsec2, diff;
2002 kmp_uint64 delay = 100000; // 50~100 usec on most machines.
2003 kmp_uint64 nsec = __kmp_now_nsec();
2004 kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
2005 while ((now = __kmp_hardware_timestamp()) < goal)
2006 ;
2007 nsec2 = __kmp_now_nsec();
2008 diff = nsec2 - nsec;
2009 if (diff > 0) {
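 // delay + (now - goal) is the number of hardware ticks actually elapsed and
 // diff is the elapsed wall-clock time in nanoseconds, so
 // 1e6 * ticks / nanoseconds gives ticks per millisecond.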
2010 kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
2011 if (tpms > 0)
2012 __kmp_ticks_per_msec = tpms;
2013 }
2014}
2015#endif
2016
2017/* Determine whether the given address is mapped into the current address
2018 space. */
2019
2020int __kmp_is_address_mapped(void *addr) {
2021
2022 int found = 0;
2023 int rc;
2024
2025#if KMP_OS_LINUX || KMP_OS_HURD
2026
2027 /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2028 address ranges mapped into the address space. */
2029
2030 char *name = __kmp_str_format("/proc/%d/maps", getpid());
2031 FILE *file = NULL;
2032
2033 file = fopen(name, "r");
2034 KMP_ASSERT(file != NULL);
2035
2036 for (;;) {
2037
2038 void *beginning = NULL;
2039 void *ending = NULL;
2040 char perms[5];
2041
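 // Each line of /proc/<pid>/maps looks roughly like (fields after the
 // permissions are ignored here):
 //   00400000-0040b000 r-xp 00000000 08:01 1234   /path/to/exe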
2042 rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2043 if (rc == EOF) {
2044 break;
2045 }
2046 KMP_ASSERT(rc == 3 &&
2047 KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2048
2049 // Ending address is not included in the region, but beginning is.
2050 if ((addr >= beginning) && (addr < ending)) {
2051 perms[2] = 0; // 3rd and 4th characters do not matter.
2052 if (strcmp(perms, "rw") == 0) {
2053 // Memory we are looking for should be readable and writable.
2054 found = 1;
2055 }
2056 break;
2057 }
2058 }
2059
2060 // Free resources.
2061 fclose(file);
2062 KMP_INTERNAL_FREE(name);
2063#elif KMP_OS_FREEBSD
2064 char *buf;
2065 size_t lstsz;
2066 int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2067 rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2068 if (rc < 0)
2069 return 0;
2070 // The first sysctl() call reports the required buffer size; pad it by a
2071 // third to allow for the map growing between the two calls.
2072 lstsz = lstsz * 4 / 3;
2073 buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2074 rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2075 if (rc < 0) {
2076 kmpc_free(buf);
2077 return 0;
2078 }
2079
2080 char *lw = buf;
2081 char *up = buf + lstsz;
2082
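 // Walk the packed array of variable-size kinfo_vmentry records; each
 // record's kve_structsize field gives the offset of the next record.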
2083 while (lw < up) {
2084 struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2085 size_t cursz = cur->kve_structsize;
2086 if (cursz == 0)
2087 break;
2088 void *start = reinterpret_cast<void *>(cur->kve_start);
2089 void *end = reinterpret_cast<void *>(cur->kve_end);
2090 // Readable/Writable addresses within current map entry
2091 if ((addr >= start) && (addr < end)) {
2092 if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2093 (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2094 found = 1;
2095 break;
2096 }
2097 }
2098 lw += cursz;
2099 }
2100 kmpc_free(buf);
2101
2102#elif KMP_OS_DARWIN
2103
2104 /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
2105 using vm interface. */
2106
2107 int buffer;
2108 vm_size_t count;
2109 rc = vm_read_overwrite(
2110 mach_task_self(), // Task to read memory of.
2111 (vm_address_t)(addr), // Address to read from.
2112 1, // Number of bytes to be read.
2113 (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2114 &count // Address of var to save number of read bytes in.
2115 );
2116 if (rc == 0) {
2117 // Memory successfully read.
2118 found = 1;
2119 }
2120
2121#elif KMP_OS_NETBSD
2122
2123 int mib[5];
2124 mib[0] = CTL_VM;
2125 mib[1] = VM_PROC;
2126 mib[2] = VM_PROC_MAP;
2127 mib[3] = getpid();
2128 mib[4] = sizeof(struct kinfo_vmentry);
2129
2130 size_t size;
2131 rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2132 KMP_ASSERT(!rc);
2133 KMP_ASSERT(size);
2134
2135 size = size * 4 / 3;
2136 struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2137 KMP_ASSERT(kiv);
2138
2139 rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2140 KMP_ASSERT(!rc);
2141 KMP_ASSERT(size);
2142
2143 for (size_t i = 0; i < size / sizeof(*kiv); i++) {
2144 if (kiv[i].kve_start <= (uint64_t)addr &&
2145 (uint64_t)addr < kiv[i].kve_end) {
2146 found = 1;
2147 break;
2148 }
2149 }
2150 KMP_INTERNAL_FREE(kiv);
2151#elif KMP_OS_OPENBSD
2152
2153 int mib[3];
2154 mib[0] = CTL_KERN;
2155 mib[1] = KERN_PROC_VMMAP;
2156 mib[2] = getpid();
2157
2158 size_t size;
2159 uint64_t end;
2160 rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2161 KMP_ASSERT(!rc);
2162 KMP_ASSERT(size);
2163 end = size;
2164
2165 struct kinfo_vmentry kiv = {.kve_start = 0};
2166
2167 while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2168 KMP_ASSERT(size);
2169 if (kiv.kve_end == end)
2170 break;
2171
2172 if (kiv.kve_start <= (uint64_t)addr && (uint64_t)addr < kiv.kve_end) {
2173 found = 1;
2174 break;
2175 }
2176 kiv.kve_start += 1;
2177 }
2178#elif KMP_OS_DRAGONFLY
2179
2180 // FIXME(DragonFly): Implement this
2181 found = 1;
2182
2183#else
2184
2185#error "Unknown or unsupported OS"
2186
2187#endif
2188
2189 return found;
2190
2191} // __kmp_is_address_mapped
2192
2193#ifdef USE_LOAD_BALANCE
2194
2195#if KMP_OS_DARWIN || KMP_OS_NETBSD
2196
2197// The function returns the rounded value of the system load average
2198// during the given time interval, which depends on the value of the
2199// __kmp_load_balance_interval variable (default is 60 sec, other values
2200// may be 300 sec or 900 sec).
2201// It returns -1 in case of error.
2202int __kmp_get_load_balance(int max) {
2203 double averages[3];
2204 int ret_avg = 0;
2205
2206 int res = getloadavg(averages, 3);
2207
2208 // Check __kmp_load_balance_interval to determine which of averages to use.
2209// getloadavg() may return fewer samples than requested, that is, fewer
2210// than 3.
2211 if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2212 ret_avg = (int)averages[0]; // 1 min
2213 } else if ((__kmp_load_balance_interval >= 180 &&
2214 __kmp_load_balance_interval < 600) &&
2215 (res >= 2)) {
2216 ret_avg = (int)averages[1]; // 5 min
2217 } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2218 ret_avg = (int)averages[2]; // 15 min
2219 } else { // Error occurred
2220 return -1;
2221 }
2222
2223 return ret_avg;
2224}
2225
2226#else // Linux* OS
2227
2228// The function returns the number of running (not sleeping) threads, or -1
2229// in case of error. An error may be reported if the Linux* OS kernel is too
2230// old (no "/proc" support). Counting running threads stops once max running
2231// threads are encountered.
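// The count is obtained by walking /proc/<pid>/task/<tid>/stat for every
// process and thread in the system and counting entries whose state field is
// 'R' (running); the result is cached for __kmp_load_balance_interval seconds.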
2232int __kmp_get_load_balance(int max) {
2233 static int permanent_error = 0;
2234 static int glb_running_threads = 0; // Saved count of the running threads for
2235 // the thread balance algorithm
2236 static double glb_call_time = 0; /* Thread balance algorithm call time */
2237
2238 int running_threads = 0; // Number of running threads in the system.
2239
2240 DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2241 struct dirent *proc_entry = NULL;
2242
2243 kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2244 DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2245 struct dirent *task_entry = NULL;
2246 int task_path_fixed_len;
2247
2248 kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2249 int stat_file = -1;
2250 int stat_path_fixed_len;
2251
2252 int total_processes = 0; // Total number of processes in system.
2253 int total_threads = 0; // Total number of threads in system.
2254
2255 double call_time = 0.0;
2256
2257 __kmp_str_buf_init(&task_path);
2258 __kmp_str_buf_init(&stat_path);
2259
2260 __kmp_elapsed(&call_time);
2261
2262 if (glb_call_time &&
2263 (call_time - glb_call_time < __kmp_load_balance_interval)) {
2264 running_threads = glb_running_threads;
2265 goto finish;
2266 }
2267
2268 glb_call_time = call_time;
2269
2270 // Do not spend time on scanning "/proc/" if we have a permanent error.
2271 if (permanent_error) {
2272 running_threads = -1;
2273 goto finish;
2274 }
2275
2276 if (max <= 0) {
2277 max = INT_MAX;
2278 }
2279
2280 // Open "/proc/" directory.
2281 proc_dir = opendir("/proc");
2282 if (proc_dir == NULL) {
2283 // Cannot open "/proc/". Probably the kernel does not support it. Return an
2284 // error now and in subsequent calls.
2285 running_threads = -1;
2286 permanent_error = 1;
2287 goto finish;
2288 }
2289
2290 // Initialize fixed part of task_path. This part will not change.
2291 __kmp_str_buf_cat(&task_path, "/proc/", 6);
2292 task_path_fixed_len = task_path.used; // Remember number of used characters.
2293
2294 proc_entry = readdir(proc_dir);
2295 while (proc_entry != NULL) {
2296 // Proc entry is a directory and name starts with a digit. Assume it is a
2297 // process' directory.
2298 if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2299
2300 ++total_processes;
2301 // Make sure init process is the very first in "/proc", so we can replace
2302 // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2303 // 1. We are going to check that total_processes == 1 => d_name == "1" is
2304 // true (where "=>" is implication). Since C++ does not have => operator,
2305 // let us replace it with its equivalent: a => b == ! a || b.
2306 KMP_DEBUG_ASSERT(total_processes != 1 ||
2307 strcmp(proc_entry->d_name, "1") == 0);
2308
2309 // Construct task_path.
2310 task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2311 __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2312 KMP_STRLEN(proc_entry->d_name));
2313 __kmp_str_buf_cat(&task_path, "/task", 5);
2314
2315 task_dir = opendir(task_path.str);
2316 if (task_dir == NULL) {
2317 // A process can finish between reading its "/proc/" directory entry and
2318 // opening its "task/" directory, so in the general case we should not
2319 // complain, but simply skip this process and read the next one. However,
2320 // on systems with no "task/" support we would waste a lot of time
2321 // rescanning the "/proc/" tree again and again without any benefit. The
2322 // "init" process (its pid is 1) should always exist, so if we cannot open
2323 // the "/proc/1/task/" directory, it means "task/" is not supported by the
2324 // kernel. Report an error now and in all subsequent calls.
2325 if (strcmp(proc_entry->d_name, "1") == 0) {
2326 running_threads = -1;
2327 permanent_error = 1;
2328 goto finish;
2329 }
2330 } else {
2331 // Construct fixed part of stat file path.
2332 __kmp_str_buf_clear(&stat_path);
2333 __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2334 __kmp_str_buf_cat(&stat_path, "/", 1);
2335 stat_path_fixed_len = stat_path.used;
2336
2337 task_entry = readdir(task_dir);
2338 while (task_entry != NULL) {
2339 // It is a directory and name starts with a digit.
2340 if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2341 ++total_threads;
2342
2343 // Construct complete stat file path. Easiest way would be:
2344 // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2345 // task_entry->d_name );
2346 // but a series of __kmp_str_buf_cat() calls works a bit faster.
2347 stat_path.used =
2348 stat_path_fixed_len; // Reset stat path to its fixed part.
2349 __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2350 KMP_STRLEN(task_entry->d_name));
2351 __kmp_str_buf_cat(&stat_path, "/stat", 5);
2352
2353 // Note: Low-level API (open/read/close) is used. High-level API
2354 // (fopen/fclose) works ~ 30 % slower.
2355 stat_file = open(stat_path.str, O_RDONLY);
2356 if (stat_file == -1) {
2357 // We cannot report an error because task (thread) can terminate
2358 // just before reading this file.
2359 } else {
2360 /* Content of "stat" file looks like:
2361 24285 (program) S ...
2362
2363 It is a single line (if program name does not include funny
2364 symbols). The first number is the thread id, then the executable
2365 file name in parentheses, then the state of the thread. We need just
2366 the thread state.
2367
2368 Good news: Length of program name is 15 characters max. Longer
2369 names are truncated.
2370
2371 Thus, we need a rather short buffer: 15 chars for the program name +
2372 2 parentheses + 3 spaces + ~7 digits of pid = 37.
2373
2374 Bad news: Program name may contain special symbols like space,
2375 closing parenthesis, or even new line. This makes parsing
2376 the "stat" file not 100 % reliable. With such funny program names,
2377 parsing may fail (and report an incorrect thread state).
2378
2379 Parsing the "status" file looks more promising (due to its different
2380 structure and escaping of special symbols), but reading and
2381 parsing the "status" file is slower.
2382 -- ln
2383 */
2384 char buffer[65];
2385 ssize_t len;
2386 len = read(stat_file, buffer, sizeof(buffer) - 1);
2387 if (len >= 0) {
2388 buffer[len] = 0;
2389 // Using scanf:
2390 // sscanf( buffer, "%*d (%*s) %c ", & state );
2391 // looks very nice, but searching for a closing parenthesis
2392 // works a bit faster.
2393 char *close_parent = strstr(buffer, ") ");
2394 if (close_parent != NULL) {
2395 char state = *(close_parent + 2);
2396 if (state == 'R') {
2397 ++running_threads;
2398 if (running_threads >= max) {
2399 goto finish;
2400 }
2401 }
2402 }
2403 }
2404 close(stat_file);
2405 stat_file = -1;
2406 }
2407 }
2408 task_entry = readdir(task_dir);
2409 }
2410 closedir(task_dir);
2411 task_dir = NULL;
2412 }
2413 }
2414 proc_entry = readdir(proc_dir);
2415 }
2416
2417 // There _might_ be a timing hole where the thread executing this
2418 // code gets skipped in the load balance, and running_threads is 0.
2419 // Assert in the debug builds only!!!
2420 KMP_DEBUG_ASSERT(running_threads > 0);
2421 if (running_threads <= 0) {
2422 running_threads = 1;
2423 }
2424
2425finish: // Clean up and exit.
2426 if (proc_dir != NULL) {
2427 closedir(proc_dir);
2428 }
2429 __kmp_str_buf_free(&task_path);
2430 if (task_dir != NULL) {
2431 closedir(task_dir);
2432 }
2433 __kmp_str_buf_free(&stat_path);
2434 if (stat_file != -1) {
2435 close(stat_file);
2436 }
2437
2438 glb_running_threads = running_threads;
2439
2440 return running_threads;
2441
2442} // __kmp_get_load_balance
2443
2444#endif // KMP_OS_DARWIN || KMP_OS_NETBSD
2445
2446#endif // USE_LOAD_BALANCE
2447
2448#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || \
2449 ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || \
2450 KMP_ARCH_PPC64 || KMP_ARCH_RISCV64)
2451
2452// We really only need the case with 1 argument, because CLANG always builds
2453// a struct of pointers to the shared variables referenced in the outlined function.
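// For reference, a compiler-outlined microtask typically has a signature along
// these lines (names illustrative only):
//   void outlined_fn(kmp_int32 *global_tid, kmp_int32 *bound_tid, void *captured_args);
// so pkfn is always invoked with &gtid and &tid followed by the captured
// argument pointers taken from p_argv.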
2454int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2455 void *p_argv[]
2456#if OMPT_SUPPORT
2457 ,
2458 void **exit_frame_ptr
2459#endif
2460) {
2461#if OMPT_SUPPORT
2462 *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2463#endif
2464
2465 switch (argc) {
2466 default:
2467 fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2468 fflush(stderr);
2469 exit(-1);
2470 case 0:
2471 (*pkfn)(&gtid, &tid);
2472 break;
2473 case 1:
2474 (*pkfn)(&gtid, &tid, p_argv[0]);
2475 break;
2476 case 2:
2477 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2478 break;
2479 case 3:
2480 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2481 break;
2482 case 4:
2483 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2484 break;
2485 case 5:
2486 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2487 break;
2488 case 6:
2489 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2490 p_argv[5]);
2491 break;
2492 case 7:
2493 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2494 p_argv[5], p_argv[6]);
2495 break;
2496 case 8:
2497 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2498 p_argv[5], p_argv[6], p_argv[7]);
2499 break;
2500 case 9:
2501 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2502 p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2503 break;
2504 case 10:
2505 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2506 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2507 break;
2508 case 11:
2509 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2510 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2511 break;
2512 case 12:
2513 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2514 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2515 p_argv[11]);
2516 break;
2517 case 13:
2518 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2519 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2520 p_argv[11], p_argv[12]);
2521 break;
2522 case 14:
2523 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2524 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2525 p_argv[11], p_argv[12], p_argv[13]);
2526 break;
2527 case 15:
2528 (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2529 p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2530 p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2531 break;
2532 }
2533
2534 return 1;
2535}
2536
2537#endif
2538
2539#if KMP_OS_LINUX
2540// Functions for hidden helper task
2541namespace {
2542// Condition variable for initializing hidden helper team
2543pthread_cond_t hidden_helper_threads_initz_cond_var;
2544pthread_mutex_t hidden_helper_threads_initz_lock;
2545volatile int hidden_helper_initz_signaled = FALSE;
2546
2547// Condition variable for deinitializing hidden helper team
2548pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2549pthread_mutex_t hidden_helper_threads_deinitz_lock;
2550volatile int hidden_helper_deinitz_signaled = FALSE;
2551
2552// Condition variable for the wrapper function of main thread
2553pthread_cond_t hidden_helper_main_thread_cond_var;
2554pthread_mutex_t hidden_helper_main_thread_lock;
2555volatile int hidden_helper_main_thread_signaled = FALSE;
2556
2557// Semaphore for worker threads. We don't use a condition variable here
2558// because, when multiple signals are sent at the same time, only one waiting
2559// thread might be woken.
2560sem_t hidden_helper_task_sem;
2561} // namespace
2562
2563void __kmp_hidden_helper_worker_thread_wait() {
2564 int status = sem_wait(&hidden_helper_task_sem);
2565 KMP_CHECK_SYSFAIL("sem_wait", status);
2566}
2567
2568void __kmp_do_initialize_hidden_helper_threads() {
2569 // Initialize condition variable
2570 int status =
2571 pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2572 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2573
2574 status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2575 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2576
2577 status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2578 KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2579
2580 status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2581 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2582
2583 status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2584 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2585
2586 status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2587 KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2588
2589 // Initialize the semaphore
2590 status = sem_init(&hidden_helper_task_sem, 0, 0);
2591 KMP_CHECK_SYSFAIL("sem_init", status);
2592
2593 // Create a new thread to finish initialization
2594 pthread_t handle;
2595 status = pthread_create(
2596 &handle, nullptr,
2597 [](void *) -> void * {
2598 __kmp_hidden_helper_threads_initz_routine();
2599 return nullptr;
2600 },
2601 nullptr);
2602 KMP_CHECK_SYSFAIL("pthread_create", status);
2603}
2604
2605void __kmp_hidden_helper_threads_initz_wait() {
2606 // Initial thread waits here for the completion of the initialization. The
2607 // condition variable will be signaled by the main thread of the hidden helper team.
2608 int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2609 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2610
2611 if (!TCR_4(hidden_helper_initz_signaled)) {
2612 status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2613 &hidden_helper_threads_initz_lock);
2614 KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2615 }
2616
2617 status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2618 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2619}
2620
2621void __kmp_hidden_helper_initz_release() {
2622 // After all initialization, reset __kmp_init_hidden_helper_threads to false.
2623 int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2624 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2625
2626 status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2627 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2628
2629 TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2630
2631 status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2632 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2633}
2634
2635void __kmp_hidden_helper_main_thread_wait() {
2636 // The main thread of the hidden helper team will be blocked here. The
2637 // condition variable can only be signaled in the destructor of the RTL.
2638 int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2639 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2640
2641 if (!TCR_4(hidden_helper_main_thread_signaled)) {
2642 status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2643 &hidden_helper_main_thread_lock);
2644 KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2645 }
2646
2647 status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2648 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2649}
2650
2651void __kmp_hidden_helper_main_thread_release() {
2652 // The initial thread of OpenMP RTL should call this function to wake up the
2653 // main thread of hidden helper team.
2654 int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2655 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2656
2657 status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2658 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2659
2660 // The hidden helper team is done here
2661 TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2662
2663 status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2664 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2665}
2666
2667void __kmp_hidden_helper_worker_thread_signal() {
2668 int status = sem_post(&hidden_helper_task_sem);
2669 KMP_CHECK_SYSFAIL("sem_post", status);
2670}
2671
2672void __kmp_hidden_helper_threads_deinitz_wait() {
2673 // Initial thread waits here for the completion of the deinitialization. The
2674 // condition variable will be signaled by the main thread of the hidden helper team.
2675 int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2676 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2677
2678 if (!TCR_4(hidden_helper_deinitz_signaled)) {
2679 status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2680 &hidden_helper_threads_deinitz_lock);
2681 KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2682 }
2683
2684 status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2685 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2686}
2687
2688void __kmp_hidden_helper_threads_deinitz_release() {
2689 int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2690 KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2691
2692 status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2693 KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2694
2695 TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2696
2697 status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2698 KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2699}
2700#else // KMP_OS_LINUX
2701void __kmp_hidden_helper_worker_thread_wait() {
2702 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2703}
2704
2705void __kmp_do_initialize_hidden_helper_threads() {
2706 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2707}
2708
2709void __kmp_hidden_helper_threads_initz_wait() {
2710 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2711}
2712
2713void __kmp_hidden_helper_initz_release() {
2714 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2715}
2716
2717void __kmp_hidden_helper_main_thread_wait() {
2718 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2719}
2720
2721void __kmp_hidden_helper_main_thread_release() {
2722 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2723}
2724
2725void __kmp_hidden_helper_worker_thread_signal() {
2726 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2727}
2728
2729void __kmp_hidden_helper_threads_deinitz_wait() {
2730 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2731}
2732
2733void __kmp_hidden_helper_threads_deinitz_release() {
2734 KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2735}
2736#endif // KMP_OS_LINUX
2737
2738// end of file //