LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternate to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
  // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}
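
// Illustrative note on the reference-counting protocol (a summary of existing
// behavior, not new logic): __kmp_init_node leaves nrefs at 1 for the creating
// thread; __kmp_node_ref (below) takes an extra reference whenever the node is
// stored in a successor list or in the dephash, and __kmp_node_deref (declared
// in kmp_taskdeps.h) drops one, freeing the node at zero. A hypothetical
// lifetime of a node N with two dependent tasks:
//   __kmp_init_node(&N);      // nrefs == 1 (creator)
//   __kmp_node_ref(&N);       // nrefs == 2 (successor list of task A)
//   __kmp_node_ref(&N);       // nrefs == 3 (successor list of task B)
//   __kmp_node_deref(th, &N); // nrefs == 2 (A's list freed)
//   __kmp_node_deref(th, &N); // nrefs == 1 (B's list freed)
//   __kmp_node_deref(th, &N); // nrefs == 0 -> node is freed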

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
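
// Worked example for the hash above (assuming a 97-bucket table): for a
// dependence address addr == 0x1040,
//   addr >> 6         == 0x041
//   addr >> 2         == 0x410
//   0x041 ^ 0x410     == 0x451 == 1105
//   1105 % 97         == 38  -> bucket 38
// The shifts discard the low address bits (which are often identical due to
// alignment) before mixing, so neighboring objects spread across buckets.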

static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  size_t size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;
  h->last_all = current_dephash->last_all;

  // make sure buckets are properly initialized
  for (size_t i = 0; i < new_size; i++) {
    h->buckets[i] = NULL;
  }

  // insert existing elements in the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // Compute the new hash using the new size, and insert the entry in
      // the new bucket.
      size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // Free old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
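
// Illustrative growth schedule implied by the code above: each extension
// installs sizes[generation + 1] buckets, walking the prime sequence
//   997 -> 2003 -> 4001 -> 8191 -> 16001 -> 32003 -> 64007 -> 131071
// Because the guard rejects gen >= MAX_GEN (8), sizes[8] (270029) is never
// installed; past generation 7 the table stops growing and bucket chains
// simply get longer.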

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->last_all = NULL;

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}

static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
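  // nconflicts counts entries that were inserted into an already-occupied
  // bucket; with integer division, the check below extends the table once
  // there have been at least as many such collisions as there are buckets.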
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  size_t bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
// create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    if (!h->last_all) // no predecessor task with omp_all_memory dependence
      entry->last_out = NULL;
    else // else link the omp_all_memory depnode to the new entry
      entry->last_out = __kmp_node_ref(h->last_all);
    entry->last_set = NULL;
    entry->prev_set = NULL;
    entry->last_flag = 0;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_int32 gtid, kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependences
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the ompt_new_dependence_callback
  */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    ompt_data_t *sink_data;
    if (sink_task)
      sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
    else
      sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data), sink_data);
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(gtid, dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(gtid, sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
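
// Both __kmp_depnode_link_successor overloads use the same double-checked
// pattern: dn.task is read once without the lock as a cheap filter (it is set
// to NULL under the depnode lock when the predecessor completes, see
// __kmp_release_deps in kmp_taskdeps.h), then re-read under
// KMP_ACQUIRE_DEPNODE in case the predecessor finished in the meantime. Only
// a still-live predecessor gets the new node appended to its successor list
// and counted in npredecessors.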

static inline kmp_int32
__kmp_process_dep_all(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *h,
                      bool dep_barrier, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d processing dep_all, "
                "dep_barrier = %d\n",
                gtid, dep_barrier));
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;

  // process previous omp_all_memory node if any
  npredecessors +=
      __kmp_depnode_link_successor(gtid, thread, task, node, h->last_all);
  __kmp_node_deref(thread, h->last_all);
  if (!dep_barrier) {
    h->last_all = __kmp_node_ref(node);
  } else {
    // if this is a sync point in the serial sequence, then the previous
    // outputs are guaranteed to be completed after the execution of this
    // task so the previous output nodes can be cleared.
    h->last_all = NULL;
  }

  // process all regular dependences
  for (size_t i = 0; i < h->size; i++) {
    kmp_dephash_entry_t *info = h->buckets[i];
    if (!info) // skip empty slots in dephash
      continue;
    for (; info; info = info->next_in_bucket) {
      // for each entry the omp_all_memory works as OUT dependence
      kmp_depnode_t *last_out = info->last_out;
      kmp_depnode_list_t *last_set = info->last_set;
      kmp_depnode_list_t *prev_set = info->prev_set;
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        info->last_out = NULL;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d found %d predecessors\n", gtid,
                npredecessors));
  return npredecessors;
}
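
// Sketch of user code that reaches this path (OpenMP 5.1 omp_all_memory; the
// function names are illustrative):
//
//   #pragma omp task depend(out: x)                // T1
//   { x = produce(); }
//   #pragma omp task depend(inout: omp_all_memory) // T2: OUT on every
//   { checkpoint_everything(); }                   //     tracked address
//   #pragma omp task depend(in: x)                 // T3
//   { consume(x); }
//
// T2 is linked after T1 and T3 after T2 through x's dephash entry; addresses
// first touched after T2 inherit T2 as their last_out via h->last_all (see
// __kmp_dephash_find above).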

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependences : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_set = info->last_set;
    kmp_depnode_list_t *prev_set = info->prev_set;

    if (dep->flags.out) { // out or inout --> clean lists if any
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      }
    } else { // either IN or MTX or SET
      if (info->last_flag == 0 || info->last_flag == dep->flag) {
        // last_set either didn't exist or of same dep kind
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
        // link node as successor of all nodes in the prev_set if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, prev_set);
        if (dep_barrier) {
          // clean last_out and prev_set if any; don't touch last_set
          __kmp_node_deref(thread, last_out);
          info->last_out = NULL;
          __kmp_depnode_list_free(thread, prev_set);
          info->prev_set = NULL;
        }
      } else { // last_set is of different dep kind, make it prev_set
        // link node as successor of all nodes in the last_set
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        // clean last_out if any
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        // clean prev_set if any
        __kmp_depnode_list_free(thread, prev_set);
        if (!dep_barrier) {
          // move last_set to prev_set, new last_set will be allocated
          info->prev_set = last_set;
        } else {
          info->prev_set = NULL;
          info->last_flag = 0;
        }
        info->last_set = NULL;
      }
      // for dep_barrier last_flag value should remain:
      // 0 if last_set is empty, unchanged otherwise
      if (!dep_barrier) {
        info->last_flag = dep->flag; // store dep kind of the last_set
        info->last_set = __kmp_add_node(thread, info->last_set, node);
      }
      // check if we are processing MTX dependency
      if (dep->flag == KMP_DEP_MTX) {
        if (info->mtx_lock == NULL) {
          info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
          __kmp_init_lock(info->mtx_lock);
        }
        KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
        kmp_int32 m;
        // Save lock in node's array
        for (m = 0; m < MAX_MTX_DEPS; ++m) {
          // sort pointers in decreasing order to avoid potential livelock
          if (node->dn.mtx_locks[m] < info->mtx_lock) {
            KMP_DEBUG_ASSERT(!node->dn.mtx_locks[node->dn.mtx_num_locks]);
            for (int n = node->dn.mtx_num_locks; n > m; --n) {
              // shift right all lesser non-NULL pointers
              KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
              node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
            }
            node->dn.mtx_locks[m] = info->mtx_lock;
            break;
          }
        }
        KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
        node->dn.mtx_num_locks++;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}
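
// Sketch of how the branches above map to source code (illustrative; the
// names are made up):
//
//   #pragma omp task depend(out: a)     // T1: last_out(&a) = T1
//   { a = init(); }
//   #pragma omp task depend(in: a)      // T2: joins last_set (kind IN),
//   { read(a); }                        //     linked after T1
//   #pragma omp task depend(in: a)      // T3: also joins last_set; T2 and
//   { read(a); }                        //     T3 may run concurrently
//   #pragma omp task depend(inout: a)   // T4: flags.out path, linked after
//   { a = update(a); }                  //     every node in last_set
//
// A mutexinoutset dependence additionally records info->mtx_lock in the
// node's mtx_locks array, kept sorted in decreasing pointer order so every
// task acquires its locks in a consistent order.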

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0, dep_all = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependences for task %p : %d "
                "possibly aliased dependences, %d non-aliased dependences : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0 &&
        dep_list[i].base_addr != (kmp_intptr_t)KMP_SIZE_T_MAX) {
      KMP_DEBUG_ASSERT(
          dep_list[i].flag == KMP_DEP_IN || dep_list[i].flag == KMP_DEP_OUT ||
          dep_list[i].flag == KMP_DEP_INOUT ||
          dep_list[i].flag == KMP_DEP_MTX || dep_list[i].flag == KMP_DEP_SET);
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          if (dep_list[i].flag != dep_list[j].flag) {
            // two different dependences on same address work identical to OUT
            dep_list[i].flag = KMP_DEP_OUT;
          }
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flag == KMP_DEP_MTX) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flag = KMP_DEP_OUT; // downgrade mutexinoutset to inout
        }
      }
    } else if (dep_list[i].flag == KMP_DEP_ALL ||
               dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX) {
      // omp_all_memory dependence can be marked by compiler by either
      // (addr=0 && flag=0x80) (flag KMP_DEP_ALL), or (addr=-1).
      // omp_all_memory overrides all other dependences if any
      dep_all = 1;
      break;
    }
  }

  // Doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependences.
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  if (!dep_all) { // regular dependences
    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier,
                                             ndeps, dep_list, task);
    npredecessors += __kmp_process_deps<false>(
        gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
  } else { // omp_all_memory dependence
    npredecessors = __kmp_process_dep_all(gtid, node, *hash, dep_barrier, task);
  }

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0 ? true : false;
}
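
// Worked example of the counting protocol above (illustrative): suppose
// __kmp_process_deps links the task after two predecessors, so the local
// npredecessors reaches 2 while node->dn.npredecessors still holds the fake
// -1; predecessors that complete early decrement it but can never drive it
// to 0. The final fetch_add publishes the real count:
//   no early release : -1 + (2 + 1) == 2  -> still blocked
//   one early release: -2 + (2 + 1) == 1  -> still blocked
//   both released    : -3 + (2 + 1) == 0  -> ready, caller queues the task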

kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame),
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    /* For OMPD we might want to delay the free until end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial &&
           !(task_team && (task_team->tt.tt_found_proxy_tasks ||
                           task_team->tt.tt_hidden_helper_task_encountered));

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependences have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependences: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependences "
                  "for task (serialized) loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependences : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
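
// Sketch of the lowering that reaches this entry point (illustrative; the
// exact sequence a compiler emits varies). User code such as
//   #pragma omp task depend(inout: x)
//   { x++; }
// is conceptually compiled to
//   kmp_depend_info_t dep; // base_addr = &x, len = sizeof(x),
//                          // flags.in = flags.out = 1
//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags, sizeof_task,
//                                         sizeof_shareds, task_entry);
//   __kmpc_omp_task_with_deps(loc, gtid, t, /*ndeps=*/1, &dep,
//                             /*ndeps_noalias=*/0, NULL);
// TASK_CURRENT_NOT_QUEUED tells the caller the task was deferred behind its
// predecessors; the last releasing predecessor will queue it.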

#if OMPT_SUPPORT
void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
                                ompt_data_t *taskwait_task_data) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        taskwait_task_data, ompt_taskwait_complete, NULL);
  }
  current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
  *taskwait_task_data = ompt_data_none;
}
#endif /* OMPT_SUPPORT */

void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependences to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  // this function represents a taskwait construct with depend clause
  // We signal 4 events:
  //  - creation of the taskwait task
  //  - dependences of the taskwait task
  //  - schedule and finish of the taskwait task
  ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
  KMP_ASSERT(taskwait_task_data->ptr == NULL);
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame), taskwait_task_data,
          ompt_task_taskwait | ompt_task_undeferred | ompt_task_mergeable, 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        taskwait_task_data, ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    /* For OMPD we might want to delay the free until end of this function */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
    ompt_deps = NULL;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  // We can return immediately as:
  // - dependences are not computed in serial teams (except with proxy tasks)
  // - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore =
      ignore && thread->th.th_task_team != NULL &&
      thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE &&
      thread->th.th_task_team->tt.tt_hidden_helper_task_encountered == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependences : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependences : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32<false, false> flag(
      (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

#if OMPT_SUPPORT
  __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}
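
// Sketch of the construct this entry point implements (illustrative):
//
//   #pragma omp task depend(out: x)
//   { x = produce(); }
//   #pragma omp taskwait depend(in: x) // lowered to __kmpc_omp_wait_deps
//   use(x);                            // safe: the producer has completed
//
// The stack-allocated depnode is registered with DEP_BARRIER, so the dephash
// is cleaned rather than extended, and the waiting thread keeps executing
// other tasks (flag.execute_tasks) until every predecessor has released it.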