/*
 * kmp_gsupport.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define MKLOC(loc, routine) \
  static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};

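// For example, MKLOC(loc, "GOMP_barrier") expands to
//   static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
// so each GOMP_* entry point below carries a static source-location record
// that it can hand to the matching __kmpc_* call.
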
#include "kmp_ftn_os.h"

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_barrier");
  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmpc_barrier(&loc, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}
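
// With the GNU compiler, a bare "#pragma omp barrier" lowers to a single
// call to GOMP_barrier(), which this wrapper forwards to __kmpc_barrier().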

// Mutual exclusion

// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any objects that
// reference it. We can't reference it directly here in C code, as the symbol
// contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
extern kmp_critical_name *__kmp_unnamed_critical_addr;

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_start");
  KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_end");
  KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_name_start");
  KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
  __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_name_end");
  KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
  __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}
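
// For named critical sections, gcc emits a per-name lock symbol and passes
// its address in; roughly, "#pragma omp critical(tag)" becomes
//   GOMP_critical_name_start(&.gomp_critical_user_tag);
//   ... protected region ...
//   GOMP_critical_name_end(&.gomp_critical_user_tag);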

// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) {
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
  __ompt_thread_assign_wait_id(0);
#endif

  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}
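
// As a sketch: for a type with no suitable hardware atomic (e.g. long
// double), gcc emits "#pragma omp atomic  x += y;" approximately as
//   GOMP_atomic_start();
//   x += y;
//   GOMP_atomic_end();
// All such fallback updates serialize on the one global __kmp_atomic_lock.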

int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_start");
  KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
  // workshare when USE_CHECKS is defined. We need to avoid the push,
  // as there is no corresponding GOMP_single_end() call.
  kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  if (ompt_enabled.enabled) {
    if (rc) {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_executor, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    } else {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_end,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    }
  }
#endif

  return rc;
}
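
// Roughly, gcc lowers "#pragma omp single" to
//   if (GOMP_single_start()) { ... body ... }
//   GOMP_barrier(); // omitted when a nowait clause is present
// so only the thread that wins __kmp_enter_single() above runs the body.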

void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) {
  void *retval;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_copy_start");
  KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // If this is the first thread to enter, return NULL. The generated code will
  // then call GOMP_single_copy_end() for this thread only, with the
  // copyprivate data pointer as an argument.
  if (__kmp_enter_single(gtid, &loc, FALSE))
    return NULL;

// Wait for the first thread to set the copyprivate data pointer,
// and for all other threads to reach this point.

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  // Retrieve the value of the copyprivate data pointer, and wait for all
  // threads to do likewise, then return.
  retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
  return retval;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

  // Set the copyprivate data pointer for the team, then hit the barrier so
  // that the other threads will continue on and read it. Hit another barrier
  // before continuing, so that we know that the copyprivate data pointer has
  // been propagated to all threads before trying to reuse the t_copypriv_data
  // field.
  __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}
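
// A sketch of the matching gcc codegen for
// "#pragma omp single copyprivate(v)", where T stands for the type of v:
//   void *p = GOMP_single_copy_start();
//   if (p == NULL) {
//     ... body, which writes v ...
//     GOMP_single_copy_end(&v); // the executing thread publishes its copy
//   } else {
//     v = *(T *)p; // every other thread reads the published copy
//   }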

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_ordered_start");
  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_ordered(&loc, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_ordered_end");
  KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_ordered(&loc, gtid);
}

// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

#define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
#define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
#define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u

// The parallel construct

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
                                 void *data) {
#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    // get pointer to thread data structure
    thr = __kmp_threads[*gtid];

    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // restore enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
                                          void (*task)(void *), void *data,
                                          unsigned num_threads, ident_t *loc,
                                          enum sched_type schedule, long start,
                                          long end, long incr,
                                          long chunk_size) {
  // Initialize the loop worksharing construct.

  KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
                    schedule != kmp_sch_static);

#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    thr = __kmp_threads[*gtid];
    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  // Now invoke the microtask.
  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // reset enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *),
                         microtask_t wrapper, int argc, ...) {
  int rc;
  kmp_info_t *thr = __kmp_threads[gtid];
  kmp_team_t *team = thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  va_list ap;
  va_start(ap, argc);

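  // Depending on the target ABI, __kmp_fork_call() receives the va_list
  // either by address (x86-64, ARM, and AArch64 Linux) or by value
  // (everywhere else); the #if below mirrors that choice.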
  rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
                       __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                       &ap
#else
                       ap
#endif
                       );

  va_end(ap);

  if (rc) {
    __kmp_run_before_invoked_task(gtid, tid, thr, team);
  }

#if OMPT_SUPPORT
  int ompt_team_size;
  if (ompt_enabled.enabled) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);

    // implicit task callback
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid),
          ompt_task_implicit); // TODO: Can this be ompt_task_initial?
      task_info->thread_num = __kmp_tid_from_gtid(gtid);
    }
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  }
#endif
}

static void __kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid,
                                           void (*task)(void *)) {
#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmp_serialized_parallel(loc, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *),
                                                       void *data,
                                                       unsigned num_threads) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame, *frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_start");
  KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                         data);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
    frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) {
  int gtid = __kmp_get_gtid();
  kmp_info_t *thr;

  thr = __kmp_threads[gtid];

  MKLOC(loc, "GOMP_parallel_end");
  KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

  if (!thr->th.th_team->t.t_serialized) {
    __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
                                 thr->th.th_team);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      // The implicit task is finished here; in the barrier we might schedule
      // deferred tasks, and these don't see the implicit task on the stack.
      OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none;
    }
#endif

    __kmp_join_call(&loc, gtid
#if OMPT_SUPPORT
                    ,
                    fork_context_gnu
#endif
                    );
  } else {
    __kmpc_end_serialized_parallel(&loc, gtid);
  }
}

// Loop worksharing constructs

// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the "ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound. The Gnu codegen
// always adjusts the upper bound by +-1, not by the stride, so the
// "*p_ub += (str > 0) ? 1 : -1" adjustment in the macros below compensates
// for the discrepancy.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.

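// A worked example of the bound adjustment: for "for (i = 0; i < 10; i++)",
// gcc passes lb = 0, ub = 10 (exclusive), str = 1. KMP_DISPATCH_INIT is
// given the inclusive bound 9 (ub - 1); if KMP_DISPATCH_NEXT then hands a
// thread the inclusive chunk [0, 4], the macros below bump *p_ub to 5, the
// exclusive bound the generated loop expects.
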
#if OMPT_SUPPORT
#define IF_OMPT_SUPPORT(code) code
#else
#define IF_OMPT_SUPPORT(code)
#endif

#define LOOP_START(func, schedule) \
  int func(long lb, long ub, long str, long chunk_sz, long *p_lb, \
           long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START(func, schedule) \
  int func(long lb, long ub, long str, long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz " \
                       "%ld\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define KMP_DOACROSS_FINI(status, gtid) \
  if (!status && __kmp_threads[gtid]->th.th_dispatch->th_doacross_flags) { \
    __kmpc_doacross_fini(NULL, gtid); \
  }

#define LOOP_NEXT(func, fini_code) \
  int func(long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
\
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                         (kmp_int *)p_ub, (kmp_int *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
    KMP_DOACROSS_FINI(status, gtid) \
\
    KA_TRACE( \
        20, \
        (KMP_STR(func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
                       "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

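// Taken together, a dynamically scheduled loop such as
//   #pragma omp for schedule(dynamic, 4)
// is driven by gcc-generated code of roughly this shape:
//   if (GOMP_loop_dynamic_start(lb, ub, str, 4, &s, &e))
//     do { ... iterate [s, e) ... } while (GOMP_loop_dynamic_next(&s, &e));
//   GOMP_loop_end(); // or GOMP_loop_end_nowait() with a nowait clause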
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START),
                   kmp_sch_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_NEXT(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT), {})

LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START),
           kmp_ord_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START),
           kmp_ord_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START),
           kmp_ord_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })

#define LOOP_DOACROSS_START(func, schedule) \
  bool func(unsigned ncounts, long *counts, long chunk_sz, long *p_lb, \
            long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, ncounts %u, lb 0x%lx, ub 0x%lx, str " \
                                "0x%lx, chunk_sz " \
                                "0x%lx\n", \
                  gtid, ncounts, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START(func, schedule) \
  int func(unsigned ncounts, long *counts, long *p_lb, long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE( \
        20, \
        (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz " \
                       "%ld\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4

#define LOOP_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long chunk_sz, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    long long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
\
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    unsigned long long stride; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
\
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT((long long)stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_NEXT_ULL(func, fini_code) \
  int func(unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
\
    fini_code status = \
        KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                              (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
                   "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START),
               kmp_sch_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START),
               kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START),
               kmp_sch_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(
        KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT),
    {})
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT), {})

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START),
               kmp_ord_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START),
    kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START),
               kmp_ord_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })

#define LOOP_DOACROSS_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long chunk_sz, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long stride, str, lb, ub; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
\
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    unsigned long long stride, str, lb, ub; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
\
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
\
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).

#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    ompt_pre(); \
\
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
      if (num_threads != 0) { \
        __kmp_push_num_threads(&loc, gtid, num_threads); \
      } \
      __kmp_GOMP_fork_call(&loc, gtid, task, \
                           (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                           9, task, data, num_threads, &loc, (schedule), lb, \
                           (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \
    } else { \
      __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \
    } \
\
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
\
    ompt_post(); \
\
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL

#define OMPT_LOOP_PRE() \
  ompt_frame_t *parent_frame; \
  if (ompt_enabled.enabled) { \
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); \
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); \
    OMPT_STORE_RETURN_ADDRESS(gtid); \
  }

#define OMPT_LOOP_POST() \
  if (ompt_enabled.enabled) { \
    parent_frame->enter_frame = ompt_data_none; \
  }

#else

#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif

PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

// Tasking constructs

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data,
                                             void (*copy_func)(void *, void *),
                                             long arg_size, long arg_align,
                                             bool if_cond, unsigned gomp_flags,
                                             void **depend) {
  MKLOC(loc, "GOMP_task");
  int gtid = __kmp_entry_gtid();
  kmp_int32 flags = 0;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;

  KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

  // The low-order bit is the "untied" flag
  if (!(gomp_flags & 1)) {
    input_flags->tiedness = 1;
  }
  // The second low-order bit is the "final" flag
  if (gomp_flags & 2) {
    input_flags->final = 1;
  }
  input_flags->native = 1;
  // __kmp_task_alloc() sets up all other flags

  if (!if_cond) {
    arg_size = 0;
  }

  kmp_task_t *task = __kmp_task_alloc(
      &loc, gtid, input_flags, sizeof(kmp_task_t),
      arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);

  if (arg_size > 0) {
    if (arg_align > 0) {
      task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
                               arg_align * arg_align);
    }
    // else error??

    if (copy_func) {
      (*copy_func)(task->shareds, data);
    } else {
      KMP_MEMCPY(task->shareds, data, arg_size);
    }
  }

#if OMPT_SUPPORT
  kmp_taskdata_t *current_task;
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    current_task = __kmp_threads[gtid]->th.th_current_task;
    current_task->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  if (if_cond) {
    if (gomp_flags & 8) {
      KMP_ASSERT(depend);
      const size_t ndeps = (kmp_intptr_t)depend[0];
      const size_t nout = (kmp_intptr_t)depend[1];
      kmp_depend_info_t dep_list[ndeps];

      for (size_t i = 0U; i < ndeps; i++) {
        dep_list[i].base_addr = (kmp_intptr_t)depend[2U + i];
        dep_list[i].len = 0U;
        dep_list[i].flags.in = 1;
        dep_list[i].flags.out = (i < nout);
      }
      __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL);
    } else {
      __kmpc_omp_task(&loc, gtid, task);
    }
  } else {
#if OMPT_SUPPORT
    ompt_thread_info_t oldInfo;
    kmp_info_t *thread;
    kmp_taskdata_t *taskdata;
    if (ompt_enabled.enabled) {
      // Store the thread's state, and restore it after the task
      thread = __kmp_threads[gtid];
      taskdata = KMP_TASK_TO_TASKDATA(task);
      oldInfo = thread->th.ompt_thread_info;
      thread->th.ompt_thread_info.wait_id = 0;
      thread->th.ompt_thread_info.state = ompt_state_work_parallel;
      taskdata->ompt_task_info.frame.exit_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
      OMPT_STORE_RETURN_ADDRESS(gtid);
    }
#endif

    __kmpc_omp_task_begin_if0(&loc, gtid, task);
    func(data);
    __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      thread->th.ompt_thread_info = oldInfo;
      taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
    }
#endif
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
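
// For reference, the depend array gcc passes for "#pragma omp task depend(...)"
// is laid out as: depend[0] = total number of dependences, depend[1] = number
// of out/inout dependences, followed by the dependence addresses with the
// out/inout addresses listed first -- which is exactly how the loop above
// unpacks it into a kmp_depend_info_t list.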

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) {
  MKLOC(loc, "GOMP_taskwait");
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

  __kmpc_omp_taskwait(&loc, gtid);

  KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}

// Sections worksharing constructs
//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_sections_start");
  KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
                (unsigned)lb));
  return (unsigned)lb;
}
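
// Approximate gcc codegen for "#pragma omp sections" with N sections:
//   for (unsigned id = GOMP_sections_start(N); id; id = GOMP_sections_next())
//     switch (id) { case 1: ...; break; /* ... */ case N: ...; break; }
//   GOMP_sections_end();
// A returned id of 0 means there are no sections left to execute.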

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_sections_next");
  KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(
      20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb));
  return (unsigned)lb;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(
    void (*task)(void *), void *data, unsigned num_threads, unsigned count) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_sections_start");
  KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                         task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                         (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    parent_frame->enter_frame = ompt_data_none;
  }
#endif

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

#if OMPT_SUPPORT
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) {
  KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *),
                                                 void *data,
                                                 unsigned num_threads,
                                                 unsigned int flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel");
  KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

#if OMPT_SUPPORT
  ompt_task_info_t *parent_task_info, *task_info;
  if (ompt_enabled.enabled) {
    parent_task_info = __ompt_get_task_info_object(0);
    parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    if (flags != 0) {
      __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                         data);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info = __ompt_get_task_info_object(0);
    task_info->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
  task(data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info->frame.exit_frame = ompt_data_none;
    parent_task_info->frame.enter_frame = ompt_data_none;
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *),
                                                          void *data,
                                                          unsigned num_threads,
                                                          unsigned count,
                                                          unsigned flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel_sections");
  KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
    if (num_threads != 0) {
      __kmp_push_num_threads(&loc, gtid, num_threads);
    }
    if (flags != 0) {
      __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
    }
    __kmp_GOMP_fork_call(&loc, gtid, task,
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                         task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                         (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);
  } else {
    __kmp_GOMP_serialized_parallel(&loc, gtid, task);
  }

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  task(data);
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
  KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz, unsigned flags) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
\
    ompt_pre(); \
    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
      if (num_threads != 0) { \
        __kmp_push_num_threads(&loc, gtid, num_threads); \
      } \
      if (flags != 0) { \
        __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
      } \
      __kmp_GOMP_fork_call(&loc, gtid, task, \
                           (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                           9, task, data, num_threads, &loc, (schedule), lb, \
                           (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
    } else { \
      __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
    } \
\
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
    task(data); \
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); \
    ompt_post(); \
\
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC),
              kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC),
              kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED),
              kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME),
              kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

1544 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) {
1545  int gtid = __kmp_entry_gtid();
1546  MKLOC(loc, "GOMP_taskgroup_start");
1547  KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));
1548 
1549 #if OMPT_SUPPORT
1550  if (ompt_enabled.enabled)
1551  OMPT_STORE_RETURN_ADDRESS(gtid);
1552 #endif
1553 
1554  __kmpc_taskgroup(&loc, gtid);
1555 
1556  return;
1557 }
1558 
1559 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) {
1560  int gtid = __kmp_get_gtid();
1561  MKLOC(loc, "GOMP_taskgroup_end");
1562  KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));
1563 
1564 #if OMPT_SUPPORT
1565  if (ompt_enabled.enabled)
1566  OMPT_STORE_RETURN_ADDRESS(gtid);
1567 #endif
1568 
1569  __kmpc_end_taskgroup(&loc, gtid);
1570 
1571  return;
1572 }
1573 
1574 static kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
1575  kmp_int32 cncl_kind = 0;
1576  switch (gomp_kind) {
1577  case 1:
1578  cncl_kind = cancel_parallel;
1579  break;
1580  case 2:
1581  cncl_kind = cancel_loop;
1582  break;
1583  case 4:
1584  cncl_kind = cancel_sections;
1585  break;
1586  case 8:
1587  cncl_kind = cancel_taskgroup;
1588  break;
1589  }
1590  return cncl_kind;
1591 }
1592 
1593 // Return true if cancellation should take place, false otherwise
1594 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) {
1595  int gtid = __kmp_get_gtid();
1596  MKLOC(loc, "GOMP_cancellation_point");
1597  KA_TRACE(20, ("GOMP_cancellation_point: T#%d which:%d\n", gtid, which));
1598  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1599  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1600 }
1601 
1602 // Return true if cancellation should take place, false otherwise
1603 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) {
1604  int gtid = __kmp_get_gtid();
1605  MKLOC(loc, "GOMP_cancel");
1606  KA_TRACE(20, ("GOMP_cancel: T#%d which:%d do_cancel:%d\n", gtid, which,
1607  (int)do_cancel));
1608  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1609 
1610  if (do_cancel == FALSE) {
1611  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1612  } else {
1613  return __kmpc_cancel(&loc, gtid, cncl_kind);
1614  }
1615 }
1616 
1617 // Return true if cancellation should take place, false otherwise
1618 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) {
1619  int gtid = __kmp_get_gtid();
1620  KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
1621  return __kmp_barrier_gomp_cancel(gtid);
1622 }
1623 
1624 // Return true if cancellation should take place, false otherwise
1625 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) {
1626  int gtid = __kmp_get_gtid();
1627  KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
1628  return __kmp_barrier_gomp_cancel(gtid);
1629 }
1630 
1631 // Return true if cancellation should take place, false otherwise
1632 bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) {
1633  int gtid = __kmp_get_gtid();
1634  KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
1635  return __kmp_barrier_gomp_cancel(gtid);
1636 }
1637 
1638 // All target functions are empty as of 2014-05-29
1639 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *),
1640  const void *openmp_target,
1641  size_t mapnum, void **hostaddrs,
1642  size_t *sizes,
1643  unsigned char *kinds) {
1644  return;
1645 }
1646 
1647 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)(
1648  int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1649  size_t *sizes, unsigned char *kinds) {
1650  return;
1651 }
1652 
1653 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; }
1654 
1655 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)(
1656  int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
1657  size_t *sizes, unsigned char *kinds) {
1658  return;
1659 }
1660 
1661 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams,
1662  unsigned int thread_limit) {
1663  return;
1664 }
1665 
1666 // Task duplication function which copies src to dest (both are
1667 // preallocated task structures)
1668 static void __kmp_gomp_task_dup(kmp_task_t *dest, kmp_task_t *src,
1669  kmp_int32 last_private) {
1670  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(src);
1671  if (taskdata->td_copy_func) {
1672  (taskdata->td_copy_func)(dest->shareds, src->shareds);
1673  }
1674 }
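
// Note: td_copy_func is stashed on the task by __GOMP_taskloop() below; it
// is the GOMP-provided firstprivate copy constructor, so duplicating a task
// reduces to running that constructor over the new task's shareds block.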
1675 
1676 #ifdef __cplusplus
1677 } // extern "C"
1678 #endif
1679 
1680 template <typename T>
1681 void __GOMP_taskloop(void (*func)(void *), void *data,
1682  void (*copy_func)(void *, void *), long arg_size,
1683  long arg_align, unsigned gomp_flags,
1684  unsigned long num_tasks, int priority, T start, T end,
1685  T step) {
1686  typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
1687  MKLOC(loc, "GOMP_taskloop");
1688  int sched;
1689  T *loop_bounds;
1690  int gtid = __kmp_entry_gtid();
1691  kmp_int32 flags = 0;
1692  int if_val = gomp_flags & (1u << 10);
1693  int nogroup = gomp_flags & (1u << 11);
1694  int up = gomp_flags & (1u << 8);
1695  p_task_dup_t task_dup = NULL;
1696  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1697 #ifdef KMP_DEBUG
1698  {
1699  char *buff;
1700  buff = __kmp_str_format(
1701  "GOMP_taskloop: T#%%d: func:%%p data:%%p copy_func:%%p "
1702  "arg_size:%%ld arg_align:%%ld gomp_flags:0x%%x num_tasks:%%lu "
1703  "priority:%%d start:%%%s end:%%%s step:%%%s\n",
1704  traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec);
1705  KA_TRACE(20, (buff, gtid, func, data, copy_func, arg_size, arg_align,
1706  gomp_flags, num_tasks, priority, start, end, step));
1707  __kmp_str_free(&buff);
1708  }
1709 #endif
1710  KMP_ASSERT((size_t)arg_size >= 2 * sizeof(T));
1711  KMP_ASSERT(arg_align > 0);
1712  // The low-order bit is the "untied" flag
1713  if (!(gomp_flags & 1)) {
1714  input_flags->tiedness = 1;
1715  }
1716  // The second low-order bit is the "final" flag
1717  if (gomp_flags & 2) {
1718  input_flags->final = 1;
1719  }
1720  // Negative step flag
1721  if (!up) {
1722  // If the step is flagged as negative but was not properly sign extended,
1723  // sign extend it manually. The value could be a short, int, or char
1724  // embedded in a long, so no single cast can be assumed.
1725  if (step > 0) {
1726  for (int i = sizeof(T) * CHAR_BIT - 1; i >= 0L; --i) {
1727  // break at the first 1 bit
1728  if (step & ((T)1 << i))
1729  break;
1730  step |= ((T)1 << i);
1731  }
1732  }
1733  }
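  // Worked example (illustrative): with T = long and an 8-bit step of -2,
  // the embedded value arrives as 0x00...00FE. Scanning down from the top
  // bit, every clear bit is set until the first set bit (bit 7) is reached,
  // yielding 0xFF...FFFE, i.e. the correctly sign-extended -2.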
1734  input_flags->native = 1;
1735  // Figure out whether a grainsize or num_tasks clause was specified
1736  if (num_tasks > 0) {
1737  if (gomp_flags & (1u << 9))
1738  sched = 1; // grainsize specified
1739  else
1740  sched = 2; // num_tasks specified
1741  // otherwise, neither grainsize nor num_tasks was specified
1742  } else {
1743  sched = 0;
1744  }
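  // Summary of the gomp_flags bits decoded above: bit 0 = untied, bit 1 =
  // final, bit 8 = non-negative step ("up"), bit 9 = grainsize (vs.
  // num_tasks) when num_tasks > 0, bit 10 = if-clause value, bit 11 =
  // nogroup.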
1745 
1746  // __kmp_task_alloc() sets up all other flags
1747  kmp_task_t *task =
1748  __kmp_task_alloc(&loc, gtid, input_flags, sizeof(kmp_task_t),
1749  arg_size + arg_align - 1, (kmp_routine_entry_t)func);
1750  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1751  taskdata->td_copy_func = copy_func;
1752  taskdata->td_size_loop_bounds = sizeof(T);
1753 
1754  // re-align shareds if needed and setup firstprivate copy constructors
1755  // through the task_dup mechanism
1756  task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
1757  arg_align * arg_align);
1758  if (copy_func) {
1759  task_dup = __kmp_gomp_task_dup;
1760  }
1761  KMP_MEMCPY(task->shareds, data, arg_size);
1762 
1763  loop_bounds = (T *)task->shareds;
1764  loop_bounds[0] = start;
1765  loop_bounds[1] = end + (up ? -1 : 1);
1766  __kmpc_taskloop(&loc, gtid, task, if_val, (kmp_uint64 *)&(loop_bounds[0]),
1767  (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, nogroup,
1768  sched, (kmp_uint64)num_tasks, (void *)task_dup);
1769 }
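
// A minimal caller sketch (an assumption about GCC codegen, for orientation
// only): for
//
//   #pragma omp taskloop grainsize(4)
//   for (long i = 0; i < n; ++i) body(i);
//
// the compiler outlines the body into a function taking a captures block
// whose first two members are the per-task bounds, then calls roughly
//
//   GOMP_taskloop(outlined_fn, &captures, /*copy_func=*/NULL,
//                 sizeof(captures), __alignof__(captures), gomp_flags,
//                 /*num_tasks=*/4, /*priority=*/0, /*start=*/0, /*end=*/n,
//                 /*step=*/1);
//
// which is why loop_bounds[0]/loop_bounds[1] above overwrite the start of
// the shareds block, and why end is biased by -1/+1 to turn GOMP's exclusive
// bound into the inclusive bound __kmpc_taskloop expects.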
1770 
1771 // 4 byte version of GOMP_doacross_post
1772 // This version needs to create a temporary array that converts the 4 byte
1773 // integers into 8 byte integers
1774 template <typename T, bool need_conversion = (sizeof(long) == 4)>
1775 void __kmp_GOMP_doacross_post(T *count);
1776 
1777 template <> void __kmp_GOMP_doacross_post<long, true>(long *count) {
1778  int gtid = __kmp_entry_gtid();
1779  kmp_info_t *th = __kmp_threads[gtid];
1780  MKLOC(loc, "GOMP_doacross_post");
1781  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
1782  kmp_int64 *vec =
1783  (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
1784  for (kmp_int64 i = 0; i < num_dims; ++i) {
1785  vec[i] = (kmp_int64)count[i];
1786  }
1787  __kmpc_doacross_post(&loc, gtid, vec);
1788  __kmp_thread_free(th, vec);
1789 }
1790 
1791 // 8 byte version of GOMP_doacross_post
1792 // This version can pass the count array through directly instead of
1793 // creating a temporary array
1794 template <> void __kmp_GOMP_doacross_post<long, false>(long *count) {
1795  int gtid = __kmp_entry_gtid();
1796  MKLOC(loc, "GOMP_doacross_post");
1797  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
1798 }
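
// Rationale (illustrative): need_conversion keys off sizeof(long). On LP64
// targets long is already 8 bytes, so the count vector can be reinterpreted
// as kmp_int64* directly; on ILP32 targets (sizeof(long) == 4) each entry
// must first be widened into a temporary kmp_int64 vector, as done above.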
1799 
1800 template <typename T> void __kmp_GOMP_doacross_wait(T first, va_list args) {
1801  int gtid = __kmp_entry_gtid();
1802  kmp_info_t *th = __kmp_threads[gtid];
1803  MKLOC(loc, "GOMP_doacross_wait");
1804  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
1805  kmp_int64 *vec =
1806  (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
1807  vec[0] = (kmp_int64)first;
1808  for (kmp_int64 i = 1; i < num_dims; ++i) {
1809  T item = va_arg(args, T);
1810  vec[i] = (kmp_int64)item;
1811  }
1812  __kmpc_doacross_wait(&loc, gtid, vec);
1813  __kmp_thread_free(th, vec);
1814  return;
1815 }
1816 
1817 #ifdef __cplusplus
1818 extern "C" {
1819 #endif // __cplusplus
1820 
1821 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP)(
1822  void (*func)(void *), void *data, void (*copy_func)(void *, void *),
1823  long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
1824  int priority, long start, long end, long step) {
1825  __GOMP_taskloop<long>(func, data, copy_func, arg_size, arg_align, gomp_flags,
1826  num_tasks, priority, start, end, step);
1827 }
1828 
1829 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP_ULL)(
1830  void (*func)(void *), void *data, void (*copy_func)(void *, void *),
1831  long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
1832  int priority, unsigned long long start, unsigned long long end,
1833  unsigned long long step) {
1834  __GOMP_taskloop<unsigned long long>(func, data, copy_func, arg_size,
1835  arg_align, gomp_flags, num_tasks,
1836  priority, start, end, step);
1837 }
1838 
1839 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_POST)(long *count) {
1840  __kmp_GOMP_doacross_post(count);
1841 }
1842 
1843 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_WAIT)(long first, ...) {
1844  va_list args;
1845  va_start(args, first);
1846  __kmp_GOMP_doacross_wait<long>(first, args);
1847  va_end(args);
1848 }
1849 
1850 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_POST)(
1851  unsigned long long *count) {
1852  int gtid = __kmp_entry_gtid();
1853  MKLOC(loc, "GOMP_doacross_ull_post");
1854  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
1855 }
1856 
1857 void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT)(
1858  unsigned long long first, ...) {
1859  va_list args;
1860  va_start(args, first);
1861  __kmp_GOMP_doacross_wait<unsigned long long>(first, args);
1862  va_end(args);
1863 }
1864 
1865 /* The following sections of code create aliases for the GOMP_* functions,
1866    then create versioned symbols using the assembler directive .symver. This
1867    is only pertinent to ELF shared (.so) libraries. The KMP_VERSION_SYMBOL
1868    macro is defined in kmp_os.h. */
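
/* A hedged sketch of the expansion (see kmp_os.h for the authoritative
   definition): KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0")
   declares an alias of the implementation and emits assembler directives
   roughly of the form

     __asm__(".symver GOMP_barrier_10_alias, GOMP_barrier@GOMP_1.0");

   binding the alias to the GOMP_1.0 version node, so one .so can serve
   binaries linked against several libgomp ABI versions. */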
1869 
1870 #ifdef KMP_USE_VERSION_SYMBOLS
1871 // GOMP_1.0 versioned symbols
1872 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
1873 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
1874 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
1875 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
1876 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
1877 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
1878 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
1879 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
1880 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
1881 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
1882 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
1883 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
1884 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
1885 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
1886 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10,
1887  "GOMP_1.0");
1888 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
1889 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
1890 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
1891 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10,
1892  "GOMP_1.0");
1893 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
1894 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
1895 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
1896 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
1897 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
1898 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
1899 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
1900 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
1901 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
1902 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10,
1903  "GOMP_1.0");
1904 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10,
1905  "GOMP_1.0");
1906 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10,
1907  "GOMP_1.0");
1908 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10,
1909  "GOMP_1.0");
1910 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
1911 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
1912 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
1913 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
1914 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
1915 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
1916 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
1917 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
1918 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");
1919 
1920 // GOMP_2.0 versioned symbols
1921 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
1922 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
1923 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
1924 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
1925 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
1926 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
1927 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20,
1928  "GOMP_2.0");
1929 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20,
1930  "GOMP_2.0");
1931 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20,
1932  "GOMP_2.0");
1933 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20,
1934  "GOMP_2.0");
1935 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20,
1936  "GOMP_2.0");
1937 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20,
1938  "GOMP_2.0");
1939 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20,
1940  "GOMP_2.0");
1941 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20,
1942  "GOMP_2.0");
1943 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
1944 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
1945 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
1946 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");
1947 
1948 // GOMP_3.0 versioned symbols
1949 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");
1950 
1951 // GOMP_4.0 versioned symbols
1952 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
1953 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
1954 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
1955 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
1956 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
1957 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
1958 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
1959 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
1960 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
1961 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
1962 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
1963 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
1964 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
1965 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
1966 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
1967 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
1968 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
1969 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
1970 
1971 // GOMP_4.5 versioned symbols
1972 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP, 45, "GOMP_4.5");
1973 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP_ULL, 45, "GOMP_4.5");
1974 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_POST, 45, "GOMP_4.5");
1975 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_WAIT, 45, "GOMP_4.5");
1976 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START, 45,
1977  "GOMP_4.5");
1978 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START, 45,
1979  "GOMP_4.5");
1980 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START, 45,
1981  "GOMP_4.5");
1982 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START, 45,
1983  "GOMP_4.5");
1984 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_POST, 45, "GOMP_4.5");
1985 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT, 45, "GOMP_4.5");
1986 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START, 45,
1987  "GOMP_4.5");
1988 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, 45,
1989  "GOMP_4.5");
1990 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, 45,
1991  "GOMP_4.5");
1992 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, 45,
1993  "GOMP_4.5");
1994 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START, 45,
1995  "GOMP_4.5");
1996 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT, 45,
1997  "GOMP_4.5");
1998 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START, 45,
1999  "GOMP_4.5");
2000 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT, 45,
2001  "GOMP_4.5");
2002 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START, 45,
2003  "GOMP_4.5");
2004 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT, 45,
2005  "GOMP_4.5");
2006 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START, 45,
2007  "GOMP_4.5");
2008 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT, 45,
2009  "GOMP_4.5");
2010 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC, 45,
2011  "GOMP_4.5");
2012 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED, 45,
2013  "GOMP_4.5");
2014 
2015 // GOMP_5.0 versioned symbols
2016 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT, 50,
2017  "GOMP_5.0");
2018 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START, 50,
2019  "GOMP_5.0");
2020 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT, 50,
2021  "GOMP_5.0");
2022 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START, 50,
2023  "GOMP_5.0");
2024 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
2025  50, "GOMP_5.0");
2026 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START,
2027  50, "GOMP_5.0");
2028 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT, 50,
2029  "GOMP_5.0");
2030 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START, 50,
2031  "GOMP_5.0");
2032 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME, 50,
2033  "GOMP_5.0");
2034 KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME,
2035  50, "GOMP_5.0");
2036 
2037 #endif // KMP_USE_VERSION_SYMBOLS
2038 
2039 #ifdef __cplusplus
2040 } // extern "C"
2041 #endif // __cplusplus