#include "kmp_error.h"
#include "kmp_stats.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

// template for type limits
template< typename T >
struct i_maxmin {
    static const T mx;
    static const T mn;
};
template<>
struct i_maxmin< int > {
    static const int mx = 0x7fffffff;
    static const int mn = 0x80000000;
};
template<>
struct i_maxmin< unsigned int > {
    static const unsigned int mx = 0xffffffff;
    static const unsigned int mn = 0x00000000;
};
template<>
struct i_maxmin< long long > {
    static const long long mx = 0x7fffffffffffffffLL;
    static const long long mn = 0x8000000000000000LL;
};
template<>
struct i_maxmin< unsigned long long > {
    static const unsigned long long mx = 0xffffffffffffffffLL;
    static const unsigned long long mn = 0x0000000000000000LL;
};
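// NOTE (editorial sketch, not part of the original source): these
// specializations encode the same values as std::numeric_limits; e.g. the
// following would hold:
//
//   #include <limits>
//   static_assert( i_maxmin< int >::mx == std::numeric_limits< int >::max(),
//                  "mx is INT_MAX" );
//   static_assert( i_maxmin< long long >::mx ==
//                  std::numeric_limits< long long >::max(), "mx is LLONG_MAX" );
//
// A dedicated struct keeps the limits available as compile-time constants for
// each integer type T instantiated by the loop templates below.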
#ifdef KMP_DEBUG
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu )
char const * traits_t< int >::spec                = "d";
char const * traits_t< unsigned int >::spec       = "u";
char const * traits_t< long long >::spec          = "lld";
char const * traits_t< unsigned long long >::spec = "llu";
#endif
template< typename T >
static void
__kmp_for_static_init(
    ident_t                          *loc,
    kmp_int32                         global_tid,
    kmp_int32                         schedtype,
    kmp_int32                        *plastiter,
    T                                *plower,
    T                                *pupper,
    typename traits_t< T >::signed_t *pstride,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    KMP_COUNT_BLOCK(OMP_FOR_static);
    KMP_TIME_PARTITIONED_BLOCK(FOR_static_scheduling);

    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;
    /* this all has to be changed back to TID and such.. */
    register kmp_int32   gtid = global_tid;
    register kmp_uint32  tid;
    register kmp_uint32  nth;
    register UT          trip_count;
    register kmp_team_t *team;
    register kmp_info_t *th = __kmp_threads[ gtid ];
#if OMPT_SUPPORT && OMPT_TRACE
    ompt_team_info_t *team_info = NULL;
    ompt_task_info_t *task_info = NULL;

    if (ompt_enabled) {
        // Only fully initialize variables needed by OMPT if OMPT is enabled.
        team_info = __ompt_get_teaminfo(0, NULL);
        task_info = __ompt_get_taskinfo(0);
    }
#endif
    KMP_DEBUG_ASSERT( plastiter && plower && pupper && pstride );
    KE_TRACE( 10, ("__kmpc_for_static_init called (%d)\n", global_tid));
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s," \
            " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, global_tid, schedtype, *plastiter,
                        *plower, *pupper, *pstride, incr, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
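    // NOTE (editorial, not in the original source): the debug blocks in this
    // file build their format strings in two passes. __kmp_str_format first
    // collapses each "%%" to a literal '%' and substitutes the per-type
    // specifiers, so for T = int / ST = int the string above becomes
    //   "__kmpc_for_static_init: T#%d sched=%d liter=%d iter=(%d, %d, %d)
    //    incr=%d chunk=%d signed?<d>\n"
    // which KD_TRACE then passes, printf-style, with the actual arguments.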
    if ( __kmp_env_consistency_check ) {
        __kmp_push_workshare( global_tid, ct_pdo, loc );
        if ( incr == 0 ) {
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        }
    }
    /* special handling for zero-trip loops */
    if ( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
        if( plastiter != NULL )
            *plastiter = FALSE;
        /* leave pupper and plower set to entire iteration space */
        *pstride = incr;   /* value should never be used */
        #ifdef KMP_DEBUG
        {
            char *buff;
            // create format specifiers before the debug output
            buff = __kmp_str_format(
                "__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                "lower=%%%s upper=%%%s stride = %%%s signed?<%s>, loc = %%s\n",
                traits_t< T >::spec, traits_t< T >::spec,
                traits_t< ST >::spec, traits_t< T >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride, loc->psource ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
        if (ompt_enabled &&
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
                team_info->parallel_id, task_info->task_id,
                team_info->microtask);
        }
#endif
        return;
    }
    #if OMP_40_ENABLED
    if ( schedtype > kmp_ord_upper ) {
        // we are in DISTRIBUTE construct
        schedtype += kmp_sch_static - kmp_distribute_static; // convert to usual schedule type
        tid  = th->th.th_team->t.t_master_tid;
        team = th->th.th_team->t.t_parent;
    } else
    #endif
    {
        tid  = __kmp_tid_from_gtid( global_tid );
        team = th->th.th_team;
    }
    /* determine if "for" loop is an active worksharing construct */
    if ( team -> t.t_serialized ) {
        /* serialized parallel, each thread executes whole iteration space */
        if( plastiter != NULL )
            *plastiter = TRUE;
        /* leave pupper and plower set to entire iteration space */
        *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));

        #ifdef KMP_DEBUG
        {
            char *buff;
            // create format specifiers before the debug output
            buff = __kmp_str_format(
                "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
        if (ompt_enabled &&
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
                team_info->parallel_id, task_info->task_id,
                team_info->microtask);
        }
#endif
        return;
    }
    nth = team->t.t_nproc;
    if ( nth == 1 ) {
        if( plastiter != NULL )
            *plastiter = TRUE;
        *pstride = (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
        #ifdef KMP_DEBUG
        {
            char *buff;
            // create format specifiers before the debug output
            buff = __kmp_str_format(
                "__kmpc_for_static_init: (serial) liter=%%d lower=%%%s upper=%%%s stride = %%%s\n",
                traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec );
            KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
            __kmp_str_free( &buff );
        }
        #endif
        KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
        if (ompt_enabled &&
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
                team_info->parallel_id, task_info->task_id,
                team_info->microtask);
        }
#endif
        return;
    }
    /* compute trip count */
    if ( incr == 1 ) {
        trip_count = *pupper - *plower + 1;
    } else if (incr == -1) {
        trip_count = *plower - *pupper + 1;
    } else if ( incr > 0 ) {
        // upper-lower can exceed the limit of signed type
        trip_count = (UT)(*pupper - *plower) / incr + 1;
    } else {
        trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
    }
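    // NOTE (editorial, not in the original source): worked example of the
    // trip-count formula above. For the loop
    //     for ( i = 3; i <= 17; i += 4 )   // lower=3, upper=17, incr=4
    // trip_count = (UT)(17 - 3) / 4 + 1 = 14 / 4 + 1 = 3 + 1 = 4,
    // i.e. the iterations i = 3, 7, 11, 15. The subtraction is done in the
    // unsigned type UT so that ranges wider than the signed type still
    // produce the correct count.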
    if ( __kmp_env_consistency_check ) {
        /* tripcount overflow? */
        if ( trip_count == 0 && *pupper != *plower ) {
            __kmp_error_construct( kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo, loc );
        }
    }
    /* compute remaining parameters */
    switch ( schedtype ) {
    case kmp_sch_static:
        {
            if ( trip_count < nth ) {
                KMP_DEBUG_ASSERT(
                    __kmp_static == kmp_sch_static_greedy || \
                    __kmp_static == kmp_sch_static_balanced
                ); // Unknown static scheduling type.
                if ( tid < trip_count ) {
                    *pupper = *plower = *plower + tid * incr;
                } else {
                    *plower = *pupper + incr;
                }
                if( plastiter != NULL )
                    *plastiter = ( tid == trip_count - 1 );
            } else {
                if ( __kmp_static == kmp_sch_static_balanced ) {
                    register UT small_chunk = trip_count / nth;
                    register UT extras = trip_count % nth;
                    *plower += incr * ( tid * small_chunk + ( tid < extras ? tid : extras ) );
                    *pupper = *plower + small_chunk * incr - ( tid < extras ? 0 : incr );
                    if( plastiter != NULL )
                        *plastiter = ( tid == nth - 1 );
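                    // NOTE (editorial, not in the original source): balanced
                    // split example. For trip_count=10, nth=4, incr=1:
                    // small_chunk=2, extras=2, so the four threads get
                    // iterations [0..2], [3..5], [6..7], [8..9] -- block sizes
                    // 3, 3, 2, 2, never differing by more than one iteration.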
                } else {
                    register T big_chunk_inc_count = ( trip_count/nth +
                        ( ( trip_count % nth ) ? 1 : 0) ) * incr;
                    register T old_upper = *pupper;

                    KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
                        // Unknown static scheduling type.

                    *plower += tid * big_chunk_inc_count;
                    *pupper = *plower + big_chunk_inc_count - incr;
                    if ( incr > 0 ) {
                        if( *pupper < *plower )
                            *pupper = i_maxmin< T >::mx;  // computed upper overflowed
                        if( plastiter != NULL )
                            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
                        if ( *pupper > old_upper ) *pupper = old_upper; // don't run past the loop's upper bound
                    } else {
                        if( *pupper > *plower )
                            *pupper = i_maxmin< T >::mn;  // computed upper underflowed
                        if( plastiter != NULL )
                            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
                        if ( *pupper < old_upper ) *pupper = old_upper; // don't run past the loop's upper bound
                    }
                }
            }
            *pstride = trip_count;
            break;
        }
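    // NOTE (editorial, not in the original source): greedy split example with
    // the same parameters (trip_count=10, nth=4, incr=1):
    // big_chunk_inc_count = (10/4 + 1) * 1 = 3, so the threads get [0..2],
    // [3..5], [6..8], [9..11]; the last thread's upper bound overshoots and is
    // clamped back to 9. Greedy hands out ceil(trip_count/nth) iterations per
    // thread, while the balanced split above keeps the shares within one
    // iteration of each other.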
    case kmp_sch_static_chunked:
        {
            register ST span;
            if ( chunk < 1 ) {
                chunk = 1;
            }
            span = chunk * incr;
            *pstride = span * nth;
            *plower = *plower + (span * tid);
            *pupper = *plower + span - incr;
            if( plastiter != NULL )
                *plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
            break;
        }
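    // NOTE (editorial, not in the original source): chunked example. For
    // trip_count=10, nth=2, chunk=2, incr=1: span=2 and *pstride=4, so
    // thread 0 starts on [0..1] and advances by the stride to [4..5] and
    // [8..9], while thread 1 covers [2..3] and [6..7]. The last chunk has
    // index (10-1)/2 = 4 and 4 % 2 == 0, so thread 0 runs the final iteration.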
    #if OMP_45_ENABLED
    case kmp_sch_static_balanced_chunked:
        {
            register T old_upper = *pupper;
            // round up to make sure the chunk is enough to cover all iterations
            register UT span = (trip_count + nth - 1) / nth;

            // perform chunk adjustment
            chunk = (span + chunk - 1) & ~(chunk - 1);

            span = chunk * incr;
            *plower = *plower + (span * tid);
            *pupper = *plower + span - incr;
            if ( incr > 0 ) {
                if ( *pupper > old_upper ) *pupper = old_upper;
            } else
                if ( *pupper < old_upper ) *pupper = old_upper;

            if( plastiter != NULL )
                *plastiter = ( tid == ((trip_count - 1)/( UT )chunk) );
            break;
        }
    #endif
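    // NOTE (editorial, not in the original source): the bit trick
    //     chunk = (span + chunk - 1) & ~(chunk - 1);
    // rounds span up to the next multiple of chunk; it relies on chunk being a
    // power of two. Example: span=10, chunk=4 gives (10+3) & ~3 = 13 & ~3 = 12.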
    default:
        KMP_ASSERT2( 0, "__kmpc_for_static_init: unknown scheduling type" );
        break;
    }
#if USE_ITT_BUILD
    // Report loop metadata
    if ( KMP_MASTER_TID(tid) && __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
#if OMP_40_ENABLED
        th->th.th_teams_microtask == NULL &&
#endif
        team->t.t_active_level == 1 )
    {
        kmp_uint64 cur_chunk = chunk;
        // Calculate chunk in case it was not specified; it is specified for kmp_sch_static_chunked
        if ( schedtype == kmp_sch_static ) {
            cur_chunk = trip_count / nth + ( ( trip_count % nth ) ? 1 : 0 );
        }
        // 0 - "static" schedule
        __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
    }
#endif
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_for_static_init: liter=%%d lower=%%%s upper=%%%s stride = %%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pstride ) );
        __kmp_str_free( &buff );
    }
    #endif
    KE_TRACE( 10, ("__kmpc_for_static_init: T#%d return\n", global_tid ) );

#if OMPT_SUPPORT && OMPT_TRACE
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_loop_begin)) {
        ompt_callbacks.ompt_callback(ompt_event_loop_begin)(
            team_info->parallel_id, task_info->task_id, team_info->microtask);
    }
#endif

    return;
}
template< typename T >
static void
__kmp_dist_for_static_init(
    ident_t                          *loc,
    kmp_int32                         gtid,
    kmp_int32                         schedule,
    kmp_int32                        *plastiter,
    T                                *plower,
    T                                *pupper,
    T                                *pupperDist,
    typename traits_t< T >::signed_t *pstride,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;
    register kmp_uint32  tid;
    register kmp_uint32  nth;
    register kmp_uint32  team_id;
    register kmp_uint32  nteams;
    register UT          trip_count;
    register kmp_team_t *team;
    register kmp_info_t *th;
    KMP_DEBUG_ASSERT( plastiter && plower && pupper && pupperDist && pstride );
    KE_TRACE( 10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "\
            "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, gtid, schedule, *plastiter,
                        *plower, *pupper, incr, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
    if( __kmp_env_consistency_check ) {
        __kmp_push_workshare( gtid, ct_pdo, loc );
        if( incr == 0 ) {
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        }
        if( incr > 0 ? (*pupper < *plower) : (*plower < *pupper) ) {
            // The loop is illegal.
            // Some zero-trip loops maintained by compiler, e.g.:
            //   for(i=10;i<0;++i)  // lower >= upper - run-time check
            //   for(i=0;i>10;--i)  // lower <= upper - run-time check
            // Compiler does not check the following illegal loops:
            //   for(i=0;i<10;i+=incr)  // incr < 0 - run-time check
            //   for(i=10;i>0;i+=incr)  // incr > 0 - run-time check
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
        }
    }
    tid = __kmp_tid_from_gtid( gtid );
    th = __kmp_threads[gtid];
    nth = th->th.th_team_nproc;
    team = th->th.th_team;
    #if OMP_40_ENABLED
    KMP_DEBUG_ASSERT(th->th.th_teams_microtask);  // we are in the teams construct
    nteams = th->th.th_teams_size.nteams;
    #endif
    team_id = team->t.t_master_tid;
    KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
    // compute global trip count
    if( incr == 1 ) {
        trip_count = *pupper - *plower + 1;
    } else if(incr == -1) {
        trip_count = *plower - *pupper + 1;
    } else if ( incr > 0 ) {
        // upper-lower can exceed the limit of signed type
        trip_count = (UT)(*pupper - *plower) / incr + 1;
    } else {
        trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
    }
    *pstride = *pupper - *plower;  // just in case (can be unused)
    if( trip_count <= nteams ) {
        KMP_DEBUG_ASSERT(
            __kmp_static == kmp_sch_static_greedy || \
            __kmp_static == kmp_sch_static_balanced
        ); // Unknown static scheduling type.
        // only masters of some teams get single iteration, other threads get nothing
        if( team_id < trip_count && tid == 0 ) {
            *pupper = *pupperDist = *plower = *plower + team_id * incr;
        } else {
            *pupperDist = *pupper;
            *plower = *pupper + incr;  // compiler should guard the loop from execution
        }
        if( plastiter != NULL )
            *plastiter = ( tid == 0 && team_id == trip_count - 1 );
    } else {
        // Get the team's chunk first (each team gets at most one chunk)
        if( __kmp_static == kmp_sch_static_balanced ) {
            register UT chunkD = trip_count / nteams;
            register UT extras = trip_count % nteams;
            *plower += incr * ( team_id * chunkD + ( team_id < extras ? team_id : extras ) );
            *pupperDist = *plower + chunkD * incr - ( team_id < extras ? 0 : incr );
            if( plastiter != NULL )
                *plastiter = ( team_id == nteams - 1 );
        } else {
            register T chunk_inc_count =
                ( trip_count / nteams + ( ( trip_count % nteams ) ? 1 : 0) ) * incr;
            register T upper = *pupper;
            KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
                // Unknown static scheduling type.
            *plower += team_id * chunk_inc_count;
            *pupperDist = *plower + chunk_inc_count - incr;
            // Check/correct bounds if needed
            if( incr > 0 ) {
                if( *pupperDist < *plower )
                    *pupperDist = i_maxmin< T >::mx;  // computed upper overflowed
                if( plastiter != NULL )
                    *plastiter = *plower <= upper && *pupperDist > upper - incr;
                if( *pupperDist > upper )
                    *pupperDist = upper;  // don't run past the loop's upper bound
                if( *plower > *pupperDist ) {
                    *pupper = *pupperDist;  // no iterations available for the team
                    goto end;
                }
            } else {
                if( *pupperDist > *plower )
                    *pupperDist = i_maxmin< T >::mn;  // computed upper underflowed
                if( plastiter != NULL )
                    *plastiter = *plower >= upper && *pupperDist < upper - incr;
                if( *pupperDist < upper )
                    *pupperDist = upper;  // don't run past the loop's upper bound
                if( *plower < *pupperDist ) {
                    *pupper = *pupperDist;  // no iterations available for the team
                    goto end;
                }
            }
        }
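        // NOTE (editorial, not in the original source): two-level example. With
        // trip_count=100 and nteams=3 under balanced scheduling, the teams get
        // blocks of 34, 33, and 33 iterations (chunkD=33, extras=1); *pupperDist
        // bounds the team's block, and the code below re-partitions that block
        // among the team's nth threads just as __kmp_for_static_init would.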
        // compute trip count for team's chunk
        if( incr == 1 ) {
            trip_count = *pupperDist - *plower + 1;
        } else if(incr == -1) {
            trip_count = *plower - *pupperDist + 1;
        } else if ( incr > 1 ) {
            // upper-lower can exceed the limit of signed type
            trip_count = (UT)(*pupperDist - *plower) / incr + 1;
        } else {
            trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
        }
        KMP_DEBUG_ASSERT( trip_count );
        switch( schedule ) {
        case kmp_sch_static:
        {
            if( trip_count <= nth ) {
                KMP_DEBUG_ASSERT(
                    __kmp_static == kmp_sch_static_greedy || \
                    __kmp_static == kmp_sch_static_balanced
                ); // Unknown static scheduling type.
                if( tid < trip_count )
                    *pupper = *plower = *plower + tid * incr;
                else
                    *plower = *pupper + incr;  // no iterations available
                if( plastiter != NULL )
                    if( *plastiter != 0 && !( tid == trip_count - 1 ) )
                        *plastiter = 0;
            } else {
                if( __kmp_static == kmp_sch_static_balanced ) {
                    register UT chunkL = trip_count / nth;
                    register UT extras = trip_count % nth;
                    *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
                    *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
                    if( plastiter != NULL )
                        if( *plastiter != 0 && !( tid == nth - 1 ) )
                            *plastiter = 0;
                } else {
                    register T chunk_inc_count =
                        ( trip_count / nth + ( ( trip_count % nth ) ? 1 : 0) ) * incr;
                    register T upper = *pupperDist;
                    KMP_DEBUG_ASSERT( __kmp_static == kmp_sch_static_greedy );
                        // Unknown static scheduling type.
                    *plower += tid * chunk_inc_count;
                    *pupper = *plower + chunk_inc_count - incr;
                    if( incr > 0 ) {
                        if( *pupper < *plower )
                            *pupper = i_maxmin< T >::mx;  // computed upper overflowed
                        if( plastiter != NULL )
                            if( *plastiter != 0 && !(*plower <= upper && *pupper > upper - incr) )
                                *plastiter = 0;
                        if( *pupper > upper )
                            *pupper = upper;  // don't run past the team's upper bound
                    } else {
                        if( *pupper > *plower )
                            *pupper = i_maxmin< T >::mn;  // computed upper underflowed
                        if( plastiter != NULL )
                            if( *plastiter != 0 && !(*plower >= upper && *pupper < upper - incr) )
                                *plastiter = 0;
                        if( *pupper < upper )
                            *pupper = upper;  // don't run past the team's upper bound
                    }
                }
            }
            break;
        }
        case kmp_sch_static_chunked:
        {
            register ST span;
            if( chunk < 1 )
                chunk = 1;
            span = chunk * incr;
            *pstride = span * nth;
            *plower = *plower + (span * tid);
            *pupper = *plower + span - incr;
            if( plastiter != NULL )
                if( *plastiter != 0 && !(tid == ((trip_count - 1) / ( UT )chunk) % nth) )
                    *plastiter = 0;
            break;
        }
        default:
            KMP_ASSERT2( 0, "__kmpc_dist_for_static_init: unknown loop scheduling type" );
            break;
        }
    }
    end:;
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format(
            "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "\
            "stride=%%%s signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< T >::spec,
            traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, *plastiter, *plower, *pupper, *pupperDist, *pstride ) );
        __kmp_str_free( &buff );
    }
    #endif
    KE_TRACE( 10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid ) );
    return;
}
template< typename T >
static void
__kmp_team_static_init(
    ident_t                          *loc,
    kmp_int32                         gtid,
    kmp_int32                        *p_last,
    T                                *p_lb,
    T                                *p_ub,
    typename traits_t< T >::signed_t *p_st,
    typename traits_t< T >::signed_t  incr,
    typename traits_t< T >::signed_t  chunk
) {
    // The routine returns the first chunk distributed to the team and
    // the stride for next chunks calculation.
    // The last-iteration flag is set for the team that will execute
    // the last iteration of the loop.
    // The routine is called for dist_teams_for construct only.
    typedef typename traits_t< T >::unsigned_t  UT;
    typedef typename traits_t< T >::signed_t    ST;
    kmp_uint32  team_id;
    kmp_uint32  nteams;
    UT          trip_count;
    T           lower;
    T           upper;
    ST          span;
    kmp_team_t *team;
    kmp_info_t *th;
    KMP_DEBUG_ASSERT( p_last && p_lb && p_ub && p_st );
    KE_TRACE( 10, ("__kmp_team_static_init called (%d)\n", gtid));
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format( "__kmp_team_static_init enter: T#%%d liter=%%d "\
            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec, traits_t< T >::spec );
        KD_TRACE(100, ( buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif

    lower = *p_lb;
    upper = *p_ub;
    if( __kmp_env_consistency_check ) {
        if( incr == 0 ) {
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo, loc );
        }
        if( incr > 0 ? (upper < lower) : (lower < upper) ) {
            // The loop is illegal.
            // Some zero-trip loops maintained by compiler, e.g.:
            //   for(i=10;i<0;++i)  // lower >= upper - run-time check
            //   for(i=0;i>10;--i)  // lower <= upper - run-time check
            // Compiler does not check the following illegal loops:
            //   for(i=0;i<10;i+=incr)  // incr < 0 - run-time check
            //   for(i=10;i>0;i+=incr)  // incr > 0 - run-time check
            __kmp_error_construct( kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc );
        }
    }
    th = __kmp_threads[gtid];
    team = th->th.th_team;
    #if OMP_40_ENABLED
    KMP_DEBUG_ASSERT(th->th.th_teams_microtask);  // we are in the teams construct
    nteams = th->th.th_teams_size.nteams;
    #endif
    team_id = team->t.t_master_tid;
    KMP_DEBUG_ASSERT(nteams == team->t.t_parent->t.t_nproc);
    // compute trip count
    if( incr == 1 ) {
        trip_count = upper - lower + 1;
    } else if(incr == -1) {
        trip_count = lower - upper + 1;
    } else if ( incr > 0 ) {
        // upper-lower can exceed the limit of signed type
        trip_count = (UT)(upper - lower) / incr + 1;
    } else {
        trip_count = (UT)(lower - upper) / (-incr) + 1;
    }
    if( chunk < 1 )
        chunk = 1;
    span = chunk * incr;
    *p_st = span * nteams;
    *p_lb = lower + (span * team_id);
    *p_ub = *p_lb + span - incr;
    if ( p_last != NULL )
        *p_last = (team_id == ((trip_count - 1)/(UT)chunk) % nteams);
    // Correct upper bound if needed
    if( incr > 0 ) {
        if( *p_ub < *p_lb )  // overflow?
            *p_ub = i_maxmin< T >::mx;
        if( *p_ub > upper )
            *p_ub = upper;  // don't run past the loop's upper bound
    } else {  // incr < 0
        if( *p_ub > *p_lb )  // underflow?
            *p_ub = i_maxmin< T >::mn;
        if( *p_ub < upper )
            *p_ub = upper;  // don't run past the loop's upper bound
    }
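    // NOTE (editorial, not in the original source): example of the team-level
    // chunking above. For lower=0, upper=99, incr=1, chunk=10, nteams=4:
    // span=10 and *p_st=40, so team 0 gets [0..9] and will later advance by
    // the stride to [40..49] and [80..89]; team 1 starts at [10..19], and so
    // on. The last chunk has index (100-1)/10 = 9, and 9 % 4 == 1, so team 1
    // executes the last iteration.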
    #ifdef KMP_DEBUG
    {
        char *buff;
        // create format specifiers before the debug output
        buff = __kmp_str_format( "__kmp_team_static_init exit: T#%%d team%%u liter=%%d "\
            "iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
            traits_t< T >::spec, traits_t< T >::spec, traits_t< ST >::spec,
            traits_t< ST >::spec );
        KD_TRACE(100, ( buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk ) );
        __kmp_str_free( &buff );
    }
    #endif
}
//--------------------------------------------------------------------------------------
extern "C" {

/*!
@ingroup WORK_SHARING

The four functions below are identical apart from the argument types. Each
computes the lower bound, upper bound, and stride to be used for the set of
iterations to be executed by the current thread from the statically scheduled
loop described by the initial values of the bounds, stride, increment, and
chunk size.
*/
void
__kmpc_for_static_init_4( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                          kmp_int32 *plower, kmp_int32 *pupper,
                          kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_for_static_init< kmp_int32 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}

void
__kmpc_for_static_init_4u( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                           kmp_uint32 *plower, kmp_uint32 *pupper,
                           kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_for_static_init< kmp_uint32 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}

void
__kmpc_for_static_init_8( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                          kmp_int64 *plower, kmp_int64 *pupper,
                          kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_for_static_init< kmp_int64 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}

void
__kmpc_for_static_init_8u( ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
                           kmp_uint64 *plower, kmp_uint64 *pupper,
                           kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_for_static_init< kmp_uint64 >(
        loc, gtid, schedtype, plastiter, plower, pupper, pstride, incr, chunk );
}
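// NOTE (editorial sketch, not part of the original source): compiler-generated
// code calls these entry points roughly as follows. This is a hand-written
// approximation of what a compiler emits for
// "#pragma omp for schedule(static)" over i = 0..99; the variable names are
// illustrative:
//
//   kmp_int32 last = 0, lower = 0, upper = 99, stride = 1;
//   __kmpc_for_static_init_4( &loc, gtid, kmp_sch_static, &last,
//                             &lower, &upper, &stride, 1, 1 );
//   for ( kmp_int32 i = lower; i <= upper; ++i )
//       body( i );
//   __kmpc_for_static_fini( &loc, gtid );
//
// On return, [lower, upper] holds exactly this thread's block of iterations,
// and `last` tells the thread whether it owns the sequentially last one.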
/*!
@ingroup WORK_SHARING

The four functions below are identical apart from the argument types. They
compute the upper and lower bounds and strides to be used for the set of
iterations executed by the current thread from the statically scheduled loop
described by the initial values of the bounds, strides, increment and chunks
for parallel loop and distribute constructs.
*/
void
__kmpc_dist_for_static_init_4(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_int32 *plower, kmp_int32 *pupper, kmp_int32 *pupperD,
    kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_dist_for_static_init< kmp_int32 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}

void
__kmpc_dist_for_static_init_4u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_uint32 *plower, kmp_uint32 *pupper, kmp_uint32 *pupperD,
    kmp_int32 *pstride, kmp_int32 incr, kmp_int32 chunk )
{
    __kmp_dist_for_static_init< kmp_uint32 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}

void
__kmpc_dist_for_static_init_8(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pupperD,
    kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_dist_for_static_init< kmp_int64 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}

void
__kmpc_dist_for_static_init_8u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 schedule, kmp_int32 *plastiter,
    kmp_uint64 *plower, kmp_uint64 *pupper, kmp_uint64 *pupperD,
    kmp_int64 *pstride, kmp_int64 incr, kmp_int64 chunk )
{
    __kmp_dist_for_static_init< kmp_uint64 >(
        loc, gtid, schedule, plastiter, plower, pupper, pupperD, pstride, incr, chunk );
}
/*!
@ingroup WORK_SHARING

The four functions below compute the upper and lower bounds and stride to be
used for the set of iterations executed by the current team from the
statically scheduled loop described by the initial values of the bounds,
stride, increment and chunk for the distribute construct.
*/
void
__kmpc_team_static_init_4(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_int32 *p_lb, kmp_int32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_int32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

void
__kmpc_team_static_init_4u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_uint32 *p_lb, kmp_uint32 *p_ub, kmp_int32 *p_st, kmp_int32 incr, kmp_int32 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_uint32 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

void
__kmpc_team_static_init_8(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_int64 *p_lb, kmp_int64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_int64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

void
__kmpc_team_static_init_8u(
    ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
    kmp_uint64 *p_lb, kmp_uint64 *p_ub, kmp_int64 *p_st, kmp_int64 incr, kmp_int64 chunk )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    __kmp_team_static_init< kmp_uint64 >( loc, gtid, p_last, p_lb, p_ub, p_st, incr, chunk );
}

} // extern "C"
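// NOTE (editorial summary, not part of the original source): as a rough
// pairing, a composite "#pragma omp teams distribute parallel for" with
// static scheduling can be lowered to a single __kmpc_dist_for_static_init_*
// call, which splits the iteration space into per-team and per-thread bounds
// in one step, while a "distribute" whose body contains a separate
// "parallel for" first obtains the team's chunk and stride via
// __kmpc_team_static_init_*. Exact lowering is compiler-dependent.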