/*! \file */
/*
* kmp.h -- KPTS runtime header file.
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef KMP_H
#define KMP_H
#include "kmp_config.h"
/* #define BUILD_PARALLEL_ORDERED 1 */
/* This fix replaces gettimeofday with clock_gettime for better scalability on
the Altix. Requires user code to be linked with -lrt. */
//#define FIX_SGI_CLOCK
/* Defines for OpenMP 3.0 tasking and auto scheduling */
#ifndef KMP_STATIC_STEAL_ENABLED
#define KMP_STATIC_STEAL_ENABLED 1
#endif
#define TASK_CURRENT_NOT_QUEUED 0
#define TASK_CURRENT_QUEUED 1
#ifdef BUILD_TIED_TASK_STACK
#define TASK_STACK_EMPTY 0 // entries when the stack is empty
#define TASK_STACK_BLOCK_BITS 5 // Used in TASK_STACK_SIZE and TASK_STACK_MASK
// Number of entries in each task stack array
#define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
// Mask for determining index into stack block
#define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
#endif // BUILD_TIED_TASK_STACK
#define TASK_NOT_PUSHED 1
#define TASK_SUCCESSFULLY_PUSHED 0
#define TASK_TIED 1
#define TASK_UNTIED 0
#define TASK_EXPLICIT 1
#define TASK_IMPLICIT 0
#define TASK_PROXY 1
#define TASK_FULL 0
#define KMP_CANCEL_THREADS
#define KMP_THREAD_ATTR
// Android does not have pthread_cancel, so undefine KMP_CANCEL_THREADS when
// building on Android.
#if defined(__ANDROID__)
#undef KMP_CANCEL_THREADS
#endif
#include <signal.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Do not include <ctype.h>: it causes problems with /MD on Windows* OS due to
   a broken Microsoft library. Some macros are provided below to replace these
   functions */
#ifndef __ABSOFT_WIN
#include <sys/types.h>
#endif
#include <limits.h>
#include <time.h>
#include <errno.h>
#include "kmp_os.h"
#include "kmp_safe_c_api.h"
#if KMP_STATS_ENABLED
class kmp_stats_list;
#endif
#if KMP_USE_HIER_SCHED
// Only include hierarchical scheduling if affinity is supported
#undef KMP_USE_HIER_SCHED
#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
#endif
#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
#include "hwloc.h"
#ifndef HWLOC_OBJ_NUMANODE
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#endif
#ifndef HWLOC_OBJ_PACKAGE
#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
#endif
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#include <xmmintrin.h>
#endif
#include "kmp_debug.h"
#include "kmp_lock.h"
#include "kmp_version.h"
#if USE_DEBUGGER
#include "kmp_debugger.h"
#endif
#include "kmp_i18n.h"
#define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
#include "kmp_wrapper_malloc.h"
#if KMP_OS_UNIX
#include <unistd.h>
#if !defined NSIG && defined _NSIG
#define NSIG _NSIG
#endif
#endif
#if KMP_OS_LINUX
#pragma weak clock_gettime
#endif
#if OMPT_SUPPORT
#include "ompt-internal.h"
#endif
#if OMP_50_ENABLED
// Affinity format function
#include "kmp_str.h"
#endif
// 0 - no fast memory allocation, alignment: 8-byte on x86, 16-byte on x64.
// 3 - fast allocation using sync, non-sync free lists of any size, non-self
// free lists of limited size.
#ifndef USE_FAST_MEMORY
#define USE_FAST_MEMORY 3
#endif
#ifndef KMP_NESTED_HOT_TEAMS
#define KMP_NESTED_HOT_TEAMS 0
#define USE_NESTED_HOT_ARG(x)
#else
#if KMP_NESTED_HOT_TEAMS
#if OMP_40_ENABLED
#define USE_NESTED_HOT_ARG(x) , x
#else
// Nested hot teams feature depends on omp 4.0, disable it for earlier versions
#undef KMP_NESTED_HOT_TEAMS
#define KMP_NESTED_HOT_TEAMS 0
#define USE_NESTED_HOT_ARG(x)
#endif
#else
#define USE_NESTED_HOT_ARG(x)
#endif
#endif
// By default, assume BGET uses a compare_exchange instruction instead of a lock.
#ifndef USE_CMP_XCHG_FOR_BGET
#define USE_CMP_XCHG_FOR_BGET 1
#endif
// Test to see if queuing lock is better than bootstrap lock for bget
// #ifndef USE_QUEUING_LOCK_FOR_BGET
// #define USE_QUEUING_LOCK_FOR_BGET
// #endif
#define KMP_NSEC_PER_SEC 1000000000L
#define KMP_USEC_PER_SEC 1000000L
/*!
@ingroup BASIC_TYPES
@{
*/
/*!
Values for bit flags used in the ident_t to describe the fields.
*/
enum {
/*! Use trampoline for internal microtasks */
KMP_IDENT_IMB = 0x01,
/*! Use c-style ident structure */
KMP_IDENT_KMPC = 0x02,
/* 0x04 is no longer used */
/*! Entry point generated by auto-parallelization */
KMP_IDENT_AUTOPAR = 0x08,
/*! Compiler generates atomic reduction option for kmpc_reduce* */
KMP_IDENT_ATOMIC_REDUCE = 0x10,
/*! To mark a 'barrier' directive in user code */
KMP_IDENT_BARRIER_EXPL = 0x20,
/*! To mark implicit barriers. */
KMP_IDENT_BARRIER_IMPL = 0x0040,
KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
/*! To mark a static loop in OMPT callbacks */
KMP_IDENT_WORK_LOOP = 0x200,
/*! To mark a sections directive in OMPT callbacks */
KMP_IDENT_WORK_SECTIONS = 0x400,
/*! To mark a distribute construct in OMPT callbacks */
KMP_IDENT_WORK_DISTRIBUTE = 0x800,
/*! Atomic hint; bottom four bits as omp_sync_hint_t. Top four reserved and
not currently used. If one day we need more bits, then we can use
an invalid combination of hints to mean that another, larger field
should be used in a different flag. */
KMP_IDENT_ATOMIC_HINT_MASK = 0xFF0000,
KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
};
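/* Illustrative sketch (not part of the runtime): a consumer can classify an
   implicit barrier by masking the flags; 'flags' is an assumed variable here.
     if ((flags & KMP_IDENT_BARRIER_IMPL_MASK) == KMP_IDENT_BARRIER_IMPL_FOR) {
       // implicit barrier at the end of a worksharing for
     }
*/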
/*!
* The ident structure that describes a source location.
*/
typedef struct ident {
kmp_int32 reserved_1; /**< might be used in Fortran; see above */
kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC
identifies this union member */
kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */
#if USE_ITT_BUILD
/* but currently used for storing region-specific ITT */
/* contextual information. */
#endif /* USE_ITT_BUILD */
kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */
char const *psource; /**< String describing the source location.
The string is composed of semi-colon separated fields
which describe the source file, the function and a pair
of line numbers that delimit the construct. */
} ident_t;
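/* Illustrative sketch: a compiler typically emits one ident_t per construct;
   the psource contents below are assumed example values, not a normative
   format.
     static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";file.c;foo;10;12;;"};
*/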
/*!
@}
*/
// Some forward declarations.
typedef union kmp_team kmp_team_t;
typedef struct kmp_taskdata kmp_taskdata_t;
typedef union kmp_task_team kmp_task_team_t;
typedef union kmp_team kmp_team_p;
typedef union kmp_info kmp_info_p;
typedef union kmp_root kmp_root_p;
#ifdef __cplusplus
extern "C" {
#endif
/* ------------------------------------------------------------------------ */
/* Pack two 32-bit signed integers into a 64-bit signed integer */
/* ToDo: Fix word ordering for big-endian machines. */
#define KMP_PACK_64(HIGH_32, LOW_32) \
((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
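// Example: KMP_PACK_64(5, 3) == 0x0000000500000003 (5 in the high 32 bits,
// 3 in the low 32 bits).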
// Generic string manipulation macros. Assume that _x is of type char *
#define SKIP_WS(_x) \
{ \
while (*(_x) == ' ' || *(_x) == '\t') \
(_x)++; \
}
#define SKIP_DIGITS(_x) \
{ \
while (*(_x) >= '0' && *(_x) <= '9') \
(_x)++; \
}
#define SKIP_TOKEN(_x) \
{ \
while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
(*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
(_x)++; \
}
#define SKIP_TO(_x, _c) \
{ \
while (*(_x) != '\0' && *(_x) != (_c)) \
(_x)++; \
}
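/* Illustrative sketch of the string macros scanning an env-style value;
   'buf' is an assumed buffer:
     char *s = buf;   // buf holds, e.g., "  core,5"
     SKIP_WS(s);      // s -> "core,5"
     SKIP_TOKEN(s);   // s -> ",5"
     if (*s == ',')
       s++;
     SKIP_DIGITS(s);  // s -> ""
*/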
/* ------------------------------------------------------------------------ */
#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
/* ------------------------------------------------------------------------ */
/* Enumeration types */
enum kmp_state_timer {
ts_stop,
ts_start,
ts_pause,
ts_last_state
};
enum dynamic_mode {
dynamic_default,
#ifdef USE_LOAD_BALANCE
dynamic_load_balance,
#endif /* USE_LOAD_BALANCE */
dynamic_random,
dynamic_thread_limit,
dynamic_max
};
/* external schedule constants, duplicate enum omp_sched in omp.h in order to
* not include it here */
#ifndef KMP_SCHED_TYPE_DEFINED
#define KMP_SCHED_TYPE_DEFINED
typedef enum kmp_sched {
kmp_sched_lower = 0, // lower and upper bounds are for routine parameter check
// Note: need to adjust __kmp_sch_map global array in case enum is changed
kmp_sched_static = 1, // mapped to kmp_sch_static_chunked (33)
kmp_sched_dynamic = 2, // mapped to kmp_sch_dynamic_chunked (35)
kmp_sched_guided = 3, // mapped to kmp_sch_guided_chunked (36)
kmp_sched_auto = 4, // mapped to kmp_sch_auto (38)
kmp_sched_upper_std = 5, // upper bound for standard schedules
kmp_sched_lower_ext = 100, // lower bound of Intel extension schedules
kmp_sched_trapezoidal = 101, // mapped to kmp_sch_trapezoidal (39)
#if KMP_STATIC_STEAL_ENABLED
kmp_sched_static_steal = 102, // mapped to kmp_sch_static_steal (44)
#endif
kmp_sched_upper,
kmp_sched_default = kmp_sched_static // default scheduling
} kmp_sched_t;
#endif
/*!
@ingroup WORK_SHARING
Describes the loop schedule to be used for a parallel for loop.
*/
enum sched_type : kmp_int32 {
kmp_sch_lower = 32, /**< lower bound for unordered values */
kmp_sch_static_chunked = 33,
kmp_sch_static = 34, /**< static unspecialized */
kmp_sch_dynamic_chunked = 35,
kmp_sch_guided_chunked = 36, /**< guided unspecialized */
kmp_sch_runtime = 37,
kmp_sch_auto = 38, /**< auto */
kmp_sch_trapezoidal = 39,
/* accessible only through KMP_SCHEDULE environment variable */
kmp_sch_static_greedy = 40,
kmp_sch_static_balanced = 41,
/* accessible only through KMP_SCHEDULE environment variable */
kmp_sch_guided_iterative_chunked = 42,
kmp_sch_guided_analytical_chunked = 43,
/* accessible only through KMP_SCHEDULE environment variable */
kmp_sch_static_steal = 44,
#if OMP_45_ENABLED
/* static with chunk adjustment (e.g., simd) */
kmp_sch_static_balanced_chunked = 45,
kmp_sch_guided_simd = 46, /**< guided with chunk adjustment */
kmp_sch_runtime_simd = 47, /**< runtime with chunk adjustment */
#endif
/* accessible only through KMP_SCHEDULE environment variable */
kmp_sch_upper, /**< upper bound for unordered values */
kmp_ord_lower = 64, /**< lower bound for ordered values, must be power of 2 */
kmp_ord_static_chunked = 65,
kmp_ord_static = 66, /**< ordered static unspecialized */
kmp_ord_dynamic_chunked = 67,
kmp_ord_guided_chunked = 68,
kmp_ord_runtime = 69,
kmp_ord_auto = 70, /**< ordered auto */
kmp_ord_trapezoidal = 71,
kmp_ord_upper, /**< upper bound for ordered values */
#if OMP_40_ENABLED
/* Schedules for Distribute construct */
kmp_distribute_static_chunked = 91, /**< distribute static chunked */
kmp_distribute_static = 92, /**< distribute static unspecialized */
#endif
/* For the "nomerge" versions, kmp_dispatch_next*() will always return a
single iteration/chunk, even if the loop is serialized. For the schedule
types listed above, the entire iteration vector is returned if the loop is
serialized. This doesn't work for gcc/gcomp sections. */
kmp_nm_lower = 160, /**< lower bound for nomerge values */
kmp_nm_static_chunked =
(kmp_sch_static_chunked - kmp_sch_lower + kmp_nm_lower),
kmp_nm_static = 162, /**< static unspecialized */
kmp_nm_dynamic_chunked = 163,
kmp_nm_guided_chunked = 164, /**< guided unspecialized */
kmp_nm_runtime = 165,
kmp_nm_auto = 166, /**< auto */
kmp_nm_trapezoidal = 167,
/* accessible only through KMP_SCHEDULE environment variable */
kmp_nm_static_greedy = 168,
kmp_nm_static_balanced = 169,
/* accessible only through KMP_SCHEDULE environment variable */
kmp_nm_guided_iterative_chunked = 170,
kmp_nm_guided_analytical_chunked = 171,
kmp_nm_static_steal =
172, /* accessible only through OMP_SCHEDULE environment variable */
kmp_nm_ord_static_chunked = 193,
kmp_nm_ord_static = 194, /**< ordered static unspecialized */
kmp_nm_ord_dynamic_chunked = 195,
kmp_nm_ord_guided_chunked = 196,
kmp_nm_ord_runtime = 197,
kmp_nm_ord_auto = 198, /**< auto */
kmp_nm_ord_trapezoidal = 199,
kmp_nm_upper, /**< upper bound for nomerge values */
#if OMP_45_ENABLED
/* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers. Since
we need to distinguish the three possible cases (no modifier, monotonic
modifier, nonmonotonic modifier), we need separate bits for each modifier.
   The absence of monotonic does not imply nonmonotonic, especially since 4.5
   says that the behaviour of the "no modifier" case is implementation defined,
   but it will become "nonmonotonic" in 5.0.
Since we're passing a full 32 bit value, we can use a couple of high bits
for these flags; out of paranoia we avoid the sign bit.
These modifiers can be or-ed into non-static schedules by the compiler to
pass the additional information. They will be stripped early in the
processing in __kmp_dispatch_init when setting up schedules, so most of the
code won't ever see schedules with these bits set. */
kmp_sch_modifier_monotonic =
(1 << 29), /**< Set if the monotonic schedule modifier was present */
kmp_sch_modifier_nonmonotonic =
(1 << 30), /**< Set if the nonmonotonic schedule modifier was present */
#define SCHEDULE_WITHOUT_MODIFIERS(s) \
(enum sched_type)( \
(s) & ~(kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic))
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s) \
(((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
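/* Example (sketch): a compiler may pass
   (kmp_sch_dynamic_chunked | kmp_sch_modifier_nonmonotonic) as the schedule;
   __kmp_dispatch_init strips the modifier early:
     enum sched_type base = SCHEDULE_WITHOUT_MODIFIERS(sched);
     // base == kmp_sch_dynamic_chunked
     // SCHEDULE_HAS_NONMONOTONIC(sched) != 0
*/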
#else
/* By doing this we hope to avoid multiple tests on OMP_45_ENABLED. Compilers
can now eliminate tests on compile time constants and dead code that results
from them, so we can leave code guarded by such an if in place. */
#define SCHEDULE_WITHOUT_MODIFIERS(s) (s)
#define SCHEDULE_HAS_MONOTONIC(s) false
#define SCHEDULE_HAS_NONMONOTONIC(s) false
#define SCHEDULE_HAS_NO_MODIFIERS(s) true
#endif
kmp_sch_default = kmp_sch_static /**< default scheduling algorithm */
};
/* Type to keep runtime schedule set via OMP_SCHEDULE or omp_set_schedule() */
typedef union kmp_r_sched {
struct {
enum sched_type r_sched_type;
int chunk;
};
kmp_int64 sched;
} kmp_r_sched_t;
extern enum sched_type __kmp_sch_map[]; // map OMP 3.0 schedule types with our
// internal schedule types
enum library_type {
library_none,
library_serial,
library_turnaround,
library_throughput
};
#if KMP_OS_LINUX
enum clock_function_type {
clock_function_gettimeofday,
clock_function_clock_gettime
};
#endif /* KMP_OS_LINUX */
#if KMP_MIC_SUPPORTED
enum mic_type { non_mic, mic1, mic2, mic3, dummy };
#endif
/* -- fast reduction stuff ------------------------------------------------ */
#undef KMP_FAST_REDUCTION_BARRIER
#define KMP_FAST_REDUCTION_BARRIER 1
#undef KMP_FAST_REDUCTION_CORE_DUO
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_FAST_REDUCTION_CORE_DUO 1
#endif
enum _reduction_method {
reduction_method_not_defined = 0,
critical_reduce_block = (1 << 8),
atomic_reduce_block = (2 << 8),
tree_reduce_block = (3 << 8),
empty_reduce_block = (4 << 8)
};
// Description of the packed_reduction_method variable:
// The packed_reduction_method variable packs two enum values into its two
// low-order bytes:
// byte 0: (packed_reduction_method & 0x000000FF) is the 'enum barrier_type'
//         value of the barrier to be used in fast reduction: bs_plain_barrier
//         or bs_reduction_barrier.
// byte 1: (packed_reduction_method & 0x0000FF00) is the reduction method to
//         be used in fast reduction.
// The reduction method is of type 'enum _reduction_method' and is defined so
// that byte 0 of its values is zero; packing and unpacking therefore need no
// shift instruction.
#if KMP_FAST_REDUCTION_BARRIER
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
((reduction_method) | (barrier_type))
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
#else
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
(reduction_method)
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
(packed_reduction_method)
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
#endif
#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
(which_reduction_block))
#if KMP_FAST_REDUCTION_BARRIER
#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
(PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
(PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
#endif
typedef int PACKED_REDUCTION_METHOD_T;
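/* Example (sketch) of packing and unpacking a reduction method:
     PACKED_REDUCTION_METHOD_T m =
         PACK_REDUCTION_METHOD_AND_BARRIER(atomic_reduce_block,
                                           bs_plain_barrier);
     // UNPACK_REDUCTION_METHOD(m) == atomic_reduce_block
     // UNPACK_REDUCTION_BARRIER(m) == bs_plain_barrier
     // TEST_REDUCTION_METHOD(m, atomic_reduce_block) is true
*/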
/* -- end of fast reduction stuff ----------------------------------------- */
#if KMP_OS_WINDOWS
#define USE_CBLKDATA
#if KMP_MSVC_COMPAT
#pragma warning(push)
#pragma warning(disable : 271 310)
#endif
#include <windows.h>
#if KMP_MSVC_COMPAT
#pragma warning(pop)
#endif
#endif
#if KMP_OS_UNIX
#include <dlfcn.h>
#include <pthread.h>
#endif
/* Only Linux* OS and Windows* OS support thread affinity. */
#if KMP_AFFINITY_SUPPORTED
// GROUP_AFFINITY is already defined for _MSC_VER>=1600 (VS2010 and later).
#if KMP_OS_WINDOWS
#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
typedef struct GROUP_AFFINITY {
KAFFINITY Mask;
WORD Group;
WORD Reserved[3];
} GROUP_AFFINITY;
#endif /* _MSC_VER < 1600 */
#if KMP_GROUP_AFFINITY
extern int __kmp_num_proc_groups;
#else
static const int __kmp_num_proc_groups = 1;
#endif /* KMP_GROUP_AFFINITY */
typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
GROUP_AFFINITY *);
extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
#endif /* KMP_OS_WINDOWS */
#if KMP_USE_HWLOC
extern hwloc_topology_t __kmp_hwloc_topology;
extern int __kmp_hwloc_error;
extern int __kmp_numa_detected;
extern int __kmp_tile_depth;
#endif
extern size_t __kmp_affin_mask_size;
#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
#define KMP_CPU_SET_ITERATE(i, mask) \
for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
#define KMP_CPU_SET(i, mask) (mask)->set(i)
#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
#define KMP_CPU_ZERO(mask) (mask)->zero()
#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
#define KMP_CPU_ALLOC_ARRAY(arr, n) \
(arr = __kmp_affinity_dispatch->allocate_mask_array(n))
#define KMP_CPU_FREE_ARRAY(arr, n) \
__kmp_affinity_dispatch->deallocate_mask_array(arr)
#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
#define __kmp_get_system_affinity(mask, abort_bool) \
(mask)->get_system_affinity(abort_bool)
#define __kmp_set_system_affinity(mask, abort_bool) \
(mask)->set_system_affinity(abort_bool)
#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
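/* Illustrative use of the mask macros (sketch; assumes affinity is enabled
   and a dispatch object has been picked via KMPAffinity::pick_api()):
     kmp_affin_mask_t *mask;
     KMP_CPU_ALLOC(mask);
     KMP_CPU_ZERO(mask);
     KMP_CPU_SET(0, mask); // request OS proc 0
     __kmp_set_system_affinity(mask, TRUE);
     KMP_CPU_FREE(mask);
*/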
class KMPAffinity {
public:
class Mask {
public:
void *operator new(size_t n);
void operator delete(void *p);
void *operator new[](size_t n);
void operator delete[](void *p);
virtual ~Mask() {}
// Set bit i to 1
virtual void set(int i) {}
// Return bit i
virtual bool is_set(int i) const { return false; }
// Set bit i to 0
virtual void clear(int i) {}
// Zero out entire mask
virtual void zero() {}
// Copy src into this mask
virtual void copy(const Mask *src) {}
// this &= rhs
virtual void bitwise_and(const Mask *rhs) {}
// this |= rhs
virtual void bitwise_or(const Mask *rhs) {}
// this = ~this
virtual void bitwise_not() {}
// API for iterating over an affinity mask
// for (int i = mask->begin(); i != mask->end(); i = mask->next(i))
virtual int begin() const { return 0; }
virtual int end() const { return 0; }
virtual int next(int previous) const { return 0; }
// Set the system's affinity to this affinity mask's value
virtual int set_system_affinity(bool abort_on_error) const { return -1; }
// Set this affinity mask to the current system affinity
virtual int get_system_affinity(bool abort_on_error) { return -1; }
// Only 1 DWORD in the mask should have any procs set.
// Return the appropriate index, or -1 for an invalid mask.
virtual int get_proc_group() const { return -1; }
};
void *operator new(size_t n);
void operator delete(void *p);
// Need virtual destructor
virtual ~KMPAffinity() = default;
// Determine if affinity is capable
virtual void determine_capable(const char *env_var) {}
// Bind the current thread to os proc
virtual void bind_thread(int proc) {}
// Factory functions to allocate/deallocate a mask
virtual Mask *allocate_mask() { return nullptr; }
virtual void deallocate_mask(Mask *m) {}
virtual Mask *allocate_mask_array(int num) { return nullptr; }
virtual void deallocate_mask_array(Mask *m) {}
virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
static void pick_api();
static void destroy_api();
enum api_type {
NATIVE_OS
#if KMP_USE_HWLOC
,
HWLOC
#endif
};
virtual api_type get_api_type() const {
KMP_ASSERT(0);
return NATIVE_OS;
}
private:
static bool picked_api;
};
typedef KMPAffinity::Mask kmp_affin_mask_t;
extern KMPAffinity *__kmp_affinity_dispatch;
// Declare local char buffers with this size for printing debug and info
// messages, using __kmp_affinity_print_mask().
#define KMP_AFFIN_MASK_PRINT_LEN 1024
enum affinity_type {
affinity_none = 0,
affinity_physical,
affinity_logical,
affinity_compact,
affinity_scatter,
affinity_explicit,
affinity_balanced,
affinity_disabled, // not used outside the env var parser
affinity_default
};
enum affinity_gran {
affinity_gran_fine = 0,
affinity_gran_thread,
affinity_gran_core,
affinity_gran_tile,
affinity_gran_numa,
affinity_gran_package,
affinity_gran_node,
#if KMP_GROUP_AFFINITY
// The "group" granularity isn't necesssarily coarser than all of the
// other levels, but we put it last in the enum.
affinity_gran_group,
#endif /* KMP_GROUP_AFFINITY */
affinity_gran_default
};
enum affinity_top_method {
affinity_top_method_all = 0, // try all (supported) methods, in order
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
affinity_top_method_apicid,
affinity_top_method_x2apicid,
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
affinity_top_method_cpuinfo, // KMP_CPUINFO_FILE is usable on Windows* OS, too
#if KMP_GROUP_AFFINITY
affinity_top_method_group,
#endif /* KMP_GROUP_AFFINITY */
affinity_top_method_flat,
#if KMP_USE_HWLOC
affinity_top_method_hwloc,
#endif
affinity_top_method_default
};
#define affinity_respect_mask_default (-1)
extern enum affinity_type __kmp_affinity_type; /* Affinity type */
extern enum affinity_gran __kmp_affinity_gran; /* Affinity granularity */
extern int __kmp_affinity_gran_levels; /* corresponding int value */
extern int __kmp_affinity_dups; /* Affinity duplicate masks */
extern enum affinity_top_method __kmp_affinity_top_method;
extern int __kmp_affinity_compact; /* Affinity 'compact' value */
extern int __kmp_affinity_offset; /* Affinity offset value */
extern int __kmp_affinity_verbose; /* Was verbose specified for KMP_AFFINITY? */
extern int __kmp_affinity_warnings; /* KMP_AFFINITY warnings enabled ? */
extern int __kmp_affinity_respect_mask; // Respect process' init affinity mask?
extern char *__kmp_affinity_proclist; /* proc ID list */
extern kmp_affin_mask_t *__kmp_affinity_masks;
extern unsigned __kmp_affinity_num_masks;
extern void __kmp_affinity_bind_thread(int which);
extern kmp_affin_mask_t *__kmp_affin_fullMask;
extern char *__kmp_cpuinfo_file;
#endif /* KMP_AFFINITY_SUPPORTED */
#if OMP_40_ENABLED
// This needs to be kept in sync with the values in omp.h !!!
typedef enum kmp_proc_bind_t {
proc_bind_false = 0,
proc_bind_true,
proc_bind_master,
proc_bind_close,
proc_bind_spread,
proc_bind_intel, // use KMP_AFFINITY interface
proc_bind_default
} kmp_proc_bind_t;
typedef struct kmp_nested_proc_bind_t {
kmp_proc_bind_t *bind_types;
int size;
int used;
} kmp_nested_proc_bind_t;
extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
#endif /* OMP_40_ENABLED */
#if OMP_50_ENABLED
extern int __kmp_display_affinity;
extern char *__kmp_affinity_format;
static const size_t KMP_AFFINITY_FORMAT_SIZE = 512;
#endif // OMP_50_ENABLED
#if KMP_AFFINITY_SUPPORTED
#define KMP_PLACE_ALL (-1)
#define KMP_PLACE_UNDEFINED (-2)
// Is KMP_AFFINITY being used instead of OMP_PROC_BIND/OMP_PLACES?
#define KMP_AFFINITY_NON_PROC_BIND \
((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
(__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
#endif /* KMP_AFFINITY_SUPPORTED */
extern int __kmp_affinity_num_places;
#if OMP_40_ENABLED
typedef enum kmp_cancel_kind_t {
cancel_noreq = 0,
cancel_parallel = 1,
cancel_loop = 2,
cancel_sections = 3,
cancel_taskgroup = 4
} kmp_cancel_kind_t;
#endif // OMP_40_ENABLED
// KMP_HW_SUBSET support:
typedef struct kmp_hws_item {
int num;
int offset;
} kmp_hws_item_t;
extern kmp_hws_item_t __kmp_hws_socket;
extern kmp_hws_item_t __kmp_hws_node;
extern kmp_hws_item_t __kmp_hws_tile;
extern kmp_hws_item_t __kmp_hws_core;
extern kmp_hws_item_t __kmp_hws_proc;
extern int __kmp_hws_requested;
extern int __kmp_hws_abs_flag; // absolute or per-item number requested
/* ------------------------------------------------------------------------ */
#define KMP_PAD(type, sz) \
(sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
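// Example: KMP_PAD rounds sizeof(type) up to a multiple of sz; for a 20-byte
// type and a 64-byte cache line, KMP_PAD(type, 64) == 64.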
// We need to avoid using -1 as a GTID as +1 is added to the gtid
// when storing it in a lock, and the value 0 is reserved.
#define KMP_GTID_DNE (-2) /* Does not exist */
#define KMP_GTID_SHUTDOWN (-3) /* Library is shutting down */
#define KMP_GTID_MONITOR (-4) /* Monitor thread ID */
#define KMP_GTID_UNKNOWN (-5) /* Is not known */
#define KMP_GTID_MIN (-6) /* Minimal gtid for low bound check in DEBUG */
#if OMP_50_ENABLED
/* OpenMP 5.0 Memory Management support */
extern int __kmp_memkind_available;
extern int __kmp_hbw_mem_available;
typedef void *omp_allocator_t;
extern const omp_allocator_t *OMP_NULL_ALLOCATOR;
extern const omp_allocator_t *omp_default_mem_alloc;
extern const omp_allocator_t *omp_large_cap_mem_alloc;
extern const omp_allocator_t *omp_const_mem_alloc;
extern const omp_allocator_t *omp_high_bw_mem_alloc;
extern const omp_allocator_t *omp_low_lat_mem_alloc;
extern const omp_allocator_t *omp_cgroup_mem_alloc;
extern const omp_allocator_t *omp_pteam_mem_alloc;
extern const omp_allocator_t *omp_thread_mem_alloc;
extern const omp_allocator_t *__kmp_def_allocator;
extern void __kmpc_set_default_allocator(int gtid, const omp_allocator_t *al);
extern const omp_allocator_t *__kmpc_get_default_allocator(int gtid);
extern void *__kmpc_alloc(int gtid, size_t sz, const omp_allocator_t *al);
extern void __kmpc_free(int gtid, void *ptr, const omp_allocator_t *al);
extern void __kmp_init_memkind();
extern void __kmp_fini_memkind();
#endif // OMP_50_ENABLED
/* ------------------------------------------------------------------------ */
#define KMP_UINT64_MAX \
(~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
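// Note: this clears only the top bit, so it evaluates to 2^63 - 1 rather
// than 2^64 - 1.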
#define KMP_MIN_NTH 1
#ifndef KMP_MAX_NTH
#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
#define KMP_MAX_NTH PTHREAD_THREADS_MAX
#else
#define KMP_MAX_NTH INT_MAX
#endif
#endif /* KMP_MAX_NTH */
#ifdef PTHREAD_STACK_MIN
#define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
#else
#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
#endif
#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#if KMP_ARCH_X86
#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
#elif KMP_ARCH_X86_64
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
#else
#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#endif
#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
#define KMP_MAX_MALLOC_POOL_INCR \
(~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_MIN_STKOFFSET (0)
#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
#if KMP_OS_DARWIN
#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
#else
#define KMP_DEFAULT_STKOFFSET CACHE_LINE
#endif
#define KMP_MIN_STKPADDING (0)
#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
#define KMP_BLOCKTIME_MULTIPLIER \
(1000) /* number of blocktime units per second */
#define KMP_MIN_BLOCKTIME (0)
#define KMP_MAX_BLOCKTIME \
(INT_MAX) /* Must be this for the "infinite" setting to work */
#define KMP_DEFAULT_BLOCKTIME (200) /* __kmp_blocktime is in milliseconds */
#if KMP_USE_MONITOR
#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
#define KMP_MIN_MONITOR_WAKEUPS (1) // min times monitor wakes up per second
#define KMP_MAX_MONITOR_WAKEUPS (1000) // max times monitor can wake up per sec
/* Calculate new number of monitor wakeups for a specific block time based on
previous monitor_wakeups. Only allow increasing number of wakeups */
#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
(((blocktime) == KMP_MAX_BLOCKTIME) \
? (monitor_wakeups) \
: ((blocktime) == KMP_MIN_BLOCKTIME) \
? KMP_MAX_MONITOR_WAKEUPS \
: ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
? (monitor_wakeups) \
: (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
/* Calculate number of intervals for a specific block time based on
monitor_wakeups */
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
(((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
(KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
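/* Worked example: with the default 200 ms blocktime and monitor_wakeups == 5
   (one wakeup every 200 ms):
     KMP_WAKEUPS_FROM_BLOCKTIME(200, 5) == 5    // 1000 / 200, not increased
     KMP_INTERVALS_FROM_BLOCKTIME(200, 5) == 1  // (200 + 199) / 200
*/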
#else
#define KMP_BLOCKTIME(team, tid) \
(get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
// HW TSC is used to reduce overhead (clock tick instead of nanosecond).
extern kmp_uint64 __kmp_ticks_per_msec;
#if KMP_COMPILER_ICC
#define KMP_NOW() ((kmp_uint64)_rdtsc())
#else
#define KMP_NOW() __kmp_hardware_timestamp()
#endif
#define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
(KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
#else
// System time is retrieved sporadically while blocking.
extern kmp_uint64 __kmp_now_nsec();
#define KMP_NOW() __kmp_now_nsec()
#define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
(KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
#endif
#define KMP_YIELD_NOW() \
(KMP_NOW_MSEC() / KMP_MAX(__kmp_dflt_blocktime, 1) % \
(__kmp_yield_on_count + __kmp_yield_off_count) < \
(kmp_uint32)__kmp_yield_on_count)
#endif // KMP_USE_MONITOR
#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
#define KMP_DEFAULT_STATSCOLS 80
#define KMP_MIN_INTERVAL 0
#define KMP_MAX_INTERVAL (INT_MAX - 1)
#define KMP_DEFAULT_INTERVAL 0
#define KMP_MIN_CHUNK 1
#define KMP_MAX_CHUNK (INT_MAX - 1)
#define KMP_DEFAULT_CHUNK 1
#define KMP_MIN_INIT_WAIT 1
#define KMP_MAX_INIT_WAIT (INT_MAX / 2)
#define KMP_DEFAULT_INIT_WAIT 2048U
#define KMP_MIN_NEXT_WAIT 1
#define KMP_MAX_NEXT_WAIT (INT_MAX / 2)
#define KMP_DEFAULT_NEXT_WAIT 1024U
#define KMP_DFLT_DISP_NUM_BUFF 7
#define KMP_MAX_ORDERED 8
#define KMP_MAX_FIELDS 32
#define KMP_MAX_BRANCH_BITS 31
#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
/* Minimum number of threads before switch to TLS gtid (experimentally
determined) */
/* josh TODO: what about OS X* tuning? */
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_TLS_GTID_MIN 5
#else
#define KMP_TLS_GTID_MIN INT_MAX
#endif
#define KMP_MASTER_TID(tid) ((tid) == 0)
#define KMP_WORKER_TID(tid) ((tid) != 0)
#define KMP_MASTER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) == 0)
#define KMP_WORKER_GTID(gtid) (__kmp_tid_from_gtid((gtid)) != 0)
#define KMP_INITIAL_GTID(gtid) ((gtid) == 0)
#ifndef TRUE
#define FALSE 0
#define TRUE (!FALSE)
#endif
/* NOTE: all of the following constants must be even */
#if KMP_OS_WINDOWS
#define KMP_INIT_WAIT 64U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 32U /* subsequent number of spin-tests */
#elif KMP_OS_CNK
#define KMP_INIT_WAIT 16U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 8U /* subsequent number of spin-tests */
#elif KMP_OS_LINUX
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#elif KMP_OS_DARWIN
/* TODO: tune for KMP_OS_DARWIN */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#elif KMP_OS_DRAGONFLY
/* TODO: tune for KMP_OS_DRAGONFLY */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#elif KMP_OS_FREEBSD
/* TODO: tune for KMP_OS_FREEBSD */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#elif KMP_OS_NETBSD
/* TODO: tune for KMP_OS_NETBSD */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#elif KMP_OS_HURD
/* TODO: tune for KMP_OS_HURD */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#elif KMP_OS_OPENBSD
/* TODO: tune for KMP_OS_OPENBSD */
#define KMP_INIT_WAIT 1024U /* initial number of spin-tests */
#define KMP_NEXT_WAIT 512U /* subsequent number of spin-tests */
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
typedef struct kmp_cpuid {
kmp_uint32 eax;
kmp_uint32 ebx;
kmp_uint32 ecx;
kmp_uint32 edx;
} kmp_cpuid_t;
extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
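/* Example (sketch): querying CPUID leaf 0; ebx/edx/ecx then hold the vendor
   string (e.g. "GenuineIntel"):
     kmp_cpuid_t id;
     __kmp_x86_cpuid(0, 0, &id);
*/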
#if KMP_ARCH_X86
extern void __kmp_x86_pause(void);
#elif KMP_MIC
// Performance testing on KNC (C0QS-7120 P/A/X/D, 61-core, 16 GB Memory) showed
// regression after removal of extra PAUSE from KMP_YIELD_SPIN(). Changing
// the delay from 100 to 300 showed even better performance than double PAUSE
// on Spec OMP2001 and LCPC tasking tests, no regressions on EPCC.
static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
#else
static inline void __kmp_x86_pause(void) { _mm_pause(); }
#endif
#define KMP_CPU_PAUSE() __kmp_x86_pause()
#elif KMP_ARCH_PPC64
#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
#define KMP_CPU_PAUSE() \
do { \
KMP_PPC64_PRI_LOW(); \
KMP_PPC64_PRI_MED(); \
KMP_PPC64_PRI_LOC_MB(); \
} while (0)
#else
#define KMP_CPU_PAUSE() /* nothing to do */
#endif
#define KMP_INIT_YIELD(count) \
{ (count) = __kmp_yield_init; }
#define KMP_YIELD(cond) \
{ \
KMP_CPU_PAUSE(); \
__kmp_yield((cond)); \
}
// Note the decrement by 2 in the following macros. With KMP_LIBRARY=turnaround
// the initial value from KMP_INIT_YIELD() is odd, so (count) never reaches
// zero and no yielding occurs.
#define KMP_YIELD_WHEN(cond, count) \
{ \
KMP_CPU_PAUSE(); \
(count) -= 2; \
if (!(count)) { \
__kmp_yield(cond); \
(count) = __kmp_yield_next; \
} \
}
#define KMP_YIELD_SPIN(count) \
{ \
KMP_CPU_PAUSE(); \
(count) -= 2; \
if (!(count)) { \
__kmp_yield(1); \
(count) = __kmp_yield_next; \
} \
}
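/* Illustrative spin-wait loop built from the macros above (sketch; 'done' is
   an assumed predicate):
     kmp_uint32 spins;
     KMP_INIT_YIELD(spins);
     while (!done()) {
       KMP_YIELD_SPIN(spins);
     }
*/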
/* ------------------------------------------------------------------------ */
/* Support datatypes for the orphaned construct nesting checks. */
/* ------------------------------------------------------------------------ */
enum cons_type {
ct_none,
ct_parallel,
ct_pdo,
ct_pdo_ordered,
ct_psections,
ct_psingle,
/* the following must be left in order and not split up */
ct_taskq,
ct_task, // really task inside non-ordered taskq, considered worksharing type
ct_task_ordered, /* really task inside ordered taskq, considered a worksharing
type */
/* the preceding must be left in order and not split up */
ct_critical,
ct_ordered_in_parallel,
ct_ordered_in_pdo,
ct_ordered_in_taskq,
ct_master,
ct_reduce,
ct_barrier
};
/* test to see if we are in a taskq construct */
#define IS_CONS_TYPE_TASKQ(ct) \
(((int)(ct)) >= ((int)ct_taskq) && ((int)(ct)) <= ((int)ct_task_ordered))
#define IS_CONS_TYPE_ORDERED(ct) \
((ct) == ct_pdo_ordered || (ct) == ct_task_ordered)
struct cons_data {
ident_t const *ident;
enum cons_type type;
int prev;
kmp_user_lock_p
name; /* address exclusively for critical section name comparison */
};
struct cons_header {
int p_top, w_top, s_top;
int stack_size, stack_top;
struct cons_data *stack_data;
};
struct kmp_region_info {
char *text;
int offset[KMP_MAX_FIELDS];
int length[KMP_MAX_FIELDS];
};
/* ---------------------------------------------------------------------- */
/* ---------------------------------------------------------------------- */
#if KMP_OS_WINDOWS
typedef HANDLE kmp_thread_t;
typedef DWORD kmp_key_t;
#endif /* KMP_OS_WINDOWS */
#if KMP_OS_UNIX
typedef pthread_t kmp_thread_t;
typedef pthread_key_t kmp_key_t;
#endif
extern kmp_key_t __kmp_gtid_threadprivate_key;
typedef struct kmp_sys_info {
long maxrss; /* the maximum resident set size utilized (in kilobytes) */
long minflt; /* the number of page faults serviced without any I/O */
long majflt; /* the number of page faults serviced that required I/O */
long nswap; /* the number of times a process was "swapped" out of memory */
long inblock; /* the number of times the file system had to perform input */
long oublock; /* the number of times the file system had to perform output */
long nvcsw; /* the number of times a context switch was made voluntarily */
long nivcsw; /* the number of times a context switch was forced */
} kmp_sys_info_t;
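/* The fields above mirror the identically-named members of the POSIX
   struct rusage (ru_maxrss, ru_minflt, ...). A filling sketch (illustrative
   only; the runtime's actual collection code lives elsewhere):

     #include <sys/resource.h>
     struct rusage r;
     kmp_sys_info_t si;
     if (getrusage(RUSAGE_SELF, &r) == 0) {
       si.maxrss = r.ru_maxrss;   si.minflt = r.ru_minflt;
       si.majflt = r.ru_majflt;   si.nswap = r.ru_nswap;
       si.inblock = r.ru_inblock; si.oublock = r.ru_oublock;
       si.nvcsw = r.ru_nvcsw;     si.nivcsw = r.ru_nivcsw;
     }
*/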
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
typedef struct kmp_cpuinfo {
int initialized; // If 0, other fields are not initialized.
int signature; // CPUID(1).EAX
int family; // CPUID(1).EAX[27:20]+CPUID(1).EAX[11:8] (Extended Family+Family)
int model; // ( CPUID(1).EAX[19:16] << 4 ) + CPUID(1).EAX[7:4] ( ( Extended
// Model << 4 ) + Model)
int stepping; // CPUID(1).EAX[3:0] ( Stepping )
int sse2; // 0 if SSE2 instructions are not supported, 1 otherwise.
int rtm; // 0 if RTM instructions are not supported, 1 otherwise.
int cpu_stackoffset;
int apic_id;
int physical_id;
int logical_id;
kmp_uint64 frequency; // Nominal CPU frequency in Hz.
char name[3 * sizeof(kmp_cpuid_t)]; // CPUID(0x80000002,0x80000003,0x80000004)
} kmp_cpuinfo_t;
#endif
#if USE_ITT_BUILD
// We cannot include "kmp_itt.h" due to circular dependency. Declare the only
// required type here. Later we will check that the type meets requirements.
typedef int kmp_itt_mark_t;
#define KMP_ITT_DEBUG 0
#endif /* USE_ITT_BUILD */
/* Taskq data structures */
#define HIGH_WATER_MARK(nslots) (((nslots)*3) / 4)
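/* Worked example: with nslots == 256, HIGH_WATER_MARK(256) == 192, i.e. a
   queue is treated as nearly full once 3/4 of its slots are occupied. */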
// num thunks that each thread can simultaneously execute from a task queue
#define __KMP_TASKQ_THUNKS_PER_TH 1
/* flags for taskq_global_flags, kmp_task_queue_t tq_flags, kmpc_thunk_t
th_flags */
#define TQF_IS_ORDERED 0x0001 // __kmpc_taskq interface, taskq ordered
// __kmpc_taskq interface, taskq with lastprivate list
#define TQF_IS_LASTPRIVATE 0x0002
#define TQF_IS_NOWAIT 0x0004 // __kmpc_taskq interface, end taskq nowait
// __kmpc_taskq interface, use heuristics to decide task queue size
#define TQF_HEURISTICS 0x0008
// __kmpc_taskq interface, reserved for future use
#define TQF_INTERFACE_RESERVED1 0x0010
// __kmpc_taskq interface, reserved for future use
#define TQF_INTERFACE_RESERVED2 0x0020
// __kmpc_taskq interface, reserved for future use
#define TQF_INTERFACE_RESERVED3 0x0040
// __kmpc_taskq interface, reserved for future use
#define TQF_INTERFACE_RESERVED4 0x0080
#define TQF_INTERFACE_FLAGS 0x00ff // all the __kmpc_taskq interface flags
// internal/read by instrumentation; only used with TQF_IS_LASTPRIVATE
#define TQF_IS_LAST_TASK 0x0100
// internal use only; this thunk->th_task is the taskq_task
#define TQF_TASKQ_TASK 0x0200
// internal use only; must release worker threads once ANY queued task
// exists (global)
#define TQF_RELEASE_WORKERS 0x0400
// internal use only; notify workers that master has finished enqueuing tasks
#define TQF_ALL_TASKS_QUEUED 0x0800
// internal use only: this queue encountered in parallel context: not serialized
#define TQF_PARALLEL_CONTEXT 0x1000
// internal use only; this queue is on the freelist and not in use
#define TQF_DEALLOCATED 0x2000
#define TQF_INTERNAL_FLAGS 0x3f00 // all the internal use only flags
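/* Usage sketch (illustrative; "queue" is a placeholder): the flag words are
   tested and updated with plain bitwise operations, and the interface and
   internal masks are disjoint by construction:

     if (queue->tq_flags & TQF_IS_ORDERED) {
       // serve thunks in tq_tasknum order
     }
     queue->tq_flags |= TQF_DEALLOCATED; // queue moves to the freelist
     KMP_DEBUG_ASSERT((TQF_INTERFACE_FLAGS & TQF_INTERNAL_FLAGS) == 0);
*/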
typedef struct KMP_ALIGN_CACHE kmpc_aligned_int32_t {
kmp_int32 ai_data;
} kmpc_aligned_int32_t;
typedef struct KMP_ALIGN_CACHE kmpc_aligned_queue_slot_t {
struct kmpc_thunk_t *qs_thunk;
} kmpc_aligned_queue_slot_t;
typedef struct kmpc_task_queue_t {
/* task queue linkage fields for n-ary tree of queues (locked with global
taskq_tree_lck) */
kmp_lock_t tq_link_lck; /* lock for child link, child next/prev links and
child ref counts */
union {
struct kmpc_task_queue_t *tq_parent; // pointer to parent taskq, not locked
// for taskq internal freelists, locked with global taskq_freelist_lck
struct kmpc_task_queue_t *tq_next_free;
} tq;
// pointer to linked-list of children, locked by tq's tq_link_lck
volatile struct kmpc_task_queue_t *tq_first_child;
// next child in linked-list, locked by parent tq's tq_link_lck
struct kmpc_task_queue_t *tq_next_child;
// previous child in linked-list, locked by parent tq's tq_link_lck
struct kmpc_task_queue_t *tq_prev_child;
// reference count of threads with access to this task queue
volatile kmp_int32 tq_ref_count;
/* (other than the thread executing the kmpc_end_taskq call) */
/* locked by parent tq's tq_link_lck */
/* shared data for task queue */
/* per-thread array of pointers to shared variable structures */
struct kmpc_aligned_shared_vars_t *tq_shareds;
/* only one array element exists for all but outermost taskq */
/* bookkeeping for ordered task queue */
kmp_uint32 tq_tasknum_queuing; // ordered task # assigned while queuing tasks
// ordered number of next task to be served (executed)
volatile kmp_uint32 tq_tasknum_serving;
/* thunk storage management for task queue */
kmp_lock_t tq_free_thunks_lck; /* lock for thunk freelist manipulation */
// thunk freelist, chained via th.th_next_free
struct kmpc_thunk_t *tq_free_thunks;
// space allocated for thunks for this task queue
struct kmpc_thunk_t *tq_thunk_space;
/* data fields for queue itself */
kmp_lock_t tq_queue_lck; /* lock for [de]enqueue operations: tq_queue,
tq_head, tq_tail, tq_nfull */
/* array of queue slots to hold thunks for tasks */
kmpc_aligned_queue_slot_t *tq_queue;
volatile struct kmpc_thunk_t *tq_taskq_slot; /* special slot for taskq task
thunk, occupied if not NULL */
kmp_int32 tq_nslots; /* # of tq_thunk_space thunks alloc'd (not incl.
tq_taskq_slot space) */
kmp_int32 tq_head; // enqueue puts item here (index into tq_queue array)
kmp_int32 tq_tail; // dequeue takes item from here (index into tq_queue array)
volatile kmp_int32 tq_nfull; // # of occupied entries in task queue right now
kmp_int32 tq_hiwat; /* high-water mark for tq_nfull and queue scheduling */
volatile kmp_int32 tq_flags; /* TQF_xxx */
/* bookkeeping for outstanding thunks */
/* per-thread array for # of regular thunks currently being executed */
struct kmpc_aligned_int32_t *tq_th_thunks;
kmp_int32 tq_nproc; /* number of thunks in the th_thunks array */
/* statistics library bookkeeping */
ident_t *tq_loc; /* source location information for taskq directive */
} kmpc_task_queue_t;
typedef void (*kmpc_task_t)(kmp_int32 global_tid, struct kmpc_thunk_t *thunk);
/* sizeof_shareds passed as arg to __kmpc_taskq call */
typedef struct kmpc_shared_vars_t { /* aligned during dynamic allocation */
kmpc_task_queue_t *sv_queue; /* (pointers to) shared vars */
} kmpc_shared_vars_t;
typedef struct KMP_ALIGN_CACHE kmpc_aligned_shared_vars_t {
volatile struct kmpc_shared_vars_t *ai_data;
} kmpc_aligned_shared_vars_t;
/* sizeof_thunk passed as arg to kmpc_taskq call */
typedef struct kmpc_thunk_t { /* aligned during dynamic allocation */
union { /* field used for internal freelists too */
kmpc_shared_vars_t *th_shareds;
struct kmpc_thunk_t *th_next_free; /* freelist of individual thunks within
queue, head at tq_free_thunks */
} th;
kmpc_task_t th_task; /* taskq_task if flags & TQF_TASKQ_TASK */
struct kmpc_thunk_t *th_encl_thunk; /* pointer to dynamically enclosing thunk
on this thread's call stack */
// TQF_xxx(tq_flags interface plus possible internal flags)
kmp_int32 th_flags;
kmp_int32 th_status;
kmp_uint32 th_tasknum; /* task number assigned in order of queuing, used for
ordered sections */
/* private vars */
} kmpc_thunk_t;
typedef struct KMP_ALIGN_CACHE kmp_taskq {
int tq_curr_thunk_capacity;
kmpc_task_queue_t *tq_root;
kmp_int32 tq_global_flags;
kmp_lock_t tq_freelist_lck;
kmpc_task_queue_t *tq_freelist;
kmpc_thunk_t **tq_curr_thunk;
} kmp_taskq_t;
/* END Taskq data structures */
typedef kmp_int32 kmp_critical_name[8];
/*!
@ingroup PARALLEL
The type for a microtask which gets passed to @ref __kmpc_fork_call().
The arguments to the outlined function are
@param global_tid the global thread identity of the thread executing the
function.
@param bound_tid the local identity of the thread executing the function.
@param ... pointers to shared variables accessed by the function.
*/
typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
...);
/*!
@ingroup THREADPRIVATE
@{
*/
/* ---------------------------------------------------------------------------
*/
/* Threadprivate initialization/finalization function declarations */
/* for non-array objects: __kmpc_threadprivate_register() */
/*!
Pointer to the constructor function.
The first argument is the <tt>this</tt> pointer
*/
typedef void *(*kmpc_ctor)(void *);
/*!
Pointer to the destructor function.
The first argument is the <tt>this</tt> pointer
*/
typedef void (*kmpc_dtor)(
void * /*, size_t */); /* 2nd arg: magic number for KCC; unused by Intel
compiler */
/*!
Pointer to an alternate constructor.
The first argument is the <tt>this</tt> pointer.
*/
typedef void *(*kmpc_cctor)(void *, void *);
/* for array objects: __kmpc_threadprivate_register_vec() */
/* First arg: "this" pointer */
/* Last arg: number of array elements */
/*!
Array constructor.
First argument is the <tt>this</tt> pointer.
Second argument is the number of array elements.
*/
typedef void *(*kmpc_ctor_vec)(void *, size_t);
/*!
Pointer to the array destructor function.
The first argument is the <tt>this</tt> pointer.
The second argument is the number of array elements.
*/
typedef void (*kmpc_dtor_vec)(void *, size_t);
/*!
Array constructor.
First argument is the <tt>this</tt> pointer.
Third argument is the number of array elements.
*/
typedef void *(*kmpc_cctor_vec)(void *, void *,
size_t); /* function unused by compiler */
/*!
@}
*/
/* keeps track of threadprivate cache allocations for cleanup later */
typedef struct kmp_cached_addr {
void **addr; /* address of allocated cache */
void ***compiler_cache; /* pointer to compiler's cache */
void *data; /* pointer to global data */
struct kmp_cached_addr *next; /* pointer to next cached address */
} kmp_cached_addr_t;
struct private_data {
struct private_data *next; /* The next descriptor in the list */
void *data; /* The data buffer for this descriptor */
int more; /* The repeat count for this descriptor */
size_t size; /* The data size for this descriptor */
};
struct private_common {
struct private_common *next;
struct private_common *link;
void *gbl_addr;
void *par_addr; /* par_addr == gbl_addr for MASTER thread */
size_t cmn_size;
};
struct shared_common {
struct shared_common *next;
struct private_data *pod_init;
void *obj_init;
void *gbl_addr;
union {
kmpc_ctor ctor;
kmpc_ctor_vec ctorv;
} ct;
union {
kmpc_cctor cctor;
kmpc_cctor_vec cctorv;
} cct;
union {
kmpc_dtor dtor;
kmpc_dtor_vec dtorv;
} dt;
size_t vec_len;
int is_vec;
size_t cmn_size;
};
#define KMP_HASH_TABLE_LOG2 9 /* log2 of the hash table size */
#define KMP_HASH_TABLE_SIZE \
(1 << KMP_HASH_TABLE_LOG2) /* size of the hash table */
#define KMP_HASH_SHIFT 3 /* throw away this many low bits from the address */
#define KMP_HASH(x) \
((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
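/* Worked example: KMP_HASH() throws away the low KMP_HASH_SHIFT (3) bits of
   the address, which rarely differ between allocated objects, and keeps the
   next KMP_HASH_TABLE_LOG2 (9) bits. For a hypothetical address 0x7f001234:

     (0x7f001234 >> 3) & (512 - 1) == 0x0fe00246 & 0x1ff == 0x46

   so the corresponding entry lives in bucket 70 of the tables below. */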
struct common_table {
struct private_common *data[KMP_HASH_TABLE_SIZE];
};
struct shared_table {
struct shared_common *data[KMP_HASH_TABLE_SIZE];
};
/* ------------------------------------------------------------------------ */
#if KMP_USE_HIER_SCHED
// Shared barrier data that exists inside a single unit of the scheduling
// hierarchy
typedef struct kmp_hier_private_bdata_t {
kmp_int32 num_active;
kmp_uint64 index;
kmp_uint64 wait_val[2];
} kmp_hier_private_bdata_t;
#endif
typedef struct kmp_sched_flags {
unsigned ordered : 1;
unsigned nomerge : 1;
unsigned contains_last : 1;
#if KMP_USE_HIER_SCHED
unsigned use_hier : 1;
unsigned unused : 28;
#else
unsigned unused : 29;
#endif
} kmp_sched_flags_t;
KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4);
#if KMP_STATIC_STEAL_ENABLED
typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
kmp_int32 count;
kmp_int32 ub;
/* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
kmp_int32 lb;
kmp_int32 st;
kmp_int32 tc;
kmp_int32 static_steal_counter; /* for static_steal only; maybe better to put
after ub */
// KMP_ALIGN( 32 ) ensures ( if the KMP_ALIGN macro is turned on )
// a) parm3 is properly aligned and
// b) all parm1-4 are in the same cache line.
// Because parm1-4 are used together, performance seems to be better
// if they are in the same line (not measured though).
struct KMP_ALIGN(32) { // AC: changed 16 to 32 in order to simplify template
kmp_int32 parm1; // structures in kmp_dispatch.cpp. This should
kmp_int32 parm2; // make no real change at least while padding is off.
kmp_int32 parm3;
kmp_int32 parm4;
};
kmp_uint32 ordered_lower;
kmp_uint32 ordered_upper;
#if KMP_OS_WINDOWS
// This var can be placed in the hole between 'tc' and 'parm1', instead of
// 'static_steal_counter'. It would be nice to measure execution times.
// The conditional if/endif could then be removed entirely.
kmp_int32 last_upper;
#endif /* KMP_OS_WINDOWS */
} dispatch_private_info32_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
kmp_int64 count; // current chunk number for static & static-steal scheduling
kmp_int64 ub; /* upper-bound */
/* Adding KMP_ALIGN_CACHE here doesn't help / can hurt performance */
kmp_int64 lb; /* lower-bound */
kmp_int64 st; /* stride */
kmp_int64 tc; /* trip count (number of iterations) */
kmp_int64 static_steal_counter; /* for static_steal only; maybe better to put
after ub */
/* parm[1-4] are used in different ways by different scheduling algorithms */
// KMP_ALIGN( 32 ) ensures ( if the KMP_ALIGN macro is turned on )
// a) parm3 is properly aligned and
// b) all parm1-4 are in the same cache line.
// Because parm1-4 are used together, performance seems to be better
// if they are in the same line (not measured though).
struct KMP_ALIGN(32) {
kmp_int64 parm1;
kmp_int64 parm2;
kmp_int64 parm3;
kmp_int64 parm4;
};
kmp_uint64 ordered_lower;
kmp_uint64 ordered_upper;
#if KMP_OS_WINDOWS
// This var can be placed in the hole between 'tc' and 'parm1', instead of
// 'static_steal_counter'. It would be nice to measure execution times.
// The conditional if/endif could then be removed entirely.
kmp_int64 last_upper;
#endif /* KMP_OS_WINDOWS */
} dispatch_private_info64_t;
#else /* KMP_STATIC_STEAL_ENABLED */
typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
kmp_int32 lb;
kmp_int32 ub;
kmp_int32 st;
kmp_int32 tc;
kmp_int32 parm1;
kmp_int32 parm2;
kmp_int32 parm3;
kmp_int32 parm4;
kmp_int32 count;
kmp_uint32 ordered_lower;
kmp_uint32 ordered_upper;
#if KMP_OS_WINDOWS
kmp_int32 last_upper;
#endif /* KMP_OS_WINDOWS */
} dispatch_private_info32_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
kmp_int64 lb; /* lower-bound */
kmp_int64 ub; /* upper-bound */
kmp_int64 st; /* stride */
kmp_int64 tc; /* trip count (number of iterations) */
/* parm[1-4] are used in different ways by different scheduling algorithms */
kmp_int64 parm1;
kmp_int64 parm2;
kmp_int64 parm3;
kmp_int64 parm4;
kmp_int64 count; /* current chunk number for static scheduling */
kmp_uint64 ordered_lower;
kmp_uint64 ordered_upper;
#if KMP_OS_WINDOWS
kmp_int64 last_upper;
#endif /* KMP_OS_WINDOWS */
} dispatch_private_info64_t;
#endif /* KMP_STATIC_STEAL_ENABLED */
typedef struct KMP_ALIGN_CACHE dispatch_private_info {
union private_info {
dispatch_private_info32_t p32;
dispatch_private_info64_t p64;
} u;
enum sched_type schedule; /* scheduling algorithm */
kmp_sched_flags_t flags; /* flags (e.g., ordered, nomerge, etc.) */
kmp_int32 ordered_bumped;
// To retain the structure size after making ordered_iteration scalar
kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 3];
// Stack of buffers for nest of serial regions
struct dispatch_private_info *next;
kmp_int32 type_size; /* the size of types in private_info */
#if KMP_USE_HIER_SCHED
kmp_int32 hier_id;
void *parent; /* hierarchical scheduling parent pointer */
#endif
enum cons_type pushed_ws;
} dispatch_private_info_t;
typedef struct dispatch_shared_info32 {
/* chunk index under dynamic, number of idle threads under static-steal;
iteration index otherwise */
volatile kmp_uint32 iteration;
volatile kmp_uint32 num_done;
volatile kmp_uint32 ordered_iteration;
// Dummy to retain the structure size after making ordered_iteration scalar
kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
} dispatch_shared_info32_t;
typedef struct dispatch_shared_info64 {
/* chunk index under dynamic, number of idle threads under static-steal;
iteration index otherwise */
volatile kmp_uint64 iteration;
volatile kmp_uint64 num_done;
volatile kmp_uint64 ordered_iteration;
// Dummy to retain the structure size after making ordered_iteration scalar
kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
} dispatch_shared_info64_t;
typedef struct dispatch_shared_info {
union shared_info {
dispatch_shared_info32_t s32;
dispatch_shared_info64_t s64;
} u;
volatile kmp_uint32 buffer_index;
#if OMP_45_ENABLED
volatile kmp_int32 doacross_buf_idx; // teamwise index
volatile kmp_uint32 *doacross_flags; // shared array of iteration flags (0/1)
kmp_int32 doacross_num_done; // count finished threads
#endif
#if KMP_USE_HIER_SCHED
void *hier;
#endif
#if KMP_USE_HWLOC
// When linking with libhwloc, the ORDERED EPCC test slows down on big
// machines (> 48 cores). Performance analysis showed that a cache thrash
// was occurring and this padding helps alleviate the problem.
char padding[64];
#endif
} dispatch_shared_info_t;
typedef struct kmp_disp {
/* Vector for ORDERED SECTION */
void (*th_deo_fcn)(int *gtid, int *cid, ident_t *);
/* Vector for END ORDERED SECTION */
void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *);
dispatch_shared_info_t *th_dispatch_sh_current;
dispatch_private_info_t *th_dispatch_pr_current;
dispatch_private_info_t *th_disp_buffer;
kmp_int32 th_disp_index;
#if OMP_45_ENABLED
kmp_int32 th_doacross_buf_idx; // thread's doacross buffer index
volatile kmp_uint32 *th_doacross_flags; // pointer to shared array of flags
union { // we can use union here because doacross cannot be used in
// nonmonotonic loops
kmp_int64 *th_doacross_info; // info on loop bounds
kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable)
};
#else
#if KMP_STATIC_STEAL_ENABLED
kmp_lock_t *th_steal_lock; // lock used for chunk stealing (8-byte variable)
void *dummy_padding[1]; // make it 64 bytes on Intel(R) 64
#else
void *dummy_padding[2]; // make it 64 bytes on Intel(R) 64
#endif
#endif
#if KMP_USE_INTERNODE_ALIGNMENT
char more_padding[INTERNODE_CACHE_LINE];
#endif
} kmp_disp_t;
/* ------------------------------------------------------------------------ */
/* Barrier stuff */
/* constants for barrier state update */
#define KMP_INIT_BARRIER_STATE 0 /* should probably start from zero */
#define KMP_BARRIER_SLEEP_BIT 0 /* bit used for suspend/sleep part of state */
#define KMP_BARRIER_UNUSED_BIT 1 // bit that must never be set for valid state
#define KMP_BARRIER_BUMP_BIT 2 /* lsb used for bump of go/arrived state */
#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier sleep bit must be smaller than barrier bump bit"
#endif
#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier unused bit must be smaller than barrier bump bit"
#endif
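/* Illustrative bit arithmetic (derived from the constants above): with
   KMP_BARRIER_SLEEP_BIT == 0 and KMP_BARRIER_BUMP_BIT == 2, the low two bits
   of a barrier state carry flags and the rest counts barrier epochs:

     state |= KMP_BARRIER_SLEEP_STATE;  // mark a waiter as sleeping (bit 0)
     state &= ~KMP_BARRIER_SLEEP_STATE; // clear the sleep mark
     state += KMP_BARRIER_STATE_BUMP;   // advance one epoch (adds 4, leaving
                                        // the flag bits untouched)
*/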
// Constants for release barrier wait state: currently, hierarchical only
#define KMP_BARRIER_NOT_WAITING 0 // Normal state; worker not in wait_sleep
#define KMP_BARRIER_OWN_FLAG \
1 // Normal state; worker waiting on own b_go flag in release
#define KMP_BARRIER_PARENT_FLAG \
2 // Special state; worker waiting on parent's b_go flag in release
#define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
3 // Special state; tells worker to shift from parent to own b_go
#define KMP_BARRIER_SWITCHING \
4 // Special state; worker resets appropriate flag on wake-up
#define KMP_NOT_SAFE_TO_REAP \
0 // Thread th_reap_state: not safe to reap (tasking)
#define KMP_SAFE_TO_REAP 1 // Thread th_reap_state: safe to reap (not tasking)
enum barrier_type {
bs_plain_barrier = 0, /* 0, All non-fork/join barriers (except reduction
barriers if enabled) */
bs_forkjoin_barrier, /* 1, All fork/join (parallel region) barriers */
#if KMP_FAST_REDUCTION_BARRIER
bs_reduction_barrier, /* 2, All barriers that are used in reduction */
#endif // KMP_FAST_REDUCTION_BARRIER
bs_last_barrier /* Just a placeholder to mark the end */
};
// to work with reduction barriers just like with plain barriers
#if !KMP_FAST_REDUCTION_BARRIER
#define bs_reduction_barrier bs_plain_barrier
#endif // KMP_FAST_REDUCTION_BARRIER
typedef enum kmp_bar_pat { /* Barrier communication patterns */
bp_linear_bar = 0, /* Single level (degenerate) tree */
bp_tree_bar = 1, /* Balanced tree with branching factor 2^n */
bp_hyper_bar = 2, /* Hypercube-embedded tree with min branching factor 2^n */
bp_hierarchical_bar = 3, /* Machine hierarchy tree */
bp_last_bar /* Placeholder to mark the end */
} kmp_bar_pat_e;
#define KMP_BARRIER_ICV_PUSH 1
/* Record for holding the values of the internal controls stack records */
typedef struct kmp_internal_control {
int serial_nesting_level; /* corresponds to the value of the
th_team_serialized field */
kmp_int8 nested; /* internal control for nested parallelism (per thread) */
kmp_int8 dynamic; /* internal control for dynamic adjustment of threads (per
thread) */
kmp_int8
bt_set; /* internal control for whether blocktime is explicitly set */
int blocktime; /* internal control for blocktime */
#if KMP_USE_MONITOR
int bt_intervals; /* internal control for blocktime intervals */
#endif
int nproc; /* internal control for #threads for next parallel region (per
thread) */
int thread_limit; /* internal control for thread-limit-var */
int max_active_levels; /* internal control for max_active_levels */
kmp_r_sched_t
sched; /* internal control for runtime schedule {sched,chunk} pair */
#if OMP_40_ENABLED
kmp_proc_bind_t proc_bind; /* internal control for affinity */
kmp_int32 default_device; /* internal control for default device */
#endif // OMP_40_ENABLED
struct kmp_internal_control *next;
} kmp_internal_control_t;
static inline void copy_icvs(kmp_internal_control_t *dst,
kmp_internal_control_t *src) {
*dst = *src;
}
/* Thread barrier needs volatile barrier fields */
typedef struct KMP_ALIGN_CACHE kmp_bstate {
// th_fixed_icvs is aligned by virtue of kmp_bstate being aligned (and all
// uses of it). It is not explicitly aligned below, because we *don't* want
// it to be padded -- instead, we fit b_go into the same cache line with
// th_fixed_icvs, enabling NGO cache line stores in the hierarchical barrier.
kmp_internal_control_t th_fixed_icvs; // Initial ICVs for the thread
// Tuck b_go into end of th_fixed_icvs cache line, so it can be stored with
// same NGO store
volatile kmp_uint64 b_go; // STATE => task should proceed (hierarchical)
KMP_ALIGN_CACHE volatile kmp_uint64
b_arrived; // STATE => task reached synch point.
kmp_uint32 *skip_per_level;
kmp_uint32 my_level;
kmp_int32 parent_tid;
kmp_int32 old_tid;
kmp_uint32 depth;
struct kmp_bstate *parent_bar;
kmp_team_t *team;
kmp_uint64 leaf_state;
kmp_uint32 nproc;
kmp_uint8 base_leaf_kids;
kmp_uint8 leaf_kids;
kmp_uint8 offset;
kmp_uint8 wait_flag;
kmp_uint8 use_oncore_barrier;
#if USE_DEBUGGER
// The following field is intended for the debugger solely. Only the worker
// thread itself accesses this field: the worker increases it by 1 when it
// arrives at a barrier.
KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
#endif /* USE_DEBUGGER */
} kmp_bstate_t;
union KMP_ALIGN_CACHE kmp_barrier_union {
double b_align; /* use worst case alignment */
char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
kmp_bstate_t bb;
};
typedef union kmp_barrier_union kmp_balign_t;
/* Team barrier needs only non-volatile arrived counter */
union KMP_ALIGN_CACHE kmp_barrier_team_union {
double b_align; /* use worst case alignment */
char b_pad[CACHE_LINE];
struct {
kmp_uint64 b_arrived; /* STATE => task reached synch point. */
#if USE_DEBUGGER
// The following two fields are intended for the debugger solely. Only the
// master of the team accesses these fields: the first one is increased by 1
// when the master arrives at a barrier, the second one is increased by one
// when all the threads have arrived.
kmp_uint b_master_arrived;
kmp_uint b_team_arrived;
#endif
};
};
typedef union kmp_barrier_team_union kmp_balign_team_t;
/* Padding for Linux* OS pthreads condition variables and mutexes used to signal
threads when a condition changes. This is to work around an NPTL bug where
padding was added to pthread_cond_t, which caused the initialization routine
to write outside of the structure if compiled on pre-NPTL systems. */
#if KMP_OS_WINDOWS
typedef struct kmp_win32_mutex {
/* The Lock */
CRITICAL_SECTION cs;
} kmp_win32_mutex_t;
typedef struct kmp_win32_cond {
/* Count of the number of waiters. */
int waiters_count_;
/* Serialize access to <waiters_count_> */
kmp_win32_mutex_t waiters_count_lock_;
/* Number of threads to release via a <cond_broadcast> or a <cond_signal> */
int release_count_;
/* Keeps track of the current "generation" so that we don't allow */
/* one thread to steal all the "releases" from the broadcast. */
int wait_generation_count_;
/* A manual-reset event that's used to block and release waiting threads. */
HANDLE event_;
} kmp_win32_cond_t;
#endif
#if KMP_OS_UNIX
union KMP_ALIGN_CACHE kmp_cond_union {
double c_align;
char c_pad[CACHE_LINE];
pthread_cond_t c_cond;
};
typedef union kmp_cond_union kmp_cond_align_t;
union KMP_ALIGN_CACHE kmp_mutex_union {
double m_align;
char m_pad[CACHE_LINE];
pthread_mutex_t m_mutex;
};
typedef union kmp_mutex_union kmp_mutex_align_t;
#endif /* KMP_OS_UNIX */
typedef struct kmp_desc_base {
void *ds_stackbase;
size_t ds_stacksize;
int ds_stackgrow;
kmp_thread_t ds_thread;
volatile int ds_tid;
int ds_gtid;
#if KMP_OS_WINDOWS
volatile int ds_alive;
DWORD ds_thread_id;
/* ds_thread keeps the thread handle on Windows* OS. It is enough for RTL
purposes. However, debugger support (libomp_db) cannot work with handles,
because they are not comparable. For example, the debugger requests info about
a thread with handle h. h is valid within the debugger process, and meaningless
within the debuggee process. Even if h is duplicated by a call to
DuplicateHandle(), so that the result h' is valid within the debuggee process,
it is a *new* handle which does *not* equal any other handle in the debuggee...
The only way to compare handles is to convert them to system-wide ids.
GetThreadId() is available only on Longhorn and Server 2003. :-( In contrast,
GetCurrentThreadId() is available on all Windows* OS flavours (including
Windows* 95). Thus, we have to get the thread id by a call to
GetCurrentThreadId() from within the thread itself and save it to let
libomp_db identify threads. */
#endif /* KMP_OS_WINDOWS */
} kmp_desc_base_t;
typedef union KMP_ALIGN_CACHE kmp_desc {
double ds_align; /* use worst case alignment */
char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
kmp_desc_base_t ds;
} kmp_desc_t;
typedef struct kmp_local {
volatile int this_construct; /* count of 'single' constructs encountered by
thread */
void *reduce_data;
#if KMP_USE_BGET
void *bget_data;
void *bget_list;
#if !USE_CMP_XCHG_FOR_BGET
#ifdef USE_QUEUING_LOCK_FOR_BGET
kmp_lock_t bget_lock; /* Lock for accessing bget free list */
#else
kmp_bootstrap_lock_t bget_lock; // Lock for accessing bget free list. Must be
// bootstrap lock so we can use it at library
// shutdown.
#endif /* USE_QUEUING_LOCK_FOR_BGET */
#endif /* ! USE_CMP_XCHG_FOR_BGET */
#endif /* KMP_USE_BGET */
PACKED_REDUCTION_METHOD_T
packed_reduction_method; /* stored by __kmpc_reduce*(), used by
__kmpc_end_reduce*() */
} kmp_local_t;
#define KMP_CHECK_UPDATE(a, b) \
if ((a) != (b)) \
(a) = (b)
#define KMP_CHECK_UPDATE_SYNC(a, b) \
if ((a) != (b)) \
TCW_SYNC_PTR((a), (b))
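/* Usage sketch (illustrative): these guards avoid dirtying a cache line with
   a redundant store when the location already holds the desired value, e.g.

     KMP_CHECK_UPDATE(team->t.t_argc, argc);      // plain conditional store
     KMP_CHECK_UPDATE_SYNC(team->t.t_ident, loc); // same, via TCW_SYNC_PTR()
*/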
#define get__blocktime(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
#define get__bt_set(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
#if KMP_USE_MONITOR
#define get__bt_intervals(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
#endif
#define get__nested_2(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nested)
#define get__dynamic_2(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
#define get__nproc_2(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
#define get__sched_2(xteam, xtid) \
((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
#define set__blocktime_team(xteam, xtid, xval) \
(((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
(xval))
#if KMP_USE_MONITOR
#define set__bt_intervals_team(xteam, xtid, xval) \
(((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
(xval))
#endif
#define set__bt_set_team(xteam, xtid, xval) \
(((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
#define set__nested(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.nested) = (xval))
#define get__nested(xthread) \
(((xthread)->th.th_current_task->td_icvs.nested) ? (FTN_TRUE) : (FTN_FALSE))
#define set__dynamic(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
#define get__dynamic(xthread) \
(((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
#define set__nproc(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
#define set__thread_limit(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
#define set__max_active_levels(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
#define set__sched(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.sched) = (xval))
#if OMP_40_ENABLED
#define set__proc_bind(xthread, xval) \
(((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
#define get__proc_bind(xthread) \
((xthread)->th.th_current_task->td_icvs.proc_bind)
#endif /* OMP_40_ENABLED */
// OpenMP tasking data structures
typedef enum kmp_tasking_mode {
tskm_immediate_exec = 0,
tskm_extra_barrier = 1,
tskm_task_teams = 2,
tskm_max = 2
} kmp_tasking_mode_t;
extern kmp_tasking_mode_t
__kmp_tasking_mode; /* determines how/when to execute tasks */
extern int __kmp_task_stealing_constraint;
#if OMP_40_ENABLED
extern kmp_int32 __kmp_default_device; // Set via OMP_DEFAULT_DEVICE if
// specified, defaults to 0 otherwise
#endif
#if OMP_45_ENABLED
// Set via OMP_MAX_TASK_PRIORITY if specified, defaults to 0 otherwise
extern kmp_int32 __kmp_max_task_priority;
// Set via KMP_TASKLOOP_MIN_TASKS if specified, defaults to 0 otherwise
extern kmp_uint64 __kmp_taskloop_min_tasks;
#endif
/* NOTE: kmp_taskdata_t and kmp_task_t structures allocated in single block with
taskdata first */
#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
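/* Layout implied by the two macros above: each task is carved from a single
   allocation that places the kmp_taskdata_t header immediately before the
   kmp_task_t, so the two views convert with pointer arithmetic alone:

     [ kmp_taskdata_t | kmp_task_t + private data ... ]
     ^ taskdata        ^ task == KMP_TASKDATA_TO_TASK(taskdata)

     kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); // step back one header
*/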
// The tt_found_tasks flag is a signal to all threads in the team that tasks
// were spawned and queued since the previous barrier release.
#define KMP_TASKING_ENABLED(task_team) \
(TCR_SYNC_4((task_team)->tt.tt_found_tasks) == TRUE)
/*!
@ingroup BASIC_TYPES
@{
*/
/*!
*/
typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
#if OMP_40_ENABLED || OMP_45_ENABLED
typedef union kmp_cmplrdata {
#if OMP_45_ENABLED
kmp_int32 priority; /**< priority specified by user for the task */
#endif // OMP_45_ENABLED
#if OMP_40_ENABLED
kmp_routine_entry_t
destructors; /* pointer to function to invoke destructors of
firstprivate C++ objects */
#endif // OMP_40_ENABLED
/* future data */
} kmp_cmplrdata_t;
#endif
/* sizeof_kmp_task_t passed as arg to kmpc_omp_task call */
/*!
*/
typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? */
void *shareds; /**< pointer to block of pointers to shared vars */
kmp_routine_entry_t
routine; /**< pointer to routine to call for executing task */
kmp_int32 part_id; /**< part id for the task */
#if OMP_40_ENABLED || OMP_45_ENABLED
kmp_cmplrdata_t
data1; /* Two known optional additions: destructors and priority */
kmp_cmplrdata_t data2; /* Process destructors first, priority second */
/* future data */
#endif
/* private vars */
} kmp_task_t;
/*!
@}
*/
#if OMP_40_ENABLED
typedef struct kmp_taskgroup {
std::atomic<kmp_int32> count; // number of allocated and incomplete tasks
std::atomic<kmp_int32>
cancel_request; // request for cancellation of this taskgroup
struct kmp_taskgroup *parent; // parent taskgroup
#if OMP_50_ENABLED
// Block of data to perform task reduction
void *reduce_data; // reduction related info
kmp_int32 reduce_num_data; // number of data items to reduce
#endif
} kmp_taskgroup_t;
// forward declarations
typedef union kmp_depnode kmp_depnode_t;
typedef struct kmp_depnode_list kmp_depnode_list_t;
typedef struct kmp_dephash_entry kmp_dephash_entry_t;
// Compiler sends us this info:
typedef struct kmp_depend_info {
kmp_intptr_t base_addr;
size_t len;
struct {
bool in : 1;
bool out : 1;
bool mtx : 1;
} flags;
} kmp_depend_info_t;
// Internal structures to work with task dependencies:
struct kmp_depnode_list {
kmp_depnode_t *node;
kmp_depnode_list_t *next;
};
// Max number of mutexinoutset dependencies per node
#define MAX_MTX_DEPS 4
typedef struct kmp_base_depnode {
kmp_depnode_list_t *successors; /* used under lock */
kmp_task_t *task; /* non-NULL if depnode is active, used under lock */
kmp_lock_t *mtx_locks[MAX_MTX_DEPS]; /* lock mutexinoutset dependent tasks */
kmp_int32 mtx_num_locks; /* number of locks in mtx_locks array */
kmp_lock_t lock; /* guards shared fields: task, successors */
#if KMP_SUPPORT_GRAPH_OUTPUT
kmp_uint32 id;
#endif
std::atomic<kmp_int32> npredecessors;
std::atomic<kmp_int32> nrefs;
} kmp_base_depnode_t;
union KMP_ALIGN_CACHE kmp_depnode {
double dn_align; /* use worst case alignment */
char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
kmp_base_depnode_t dn;
};
struct kmp_dephash_entry {
kmp_intptr_t addr;
kmp_depnode_t *last_out;
kmp_depnode_list_t *last_ins;
kmp_depnode_list_t *last_mtxs;
kmp_int32 last_flag;
kmp_lock_t *mtx_lock; /* is referenced by depnodes w/mutexinoutset dep */
kmp_dephash_entry_t *next_in_bucket;
};
typedef struct kmp_dephash {
kmp_dephash_entry_t **buckets;
size_t size;
#ifdef KMP_DEBUG
kmp_uint32 nelements;
kmp_uint32 nconflicts;
#endif
} kmp_dephash_t;
#if OMP_50_ENABLED
typedef struct kmp_task_affinity_info {
kmp_intptr_t base_addr;
size_t len;
struct {
bool flag1 : 1;
bool flag2 : 1;
kmp_int32 reserved : 30;
} flags;
} kmp_task_affinity_info_t;
#endif
#endif
#ifdef BUILD_TIED_TASK_STACK
/* Tied Task stack definitions */
typedef struct kmp_stack_block {
kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
struct kmp_stack_block *sb_next;
struct kmp_stack_block *sb_prev;
} kmp_stack_block_t;
typedef struct kmp_task_stack {
kmp_stack_block_t ts_first_block; // first block of stack entries
kmp_taskdata_t **ts_top; // pointer to the top of stack
kmp_int32 ts_entries; // number of entries on the stack
} kmp_task_stack_t;
#endif // BUILD_TIED_TASK_STACK
typedef struct kmp_tasking_flags { /* Total struct must be exactly 32 bits */
/* Compiler flags */ /* Total compiler flags must be 16 bits */
unsigned tiedness : 1; /* task is either tied (1) or untied (0) */
unsigned final : 1; /* task is final(1) so execute immediately */
unsigned merged_if0 : 1; /* no __kmpc_task_{begin/complete}_if0 calls in if0
code path */
#if OMP_40_ENABLED
unsigned destructors_thunk : 1; /* set if the compiler creates a thunk to
invoke destructors from the runtime */
#if OMP_45_ENABLED
unsigned proxy : 1; /* task is a proxy task (it will be executed outside the
context of the RTL) */
unsigned priority_specified : 1; /* set if the compiler provides priority
setting for the task */
unsigned reserved : 10; /* reserved for compiler use */
#else
unsigned reserved : 12; /* reserved for compiler use */
#endif
#else // OMP_40_ENABLED
unsigned reserved : 13; /* reserved for compiler use */
#endif // OMP_40_ENABLED
/* Library flags */ /* Total library flags must be 16 bits */
unsigned tasktype : 1; /* task is either explicit(1) or implicit (0) */
unsigned task_serial : 1; // task is executed immediately (1) or deferred (0)
unsigned tasking_ser : 1; // all tasks in team are either executed immediately
// (1) or may be deferred (0)
unsigned team_serial : 1; // entire team is serial (1) [1 thread] or parallel
// (0) [>= 2 threads]
/* If either team_serial or tasking_ser is set, task team may be NULL */
/* Task State Flags: */
unsigned started : 1; /* 1==started, 0==not started */
unsigned executing : 1; /* 1==executing, 0==not executing */
unsigned complete : 1; /* 1==complete, 0==not complete */
unsigned freed : 1; /* 1==freed, 0==allocated */
unsigned native : 1; /* 1==gcc-compiled task, 0==intel */
unsigned reserved31 : 7; /* reserved for library use */
} kmp_tasking_flags_t;
struct kmp_taskdata { /* aligned during dynamic allocation */
kmp_int32 td_task_id; /* id, assigned by debugger */
kmp_tasking_flags_t td_flags; /* task flags */
kmp_team_t *td_team; /* team for this task */
kmp_info_p *td_alloc_thread; /* thread that allocated data structures */
/* Currently not used except for perhaps IDB */
kmp_taskdata_t *td_parent; /* parent task */
kmp_int32 td_level; /* task nesting level */
std::atomic<kmp_int32> td_untied_count; // untied task active parts counter
ident_t *td_ident; /* task identifier */
// Taskwait data.
ident_t *td_taskwait_ident;
kmp_uint32 td_taskwait_counter;
kmp_int32 td_taskwait_thread; /* gtid + 1 of thread encountered taskwait */
KMP_ALIGN_CACHE kmp_internal_control_t
td_icvs; /* Internal control variables for the task */
KMP_ALIGN_CACHE std::atomic<kmp_int32>
td_allocated_child_tasks; /* Child tasks (+ current task) not yet
deallocated */
std::atomic<kmp_int32>
td_incomplete_child_tasks; /* Child tasks not yet complete */
#if OMP_40_ENABLED
kmp_taskgroup_t
*td_taskgroup; // Each task keeps pointer to its current taskgroup
kmp_dephash_t
*td_dephash; // Dependencies for children tasks are tracked from here
kmp_depnode_t
*td_depnode; // Pointer to graph node if this task has dependencies
#endif // OMP_40_ENABLED
#if OMP_45_ENABLED
kmp_task_team_t *td_task_team;
kmp_int32 td_size_alloc; // The size of task structure, including shareds etc.
#if defined(KMP_GOMP_COMPAT)
// 4 or 8 byte integers for the loop bounds in GOMP_taskloop
kmp_int32 td_size_loop_bounds;
#endif
#endif // OMP_45_ENABLED
kmp_taskdata_t *td_last_tied; // keep tied task for task scheduling constraint
#if defined(KMP_GOMP_COMPAT) && OMP_45_ENABLED
// GOMP sends in a copy function for copy constructors
void (*td_copy_func)(void *, void *);
#endif
#if OMPT_SUPPORT
ompt_task_info_t ompt_task_info;
#endif
}; // struct kmp_taskdata
// Make sure padding above worked
KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0);
// Data for task team but per thread
typedef struct kmp_base_thread_data {
kmp_info_p *td_thr; // Pointer back to thread info
// Used only in __kmp_execute_tasks_template, maybe not avail until task is
// queued?
kmp_bootstrap_lock_t td_deque_lock; // Lock for accessing deque
kmp_taskdata_t *
*td_deque; // Deque of tasks encountered by td_thr, dynamically allocated
kmp_int32 td_deque_size; // Size of deque
kmp_uint32 td_deque_head; // Head of deque (will wrap)
kmp_uint32 td_deque_tail; // Tail of deque (will wrap)
kmp_int32 td_deque_ntasks; // Number of tasks in deque
// GEH: shouldn't this be volatile since used in while-spin?
kmp_int32 td_deque_last_stolen; // Thread number of last successful steal
#ifdef BUILD_TIED_TASK_STACK
kmp_task_stack_t td_susp_tied_tasks; // Stack of suspended tied tasks for task
// scheduling constraint
#endif // BUILD_TIED_TASK_STACK
} kmp_base_thread_data_t;
#define TASK_DEQUE_BITS 8 // Used solely to define INITIAL_TASK_DEQUE_SIZE
#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
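/* Usage sketch (illustrative; "td" is a kmp_base_thread_data_t): deque sizes
   are kept a power of two, so the head/tail counters wrap with
   TASK_DEQUE_MASK() instead of a modulo. A push under td_deque_lock looks
   roughly like:

     td.td_deque[td.td_deque_tail] = taskdata;
     td.td_deque_tail = (td.td_deque_tail + 1) & TASK_DEQUE_MASK(td);
     td.td_deque_ntasks++;
*/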
typedef union KMP_ALIGN_CACHE kmp_thread_data {
kmp_base_thread_data_t td;
double td_align; /* use worst case alignment */
char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
} kmp_thread_data_t;
// Data for task teams which are used when tasking is enabled for the team
typedef struct kmp_base_task_team {
kmp_bootstrap_lock_t
tt_threads_lock; /* Lock used to allocate per-thread part of task team */
/* must be bootstrap lock since used at library shutdown*/
kmp_task_team_t *tt_next; /* For linking the task team free list */
kmp_thread_data_t
*tt_threads_data; /* Array of per-thread structures for task team */
/* Data survives task team deallocation */
kmp_int32 tt_found_tasks; /* Have we found tasks and queued them while
executing this team? */
/* TRUE means tt_threads_data is set up and initialized */
kmp_int32 tt_nproc; /* #threads in team */
kmp_int32
tt_max_threads; /* number of entries allocated for threads_data array */
#if OMP_45_ENABLED
kmp_int32
tt_found_proxy_tasks; /* Have we found proxy tasks since last barrier */
#endif
kmp_int32 tt_untied_task_encountered;
KMP_ALIGN_CACHE
std::atomic<kmp_int32> tt_unfinished_threads; /* #threads still active */
KMP_ALIGN_CACHE
volatile kmp_uint32
tt_active; /* is the team still actively executing tasks */
} kmp_base_task_team_t;
union KMP_ALIGN_CACHE kmp_task_team {
kmp_base_task_team_t tt;
double tt_align; /* use worst case alignment */
char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
};
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
// Free lists keep same-size free memory slots for fast memory allocation
// routines
typedef struct kmp_free_list {
void *th_free_list_self; // Self-allocated tasks free list
void *th_free_list_sync; // Self-allocated tasks stolen/returned by other
// threads
void *th_free_list_other; // Non-self free list (to be returned to owner's
// sync list)
} kmp_free_list_t;
#endif
#if KMP_NESTED_HOT_TEAMS
// Hot teams array keeps hot teams and their sizes for given thread. Hot teams
// are not put in teams pool, and they don't put threads in threads pool.
typedef struct kmp_hot_team_ptr {
kmp_team_p *hot_team; // pointer to hot_team of given nesting level
kmp_int32 hot_team_nth; // number of threads allocated for the hot_team
} kmp_hot_team_ptr_t;
#endif
#if OMP_40_ENABLED
typedef struct kmp_teams_size {
kmp_int32 nteams; // number of teams in a league
kmp_int32 nth; // number of threads in each team of the league
} kmp_teams_size_t;
#endif
// This struct stores a thread that acts as a "root" for a contention
// group. Contention groups are rooted at kmp_root threads, but also at
// each master thread of each team created in the teams construct.
// This struct therefore also stores a thread_limit associated with
// that contention group, and a counter to track the number of threads
// active in that contention group. Each thread has a list of these: CG
// root threads have an entry in their list in which cg_root refers to
// the thread itself, whereas other workers in the CG will have a
// single entry where cg_root is same as the entry containing their CG
// root. When a thread encounters a teams construct, it will add a new
// entry to the front of its list, because it now roots a new CG.
typedef struct kmp_cg_root {
kmp_info_p *cg_root; // "root" thread for a contention group
// The CG root's limit comes from OMP_THREAD_LIMIT for root threads, or
// thread_limit clause for teams masters
kmp_int32 cg_thread_limit;
kmp_int32 cg_nthreads; // Count of active threads in CG rooted at cg_root
struct kmp_cg_root *up; // pointer to higher level CG root in list
} kmp_cg_root_t;
// OpenMP thread data structures
typedef struct KMP_ALIGN_CACHE kmp_base_info {
/* Start with the readonly data which is cache aligned and padded. This is
written by the master before the thread starts working. Uber masters may
update themselves later. Usage does not consider serialized regions. */
kmp_desc_t th_info;
kmp_team_p *th_team; /* team we belong to */
kmp_root_p *th_root; /* pointer to root of task hierarchy */
kmp_info_p *th_next_pool; /* next available thread in the pool */
kmp_disp_t *th_dispatch; /* thread's dispatch data */
int th_in_pool; /* in thread pool (32 bits for TCR/TCW) */
/* The following are cached from the team info structure */
/* TODO use these in more places as determined to be needed via profiling */
int th_team_nproc; /* number of threads in a team */
kmp_info_p *th_team_master; /* the team's master thread */
int th_team_serialized; /* team is serialized */
#if OMP_40_ENABLED
microtask_t th_teams_microtask; /* save entry address for teams construct */
int th_teams_level; /* save initial level of teams construct */
/* it is 0 on device but may be any on host */
#endif
/* The blocktime info is copied from the team struct to the thread struct */
/* at the start of a barrier, and the values stored in the team are used */
/* at points in the code where the team struct is no longer guaranteed */
/* to exist (from the POV of worker threads). */
#if KMP_USE_MONITOR
int th_team_bt_intervals;
int th_team_bt_set;
#else
kmp_uint64 th_team_bt_intervals;
#endif
#if KMP_AFFINITY_SUPPORTED
kmp_affin_mask_t *th_affin_mask; /* thread's current affinity mask */
#endif
#if OMP_50_ENABLED
void *const *th_def_allocator; /* per implicit task default allocator */
#endif
/* The data set by the master at reinit, then R/W by the worker */
KMP_ALIGN_CACHE int
th_set_nproc; /* if > 0, then only use this request for the next fork */
#if KMP_NESTED_HOT_TEAMS
kmp_hot_team_ptr_t *th_hot_teams; /* array of hot teams */
#endif
#if OMP_40_ENABLED
kmp_proc_bind_t
th_set_proc_bind; /* if != proc_bind_default, use request for next fork */
kmp_teams_size_t
th_teams_size; /* number of teams/threads in teams construct */
#if KMP_AFFINITY_SUPPORTED
int th_current_place; /* place currently bound to */
int th_new_place; /* place to bind to in par reg */
int th_first_place; /* first place in partition */
int th_last_place; /* last place in partition */
#endif
#endif
#if OMP_50_ENABLED
int th_prev_level; /* previous level for affinity format */
int th_prev_num_threads; /* previous num_threads for affinity format */
#endif
#if USE_ITT_BUILD
kmp_uint64 th_bar_arrive_time; /* arrival to barrier timestamp */
kmp_uint64 th_bar_min_time; /* minimum arrival time at the barrier */
kmp_uint64 th_frame_time; /* frame timestamp */
#endif /* USE_ITT_BUILD */
kmp_local_t th_local;
struct private_common *th_pri_head;
/* Now the data only used by the worker (after initial allocation) */
/* TODO the first serial team should actually be stored in the info_t
structure. this will help reduce initial allocation overhead */
KMP_ALIGN_CACHE kmp_team_p
*th_serial_team; /*serialized team held in reserve*/
#if OMPT_SUPPORT
ompt_thread_info_t ompt_thread_info;
#endif
/* The following are also read by the master during reinit */
struct common_table *th_pri_common;
volatile kmp_uint32 th_spin_here; /* thread-local location for spinning */
/* while awaiting queuing lock acquire */
volatile void *th_sleep_loc; // this points at a kmp_flag<T>
ident_t *th_ident;
unsigned th_x; // Random number generator data
unsigned th_a; // Random number generator data
/* Tasking-related data for the thread */
kmp_task_team_t *th_task_team; // Task team struct
kmp_taskdata_t *th_current_task; // Innermost Task being executed
kmp_uint8 th_task_state; // alternating 0/1 for task team identification
kmp_uint8 *th_task_state_memo_stack; // Stack holding memos of th_task_state
// at nested levels
kmp_uint32 th_task_state_top; // Top element of th_task_state_memo_stack
kmp_uint32 th_task_state_stack_sz; // Size of th_task_state_memo_stack
kmp_uint32 th_reap_state; // Non-zero indicates thread is not
// tasking, thus safe to reap
/* More stuff for keeping track of active/sleeping threads (this part is
written by the worker thread) */
kmp_uint8 th_active_in_pool; // included in count of #active threads in pool
int th_active; // ! sleeping; 32 bits for TCR/TCW
struct cons_header *th_cons; // used for consistency check
#if KMP_USE_HIER_SCHED
// used for hierarchical scheduling
kmp_hier_private_bdata_t *th_hier_bar_data;
#endif
/* Add the synchronizing data which is cache aligned and padded. */
KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
KMP_ALIGN_CACHE volatile kmp_int32
th_next_waiting; /* gtid+1 of next thread on lock wait queue, 0 if none */
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
#define NUM_LISTS 4
kmp_free_list_t th_free_lists[NUM_LISTS]; // Free lists for fast memory
// allocation routines
#endif
#if KMP_OS_WINDOWS
kmp_win32_cond_t th_suspend_cv;
kmp_win32_mutex_t th_suspend_mx;
int th_suspend_init;
#endif
#if KMP_OS_UNIX
kmp_cond_align_t th_suspend_cv;
kmp_mutex_align_t th_suspend_mx;
int th_suspend_init_count;
#endif
#if USE_ITT_BUILD
kmp_itt_mark_t th_itt_mark_single;
// alignment ???
#endif /* USE_ITT_BUILD */
#if KMP_STATS_ENABLED
kmp_stats_list *th_stats;
#endif
#if KMP_OS_UNIX
std::atomic<bool> th_blocking;
#endif
kmp_cg_root_t *th_cg_roots; // list of cg_roots associated with this thread
} kmp_base_info_t;
typedef union KMP_ALIGN_CACHE kmp_info {
double th_align; /* use worst case alignment */
char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
kmp_base_info_t th;
} kmp_info_t;
// OpenMP thread team data structures
typedef struct kmp_base_data { volatile kmp_uint32 t_value; } kmp_base_data_t;
typedef union KMP_ALIGN_CACHE kmp_sleep_team {
double dt_align; /* use worst case alignment */
char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
kmp_base_data_t dt;
} kmp_sleep_team_t;
typedef union KMP_ALIGN_CACHE kmp_ordered_team {
double dt_align; /* use worst case alignment */
char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
kmp_base_data_t dt;
} kmp_ordered_team_t;
typedef int (*launch_t)(int gtid);
/* Minimum number of ARGV entries to malloc if necessary */
#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
// Set up how many argv pointers will fit in cache lines containing
// t_inline_argv. Historically, we have supported at least 96 bytes. Using a
// larger value for more space between the master write/worker read section and
// read/write by all section seems to buy more performance on EPCC PARALLEL.
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_INLINE_ARGV_BYTES \
(4 * CACHE_LINE - \
((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
CACHE_LINE))
#else
#define KMP_INLINE_ARGV_BYTES \
(2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
#endif
#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
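/* Worked example (assuming x86-64 with CACHE_LINE == 64 and 8-byte pointers,
   i.e. KMP_PTR_SKIP == 8): the remainder term is
   (3*8 + 2*4 + 2*1 + 2 + 4) % 64 == 40, so KMP_INLINE_ARGV_BYTES is
   4*64 - 40 == 216 and KMP_INLINE_ARGV_ENTRIES is 216 / 8 == 27 inline
   argument slots. */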
typedef struct KMP_ALIGN_CACHE kmp_base_team {
// Synchronization Data
// ---------------------------------------------------------------------------
KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
kmp_balign_team_t t_bar[bs_last_barrier];
std::atomic<int> t_construct; // count of single directive encountered by team
char pad[sizeof(kmp_lock_t)]; // padding to maintain performance on big iron
// Master only
// ---------------------------------------------------------------------------
KMP_ALIGN_CACHE int t_master_tid; // tid of master in parent team
int t_master_this_cons; // "this_construct" single counter of master in parent
// team
ident_t *t_ident; // if volatile, have to change too much other crud to
// volatile too
kmp_team_p *t_parent; // parent team
kmp_team_p *t_next_pool; // next free team in the team pool
kmp_disp_t *t_dispatch; // thread's dispatch data
kmp_task_team_t *t_task_team[2]; // Task team struct; switch between 2
#if OMP_40_ENABLED
kmp_proc_bind_t t_proc_bind; // bind type for par region
#endif // OMP_40_ENABLED
#if USE_ITT_BUILD
kmp_uint64 t_region_time; // region begin timestamp
#endif /* USE_ITT_BUILD */
// Master write, workers read
// --------------------------------------------------------------------------
KMP_ALIGN_CACHE void **t_argv;
int t_argc;
int t_nproc; // number of threads in team
microtask_t t_pkfn;
launch_t t_invoke; // procedure to launch the microtask
#if OMPT_SUPPORT
ompt_team_info_t ompt_team_info;
ompt_lw_taskteam_t *ompt_serialized_team_info;
#endif
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_int8 t_fp_control_saved;
kmp_int8 t_pad2b;
kmp_int16 t_x87_fpu_control_word; // FP control regs
kmp_uint32 t_mxcsr;
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
KMP_ALIGN_CACHE kmp_info_t **t_threads;
kmp_taskdata_t
*t_implicit_task_taskdata; // Taskdata for the thread's implicit task
int t_level; // nested parallel level
KMP_ALIGN_CACHE int t_max_argc;
int t_max_nproc; // max threads this team can handle (dynamically expandable)
int t_serialized; // levels deep of serialized teams
dispatch_shared_info_t *t_disp_buffer; // buffers for dispatch system
int t_id; // team's id, assigned by debugger.
int t_active_level; // nested active parallel level
kmp_r_sched_t t_sched; // run-time schedule for the team
#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
int t_first_place; // first & last place in parent thread's partition.
int t_last_place; // Restore these values to master after par region.
#endif // OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
#if OMP_50_ENABLED
int t_display_affinity;
#endif
int t_size_changed; // team size was changed?: 0: no, 1: yes, -1: changed via
// omp_set_num_threads() call
#if OMP_50_ENABLED
void *const *t_def_allocator; /* per implicit task default allocator */
#endif
// Read/write by workers as well
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
// Using CACHE_LINE=64 reduces memory footprint, but causes a big perf
// regression of epcc 'parallel' and 'barrier' on fxe256lin01. This extra
// padding serves to fix the performance of epcc 'parallel' and 'barrier' when
// CACHE_LINE=64. TODO: investigate more and get rid of this padding.
char dummy_padding[1024];
#endif
// Internal control stack for additional nested teams.
KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
// for SERIALIZED teams nested 2 or more levels deep
#if OMP_40_ENABLED
// typed flag to store request state of cancellation
std::atomic<kmp_int32> t_cancel_request;
#endif
int t_master_active; // save on fork, restore on join
kmp_taskq_t t_taskq; // this team's task queue
void *t_copypriv_data; // team specific pointer to copyprivate data array
#if KMP_OS_WINDOWS
std::atomic<kmp_uint32> t_copyin_counter;
#endif
#if USE_ITT_BUILD
void *t_stack_id; // team specific stack stitching id (for ittnotify)
#endif /* USE_ITT_BUILD */
} kmp_base_team_t;
union KMP_ALIGN_CACHE kmp_team {
kmp_base_team_t t;
double t_align; /* use worst case alignment */
char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
};
typedef union KMP_ALIGN_CACHE kmp_time_global {
double dt_align; /* use worst case alignment */
char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
kmp_base_data_t dt;
} kmp_time_global_t;
typedef struct kmp_base_global {
/* cache-aligned */
kmp_time_global_t g_time;
/* non cache-aligned */
volatile int g_abort;
volatile int g_done;
int g_dynamic;
enum dynamic_mode g_dynamic_mode;
} kmp_base_global_t;
typedef union KMP_ALIGN_CACHE kmp_global {
kmp_base_global_t g;
double g_align; /* use worst case alignment */
char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
} kmp_global_t;
typedef struct kmp_base_root {
// TODO: GEH - combine r_active with r_in_parallel then r_active ==
// (r_in_parallel >= 0)
// TODO: GEH - then replace r_active with t_active_levels if we can to reduce
// the synch overhead or keeping r_active
volatile int r_active; /* TRUE if some region in a nest has > 1 thread */
// GEH: This is misnamed, should be r_in_parallel
volatile int r_nested; // TODO: GEH - This is unused, just remove it entirely.
// keeps a count of active parallel regions per root
std::atomic<int> r_in_parallel;
// GEH: This is misnamed, should be r_active_levels
kmp_team_t *r_root_team;
kmp_team_t *r_hot_team;
kmp_info_t *r_uber_thread;
kmp_lock_t r_begin_lock;
volatile int r_begin;
int r_blocktime; /* blocktime for this root and descendants */
} kmp_base_root_t;
typedef union KMP_ALIGN_CACHE kmp_root {
kmp_base_root_t r;
double r_align; /* use worst case alignment */
char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
} kmp_root_t;
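/* Note on the union-with-padding idiom used for kmp_team, kmp_global and
   kmp_root above: the char array rounds the structure up to a whole number
   of cache lines and the double member forces worst-case scalar alignment,
   so that (conceptually)

     sizeof(kmp_root_t) == KMP_PAD(kmp_base_root_t, CACHE_LINE)

   and adjacent instances never share a cache line. */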
struct fortran_inx_info {
kmp_int32 data;
};
/* ------------------------------------------------------------------------ */
extern int __kmp_settings;
extern int __kmp_duplicate_library_ok;
#if USE_ITT_BUILD
extern int __kmp_forkjoin_frames;
extern int __kmp_forkjoin_frames_mode;
#endif
extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
extern int __kmp_determ_red;
#ifdef KMP_DEBUG
extern int kmp_a_debug;
extern int kmp_b_debug;
extern int kmp_c_debug;
extern int kmp_d_debug;
extern int kmp_e_debug;
extern int kmp_f_debug;
#endif /* KMP_DEBUG */
/* For debug information logging using rotating buffer */
#define KMP_DEBUG_BUF_LINES_INIT 512
#define KMP_DEBUG_BUF_LINES_MIN 1
#define KMP_DEBUG_BUF_CHARS_INIT 128
#define KMP_DEBUG_BUF_CHARS_MIN 2
extern int
__kmp_debug_buf; /* TRUE means use buffer, FALSE means print to stderr */
extern int __kmp_debug_buf_lines; /* How many lines of debug stored in buffer */
extern int
__kmp_debug_buf_chars; /* How many characters allowed per line in buffer */
extern int __kmp_debug_buf_atomic; /* TRUE means use atomic update of buffer
entry pointer */
extern char *__kmp_debug_buffer; /* Debug buffer itself */
extern std::atomic<int> __kmp_debug_count; /* Counter for number of lines
printed in buffer so far */
extern int __kmp_debug_buf_warn_chars; /* Keep track of char increase
recommended in warnings */
/* end rotating debug buffer */
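/* Illustrative configuration of the rotating buffer (a sketch: the exact
   KMP_* spellings are parsed in kmp_settings.cpp and may vary by release):

     KMP_DEBUG_BUF=TRUE        # route debug output into the buffer
     KMP_DEBUG_BUF_LINES=1024  # lines kept before the buffer wraps
     KMP_DEBUG_BUF_CHARS=256   # characters allowed per line

   Values below the *_MIN limits above are expected to be clamped. */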
#ifdef KMP_DEBUG
extern int __kmp_par_range; /* +1 => only go par for constructs in range */
#define KMP_PAR_RANGE_ROUTINE_LEN 1024
extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
#define KMP_PAR_RANGE_FILENAME_LEN 1024
extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
extern int __kmp_par_range_lb;
extern int __kmp_par_range_ub;
#endif
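/* Sketch of the intended KMP_PAR_RANGE usage (hedged; kmp_settings.cpp
   holds the authoritative grammar):

     KMP_PAR_RANGE=routine=foo,range=5,25

   i.e., only parallel constructs in routine foo whose source lines fall in
   [5, 25] actually fork; everything else runs serialized. */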
/* For printing out dynamic storage map for threads and teams */
extern int
__kmp_storage_map; /* True means print storage map for threads and teams */
extern int __kmp_storage_map_verbose; /* True means storage map includes
placement info */
extern int __kmp_storage_map_verbose_specified;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern kmp_cpuinfo_t __kmp_cpuinfo;
#endif
extern volatile int __kmp_init_serial;
extern volatile int __kmp_init_gtid;
extern volatile int __kmp_init_common;
extern volatile int __kmp_init_middle;
extern volatile int __kmp_init_parallel;
#if KMP_USE_MONITOR
extern volatile int __kmp_init_monitor;
#endif
extern volatile int __kmp_init_user_locks;
extern int __kmp_init_counter;
extern int __kmp_root_counter;
extern int __kmp_version;
/* list of address of allocated caches for commons */
extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
/* Barrier algorithm types and options */
extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
extern kmp_uint32 __kmp_barrier_release_bb_dflt;
extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
extern char const *__kmp_barrier_type_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_name[bp_last_bar];
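/* Illustrative settings (assumed spellings; the canonical strings live in
   the __kmp_barrier_*_env_name tables):

     KMP_FORKJOIN_BARRIER="2,2"                  # gather,release branch bits
     KMP_FORKJOIN_BARRIER_PATTERN="hyper,hyper"  # gather,release patterns

   Branch bits give the tree fan-out (2 bits => 4-way); the pattern picks
   among the linear/tree/hyper/hierarchical algorithms. */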
/* Global Locks */
extern kmp_bootstrap_lock_t __kmp_initz_lock; /* control initialization */
extern kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */
extern kmp_bootstrap_lock_t __kmp_task_team_lock;
extern kmp_bootstrap_lock_t
__kmp_exit_lock; /* exit() is not always thread-safe */
#if KMP_USE_MONITOR
extern kmp_bootstrap_lock_t
__kmp_monitor_lock; /* control monitor thread creation */
#endif
extern kmp_bootstrap_lock_t
__kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and
__kmp_threads expansion to co-exist */
extern kmp_lock_t __kmp_global_lock; /* control OS/global access */
extern kmp_queuing_lock_t __kmp_dispatch_lock; /* control dispatch access */
extern kmp_lock_t __kmp_debug_lock; /* control I/O access for KMP_DEBUG */
/* used for yielding spin-waits */
extern unsigned int __kmp_init_wait; /* initial number of spin-tests */
extern unsigned int __kmp_next_wait; /* subsequent number of spin-tests */
extern enum library_type __kmp_library;
extern enum sched_type __kmp_sched; /* default runtime scheduling */
extern enum sched_type __kmp_static; /* default static scheduling method */
extern enum sched_type __kmp_guided; /* default guided scheduling method */
extern enum sched_type __kmp_auto; /* default auto scheduling method */
extern int __kmp_chunk; /* default runtime chunk size */
extern size_t __kmp_stksize; /* stack size per thread */
#if KMP_USE_MONITOR
extern size_t __kmp_monitor_stksize; /* stack size for monitor thread */
#endif
extern size_t __kmp_stkoffset; /* stack offset per thread */
extern int __kmp_stkpadding; /* Should we pad root thread(s) stack */
extern size_t
__kmp_malloc_pool_incr; /* incremental size of pool for kmp_malloc() */
extern int __kmp_env_stksize; /* was KMP_STACKSIZE specified? */
extern int __kmp_env_blocktime; /* was KMP_BLOCKTIME specified? */
extern int __kmp_env_checks; /* was KMP_CHECKS specified? */
extern int __kmp_env_consistency_check; // was KMP_CONSISTENCY_CHECK specified?
extern int __kmp_generate_warnings; /* should we issue warnings? */
extern int __kmp_reserve_warn; /* have we issued reserve_threads warning? */
#ifdef DEBUG_SUSPEND
extern int __kmp_suspend_count; /* count inside __kmp_suspend_template() */
#endif
extern kmp_uint32 __kmp_yield_init;
extern kmp_uint32 __kmp_yield_next;
#if KMP_USE_MONITOR
extern kmp_uint32 __kmp_yielding_on;
#endif
extern kmp_uint32 __kmp_yield_cycle;
extern kmp_int32 __kmp_yield_on_count;
extern kmp_int32 __kmp_yield_off_count;
/* ------------------------------------------------------------------------- */
extern int __kmp_allThreadsSpecified;
extern size_t __kmp_align_alloc;
/* following data protected by initialization routines */
extern int __kmp_xproc; /* number of processors in the system */
extern int __kmp_avail_proc; /* number of processors available to the process */
extern size_t __kmp_sys_min_stksize; /* system-defined minimum stack size */
extern int __kmp_sys_max_nth; /* system-imposed maximum number of threads */
// maximum total number of concurrently-existing threads on device
extern int __kmp_max_nth;
// maximum total number of concurrently-existing threads in a contention group
extern int __kmp_cg_max_nth;
extern int __kmp_teams_max_nth; // max threads used in a teams construct
extern int __kmp_threads_capacity; /* capacity of the arrays __kmp_threads and
__kmp_root */
extern int __kmp_dflt_team_nth; /* default number of threads in a parallel
region a la OMP_NUM_THREADS */
extern int __kmp_dflt_team_nth_ub; /* upper bound on __kmp_dflt_team_nth,
                                      determined at serial initialization */
extern int __kmp_tp_capacity; /* capacity of __kmp_threads if threadprivate is
used (fixed) */
extern int __kmp_tp_cached; /* whether threadprivate cache has been created
(__kmpc_threadprivate_cached()) */
extern int __kmp_dflt_nested; /* nested parallelism enabled by default a la
OMP_NESTED */
extern int __kmp_dflt_blocktime; /* number of milliseconds to wait before
blocking (env setting) */
#if KMP_USE_MONITOR
extern int
__kmp_monitor_wakeups; /* number of times monitor wakes up per second */
extern int __kmp_bt_intervals; /* number of monitor timestamp intervals before
blocking */
#endif
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt; /* whether blocktime has been forced to zero */
#endif /* KMP_ADJUST_BLOCKTIME */
#ifdef KMP_DFLT_NTH_CORES
extern int __kmp_ncores; /* Total number of cores for threads placement */
#endif
/* Number of millisecs to delay on abort for Intel(R) VTune(TM) tools */
extern int __kmp_abort_delay;
extern int __kmp_need_register_atfork_specified;
extern int
__kmp_need_register_atfork; /* At initialization, call pthread_atfork to
install fork handler */
extern int __kmp_gtid_mode; /* Method of getting gtid, values:
0 - not set, will be set at runtime
1 - using stack search
2 - dynamic TLS (pthread_getspecific(Linux* OS/OS
X*) or TlsGetValue(Windows* OS))
3 - static TLS (__declspec(thread) __kmp_gtid),
Linux* OS .so only. */
extern int
__kmp_adjust_gtid_mode; /* If true, adjust method based on #threads */
#ifdef KMP_TDATA_GTID
extern KMP_THREAD_LOCAL int __kmp_gtid;
#endif
extern int __kmp_tls_gtid_min; /* #threads below which use sp search for gtid */
extern int __kmp_foreign_tp; // If true, separate TP var for each foreign thread
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern int __kmp_inherit_fp_control; // copy fp creg(s) parent->workers at fork
extern kmp_int16 __kmp_init_x87_fpu_control_word; // init thread's FP ctrl reg
extern kmp_uint32 __kmp_init_mxcsr; /* init thread's mxcsr */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
extern int __kmp_dflt_max_active_levels; /* max_active_levels for nested
parallelism enabled by default via
OMP_MAX_ACTIVE_LEVELS */
extern int __kmp_dispatch_num_buffers; /* max possible dynamic loops in
concurrent execution per team */
#if KMP_NESTED_HOT_TEAMS
extern int __kmp_hot_teams_mode;
extern int __kmp_hot_teams_max_level;
#endif
#if KMP_OS_LINUX
extern enum clock_function_type __kmp_clock_function;
extern int __kmp_clock_function_param;
#endif /* KMP_OS_LINUX */
#if KMP_MIC_SUPPORTED
extern enum mic_type __kmp_mic_type;
#endif
#ifdef USE_LOAD_BALANCE
extern double __kmp_load_balance_interval; // load balance algorithm interval
#endif /* USE_LOAD_BALANCE */
// OpenMP 3.1 - Nested num threads array
typedef struct kmp_nested_nthreads_t {
int *nth;
int size;
int used;
} kmp_nested_nthreads_t;
extern kmp_nested_nthreads_t __kmp_nested_nth;
#if KMP_USE_ADAPTIVE_LOCKS
// Parameters for the speculative lock backoff system.
struct kmp_adaptive_backoff_params_t {
// Number of soft retries before it counts as a hard retry.
kmp_uint32 max_soft_retries;
// Badness is a bit mask: 0,1,3,7,15,...; on each hard failure another set
// bit is shifted in
kmp_uint32 max_badness;
};
extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;
#if KMP_DEBUG_ADAPTIVE_LOCKS
extern const char *__kmp_speculative_statsfile;
#endif
#endif // KMP_USE_ADAPTIVE_LOCKS
#if OMP_40_ENABLED
extern int __kmp_display_env; /* TRUE or FALSE */
extern int __kmp_display_env_verbose; /* TRUE if OMP_DISPLAY_ENV=VERBOSE */
extern int __kmp_omp_cancellation; /* TRUE or FALSE */
#endif
/* ------------------------------------------------------------------------- */
/* the following are protected by the fork/join lock */
/* write: lock read: anytime */
extern kmp_info_t **__kmp_threads; /* Descriptors for the threads */
/* read/write: lock */
extern volatile kmp_team_t *__kmp_team_pool;
extern volatile kmp_info_t *__kmp_thread_pool;
extern kmp_info_t *__kmp_thread_pool_insert_pt;
// total num threads reachable from some root thread including all root threads
extern volatile int __kmp_nth;
/* total number of threads reachable from some root thread including all root
threads, and those in the thread pool */
extern volatile int __kmp_all_nth;
extern int __kmp_thread_pool_nth;
extern std::atomic<int> __kmp_thread_pool_active_nth;
extern kmp_root_t **__kmp_root; /* root of thread hierarchy */
/* end data protected by fork/join lock */
/* ------------------------------------------------------------------------- */
#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))
// AT: Which way is correct?
// AT: 1. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team -> t.t_nproc;
// AT: 2. nproc = __kmp_threads[ ( gtid ) ] -> th.th_team_nproc;
#define __kmp_get_team_num_threads(gtid) \
(__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline bool KMP_UBER_GTID(int gtid) {
KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
__kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}
static inline int __kmp_tid_from_gtid(int gtid) {
KMP_DEBUG_ASSERT(gtid >= 0);
return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}
static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
KMP_DEBUG_ASSERT(tid >= 0 && team);
return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}
static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
KMP_DEBUG_ASSERT(thr);
return thr->th.th_info.ds.ds_gtid;
}
static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
KMP_DEBUG_ASSERT(gtid >= 0);
return __kmp_threads[gtid];
}
static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
KMP_DEBUG_ASSERT(gtid >= 0);
return __kmp_threads[gtid]->th.th_team;
}
/* ------------------------------------------------------------------------- */
extern kmp_global_t __kmp_global; /* global status */
extern kmp_info_t __kmp_monitor;
// For Debugging Support Library
extern std::atomic<kmp_int32> __kmp_team_counter;
// For Debugging Support Library
extern std::atomic<kmp_int32> __kmp_task_counter;
#if USE_DEBUGGER
#define _KMP_GEN_ID(counter) \
(__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#else
#define _KMP_GEN_ID(counter) (~0)
#endif /* USE_DEBUGGER */
#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
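/* Usage sketch (hypothetical call site): when the debugger support library
   is active each new team/task gets a unique id, e.g.

     team->t.t_id = KMP_GEN_TEAM_ID();

   otherwise the macro collapses to ~0 so no atomic traffic is paid. */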
/* ------------------------------------------------------------------------ */
extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
size_t size, char const *format, ...);
extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);
extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_fini(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);
extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);
extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);
extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);
extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif
extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);
extern void __kmp_set_num_threads(int new_nth, int gtid);
// Returns current thread (pointer to kmp_info_t). Current thread *must* be
// registered.
static inline kmp_info_t *__kmp_entry_thread() {
int gtid = __kmp_entry_gtid();
return __kmp_threads[gtid];
}
extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);
extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);
extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);
extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
#if USE_FAST_MEMORY
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size) \
___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr) \
___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#endif
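/* Sketch of the fast-memory path: each thread keeps small per-size free
   lists, so a typical (hypothetical) call pair is

     void *p = __kmp_fast_allocate(this_thr, 256);
     ...
     __kmp_fast_free(this_thr, p);

   with KMP_SRC_LOC_CURR supplying file/line bookkeeping in builds where
   source-location tracking is enabled and expanding to nothing otherwise. */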
extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size) \
___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize) \
___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size) \
___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr) \
___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);
#if OMP_40_ENABLED
extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
kmp_proc_bind_t proc_bind);
extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
int num_threads);
#endif
extern void __kmp_yield(int cond);
extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 lb,
kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint32 lb,
kmp_uint32 ub, kmp_int32 st,
kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int64 lb,
kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint64 lb,
kmp_uint64 ub, kmp_int64 st,
kmp_int64 chunk);
extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
kmp_int32 *p_last, kmp_int32 *p_lb,
kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
kmp_int32 *p_last, kmp_uint32 *p_lb,
kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
kmp_int32 *p_last, kmp_int64 *p_lb,
kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
kmp_int32 *p_last, kmp_uint64 *p_lb,
kmp_uint64 *p_ub, kmp_int64 *p_st);
extern void __kmpc_dispatch_fini_4(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmpc_dispatch_fini_8u(ident_t *loc, kmp_int32 gtid);
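/* Canonical consumption pattern the compiler generates for a dynamically
   scheduled loop (a sketch; names abbreviated):

     __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked,
                            0, N - 1, 1, chunk);
     kmp_int32 lb, ub, st, last;
     while (__kmpc_dispatch_next_4(loc, gtid, &last, &lb, &ub, &st)) {
       for (kmp_int32 i = lb; i <= ub; i += st)
         body(i);
     }

   The dispatch_fini_* entries are only needed for ordered variants. */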
#ifdef KMP_GOMP_COMPAT
extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int32 lb,
kmp_int32 ub, kmp_int32 st,
kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint32 lb,
kmp_uint32 ub, kmp_int32 st,
kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_int64 lb,
kmp_int64 ub, kmp_int64 st,
kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
enum sched_type schedule, kmp_uint64 lb,
kmp_uint64 ub, kmp_int64 st,
kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);
#endif /* KMP_GOMP_COMPAT */
extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_yield_4(kmp_uint32 volatile *spinner,
kmp_uint32 checker,
kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
void *obj);
extern void __kmp_wait_yield_4_ptr(void *spinner, kmp_uint32 checker,
kmp_uint32 (*pred)(void *, kmp_uint32),
void *obj);
class kmp_flag_32;
class kmp_flag_64;
class kmp_flag_oncore;
extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64 *flag,
int final_spin
#if USE_ITT_BUILD
,
void *itt_sync_obj
#endif
);
extern void __kmp_release_64(kmp_flag_64 *flag);
extern void __kmp_infinite_loop(void);
extern void __kmp_cleanup(void);
#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif
extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);
extern void __kmp_check_stack_overlap(kmp_info_t *thr);
extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern void
__kmp_initialize_system_tick(void); /* Initialize timer tick value */
#endif
extern void
__kmp_runtime_initialize(void); /* machine specific initialization */
extern void __kmp_runtime_destroy(void);
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
kmp_affin_mask_t *mask);
extern kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(void);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(
int gtid, int isa_root); /* set affinity according to KMP_AFFINITY */
#if OMP_40_ENABLED
extern void __kmp_affinity_set_place(int gtid);
#endif
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_OS_LINUX
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
#endif /* KMP_AFFINITY_SUPPORTED */
#if OMP_50_ENABLED
// No need for a KMP_AFFINITY_SUPPORTED guard, as only one field in the
// format string concerns affinity, so platforms that do not support
// affinity can still use the other fields, e.g., %n for the thread number
extern size_t __kmp_aux_capture_affinity(int gtid, const char *format,
kmp_str_buf_t *buffer);
extern void __kmp_aux_display_affinity(int gtid, const char *format);
#endif
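/* Example (hedged; field letters follow the OpenMP 5.0 affinity format):

     kmp_str_buf_t buf;
     __kmp_str_buf_init(&buf);
     __kmp_aux_capture_affinity(gtid, "thread %n/%N bound to %A", &buf);

   A NULL format is expected to fall back to the affinity-format ICV. */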
extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
#if KMP_USE_FUTEX
extern int __kmp_futex_determine_capable(void);
#endif // KMP_USE_FUTEX
extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);
extern double __kmp_read_cpu_time(void);
extern int __kmp_read_system_info(struct kmp_sys_info *info);
#if KMP_USE_MONITOR
extern void __kmp_create_monitor(kmp_info_t *th);
#endif
extern void *__kmp_launch_thread(kmp_info_t *thr);
extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);
#if KMP_OS_WINDOWS
extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);
#endif
#if KMP_USE_MONITOR
extern void __kmp_reap_monitor(kmp_info_t *th);
#endif
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);
extern int __kmp_try_suspend_mx(kmp_info_t *th);
extern void __kmp_lock_suspend_mx(kmp_info_t *th);
extern void __kmp_unlock_suspend_mx(kmp_info_t *th);
extern void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag);
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
extern void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag);
extern void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);
extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);
extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);
extern void __kmp_thread_sleep(int millis);
extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);
#if KMP_OS_UNIX
extern void __kmp_register_atfork(void);
#endif
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
int tid);
#if OMP_40_ENABLED
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
ompt_data_t ompt_parallel_data,
#endif
kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
#else
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
ompt_id_t ompt_parallel_id,
#endif
kmp_internal_control_t *new_icvs,
int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
#endif // OMP_40_ENABLED
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *,
kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
/* ------------------------------------------------------------------------ */
extern void __kmp_initialize_bget(kmp_info_t *th);
extern void __kmp_finalize_bget(kmp_info_t *th);
KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);
/* declarations for internal use */
extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
size_t reduce_size, void *reduce_data,
void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
/*!
* Tell the fork call which compiler generated the fork call, and therefore how
* to deal with the call.
*/
enum fork_context_e {
fork_context_gnu, /**< Called from GNU generated code, so must not invoke the
microtask internally. */
fork_context_intel, /**< Called from Intel generated code. */
fork_context_last
};
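/* For instance, the compiler-facing __kmpc_fork_call forwards here with
   fork_context_intel, while the GOMP compatibility shims pass
   fork_context_gnu and invoke the microtask themselves (a description of
   intent, not a verbatim excerpt). */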
extern int __kmp_fork_call(ident_t *loc, int gtid,
enum fork_context_e fork_context, kmp_int32 argc,
microtask_t microtask, launch_t invoker,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && KMP_OS_LINUX
va_list *ap
#else
va_list ap
#endif
);
extern void __kmp_join_call(ident_t *loc, int gtid
#if OMPT_SUPPORT
,
enum fork_context_e fork_context
#endif
#if OMP_40_ENABLED
,
int exit_teams = 0
#endif
);
extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
kmp_info_t *this_thr,
kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
kmp_info_t *this_thr,
kmp_team_t *team);
// should never have been exported
KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
#if OMP_40_ENABLED
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
extern int __kmp_aux_get_team_num();
extern int __kmp_aux_get_num_teams();
#endif
extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, int len);
/* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);
extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
kmp_tasking_flags_t *flags,
size_t sizeof_kmp_task_t,
size_t sizeof_shareds,
kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
kmp_team_t *team, int tid,
int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
kmp_flag_32 *flag, int final_spin,
int *thread_finished,
#if USE_ITT_BUILD
void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
kmp_int32 is_constrained);
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
kmp_flag_64 *flag, int final_spin,
int *thread_finished,
#if USE_ITT_BUILD
void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
kmp_flag_oncore *flag, int final_spin,
int *thread_finished,
#if USE_ITT_BUILD
void *itt_sync_obj,
#endif /* USE_ITT_BUILD */
kmp_int32 is_constrained);
extern void __kmp_free_task_team(kmp_info_t *thread,
kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
int always);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
#if USE_ITT_BUILD
,
void *itt_sync_obj
#endif /* USE_ITT_BUILD */
,
int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
int gtid);
extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);
#if KMP_OS_UNIX
extern int __kmp_read_from_file(char const *path, char const *format, ...);
#endif
/* ------------------------------------------------------------------------ */
//
// Assembly routines that have no compiler intrinsic replacement
//
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
#define __kmp_load_mxcsr(p) _mm_setcsr(*(p))
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
extern void __kmp_load_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_clear_x87_fpu_status_word();
#define KMP_X86_MXCSR_MASK 0xffffffc0 /* ignore status flags (6 lsb) */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
void *argv[]
#if OMPT_SUPPORT
,
void **exit_frame_ptr
#endif
);
/* ------------------------------------------------------------------------ */
KMP_EXPORT void __kmpc_begin(ident_t *, kmp_int32 flags);
KMP_EXPORT void __kmpc_end(ident_t *);
KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
kmpc_ctor_vec ctor,
kmpc_cctor_vec cctor,
kmpc_dtor_vec dtor,
size_t vector_length);
KMP_EXPORT void __kmpc_threadprivate_register(ident_t *, void *data,
kmpc_ctor ctor, kmpc_cctor cctor,
kmpc_dtor dtor);
KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
void *data, size_t size);
KMP_EXPORT kmp_int32 __kmpc_global_thread_num(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_global_num_threads(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_bound_thread_num(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_bound_num_threads(ident_t *);
KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *);
KMP_EXPORT void __kmpc_fork_call(ident_t *, kmp_int32 nargs,
kmpc_micro microtask, ...);
KMP_EXPORT void __kmpc_serialized_parallel(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_flush(ident_t *);
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
kmp_critical_name *);
#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
kmp_critical_name *, uint32_t hint);
#endif
KMP_EXPORT kmp_int32 __kmpc_barrier_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_barrier_master(ident_t *, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_barrier_master_nowait(ident_t *,
kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
kmp_int32 schedtype, kmp_int32 *plastiter,
kmp_int *plower, kmp_int *pupper,
kmp_int *pstride, kmp_int incr,
kmp_int chunk);
KMP_EXPORT void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
size_t cpy_size, void *cpy_data,
void (*cpy_func)(void *, void *),
kmp_int32 didit);
extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);
/* Taskq interface routines */
KMP_EXPORT kmpc_thunk_t *__kmpc_taskq(ident_t *loc, kmp_int32 global_tid,
kmpc_task_t taskq_task,
size_t sizeof_thunk,
size_t sizeof_shareds, kmp_int32 flags,
kmpc_shared_vars_t **shareds);
KMP_EXPORT void __kmpc_end_taskq(ident_t *loc, kmp_int32 global_tid,
kmpc_thunk_t *thunk);
KMP_EXPORT kmp_int32 __kmpc_task(ident_t *loc, kmp_int32 global_tid,
kmpc_thunk_t *thunk);
KMP_EXPORT void __kmpc_taskq_task(ident_t *loc, kmp_int32 global_tid,
kmpc_thunk_t *thunk, kmp_int32 status);
KMP_EXPORT void __kmpc_end_taskq_task(ident_t *loc, kmp_int32 global_tid,
kmpc_thunk_t *thunk);
KMP_EXPORT kmpc_thunk_t *__kmpc_task_buffer(ident_t *loc, kmp_int32 global_tid,
kmpc_thunk_t *taskq_thunk,
kmpc_task_t task);
/* OMP 3.0 tasking interface routines */
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
kmp_int32 flags,
size_t sizeof_kmp_task_t,
size_t sizeof_shareds,
kmp_routine_entry_t task_entry);
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
int end_part);
#if TASK_UNUSED
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
kmp_task_t *task);
#endif // TASK_UNUSED
/* ------------------------------------------------------------------------ */
#if OMP_40_ENABLED
KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
kmp_depend_info_t *noalias_dep_list);
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
kmp_int32 ndeps,
kmp_depend_info_t *dep_list,
kmp_int32 ndeps_noalias,
kmp_depend_info_t *noalias_dep_list);
extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
bool serialize_immediate);
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask);
KMP_EXPORT void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask);
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
kmp_int32 if_val, kmp_uint64 *lb,
kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
kmp_int32 sched, kmp_uint64 grainsize,
void *task_dup);
#endif
#if OMP_50_ENABLED
KMP_EXPORT void *__kmpc_task_reduction_init(int gtid, int num_data, void *data);
KMP_EXPORT void *__kmpc_task_reduction_get_th_data(int gtid, void *tg, void *d);
KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(
ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins,
kmp_task_affinity_info_t *affin_list);
#endif
#endif
/* Lock interface routines (fast versions with gtid passed in) */
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
void **user_lock);
#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
void **user_lock,
uintptr_t hint);
#endif
/* Interface to fast scalable reduce methods routines */
KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
kmp_critical_name *lck);
KMP_EXPORT kmp_int32 __kmpc_reduce(
ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
kmp_critical_name *lck);
/* Internal fast reduction routines */
extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
kmp_critical_name *lck);
// this function is exposed for testing the set/get/determine reduction method
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);
KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
// C++ port
// missing 'extern "C"' declarations
KMP_EXPORT kmp_int32 __kmpc_in_parallel(ident_t *loc);
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_threads);
#if OMP_40_ENABLED
KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
int proc_bind);
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
kmp_int32 num_teams,
kmp_int32 num_threads);
KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc,
kmpc_micro microtask, ...);
#endif
#if OMP_45_ENABLED
struct kmp_dim { // loop bounds info cast to kmp_int64
kmp_int64 lo; // lower
kmp_int64 up; // upper
kmp_int64 st; // stride
};
KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
kmp_int32 num_dims,
const struct kmp_dim *dims);
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
#endif
KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
void *data, size_t size,
void ***cache);
// Symbols for mutual detection of multiple linked OpenMP libraries (MS).
extern int _You_must_link_with_exactly_one_OpenMP_library;
extern int _You_must_link_with_Intel_OpenMP_library;
#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
extern int _You_must_link_with_Microsoft_OpenMP_library;
#endif
// The routines below are not exported.
// Consider making them 'static' in corresponding source files.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
void *data_addr,
size_t pc_size);
void __kmp_threadprivate_resize_cache(int newCapacity);
void __kmp_cleanup_threadprivate_caches();
// ompc_, kmpc_ entries moved from omp.h.
#if KMP_OS_WINDOWS
#define KMPC_CONVENTION __cdecl
#else
#define KMPC_CONVENTION
#endif
#ifndef __OMP_H
typedef enum omp_sched_t {
omp_sched_static = 1,
omp_sched_dynamic = 2,
omp_sched_guided = 3,
omp_sched_auto = 4
} omp_sched_t;
typedef void *kmp_affinity_mask_t;
#endif
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
KMP_EXPORT int KMPC_CONVENTION
kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);
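/* These entries back the kmp_/omp_ user APIs when omp.h is not used at
   build time; an illustrative caller (hypothetical values):

     kmpc_set_stacksize_s(8 * 1024 * 1024);  // 8 MB worker stacks
     ompc_set_schedule(omp_sched_guided, 4); // guided schedule, chunk 4

   mirroring kmp_set_stacksize_s() / omp_set_schedule() from omp.h. */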
#if OMP_50_ENABLED
enum kmp_target_offload_kind {
tgt_disabled = 0,
tgt_default = 1,
tgt_mandatory = 2
};
typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
// Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise
extern kmp_target_offload_kind_t __kmp_target_offload;
extern int __kmpc_get_target_offload();
#endif
#if OMP_40_ENABLED
// Constants used in libomptarget
#define KMP_DEVICE_DEFAULT -1 // This is libomptarget's default device.
#define KMP_HOST_DEVICE -10 // This is what it is in libomptarget, go figure.
#define KMP_DEVICE_ALL -11 // This is libomptarget's "all devices".
#endif // OMP_40_ENABLED
#if OMP_50_ENABLED
// OMP Pause Resource
// The following enum is used both to set the status in __kmp_pause_status, and
// as the internal equivalent of the externally-visible omp_pause_resource_t.
typedef enum kmp_pause_status_t {
kmp_not_paused = 0, // status is not paused, or, requesting resume
kmp_soft_paused = 1, // status is soft-paused, or, requesting soft pause
kmp_hard_paused = 2 // status is hard-paused, or, requesting hard pause
} kmp_pause_status_t;
// This stores the pause state of the runtime
extern kmp_pause_status_t __kmp_pause_status;
extern int __kmpc_pause_resource(kmp_pause_status_t level);
extern int __kmp_pause_resource(kmp_pause_status_t level);
// Soft resume sets __kmp_pause_status, and wakes up all threads.
extern void __kmp_resume_if_soft_paused();
// Hard resume simply resets the status to not paused. Library will appear to
// be uninitialized after hard pause. Let OMP constructs trigger required
// initializations.
static inline void __kmp_resume_if_hard_paused() {
if (__kmp_pause_status == kmp_hard_paused) {
__kmp_pause_status = kmp_not_paused;
}
}
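/* Usage sketch (hedged): the user-visible omp_pause_resource() entry maps
   its omp_pause_resource_t argument onto this enum and calls
   __kmpc_pause_resource(); e.g.

     if (__kmpc_pause_resource(kmp_hard_paused) == 0) {
       // runtime is hard-paused; the next OMP construct re-initializes it
     }

   A hard pause tears the threads down, hence the reset-and-lazy-reinit
   helper above. */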
#endif // OMP_50_ENABLED
#ifdef __cplusplus
}
#endif
#endif /* KMP_H */