// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
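// The directives below index into the runtime's task-affinity descriptor.
// For reference, a minimal sketch of that descriptor, assuming the layout used
// by the LLVM OpenMP runtime (field names are illustrative; the test only
// relies on member 0 being the base address and member 1 being the length):
//
//   typedef struct kmp_task_affinity_info {
//     intptr_t base_addr; // start address of the affinity range (member 0)
//     size_t len;         // size of the range in bytes (member 1)
//     int32_t flags;      // reserved; not inspected by this test
//   } kmp_task_affinity_info_t;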
// kmp_task_affinity_info_t affs[1];
// CHECK: [[AFFS_ADDR:%.+]] = alloca [1 x %struct.kmp_task_affinity_info_t],
// CHECK: [[TD:%.+]] = call ptr @__kmpc_omp_task_alloc(ptr @{{.+}}, i32 [[GTID:%.+]], i32 1, i64 40, i64 1, ptr @{{.+}})
// CHECK: [[AFFINE_LST_ADDR:%.+]] = getelementptr inbounds [1 x %struct.kmp_task_affinity_info_t], ptr [[AFFS_ADDR]], i64 0, i64 0
// CHECK: [[P:%.+]] = load ptr, ptr [[P_ADDR:%.+]],
// CHECK: [[A_VAL:%.+]] = load i32, ptr [[A_ADDR:%.+]],
// CHECK: [[A_SZ:%.+]] = sext i32 [[A_VAL]] to i64
// CHECK: [[BYTES:%.+]] = mul nuw i64 4, [[A_SZ]]
// CHECK: [[SZ:%.+]] = mul nuw i64 [[BYTES]], 10
// CHECK: [[A_VAL:%.+]] = load i32, ptr [[A_ADDR]],
// CHECK: [[A_SZ1:%.+]] = sext i32 [[A_VAL]] to i64
// CHECK: [[SIZE:%.+]] = mul nuw i64 [[SZ]], [[A_SZ1]]
// CHECK: [[AFFS_0_ADDR:%.+]] = getelementptr %struct.kmp_task_affinity_info_t, ptr [[AFFINE_LST_ADDR]], i64 0
// CHECK: [[AFFS_0_BASE_ADDR:%.+]] = getelementptr inbounds nuw %struct.kmp_task_affinity_info_t, ptr [[AFFS_0_ADDR]], i32 0, i32 0
// CHECK: [[P_INTPTR:%.+]] = ptrtoint ptr [[P]] to i64
// CHECK: store i64 [[P_INTPTR]], ptr [[AFFS_0_BASE_ADDR]],
// affs[0].size = sizeof(*p) * a * 10 * a;
// CHECK: [[AFFS_0_SIZE_ADDR:%.+]] = getelementptr inbounds nuw %struct.kmp_task_affinity_info_t, ptr [[AFFS_0_ADDR]], i32 0, i32 1
// CHECK: store i64 [[SIZE]], ptr [[AFFS_0_SIZE_ADDR]],
// CHECK: call i32 @__kmpc_omp_reg_task_with_affinity(ptr @{{.+}}, i32 [[GTID]], ptr [[TD]], i32 1, ptr [[AFFINE_LST_ADDR]])
#pragma omp task affinity(([a][10][a])p)
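// The next task combines an iterator affinity item with a scalar one. A rough
// sketch of what the front end conceptually emits for it (illustrative only;
// affs, affs_cnt, loc, gtid, and task are stand-ins, not symbols the compiler
// actually produces):
//
//   size_t num_elems = 1 + (a - 0);            // 1 extra entry for affinity(a)
//   kmp_task_affinity_info_t affs[num_elems];  // VLA on the stack
//   affs[0].base_addr = (intptr_t)&a;
//   affs[0].len = sizeof(a);
//   size_t affs_cnt = 1;
//   for (int cnt = 0; cnt < a - 0; ++cnt) {
//     int i = 0 + cnt;
//     affs[affs_cnt].base_addr = (intptr_t)&p[i];
//     affs[affs_cnt].len = sizeof(p[i]);
//     ++affs_cnt;
//   }
//   __kmpc_omp_reg_task_with_affinity(loc, gtid, task, (int)num_elems, affs);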
// CHECK: [[TD:%.+]] = call ptr @__kmpc_omp_task_alloc(ptr @{{.+}}, i32 [[GTID]], i32 1, i64 40, i64 1, ptr @{{.+}})
// CHECK: [[A_VAL:%.+]] = load i32, ptr [[A_ADDR]],
// CHECK: [[SUB:%.+]] = sub nsw i32 [[A_VAL]], 0
// CHECK: [[CONV:%.+]] = zext i32 [[SUB]] to i64
// <num_elem> = <num_iters> + 1 (the extra entry is the constant affinity(a) item)
// CHECK: [[NUM_ELEMS:%.+]] = add nuw i64 1, [[CONV]]
// CHECK: [[SV:%.+]] = call ptr @llvm.stacksave.p0()
// CHECK: store ptr [[SV]], ptr [[SV_ADDR:%.+]],
// kmp_task_affinity_info_t affs[<num_elem>];
// CHECK: [[AFFS_ADDR:%.+]] = alloca %struct.kmp_task_affinity_info_t, i64 [[NUM_ELEMS]],
// store i64 %21, ptr %__vla_expr0, align 8
// CHECK: [[NAFFS:%.+]] = trunc i64 [[NUM_ELEMS]] to i32
// CHECK: [[AFFS_0_ADDR:%.+]] = getelementptr %struct.kmp_task_affinity_info_t, ptr [[AFFS_ADDR]], i64 0
// CHECK: [[AFFS_0_BASE_ADDR:%.+]] = getelementptr inbounds nuw %struct.kmp_task_affinity_info_t, ptr [[AFFS_0_ADDR]], i32 0, i32 0
// CHECK: [[A_INTPTR:%.+]] = ptrtoint ptr [[A_ADDR]] to i64
// CHECK: store i64 [[A_INTPTR]], ptr [[AFFS_0_BASE_ADDR]],
// affs[0].size = sizeof(a);
// CHECK: [[AFFS_0_SIZE_ADDR:%.+]] = getelementptr inbounds nuw %struct.kmp_task_affinity_info_t, ptr [[AFFS_0_ADDR]], i32 0, i32 1
// CHECK: store i64 4, ptr [[AFFS_0_SIZE_ADDR]],
// CHECK: store i64 1, ptr [[AFFS_CNT_ADDR:%.+]],
// CHECK: [[A_VAL:%.+]] = load i32, ptr [[A_ADDR]],
// CHECK: [[NITERS:%.+]] = sub nsw i32 [[A_VAL]], 0
// CHECK: store i32 0, ptr [[CNT_ADDR:%.+]],
// CHECK: br label %[[CONT:[^,]+]]
// for (int cnt = 0; cnt < (a-0); ++cnt) {
// affs[affs_cnt].base = &p[i];
// affs[affs_cnt].size = sizeof(p[i]);
// CHECK: [[CNT:%.+]] = load i32, ptr [[CNT_ADDR]],
// CHECK: [[CMP:%.+]] = icmp slt i32 [[CNT]], [[NITERS]]
// CHECK: br i1 [[CMP]], label %[[BODY:[^,]+]], label %[[DONE:[^,]+]]
// CHECK: [[CNT:%.+]] = load i32, ptr [[CNT_ADDR]],
// CHECK: [[VAL:%.+]] = add nsw i32 0, [[CNT]]
// CHECK: store i32 [[VAL]], ptr [[I_ADDR:%.+]],
// CHECK: [[P:%.+]] = load ptr, ptr [[P_ADDR]],
// CHECK: [[I:%.+]] = load i32, ptr [[I_ADDR]],
// CHECK: [[IDX:%.+]] = sext i32 [[I]] to i64
// CHECK: [[P_I_ADDR:%.+]] = getelementptr inbounds float, ptr [[P]], i64 [[IDX]]
// CHECK: [[AFFS_CNT:%.+]] = load i64, ptr [[AFFS_CNT_ADDR]],
// CHECK: [[AFFS_ELEM_ADDR:%.+]] = getelementptr %struct.kmp_task_affinity_info_t, ptr [[AFFS_ADDR]], i64 [[AFFS_CNT]]
// affs[affs_cnt].base = &p[i];
// CHECK: [[AFFS_ELEM_BASE_ADDR:%.+]] = getelementptr inbounds nuw %struct.kmp_task_affinity_info_t, ptr [[AFFS_ELEM_ADDR]], i32 0, i32 0
// CHECK: [[CAST:%.+]] = ptrtoint ptr [[P_I_ADDR]] to i64
// CHECK: store i64 [[CAST]], ptr [[AFFS_ELEM_BASE_ADDR]],
// affs[affs_cnt].size = sizeof(p[i]);
// CHECK: [[AFFS_ELEM_SIZE_ADDR:%.+]] = getelementptr inbounds nuw %struct.kmp_task_affinity_info_t, ptr [[AFFS_ELEM_ADDR]], i32 0, i32 1
// CHECK: store i64 4, ptr [[AFFS_ELEM_SIZE_ADDR]],
// CHECK: [[AFFS_CNT_NEXT:%.+]] = add nuw i64 [[AFFS_CNT]], 1
// CHECK: store i64 [[AFFS_CNT_NEXT]], ptr [[AFFS_CNT_ADDR]],
// CHECK: [[CNT:%.+]] = load i32, ptr [[CNT_ADDR]],
// CHECK: [[CNT_NEXT:%.+]] = add nsw i32 [[CNT]], 1
// CHECK: store i32 [[CNT_NEXT]], ptr [[CNT_ADDR]],
// CHECK: br label %[[CONT]]
// CHECK: call i32 @__kmpc_omp_reg_task_with_affinity(ptr @{{.+}}, i32 [[GTID]], ptr [[TD]], i32 [[NAFFS]], ptr [[AFFS_ADDR]])
// CHECK: [[SV:%.+]] = load ptr, ptr [[SV_ADDR]],
// CHECK: call void @llvm.stackrestore.p0(ptr [[SV]])
#pragma omp task affinity(iterator(i=0:a): p[i]) affinity(a)