1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -S -passes=openmp-opt-cgscc -aa-pipeline=basic-aa -openmp-hide-memory-transfer-latency < %s | FileCheck %s
; AMDGPU-style datalayout; "A5" makes allocas default to addrspace(5), which is
; why every buffer below is allocated in AS(5) and addrspacecast to generic.
4 target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7:8:9"
; Offload region identifier (unused by this data-mapping-only test body).
6 @.__omp_offloading_heavyComputation.region_id = weak constant i8 0
; Map-type flags for the two mapped items (a[0:size] and size).
; 35 = 0x23 — presumably OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_TARGET_PARAM;
; NOTE(review): confirm against libomptarget's map-type flag definitions.
7 @.offload_maptypes. = private unnamed_addr constant [2 x i64] [i64 35, i64 35]
; Source-location descriptor type passed as the first argument to the
; __tgt_target_data_* runtime calls below.
9 %struct.ident_t = type { i32, i32, i32, i32, ptr }
11 @.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
; @0 is the concrete ident_t instance referenced by the mapper calls.
12 @0 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @.str }, align 8
14 ;int heavyComputation(ptr a, unsigned size) {
15 ; int random = rand() % 7;
17 ; //#pragma omp target data map(a[0:size], size)
21 ; __tgt_target_data_begin(..., args, ...)
23 ; #pragma omp target teams
24 ; for (int i = 0; i < size; ++i) {
25 ; a[i] = ++aptr * 3.141624;
30 define dso_local i32 @heavyComputation(ptr %a, i32 %size) {
; The autogenerated FileCheck assertions below encode the expected
; transformation: openmp-opt splits the synchronous
; __tgt_target_data_begin_mapper call into an issue/wait pair and sinks the
; independent srem between them, hiding the host<->device transfer latency.
31 ; CHECK-LABEL: @heavyComputation(
33 ; CHECK-NEXT:    [[SIZE_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
34 ; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x ptr], align 8, addrspace(5)
35 ; CHECK-NEXT:    [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x ptr], align 8, addrspace(5)
36 ; CHECK-NEXT:    [[DOTOFFLOAD_SIZES:%.*]] = alloca [2 x i64], align 8, addrspace(5)
37 ; CHECK-NEXT:    [[HANDLE:%.*]] = alloca [[STRUCT___TGT_ASYNC_INFO:%.*]], align 8, addrspace(5)
38 ; CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(5) [[HANDLE]] to ptr
39 ; CHECK-NEXT:    store i32 [[SIZE:%.*]], ptr addrspace(5) [[SIZE_ADDR]], align 4
40 ; CHECK-NEXT:    [[CALL:%.*]] = tail call i32 (...) @rand()
41 ; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[SIZE]] to i64
42 ; CHECK-NEXT:    [[SHL:%.*]] = shl nuw nsw i64 [[CONV]], 3
43 ; CHECK-NEXT:    store ptr [[A:%.*]], ptr addrspace(5) [[DOTOFFLOAD_BASEPTRS]], align 8
44 ; CHECK-NEXT:    store ptr [[A]], ptr addrspace(5) [[DOTOFFLOAD_PTRS]], align 8
45 ; CHECK-NEXT:    store i64 [[SHL]], ptr addrspace(5) [[DOTOFFLOAD_SIZES]], align 8
46 ; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr inbounds [2 x ptr], ptr addrspace(5) [[DOTOFFLOAD_BASEPTRS]], i64 0, i64 1
47 ; CHECK-NEXT:    store ptr addrspace(5) [[SIZE_ADDR]], ptr addrspace(5) [[GEP0]], align 8
48 ; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds [2 x ptr], ptr addrspace(5) [[DOTOFFLOAD_PTRS]], i64 0, i64 1
49 ; CHECK-NEXT:    store ptr addrspace(5) [[SIZE_ADDR]], ptr addrspace(5) [[GEP1]], align 8
50 ; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds [2 x i64], ptr addrspace(5) [[DOTOFFLOAD_SIZES]], i64 0, i64 1
51 ; CHECK-NEXT:    store i64 4, ptr addrspace(5) [[GEP2]], align 8
52 ; CHECK-NEXT:    [[DOTOFFLOAD_BASEPTRS_FLAT:%.*]] = addrspacecast ptr addrspace(5) [[DOTOFFLOAD_BASEPTRS]] to ptr
53 ; CHECK-NEXT:    [[DOTOFFLOAD_PTRS_FLAT:%.*]] = addrspacecast ptr addrspace(5) [[DOTOFFLOAD_PTRS]] to ptr
54 ; CHECK-NEXT:    [[DOTOFFLOAD_SIZES_FLAT:%.*]] = addrspacecast ptr addrspace(5) [[DOTOFFLOAD_SIZES]] to ptr
55 ; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_issue(ptr @[[GLOB0:[0-9]+]], i64 -1, i32 2, ptr [[DOTOFFLOAD_BASEPTRS_FLAT]], ptr [[DOTOFFLOAD_PTRS_FLAT]], ptr [[DOTOFFLOAD_SIZES_FLAT]], ptr @.offload_maptypes., ptr null, ptr null, ptr [[TMP0]])
56 ; CHECK-NEXT:    [[REM:%.*]] = srem i32 [[CALL]], 7
57 ; CHECK-NEXT:    call void @__tgt_target_data_begin_mapper_wait(i64 -1, ptr [[TMP0]])
58 ; CHECK-NEXT:    call void @__tgt_target_data_end_mapper(ptr @[[GLOB0]], i64 -1, i32 2, ptr nonnull [[DOTOFFLOAD_BASEPTRS_FLAT]], ptr nonnull [[DOTOFFLOAD_PTRS_FLAT]], ptr nonnull [[DOTOFFLOAD_SIZES_FLAT]], ptr @.offload_maptypes., ptr null, ptr null)
59 ; CHECK-NEXT:    ret i32 [[REM]]
; Offload argument arrays (base pointers, pointers, sizes) are built in
; addrspace(5) — the datalayout's default alloca space (A5) — then flattened
; to generic pointers before being handed to the runtime.
62   %size.addr = alloca i32, align 4, addrspace(5)
63   %.offload_baseptrs = alloca [2 x ptr], align 8, addrspace(5)
64   %.offload_ptrs = alloca [2 x ptr], align 8, addrspace(5)
65   %.offload_sizes = alloca [2 x i64], align 8, addrspace(5)
66   store i32 %size, ptr addrspace(5) %size.addr, align 4
67   %call = tail call i32 (...) @rand()
68   %conv = zext i32 %size to i64
; Byte size of the mapped array: size * 8 (size elements of 8 bytes each).
69   %shl = shl nuw nsw i64 %conv, 3
; Slot 0 maps a[0:size]; slot 1 maps the scalar `size` (4 bytes).
70   store ptr %a, ptr addrspace(5) %.offload_baseptrs, align 8
71   store ptr %a, ptr addrspace(5) %.offload_ptrs, align 8
72   store i64 %shl, ptr addrspace(5) %.offload_sizes, align 8
73   %gep0 = getelementptr inbounds [2 x ptr], ptr addrspace(5) %.offload_baseptrs, i64 0, i64 1
74   store ptr addrspace(5) %size.addr, ptr addrspace(5) %gep0, align 8
75   %gep1 = getelementptr inbounds [2 x ptr], ptr addrspace(5) %.offload_ptrs, i64 0, i64 1
76   store ptr addrspace(5) %size.addr, ptr addrspace(5) %gep1, align 8
77   %gep2 = getelementptr inbounds [2 x i64], ptr addrspace(5) %.offload_sizes, i64 0, i64 1
78   store i64 4, ptr addrspace(5) %gep2, align 8
79   %.offload_baseptrs.flat = addrspacecast ptr addrspace(5) %.offload_baseptrs to ptr
80   %.offload_ptrs.flat = addrspacecast ptr addrspace(5) %.offload_ptrs to ptr
81   %.offload_sizes.flat = addrspacecast ptr addrspace(5) %.offload_sizes to ptr
; Synchronous data-begin call: the optimization under test replaces this with
; an issue/wait pair so the srem below can overlap the transfer.
82   call void @__tgt_target_data_begin_mapper(ptr @0, i64 -1, i32 2, ptr nonnull %.offload_baseptrs.flat, ptr nonnull %.offload_ptrs.flat, ptr nonnull %.offload_sizes.flat, ptr @.offload_maptypes., ptr null, ptr null)
; Independent of the mapper call — movable between issue and wait.
83   %rem = srem i32 %call, 7
84   call void @__tgt_target_data_end_mapper(ptr @0, i64 -1, i32 2, ptr nonnull %.offload_baseptrs.flat, ptr nonnull %.offload_ptrs.flat, ptr nonnull %.offload_sizes.flat, ptr @.offload_maptypes., ptr null, ptr null)
; libomptarget mapper runtime entry points invoked by @heavyComputation above.
88 declare void @__tgt_target_data_begin_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)
89 declare void @__tgt_target_data_end_mapper(ptr, i64, i32, ptr, ptr, ptr, ptr, ptr, ptr)
91 declare dso_local i32 @rand(...)
; "openmp" module flag gates openmp-opt; 50 presumably encodes OpenMP 5.0 —
; NOTE(review): confirm against the OpenMPIRBuilder version encoding.
93 !llvm.module.flags = !{!0}
95 !0 = !{i32 7, !"openmp", i32 50}