Revert "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and mallo...
[llvm-project.git] / clang / test / CodeGen / target-addrspace.cpp
blob9adf53611bc2422eb5522c8d9c01d24415731d45
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -emit-llvm \
// RUN:   -fvisibility=hidden -o - %s | FileCheck %s --check-prefix=NVPTX
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -emit-llvm \
// RUN:   -fvisibility=hidden -o - %s | FileCheck %s --check-prefix=AMDGPU
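// This test verifies how Clang lowers pointers qualified with explicit
// [[clang::address_space(N)]] attributes for the NVPTX and AMDGPU targets.
// As the checks below show, AMDGPU allocates local variables in addrspace(5)
// and addrspacecasts them back to the generic address space before use,
// while NVPTX keeps them in the generic address space.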
// NVPTX-LABEL: define hidden void @_Z1fPv(
// NVPTX-SAME: ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
// NVPTX-NEXT: [[ENTRY:.*:]]
// NVPTX-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// NVPTX-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// NVPTX-NEXT: ret void
//
// AMDGPU-LABEL: define hidden void @_Z1fPv(
// AMDGPU-SAME: ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
// AMDGPU-NEXT: [[ENTRY:.*:]]
// AMDGPU-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// AMDGPU-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// AMDGPU-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// AMDGPU-NEXT: ret void
//
void f(void *p) {}
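// An explicit address_space(0) qualifier is lowered to the default (generic)
// address space on both targets, so p1's parameter becomes a plain `ptr` and
// is forwarded to f unchanged.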
// NVPTX-LABEL: define hidden void @_Z2p1Pv(
// NVPTX-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// NVPTX-NEXT: [[ENTRY:.*:]]
// NVPTX-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// NVPTX-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// NVPTX-NEXT: call void @_Z1fPv(ptr noundef [[TMP0]]) #[[ATTR1:[0-9]+]]
// NVPTX-NEXT: ret void
//
// AMDGPU-LABEL: define hidden void @_Z2p1Pv(
// AMDGPU-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// AMDGPU-NEXT: [[ENTRY:.*:]]
// AMDGPU-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// AMDGPU-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// AMDGPU-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// AMDGPU-NEXT: call void @_Z1fPv(ptr noundef [[TMP0]]) #[[ATTR1:[0-9]+]]
// AMDGPU-NEXT: ret void
//
void p1(void [[clang::address_space(0)]] * p) { f(p); }
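// Returning an address_space(3) pointer as a generic void* requires an
// explicit addrspacecast on both targets; the parameter itself is passed and
// stored as ptr addrspace(3) (align 4 on AMDGPU, align 8 on NVPTX). p3 below
// repeats the same pattern.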
// NVPTX-LABEL: define hidden noundef ptr @_Z2p2PU3AS3v(
// NVPTX-SAME: ptr addrspace(3) noundef [[P:%.*]]) #[[ATTR0]] {
// NVPTX-NEXT: [[ENTRY:.*:]]
// NVPTX-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(3), align 8
// NVPTX-NEXT: store ptr addrspace(3) [[P]], ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(3) [[TMP0]] to ptr
// NVPTX-NEXT: ret ptr [[TMP1]]
//
// AMDGPU-LABEL: define hidden noundef ptr @_Z2p2PU3AS3v(
// AMDGPU-SAME: ptr addrspace(3) noundef [[P:%.*]]) #[[ATTR0]] {
// AMDGPU-NEXT: [[ENTRY:.*:]]
// AMDGPU-NEXT: [[RETVAL:%.*]] = alloca ptr, align 8, addrspace(5)
// AMDGPU-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
// AMDGPU-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// AMDGPU-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// AMDGPU-NEXT: store ptr addrspace(3) [[P]], ptr [[P_ADDR_ASCAST]], align 4
// AMDGPU-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[P_ADDR_ASCAST]], align 4
// AMDGPU-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(3) [[TMP0]] to ptr
// AMDGPU-NEXT: ret ptr [[TMP1]]
//
void *p2(void [[clang::address_space(3)]] * p) { return p; }
// NVPTX-LABEL: define hidden noundef ptr @_Z2p3PU3AS3v(
// NVPTX-SAME: ptr addrspace(3) noundef [[P:%.*]]) #[[ATTR0]] {
// NVPTX-NEXT: [[ENTRY:.*:]]
// NVPTX-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(3), align 8
// NVPTX-NEXT: store ptr addrspace(3) [[P]], ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(3) [[TMP0]] to ptr
// NVPTX-NEXT: ret ptr [[TMP1]]
//
// AMDGPU-LABEL: define hidden noundef ptr @_Z2p3PU3AS3v(
// AMDGPU-SAME: ptr addrspace(3) noundef [[P:%.*]]) #[[ATTR0]] {
// AMDGPU-NEXT: [[ENTRY:.*:]]
// AMDGPU-NEXT: [[RETVAL:%.*]] = alloca ptr, align 8, addrspace(5)
// AMDGPU-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
// AMDGPU-NEXT: [[RETVAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RETVAL]] to ptr
// AMDGPU-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// AMDGPU-NEXT: store ptr addrspace(3) [[P]], ptr [[P_ADDR_ASCAST]], align 4
// AMDGPU-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[P_ADDR_ASCAST]], align 4
// AMDGPU-NEXT: [[TMP1:%.*]] = addrspacecast ptr addrspace(3) [[TMP0]] to ptr
// AMDGPU-NEXT: ret ptr [[TMP1]]
//
void *p3(void [[clang::address_space(3)]] * p) { return p; }
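// S is a trivial class; s1, s2, and s3 declare global instances in the
// default address space and in address spaces 1 and 3 respectively.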
struct S {
  S() = default;
  ~S() = default;
  void foo() {}
};

S s1;
S [[clang::address_space(1)]] s2;
S [[clang::address_space(3)]] s3;
template <typename Ty> void foo(Ty *) {}
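// foo is instantiated below with both a generic void* (t1) and an
// address_space(3) void* (t3); the two specializations get distinct mangled
// names (_Z3fooIvEvPT_ and _Z3fooIU3AS3vEvPT_).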
// NVPTX-LABEL: define hidden void @_Z2t1Pv(
// NVPTX-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// NVPTX-NEXT: [[ENTRY:.*:]]
// NVPTX-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8
// NVPTX-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 8
// NVPTX-NEXT: call void @_Z3fooIvEvPT_(ptr noundef [[TMP0]]) #[[ATTR1]]
// NVPTX-NEXT: ret void
//
// AMDGPU-LABEL: define hidden void @_Z2t1Pv(
// AMDGPU-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// AMDGPU-NEXT: [[ENTRY:.*:]]
// AMDGPU-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// AMDGPU-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// AMDGPU-NEXT: store ptr [[P]], ptr [[P_ADDR_ASCAST]], align 8
// AMDGPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR_ASCAST]], align 8
// AMDGPU-NEXT: call void @_Z3fooIvEvPT_(ptr noundef [[TMP0]]) #[[ATTR1]]
// AMDGPU-NEXT: ret void
//
void t1(void *p) { foo(p); }
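// Unlike p2/p3, no conversion is needed here: the addrspace(3) pointer is
// passed straight through to the addrspace(3) instantiation of foo.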
// NVPTX-LABEL: define hidden void @_Z2t3PU3AS3v(
// NVPTX-SAME: ptr addrspace(3) noundef [[P:%.*]]) #[[ATTR0]] {
// NVPTX-NEXT: [[ENTRY:.*:]]
// NVPTX-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(3), align 8
// NVPTX-NEXT: store ptr addrspace(3) [[P]], ptr [[P_ADDR]], align 8
// NVPTX-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[P_ADDR]], align 8
// NVPTX-NEXT: call void @_Z3fooIU3AS3vEvPT_(ptr addrspace(3) noundef [[TMP0]]) #[[ATTR1]]
// NVPTX-NEXT: ret void
//
// AMDGPU-LABEL: define hidden void @_Z2t3PU3AS3v(
// AMDGPU-SAME: ptr addrspace(3) noundef [[P:%.*]]) #[[ATTR0]] {
// AMDGPU-NEXT: [[ENTRY:.*:]]
// AMDGPU-NEXT: [[P_ADDR:%.*]] = alloca ptr addrspace(3), align 4, addrspace(5)
// AMDGPU-NEXT: [[P_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[P_ADDR]] to ptr
// AMDGPU-NEXT: store ptr addrspace(3) [[P]], ptr [[P_ADDR_ASCAST]], align 4
// AMDGPU-NEXT: [[TMP0:%.*]] = load ptr addrspace(3), ptr [[P_ADDR_ASCAST]], align 4
// AMDGPU-NEXT: call void @_Z3fooIU3AS3vEvPT_(ptr addrspace(3) noundef [[TMP0]]) #[[ATTR1]]
// AMDGPU-NEXT: ret void
//
void t3(void [[clang::address_space(3)]] *p) { foo(p); }