//
// Source: llvm-project, clang/test/Driver/linker-wrapper-image.c
// (blob a2a1996f66430900e8ea655e978b6fe49052fa5f; page snapshot taken at
// commit "[Flang] remove whole-archive option for AIX linker (#76039)")
//
// Verifies the wrapped module that clang-linker-wrapper synthesizes for
// embedded OpenMP, CUDA, and HIP offload images, on both ELF and COFF hosts
// (section-bracketing symbols, fatbin wrappers, and registration ctors/dtors).

// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// REQUIRES: amdgpu-registered-target

// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.elf.o

// RUN: clang-offload-packager -o %t.out --image=file=%t.elf.o,kind=openmp,triple=nvptx64-nvidia-cuda,arch=sm_70
// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o \
// RUN: -fembed-offload-object=%t.out
// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=OPENMP,OPENMP-ELF
// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-windows-gnu \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=OPENMP,OPENMP-COFF

// OPENMP-ELF: @__start_omp_offloading_entries = external hidden constant [0 x %struct.__tgt_offload_entry]
// OPENMP-ELF-NEXT: @__stop_omp_offloading_entries = external hidden constant [0 x %struct.__tgt_offload_entry]
// OPENMP-ELF-NEXT: @__dummy.omp_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries"

// OPENMP-COFF: @__start_omp_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries$OA"
// OPENMP-COFF-NEXT: @__stop_omp_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "omp_offloading_entries$OZ"

// OPENMP: @.omp_offloading.device_image = internal unnamed_addr constant [[[SIZE:[0-9]+]] x i8] c"\10\FF\10\AD{{.*}}"
// OPENMP-NEXT: @.omp_offloading.device_images = internal unnamed_addr constant [1 x %__tgt_device_image] [%__tgt_device_image { ptr @.omp_offloading.device_image, ptr getelementptr inbounds ([[[SIZE]] x i8], ptr @.omp_offloading.device_image, i64 1, i64 0), ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }]
// OPENMP-NEXT: @.omp_offloading.descriptor = internal constant %__tgt_bin_desc { i32 1, ptr @.omp_offloading.device_images, ptr @__start_omp_offloading_entries, ptr @__stop_omp_offloading_entries }
// OPENMP-NEXT: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_reg, ptr null }]
// OPENMP-NEXT: @llvm.global_dtors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.omp_offloading.descriptor_unreg, ptr null }]

// OPENMP: define internal void @.omp_offloading.descriptor_reg() section ".text.startup" {
// OPENMP-NEXT: entry:
// OPENMP-NEXT: call void @__tgt_register_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }

// OPENMP: define internal void @.omp_offloading.descriptor_unreg() section ".text.startup" {
// OPENMP-NEXT: entry:
// OPENMP-NEXT: call void @__tgt_unregister_lib(ptr @.omp_offloading.descriptor)
// OPENMP-NEXT: ret void
// OPENMP-NEXT: }

// RUN: clang-offload-packager -o %t.out --image=file=%t.elf.o,kind=cuda,triple=nvptx64-nvidia-cuda,arch=sm_70
// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o \
// RUN: -fembed-offload-object=%t.out
// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=CUDA,CUDA-ELF
// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-windows-gnu \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=CUDA,CUDA-COFF

// CUDA: @.fatbin_image = internal constant [0 x i8] zeroinitializer, section ".nv_fatbin"
// CUDA-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1180844977, i32 1, ptr @.fatbin_image, ptr null }, section ".nvFatBinSegment", align 8
// CUDA-NEXT: @.cuda.binary_handle = internal global ptr null

// CUDA-ELF: @__start_cuda_offloading_entries = external hidden constant [0 x %struct.__tgt_offload_entry]
// CUDA-ELF-NEXT: @__stop_cuda_offloading_entries = external hidden constant [0 x %struct.__tgt_offload_entry]
// CUDA-ELF-NEXT: @__dummy.cuda_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "cuda_offloading_entries"

// CUDA-COFF: @__start_cuda_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "cuda_offloading_entries$OA"
// CUDA-COFF-NEXT: @__stop_cuda_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "cuda_offloading_entries$OZ"

// CUDA: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.cuda.fatbin_reg, ptr null }]

// CUDA: define internal void @.cuda.fatbin_reg() section ".text.startup" {
// CUDA-NEXT: entry:
// CUDA-NEXT: %0 = call ptr @__cudaRegisterFatBinary(ptr @.fatbin_wrapper)
// CUDA-NEXT: store ptr %0, ptr @.cuda.binary_handle, align 8
// CUDA-NEXT: call void @.cuda.globals_reg(ptr %0)
// CUDA-NEXT: call void @__cudaRegisterFatBinaryEnd(ptr %0)
// CUDA-NEXT: %1 = call i32 @atexit(ptr @.cuda.fatbin_unreg)
// CUDA-NEXT: ret void
// CUDA-NEXT: }

// CUDA: define internal void @.cuda.fatbin_unreg() section ".text.startup" {
// CUDA-NEXT: entry:
// CUDA-NEXT: %0 = load ptr, ptr @.cuda.binary_handle, align 8
// CUDA-NEXT: call void @__cudaUnregisterFatBinary(ptr %0)
// CUDA-NEXT: ret void
// CUDA-NEXT: }

// CUDA: define internal void @.cuda.globals_reg(ptr %0) section ".text.startup" {
// CUDA-NEXT: entry:
// CUDA-NEXT: br i1 icmp ne (ptr @__start_cuda_offloading_entries, ptr @__stop_cuda_offloading_entries), label %while.entry, label %while.end

// CUDA: while.entry:
// CUDA-NEXT: %entry1 = phi ptr [ @__start_cuda_offloading_entries, %entry ], [ %11, %if.end ]
// CUDA-NEXT: %1 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 0
// CUDA-NEXT: %addr = load ptr, ptr %1, align 8
// CUDA-NEXT: %2 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 1
// CUDA-NEXT: %name = load ptr, ptr %2, align 8
// CUDA-NEXT: %3 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 2
// CUDA-NEXT: %size = load i64, ptr %3, align 4
// CUDA-NEXT: %4 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 3
// CUDA-NEXT: %flags = load i32, ptr %4, align 4
// CUDA-NEXT: %5 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 4
// CUDA-NEXT: %textype = load i32, ptr %5, align 4
// CUDA-NEXT: %type = and i32 %flags, 7
// CUDA-NEXT: %6 = and i32 %flags, 8
// CUDA-NEXT: %extern = lshr i32 %6, 3
// CUDA-NEXT: %7 = and i32 %flags, 16
// CUDA-NEXT: %constant = lshr i32 %7, 4
// CUDA-NEXT: %8 = and i32 %flags, 32
// CUDA-NEXT: %normalized = lshr i32 %8, 5
// CUDA-NEXT: %9 = icmp eq i64 %size, 0
// CUDA-NEXT: br i1 %9, label %if.then, label %if.else

// CUDA: if.then:
// CUDA-NEXT: %10 = call i32 @__cudaRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
// CUDA-NEXT: br label %if.end

// CUDA: if.else:
// CUDA-NEXT: switch i32 %type, label %if.end [
// CUDA-NEXT: i32 0, label %sw.global
// CUDA-NEXT: i32 1, label %sw.managed
// CUDA-NEXT: i32 2, label %sw.surface
// CUDA-NEXT: i32 3, label %sw.texture
// CUDA-NEXT: ]

// CUDA: sw.global:
// CUDA-NEXT: call void @__cudaRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %extern, i64 %size, i32 %constant, i32 0)
// CUDA-NEXT: br label %if.end

// CUDA: sw.managed:
// CUDA-NEXT: br label %if.end

// CUDA: sw.surface:
// CUDA-NEXT: call void @__cudaRegisterSurface(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %textype, i32 %extern)
// CUDA-NEXT: br label %if.end

// CUDA: sw.texture:
// CUDA-NEXT: call void @__cudaRegisterTexture(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %textype, i32 %normalized, i32 %extern)
// CUDA-NEXT: br label %if.end

// CUDA: if.end:
// CUDA-NEXT: %11 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 1
// CUDA-NEXT: %12 = icmp eq ptr %11, @__stop_cuda_offloading_entries
// CUDA-NEXT: br i1 %12, label %while.end, label %while.entry

// CUDA: while.end:
// CUDA-NEXT: ret void
// CUDA-NEXT: }

// RUN: clang-offload-packager -o %t.out --image=file=%t.elf.o,kind=hip,triple=amdgcn-amd-amdhsa,arch=gfx908
// RUN: %clang -cc1 %s -triple x86_64-unknown-linux-gnu -emit-obj -o %t.o \
// RUN: -fembed-offload-object=%t.out
// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-linux-gnu \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=HIP,HIP-ELF
// RUN: clang-linker-wrapper --print-wrapped-module --dry-run --host-triple=x86_64-unknown-windows-gnu \
// RUN: --linker-path=/usr/bin/ld -- %t.o -o a.out 2>&1 | FileCheck %s --check-prefixes=HIP,HIP-COFF

// HIP: @.fatbin_image = internal constant [0 x i8] zeroinitializer, section ".hip_fatbin"
// HIP-NEXT: @.fatbin_wrapper = internal constant %fatbin_wrapper { i32 1212764230, i32 1, ptr @.fatbin_image, ptr null }, section ".hipFatBinSegment", align 8
// HIP-NEXT: @.hip.binary_handle = internal global ptr null

// HIP-ELF: @__start_hip_offloading_entries = external hidden constant [0 x %struct.__tgt_offload_entry]
// HIP-ELF-NEXT: @__stop_hip_offloading_entries = external hidden constant [0 x %struct.__tgt_offload_entry]
// HIP-ELF-NEXT: @__dummy.hip_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "hip_offloading_entries"

// HIP-COFF: @__start_hip_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "hip_offloading_entries$OA"
// HIP-COFF-NEXT: @__stop_hip_offloading_entries = hidden constant [0 x %struct.__tgt_offload_entry] zeroinitializer, section "hip_offloading_entries$OZ"

// HIP: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @.hip.fatbin_reg, ptr null }]

// HIP: define internal void @.hip.fatbin_reg() section ".text.startup" {
// HIP-NEXT: entry:
// HIP-NEXT: %0 = call ptr @__hipRegisterFatBinary(ptr @.fatbin_wrapper)
// HIP-NEXT: store ptr %0, ptr @.hip.binary_handle, align 8
// HIP-NEXT: call void @.hip.globals_reg(ptr %0)
// HIP-NEXT: %1 = call i32 @atexit(ptr @.hip.fatbin_unreg)
// HIP-NEXT: ret void
// HIP-NEXT: }

// HIP: define internal void @.hip.fatbin_unreg() section ".text.startup" {
// HIP-NEXT: entry:
// HIP-NEXT: %0 = load ptr, ptr @.hip.binary_handle, align 8
// HIP-NEXT: call void @__hipUnregisterFatBinary(ptr %0)
// HIP-NEXT: ret void
// HIP-NEXT: }

// HIP: define internal void @.hip.globals_reg(ptr %0) section ".text.startup" {
// HIP-NEXT: entry:
// HIP-NEXT: br i1 icmp ne (ptr @__start_hip_offloading_entries, ptr @__stop_hip_offloading_entries), label %while.entry, label %while.end

// HIP: while.entry:
// HIP-NEXT: %entry1 = phi ptr [ @__start_hip_offloading_entries, %entry ], [ %11, %if.end ]
// HIP-NEXT: %1 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 0
// HIP-NEXT: %addr = load ptr, ptr %1, align 8
// HIP-NEXT: %2 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 1
// HIP-NEXT: %name = load ptr, ptr %2, align 8
// HIP-NEXT: %3 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 2
// HIP-NEXT: %size = load i64, ptr %3, align 4
// HIP-NEXT: %4 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 3
// HIP-NEXT: %flags = load i32, ptr %4, align 4
// HIP-NEXT: %5 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 0, i32 4
// HIP-NEXT: %textype = load i32, ptr %5, align 4
// HIP-NEXT: %type = and i32 %flags, 7
// HIP-NEXT: %6 = and i32 %flags, 8
// HIP-NEXT: %extern = lshr i32 %6, 3
// HIP-NEXT: %7 = and i32 %flags, 16
// HIP-NEXT: %constant = lshr i32 %7, 4
// HIP-NEXT: %8 = and i32 %flags, 32
// HIP-NEXT: %normalized = lshr i32 %8, 5
// HIP-NEXT: %9 = icmp eq i64 %size, 0
// HIP-NEXT: br i1 %9, label %if.then, label %if.else

// HIP: if.then:
// HIP-NEXT: %10 = call i32 @__hipRegisterFunction(ptr %0, ptr %addr, ptr %name, ptr %name, i32 -1, ptr null, ptr null, ptr null, ptr null, ptr null)
// HIP-NEXT: br label %if.end

// HIP: if.else:
// HIP-NEXT: switch i32 %type, label %if.end [
// HIP-NEXT: i32 0, label %sw.global
// HIP-NEXT: i32 1, label %sw.managed
// HIP-NEXT: i32 2, label %sw.surface
// HIP-NEXT: i32 3, label %sw.texture
// HIP-NEXT: ]

// HIP: sw.global:
// HIP-NEXT: call void @__hipRegisterVar(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %extern, i64 %size, i32 %constant, i32 0)
// HIP-NEXT: br label %if.end

// HIP: sw.managed:
// HIP-NEXT: br label %if.end

// HIP: sw.surface:
// HIP-NEXT: call void @__hipRegisterSurface(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %textype, i32 %extern)
// HIP-NEXT: br label %if.end

// HIP: sw.texture:
// HIP-NEXT: call void @__hipRegisterTexture(ptr %0, ptr %addr, ptr %name, ptr %name, i32 %textype, i32 %normalized, i32 %extern)
// HIP-NEXT: br label %if.end

// HIP: if.end:
// HIP-NEXT: %11 = getelementptr inbounds %struct.__tgt_offload_entry, ptr %entry1, i64 1
// HIP-NEXT: %12 = icmp eq ptr %11, @__stop_hip_offloading_entries
// HIP-NEXT: br i1 %12, label %while.end, label %while.entry

// HIP: while.end:
// HIP-NEXT: ret void
// HIP-NEXT: }