; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
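; This test checks that the fneg instruction on half, float, double, and
; <8 x double> operands is lowered to OpFNegate.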
; CHECK-SPIRV: OpFNegate
; CHECK-SPIRV: OpFNegate
; CHECK-SPIRV: OpFNegate
; CHECK-SPIRV: OpFNegate
;; #pragma OPENCL EXTENSION cl_khr_fp64 : enable
;; #pragma OPENCL EXTENSION cl_khr_fp16 : enable

;; __kernel void foo(double a1, __global half *h, __global float *b0, __global double *b1, __global double8 *d) {
;;   *h = -*h;
;;   *b0 = -*b0;
;;   *b1 = -a1;
;;   *d = -*d;
;; }
define dso_local spir_kernel void @foo(double noundef %a1, half addrspace(1)* noundef %h, float addrspace(1)* noundef %b0, double addrspace(1)* noundef %b1, <8 x double> addrspace(1)* noundef %d) {
entry:
  %a1.addr = alloca double, align 8
  %h.addr = alloca half addrspace(1)*, align 4
  %b0.addr = alloca float addrspace(1)*, align 4
  %b1.addr = alloca double addrspace(1)*, align 4
  %d.addr = alloca <8 x double> addrspace(1)*, align 4
  store double %a1, double* %a1.addr, align 8
  store half addrspace(1)* %h, half addrspace(1)** %h.addr, align 4
  store float addrspace(1)* %b0, float addrspace(1)** %b0.addr, align 4
  store double addrspace(1)* %b1, double addrspace(1)** %b1.addr, align 4
  store <8 x double> addrspace(1)* %d, <8 x double> addrspace(1)** %d.addr, align 4
  %0 = load half addrspace(1)*, half addrspace(1)** %h.addr, align 4
  %1 = load half, half addrspace(1)* %0, align 2
  %fneg = fneg half %1
  %2 = load half addrspace(1)*, half addrspace(1)** %h.addr, align 4
  store half %fneg, half addrspace(1)* %2, align 2
  %3 = load float addrspace(1)*, float addrspace(1)** %b0.addr, align 4
  %4 = load float, float addrspace(1)* %3, align 4
  %fneg1 = fneg float %4
  %5 = load float addrspace(1)*, float addrspace(1)** %b0.addr, align 4
  store float %fneg1, float addrspace(1)* %5, align 4
  %6 = load double, double* %a1.addr, align 8
  %fneg2 = fneg double %6
  %7 = load double addrspace(1)*, double addrspace(1)** %b1.addr, align 4
  store double %fneg2, double addrspace(1)* %7, align 8
  %8 = load <8 x double> addrspace(1)*, <8 x double> addrspace(1)** %d.addr, align 4
  %9 = load <8 x double>, <8 x double> addrspace(1)* %8, align 64
  %fneg3 = fneg <8 x double> %9
  %10 = load <8 x double> addrspace(1)*, <8 x double> addrspace(1)** %d.addr, align 4
  store <8 x double> %fneg3, <8 x double> addrspace(1)* %10, align 64
  ret void
}