// RUN: mlir-translate -mlir-to-llvmir -split-input-file %s | FileCheck %s
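
// Check that the `aligned` clause on omp.simd is translated to an
// llvm.assume call with an "align" operand bundle: the base address is
// loaded from the Fortran pointer descriptor and assumed to be aligned
// to 256 bytes.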
//CHECK-LABEL: define void @_QPsimd_aligned_pointer() {
//CHECK: %[[A_PTR:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8 }, i64 1, align 8
//CHECK: %[[A_VAL:.*]] = load ptr, ptr %[[A_PTR]], align 8
//CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %[[A_VAL]], i64 256) ]
llvm.func @_QPsimd_aligned_pointer() {
  %1 = llvm.mlir.constant(1 : i64) : i64
  %2 = llvm.alloca %1 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {bindc_name = "x"} : (i64) -> !llvm.ptr
  %3 = llvm.alloca %1 x i32 {bindc_name = "i", pinned} : (i64) -> !llvm.ptr
  %4 = llvm.mlir.constant(1 : i32) : i32
  %5 = llvm.mlir.constant(10 : i32) : i32
  %6 = llvm.mlir.constant(1 : i32) : i32
  omp.simd aligned(%2 : !llvm.ptr -> 256 : i64) {
    omp.loop_nest (%arg0) : i32 = (%4) to (%5) inclusive step (%6) {
      llvm.store %arg0, %3 : i32, !llvm.ptr
      omp.yield
    }
  }
  llvm.return
}

// -----
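
// Same lowering for a TYPE(C_PTR) variable: the address held by the
// builtin C pointer is loaded and assumed to be 256-byte aligned.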
//CHECK-LABEL: define void @_QPsimd_aligned_cptr() {
//CHECK: %[[A_CPTR:.*]] = alloca %_QM__fortran_builtinsT__builtin_c_ptr, i64 1, align 8
//CHECK: %[[A_VAL:.*]] = load ptr, ptr %[[A_CPTR]], align 8
//CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %[[A_VAL]], i64 256) ]
llvm.func @_QPsimd_aligned_cptr() {
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x !llvm.struct<"_QM__fortran_builtinsT__builtin_c_ptr", (i64)> {bindc_name = "a"} : (i64) -> !llvm.ptr
  %2 = llvm.mlir.constant(1 : i64) : i64
  %3 = llvm.alloca %2 x i32 {bindc_name = "i", pinned} : (i64) -> !llvm.ptr
  %4 = llvm.mlir.constant(1 : i32) : i32
  %5 = llvm.mlir.constant(10 : i32) : i32
  %6 = llvm.mlir.constant(1 : i32) : i32
  omp.simd aligned(%1 : !llvm.ptr -> 256 : i64) {
    omp.loop_nest (%arg0) : i32 = (%4) to (%5) inclusive step (%6) {
      llvm.store %arg0, %3 : i32, !llvm.ptr
      omp.yield
    }
  }
  llvm.return
}

// -----
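
// Same lowering for an allocatable: the base address is loaded from the
// allocatable's descriptor and assumed to be 256-byte aligned.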
//CHECK-LABEL: define void @_QPsimd_aligned_allocatable() {
//CHECK: %[[A_ADDR:.*]] = alloca { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, i64 1, align 8
//CHECK: %[[A_VAL:.*]] = load ptr, ptr %[[A_ADDR]], align 8
//CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %[[A_VAL]], i64 256) ]
llvm.func @_QPsimd_aligned_allocatable() {
  %0 = llvm.mlir.constant(1 : i64) : i64
  %1 = llvm.alloca %0 x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {bindc_name = "a"} : (i64) -> !llvm.ptr
  %2 = llvm.mlir.constant(1 : i32) : i32
  %3 = llvm.mlir.constant(10 : i32) : i32
  %4 = llvm.mlir.constant(1 : i32) : i32
  omp.simd aligned(%1 : !llvm.ptr -> 256 : i64) {
    omp.loop_nest (%arg0) : i32 = (%2) to (%3) inclusive step (%4) {
      omp.yield
    }
  }
  llvm.return
}