1 // RUN: mlir-opt %s -pass-pipeline="builtin.module(func.func(convert-scf-to-cf,convert-arith-to-llvm),finalize-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts)" \
2 // RUN: | mlir-cpu-runner -e main -entry-point-result=void \
3 // RUN: -shared-libs=%mlir_runner_utils,%mlir_c_runner_utils \
// External printing helper provided at runtime by the shared runner-utils
// library (see -shared-libs above); takes an unranked f32 memref.
6 func.func private @printMemrefF32(memref<*xf32>) attributes { llvm.emit_c_interface }
8 func.func @main() -> () {
// Index constants used as loop bounds/steps, plus a scalar test value
// stored into the rank-0 memref below.
9 %c0 = arith.constant 0 : index
10 %c1 = arith.constant 1 : index
11 %c42 = arith.constant 42.0 : f32
// Allocate a 2x3 buffer and fill element [i][j] with its row-major linear
// index (i * dim_y + j), i.e. the values 0..5 printed by the CHECKs below.
14 %input = memref.alloc() : memref<2x3xf32>
15 %dim_x = memref.dim %input, %c0 : memref<2x3xf32>
16 %dim_y = memref.dim %input, %c1 : memref<2x3xf32>
17 scf.parallel (%i, %j) = (%c0, %c0) to (%dim_x, %dim_y) step (%c1, %c1) {
18 %prod = arith.muli %i, %dim_y : index
19 %val = arith.addi %prod, %j : index
// No direct index -> f32 conversion; widen to i64 first, then sitofp.
20 %val_i64 = arith.index_cast %val : index to i64
21 %val_f32 = arith.sitofp %val_i64 : i64 to f32
22 memref.store %val_f32, %input[%i, %j] : memref<2x3xf32>
// Print the freshly initialized input to establish the expected contents.
24 %unranked_input = memref.cast %input : memref<2x3xf32> to memref<*xf32>
25 call @printMemrefF32(%unranked_input) : (memref<*xf32>) -> ()
26 // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
27 // CHECK-NEXT: [0, 1, 2]
28 // CHECK-NEXT: [3, 4, 5]
// Copy between two identically laid-out memrefs; the copy must print the
// same contents as the input above.
30 %copy = memref.alloc() : memref<2x3xf32>
31 memref.copy %input, %copy : memref<2x3xf32> to memref<2x3xf32>
32 %unranked_copy = memref.cast %copy : memref<2x3xf32> to memref<*xf32>
33 call @printMemrefF32(%unranked_copy) : (memref<*xf32>) -> ()
34 // CHECK: rank = 2 offset = 0 sizes = [2, 3] strides = [3, 1]
35 // CHECK-NEXT: [0, 1, 2]
36 // CHECK-NEXT: [3, 4, 5]
// Copy into a destination whose strides are permuted via reinterpret_cast
// ([1, 2] instead of the identity [3, 1]), then print the underlying 3x2
// buffer — this exercises the non-contiguous-copy path.
38 %copy_two = memref.alloc() : memref<3x2xf32>
39 %copy_two_casted = memref.reinterpret_cast %copy_two to offset: [0], sizes: [2, 3], strides: [1, 2]
40 : memref<3x2xf32> to memref<2x3xf32, strided<[1, 2], offset: 0>>
41 memref.copy %input, %copy_two_casted : memref<2x3xf32> to memref<2x3xf32, strided<[1, 2], offset: 0>>
42 %unranked_copy_two = memref.cast %copy_two : memref<3x2xf32> to memref<*xf32>
43 call @printMemrefF32(%unranked_copy_two) : (memref<*xf32>) -> ()
44 // CHECK: rank = 2 offset = 0 sizes = [3, 2] strides = [2, 1]
// Zero-element case: one dimension has size 0.
49 %input_empty = memref.alloc() : memref<3x0x1xf32>
50 %copy_empty = memref.alloc() : memref<3x0x1xf32>
51 // Copying an empty shape should do nothing (and should not crash).
52 memref.copy %input_empty, %copy_empty : memref<3x0x1xf32> to memref<3x0x1xf32>
// Same zero-element copy, but with the source viewed through a
// reinterpret_cast that permutes the dims (still zero elements overall).
54 %input_empty_casted = memref.reinterpret_cast %input_empty to offset: [0], sizes: [0, 3, 1], strides: [3, 1, 1]
55 : memref<3x0x1xf32> to memref<0x3x1xf32, strided<[3, 1, 1], offset: 0>>
56 %copy_empty_casted = memref.alloc() : memref<0x3x1xf32>
57 // Copying a casted empty shape should do nothing (and should not crash).
58 memref.copy %input_empty_casted, %copy_empty_casted : memref<0x3x1xf32, strided<[3, 1, 1], offset: 0>> to memref<0x3x1xf32>
// Rank-0 (scalar) memref copy: store 42.0, copy, and print the copy.
60 %scalar = memref.alloc() : memref<f32>
61 memref.store %c42, %scalar[] : memref<f32>
62 %scalar_copy = memref.alloc() : memref<f32>
63 memref.copy %scalar, %scalar_copy : memref<f32> to memref<f32>
64 %unranked_scalar_copy = memref.cast %scalar_copy : memref<f32> to memref<*xf32>
65 call @printMemrefF32(%unranked_scalar_copy) : (memref<*xf32>) -> ()
66 // CHECK: rank = 0 offset = 0 sizes = [] strides = []
// Release every allocation made above (views from memref.cast /
// reinterpret_cast do not own memory and are not deallocated).
69 memref.dealloc %copy_empty : memref<3x0x1xf32>
70 memref.dealloc %copy_empty_casted : memref<0x3x1xf32>
71 memref.dealloc %input_empty : memref<3x0x1xf32>
72 memref.dealloc %copy_two : memref<3x2xf32>
73 memref.dealloc %copy : memref<2x3xf32>
74 memref.dealloc %input : memref<2x3xf32>
75 memref.dealloc %scalar : memref<f32>
76 memref.dealloc %scalar_copy : memref<f32>