// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
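
// Run the same pipeline again, but with vector transfer ops fully unrolled
// during -convert-vector-to-scf; both lowering paths must print the same output.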
// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-std -convert-vector-to-llvm -convert-memref-to-llvm -convert-std-to-llvm -reconcile-unrealized-casts | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
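
// Read a vector<13xf32> starting at %base, using the identity permutation map.
// Lanes that fall past the end of the memref take the padding value -42.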
func @transfer_read_1d(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %f = vector.transfer_read %A[%base], %fm42
      {permutation_map = affine_map<(d0) -> (d0)>} :
    memref<?xf32>, vector<13xf32>
  vector.print %f: vector<13xf32>
  return
}
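
// Same read, but guarded by a mask: masked-off lanes also produce the padding
// value, as do unmasked lanes that run out of bounds.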
func @transfer_read_mask_1d(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %m = arith.constant dense<[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]> : vector<13xi1>
  %f = vector.transfer_read %A[%base], %fm42, %m : memref<?xf32>, vector<13xf32>
  vector.print %f: vector<13xf32>
  return
}
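
// Read 4 elements with in_bounds = [true], which asserts that no lane goes out
// of bounds, so the lowering does not need to emit out-of-bounds guards.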
func @transfer_read_inbounds_4(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %f = vector.transfer_read %A[%base], %fm42
      {permutation_map = affine_map<(d0) -> (d0)>, in_bounds = [true]} :
    memref<?xf32>, vector<4xf32>
  vector.print %f: vector<4xf32>
  return
}
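
// Masked read of 4 in-bounds elements: masked-off lanes still take the padding
// value -42 even though the underlying memory is valid.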
func @transfer_read_mask_inbounds_4(%A : memref<?xf32>, %base: index) {
  %fm42 = arith.constant -42.0: f32
  %m = arith.constant dense<[0, 1, 0, 1]> : vector<4xi1>
  %f = vector.transfer_read %A[%base], %fm42, %m {in_bounds = [true]}
      : memref<?xf32>, vector<4xf32>
  vector.print %f: vector<4xf32>
  return
}
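
// Write a splat of 0.0 to 4 elements starting at %base; write lanes that fall
// past the end of the buffer are dropped.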
func @transfer_write_1d(%A : memref<?xf32>, %base: index) {
  %f0 = arith.constant 0.0 : f32
  %vf0 = splat %f0 : vector<4xf32>
  vector.transfer_write %vf0, %A[%base]
      {permutation_map = affine_map<(d0) -> (d0)>} :
    vector<4xf32>, memref<?xf32>
  return
}
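
// Entry point: allocates a 5-element buffer, initializes it to [ 0, 1, 2, 3, 4 ],
// then exercises each of the transfer ops above.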
func @entry() {
  %c0 = arith.constant 0: index
  %c1 = arith.constant 1: index
  %c2 = arith.constant 2: index
  %c3 = arith.constant 3: index
  %c4 = arith.constant 4: index
  %c5 = arith.constant 5: index
  %A = memref.alloc(%c5) : memref<?xf32>
  scf.for %i = %c0 to %c5 step %c1 {
    %i32 = arith.index_cast %i : index to i32
    %fi = arith.sitofp %i32 : i32 to f32
    memref.store %fi, %A[%i] : memref<?xf32>
  }
  // On input, memory contains [[ 0, 1, 2, 3, 4, xxx garbage xxx ]].
  // Read shifted by 2 and pad with -42:
  //   ( 2, 3, 4, -42, ..., -42 )
  call @transfer_read_1d(%A, %c2) : (memref<?xf32>, index) -> ()
  // Read with mask and out-of-bounds access.
  call @transfer_read_mask_1d(%A, %c2) : (memref<?xf32>, index) -> ()
  // Write into memory shifted by 3; memory now contains
  // [[ 0, 1, 2, 0, 0, xxx garbage xxx ]].
  call @transfer_write_1d(%A, %c3) : (memref<?xf32>, index) -> ()
  // Read shifted by 0 and pad with -42:
  //   ( 0, 1, 2, 0, 0, -42, ..., -42 )
  call @transfer_read_1d(%A, %c0) : (memref<?xf32>, index) -> ()
  // Read 4 elements in bounds at offset 1, guaranteed not to overflow.
  // Exercises proper alignment.
  call @transfer_read_inbounds_4(%A, %c1) : (memref<?xf32>, index) -> ()
  // Read in bounds with a mask.
  call @transfer_read_mask_inbounds_4(%A, %c1) : (memref<?xf32>, index) -> ()

  memref.dealloc %A : memref<?xf32>
  return
}

// CHECK: ( 2, 3, 4, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( -42, -42, 4, -42, -42, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( 0, 1, 2, 0, 0, -42, -42, -42, -42, -42, -42, -42, -42 )
// CHECK: ( 1, 2, 0, 0 )
// CHECK: ( -42, 2, -42, 0 )
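
// The five CHECK lines correspond, in order, to: the padded read at offset 2,
// the masked out-of-bounds read, the re-read after the write at offset 3, the
// in-bounds read at offset 1, and the masked in-bounds read.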