// mlir/utils/tree-sitter-mlir/test/highlight/attribute.mlir
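// Tree-sitter highlight test for MLIR attributes. The comments containing
// `^` and `<-` are highlight assertions checked by `tree-sitter test`: each
// names the capture expected at the marked column of the nearest
// non-comment line above.
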
func.func @depthwise_conv_1d_nwc_wcm(%input: tensor<1x12x8xf32>, %filter: tensor<3x8x8xf32>)
// <- function.builtin
//        ^ function
//                                   ^ variable.parameter
//                                           ^ type.builtin
//                                                               ^ variable.parameter
//                                                                        ^ type.builtin
  -> tensor<1x10x8x8xf32> {
// ^ operator
//   ^ type.builtin
  %zero = arith.constant 0.000000e+00 : f32
// ^ variable
//        ^ function.builtin
//                       ^ number
//                                      ^ type.builtin
  %init = tensor.empty() : tensor<1x10x8x8xf32>
// ^ variable
//        ^ function.builtin
//                         ^ type.builtin
  %fill = linalg.fill ins(%zero : f32) outs(%init : tensor<1x10x8x8xf32>) -> tensor<1x10x8x8xf32>
// ^ variable
//        ^ function.builtin
//                    ^ keyword
//                        ^ variable
//                                ^ type.builtin
//                                     ^ keyword
  %0 = linalg.depthwise_conv_1d_nwc_wcm {dilations = dense<1> : tensor<1xi64>,
// ^ variable
//     ^ function.builtin
//                                       ^ attribute
//                                                   ^ constant.builtin
    strides = dense<1> : tensor<1xi64>}
//            ^ constant.builtin
    ins(%input, %filter : tensor<1x12x8xf32>, tensor<3x8x8xf32>)
//      ^ variable.parameter
//              ^ variable.parameter
    outs(%fill : tensor<1x10x8x8xf32>) -> tensor<1x10x8x8xf32>
//       ^ variable
  return %0 : tensor<1x10x8x8xf32>
// ^ function.builtin
//       ^ variable
}

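// fastmath attribute variants on arith operations.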
func.func @fastmath(%arg0: f32, %arg1: f32) {
// <- function.builtin
//        ^ function
//                  ^ variable.parameter
//                         ^ type.builtin
//                              ^ variable.parameter
//                                     ^ type.builtin
  %5 = arith.negf %arg0 fastmath<fast> : f32
//     ^ function.builtin
//                      ^ attribute
  %6 = arith.addf %arg0, %arg1 fastmath<none> : f32
//     ^ function.builtin
//                             ^ attribute
  %8 = arith.mulf %arg0, %arg1 fastmath<reassoc,nnan,ninf,nsz,arcp,contract,afn> : f32
//     ^ function.builtin
//                             ^ attribute
  return
// ^ function.builtin
}

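// Affine map attribute aliases, referenced below from linalg.generic indexing_maps.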
#map0 = affine_map<(d0, d1) -> (d0, d1)>
// <- attribute
//      ^ attribute
#map1 = affine_map<(d0, d1) -> (d0)>
// <- attribute
//      ^ attribute
#map2 = affine_map<(d0) -> (d0)>
// <- attribute
//      ^ attribute

func.func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>,
  %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
{
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %0 = tensor.dim %arg0, %c0 : tensor<?xf32>
  %1 = tensor.empty(%0) : tensor<?xf32>
  %2 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]}
//                                      ^ attribute
//                                             ^ attribute
//                                                    ^ attribute
      ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
//    ^ keyword
      outs(%1 : tensor<?xf32>) {
//    ^ keyword
    ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
      %3 = arith.addf %arg3, %arg4 : f32
      linalg.yield %3 : f32
  } -> tensor<?xf32>
  %3 = tensor.dim %arg2, %c1 : tensor<?x?xf32>
  %4 = tensor.empty(%0, %3) : tensor<?x?xf32>
  %5 = linalg.generic {indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]}
//     ^ function.builtin
      ins(%2, %arg2 : tensor<?xf32>, tensor<?x?xf32>)
      outs(%4 : tensor<?x?xf32>){
    ^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
      %6 = arith.mulf %arg5, %arg6 : f32
      linalg.yield %6 : f32
    } -> tensor<?x?xf32>
  return %5 : tensor<?x?xf32>
}

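// The dimensions attribute on linalg.broadcast.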
func.func @broadcast(%input: tensor<8x32xf32>,
                     %init: tensor<8x16x32xf32>) -> tensor<8x16x32xf32> {
  %bcast = linalg.broadcast
//         ^ function.builtin
      ins(%input:tensor<8x32xf32>)
//    ^ keyword
      outs(%init:tensor<8x16x32xf32>)
//    ^ keyword
      dimensions = [1]
//    ^ attribute
  func.return %bcast : tensor<8x16x32xf32>
}