// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_ib.h"
#include "counters.h"
#include "devx.h"
#include "fs.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT,
	MATCH_CRITERIA_ENABLE_MISC2_BIT
};

#define HEADER_IS_ZERO(match_criteria, headers)				   \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

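/*
 * Build the match_criteria_enable bitmap for an FTE: a header block
 * (outer, misc, inner, misc2) is enabled iff its mask contains at least
 * one non-zero byte, which HEADER_IS_ZERO detects via memchr_inv().
 */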
static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
		MATCH_CRITERIA_ENABLE_MISC2_BIT;

	return match_criteria_enable;
}

static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	u8 entry_mask;
	u8 entry_val;
	int err = 0;

	if (!mask)
		goto out;

	entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
			      ip_protocol);
	entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
			     ip_protocol);
	if (!entry_mask) {
		MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
		MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
		goto out;
	}
	/* Don't override existing ip protocol */
	if (mask != entry_mask || val != entry_val)
		err = -EINVAL;
out:
	return err;
}

static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
			   bool inner)
{
	if (inner) {
		MLX5_SET(fte_match_set_misc,
			 misc_c, inner_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, inner_ipv6_flow_label, val);
	} else {
		MLX5_SET(fte_match_set_misc,
			 misc_c, outer_ipv6_flow_label, mask);
		MLX5_SET(fte_match_set_misc,
			 misc_v, outer_ipv6_flow_label, val);
	}
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
{
	if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
		return -EOPNOTSUPP;

	if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
	    !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
		return -EOPNOTSUPP;

	return 0;
}

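/*
 * Per-spec "last supported field" markers. FIELDS_NOT_SUPPORTED() below
 * returns non-zero when the user supplied a mask byte past the named
 * field, i.e. asked to match on something this driver does not support.
 */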
#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD tos
#define LAST_IPV6_FIELD traffic_class
#define LAST_TCP_UDP_FIELD src_port
#define LAST_TUNNEL_FIELD tunnel_id
#define LAST_FLOW_TAG_FIELD tag_id
#define LAST_DROP_FIELD size
#define LAST_COUNTERS_FIELD counters

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)                            \
	memchr_inv((void *)&filter.field + sizeof(filter.field), 0,    \
		   sizeof(filter) - offsetofend(typeof(filter), field))

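/*
 * Translate an mlx5_ib flow action (ESP crypto, or a raw sub-type such
 * as modify-header, decap or packet-reformat) into mlx5_flow_act bits,
 * rejecting duplicate actions of the same kind on a single flow.
 */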
int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action)
{
	switch (maction->ib_action.type) {
	case IB_FLOW_ACTION_ESP:
		if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				      MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
			return -EINVAL;
		/* Currently only AES_GCM keymat is supported by the driver */
		action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
		action->action |= is_egress ?
			MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
			MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
		return 0;
	case IB_FLOW_ACTION_UNSPECIFIED:
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			action->modify_hdr =
				maction->flow_action_raw.modify_hdr;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_DECAP) {
			if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
				return -EINVAL;
			action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			return 0;
		}
		if (maction->flow_action_raw.sub_type ==
		    MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
			if (action->action &
			    MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
				return -EINVAL;
			action->action |=
				MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			action->pkt_reformat =
				maction->flow_action_raw.pkt_reformat;
			return 0;
		}
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

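/*
 * Translate a single ib_flow_spec into the device's fte_match_param
 * layout: mask bits go to spec->match_criteria, values to
 * spec->match_value. prev_type is needed for MPLS, whose location in
 * the match space depends on the preceding protocol (UDP vs GRE).
 */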
static int parse_flow_attr(struct mlx5_core_dev *mdev,
			   struct mlx5_flow_spec *spec,
			   const union ib_flow_spec *ib_spec,
			   const struct ib_flow_attr *flow_attr,
			   struct mlx5_flow_act *action, u32 prev_type)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	u32 *match_c = spec->match_criteria;
	u32 *match_v = spec->match_value;
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);
	void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
					    misc_parameters_2);
	void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
					    misc_parameters_2);
	void *headers_c;
	void *headers_v;
	int match_ipv;
	int ret;

	if (ib_spec->type & IB_FLOW_SPEC_INNER) {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 inner_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version);
	} else {
		headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					 outer_headers);
		match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	}

	switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -EOPNOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV4_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IP);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv4.mask.proto,
			      ib_spec->ipv4.val.proto))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -EOPNOTSUPP;

		if (match_ipv) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ip_version, 0xf);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ip_version, MLX5_FS_IPV6_VERSION);
		} else {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 ethertype, 0xffff);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 ethertype, ETH_P_IPV6);
		}

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(headers_c, headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		if (set_proto(headers_c, headers_v,
			      ib_spec->ipv6.mask.next_hdr,
			      ib_spec->ipv6.val.next_hdr))
			return -EINVAL;

		set_flow_label(misc_params_c, misc_params_v,
			       ntohl(ib_spec->ipv6.mask.flow_label),
			       ntohl(ib_spec->ipv6.val.flow_label),
			       ib_spec->type & IB_FLOW_SPEC_INNER);
		break;
	case IB_FLOW_SPEC_ESP:
		if (ib_spec->esp.mask.seq)
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
			 ntohl(ib_spec->esp.mask.spi));
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
			 ntohl(ib_spec->esp.val.spi));
		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_GRE:
		if (ib_spec->gre.mask.c_ks_res0_ver)
			return -EOPNOTSUPP;

		if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_GRE);

		MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
			 ntohs(ib_spec->gre.mask.protocol));
		MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
			 ntohs(ib_spec->gre.val.protocol));

		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.mask.key,
		       sizeof(ib_spec->gre.mask.key));
		memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
				    gre_key.nvgre.hi),
		       &ib_spec->gre.val.key,
		       sizeof(ib_spec->gre.val.key));
		break;
	case IB_FLOW_SPEC_MPLS:
		switch (prev_type) {
		case IB_FLOW_SPEC_UDP:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_udp),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_udp),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		case IB_FLOW_SPEC_GRE:
			if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
						   ft_field_support.outer_first_mpls_over_gre),
						   &ib_spec->mpls.mask.tag))
				return -EOPNOTSUPP;

			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.val.tag,
			       sizeof(ib_spec->mpls.val.tag));
			memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
					    outer_first_mpls_over_gre),
			       &ib_spec->mpls.mask.tag,
			       sizeof(ib_spec->mpls.mask.tag));
			break;
		default:
			if (ib_spec->type & IB_FLOW_SPEC_INNER) {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.inner_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    inner_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    inner_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			} else {
				if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
							   ft_field_support.outer_first_mpls),
							   &ib_spec->mpls.mask.tag))
					return -EOPNOTSUPP;

				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
						    outer_first_mpls),
				       &ib_spec->mpls.val.tag,
				       sizeof(ib_spec->mpls.val.tag));
				memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
						    outer_first_mpls),
				       &ib_spec->mpls.mask.tag,
				       sizeof(ib_spec->mpls.mask.tag));
			}
		}
		break;
	case IB_FLOW_SPEC_VXLAN_TUNNEL:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
					 LAST_TUNNEL_FIELD))
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
			 ntohl(ib_spec->tunnel.mask.tunnel_id));
		MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
			 ntohl(ib_spec->tunnel.val.tunnel_id));
		break;
	case IB_FLOW_SPEC_ACTION_TAG:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
					 LAST_FLOW_TAG_FIELD))
			return -EOPNOTSUPP;
		if (ib_spec->flow_tag.tag_id >= BIT(24))
			return -EINVAL;

		flow_context->flow_tag = ib_spec->flow_tag.tag_id;
		flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
		break;
	case IB_FLOW_SPEC_ACTION_DROP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
					 LAST_DROP_FIELD))
			return -EOPNOTSUPP;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
		break;
	case IB_FLOW_SPEC_ACTION_HANDLE:
		ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
			flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
		if (ret)
			return ret;
		break;
	case IB_FLOW_SPEC_ACTION_COUNT:
		if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
					 LAST_COUNTERS_FIELD))
			return -EOPNOTSUPP;

		/* for now support only one counters spec per flow */
		if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
			return -EINVAL;

		action->counters = ib_spec->flow_count.counters;
		action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->num_of_specs < 1)
		return false;

	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
		struct ib_flow_spec_ipv4 *ipv4_spec;

		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
			return true;

		return false;
	}

	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
		struct ib_flow_spec_eth *eth_spec;

		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
		       is_multicast_ether_addr(eth_spec->val.dst_mac);
	}

	return false;
}

enum valid_spec {
	VALID_SPEC_INVALID,
	VALID_SPEC_VALID,
	VALID_SPEC_NA,
};

static enum valid_spec
is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
		     const struct mlx5_flow_spec *spec,
		     const struct mlx5_flow_act *flow_act,
		     bool egress)
{
	const u32 *match_c = spec->match_criteria;
	bool is_crypto =
		(flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				     MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
	bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
	bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;

	/*
	 * Currently only crypto is supported in egress; until regular
	 * egress rules are supported, always return VALID_SPEC_NA for
	 * non-crypto specs.
	 */
	if (!is_crypto)
		return VALID_SPEC_NA;

	return is_crypto && is_ipsec &&
		(!egress || (!is_drop &&
			     !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
		VALID_SPEC_VALID : VALID_SPEC_INVALID;
}

static bool is_valid_spec(struct mlx5_core_dev *mdev,
			  const struct mlx5_flow_spec *spec,
			  const struct mlx5_flow_act *flow_act,
			  bool egress)
{
	/* We currently only support IPsec egress flows */
	return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) !=
	       VALID_SPEC_INVALID;
}

static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.inner_ip_version) :
			MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					ft_field_support.outer_ip_version);
	int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
	bool ipv4_spec_valid, ipv6_spec_valid;
	unsigned int ip_spec_type = 0;
	bool has_ethertype = false;
	unsigned int spec_index;
	bool mask_valid = true;
	u16 eth_type = 0;
	bool type_valid;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
		    ib_spec->eth.mask.ether_type) {
			mask_valid = (ib_spec->eth.mask.ether_type ==
				      htons(0xffff));
			has_ethertype = true;
			eth_type = ntohs(ib_spec->eth.val.ether_type);
		} else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
			   (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
			ip_spec_type = ib_spec->type;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}

	type_valid = (!has_ethertype) || (!ip_spec_type);
	if (!type_valid && mask_valid) {
		ipv4_spec_valid = (eth_type == ETH_P_IP) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
		ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
			(ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));

		type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
			     (((eth_type == ETH_P_MPLS_UC) ||
			       (eth_type == ETH_P_MPLS_MC)) && match_ipv);
	}

	return type_valid;
}

static bool is_valid_attr(struct mlx5_core_dev *mdev,
			  const struct ib_flow_attr *flow_attr)
{
	return is_valid_ethertype(mdev, flow_attr, false) &&
	       is_valid_ethertype(mdev, flow_attr, true);
}

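/*
 * Flow tables are reference counted per priority; the table itself is
 * destroyed only when the last rule pointing at it has been removed.
 */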
static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;
	struct mlx5_ib_dev *dev = handler->dev;

	mutex_lock(&dev->flow_db->lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rules(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rules(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mlx5_ib_counters_clear_description(handler->ibcounters);
	mutex_unlock(&dev->flow_db->lock);
	if (handler->flow_matcher)
		atomic_dec(&handler->flow_matcher->usecnt);
	kfree(handler);

	return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES	 6
#define MLX5_FS_MAX_ENTRIES	 BIT(16)

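/*
 * Lazily create the flow table for a priority slot as an auto-grouped
 * table, so flow groups are managed by the flow-steering core based on
 * the match criteria of the rules that get inserted.
 */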
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
					   struct mlx5_ib_flow_prio *prio,
					   int priority,
					   int num_entries, int num_groups,
					   u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.prio = priority;
	ft_attr.max_fte = num_entries;
	ft_attr.flags = flags;
	ft_attr.autogroup.max_num_groups = num_groups;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ERR_CAST(ft);

	prio->flow_table = ft;
	prio->refcount = 0;
	return prio;
}

static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	enum mlx5_flow_namespace_type fn_type;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int max_table_size;
	int num_entries;
	int num_groups;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						       log_max_ft_size));
	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		if (flow_is_multicast_only(flow_attr) && !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		if (ft_type == MLX5_IB_FT_RX) {
			fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
			prio = &dev->flow_db->prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
						      reformat_l3_tunnel_to_l2))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		} else {
			max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(
				dev->mdev, log_max_ft_size));
			fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
			prio = &dev->flow_db->egress_prios[priority];
			if (!dev->is_rep && !esw_encap &&
			    MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
				flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		}
		ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		break;
	case IB_FLOW_ATTR_ALL_DEFAULT:
	case IB_FLOW_ATTR_MC_DEFAULT:
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority, &num_entries, &num_groups);
		prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
		break;
	case IB_FLOW_ATTR_SNIFFER:
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-EOPNOTSUPP);

		ns = mlx5_get_flow_namespace(
			dev->mdev, ft_type == MLX5_IB_FT_RX ?
					   MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					   MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db->sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
		break;
	default:
		break;
	}

	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	max_table_size = min_t(int, num_entries, max_table_size);

	ft = prio->flow_table;
	if (!ft)
		return _get_prio(ns, prio, priority, max_table_size, num_groups,
				 flags);

	return prio;
}

static void set_underlay_qp(struct mlx5_ib_dev *dev,
			    struct mlx5_flow_spec *spec,
			    u32 underlay_qpn)
{
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
					   spec->match_criteria,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					   misc_parameters);

	if (underlay_qpn &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
				      ft_field_support.bth_dst_qp)) {
		MLX5_SET(fte_match_set_misc,
			 misc_params_v, bth_dst_qp, underlay_qpn);
		MLX5_SET(fte_match_set_misc,
			 misc_params_c, bth_dst_qp, 0xffffff);
	}
}

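/*
 * For switchdev (representor) devices every rule must also match on the
 * source vport: either via metadata register C0 when vport metadata
 * matching is enabled, or via the misc source_port field otherwise.
 */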
static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
					 struct mlx5_flow_spec *spec,
					 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);

		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw,
								   rep->vport));
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);

		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);

		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	}
}

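/*
 * Core rule-creation path for the verbs flow API: parse all specs,
 * attach the optional counter destination, derive the action bits
 * (drop/allow/fwd) and insert the rule into the priority's flow table.
 */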
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
						      struct mlx5_ib_flow_prio *ft_prio,
						      const struct ib_flow_attr *flow_attr,
						      struct mlx5_flow_destination *dst,
						      u32 underlay_qpn,
						      struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_flow_table	*ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_destination dest_arr[2] = {};
	struct mlx5_flow_destination *rule_dst = dest_arr;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 prev_type = 0;
	int err = 0;
	int dest_num = 0;
	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;

	if (!is_valid_attr(dev->mdev, flow_attr))
		return ERR_PTR(-EINVAL);

	if (dev->is_rep && is_egress)
		return ERR_PTR(-EINVAL);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(dev->mdev, spec,
				      ib_flow, flow_attr, &flow_act,
				      prev_type);
		if (err < 0)
			goto free;

		prev_type = ((union ib_flow_spec *)ib_flow)->type;
		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
		memcpy(&dest_arr[0], dst, sizeof(*dst));
		dest_num++;
	}

	if (!flow_is_multicast_only(flow_attr))
		set_underlay_qp(dev, spec, underlay_qpn);

	if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) {
		struct mlx5_eswitch_rep *rep;

		rep = dev->port[flow_attr->port - 1].rep;
		if (!rep) {
			err = -EINVAL;
			goto free;
		}

		mlx5_ib_set_rule_source_port(dev, spec, rep);
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);

	if (is_egress &&
	    !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
		err = -EINVAL;
		goto free;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		struct mlx5_ib_mcounters *mcounters;

		err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd);
		if (err)
			goto free;

		mcounters = to_mcounters(flow_act.counters);
		handler->ibcounters = flow_act.counters;
		dest_arr[dest_num].type =
			MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest_arr[dest_num].counter_id =
			mlx5_fc_id(mcounters->hw_cntrs_hndl);
		dest_num++;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		if (!dest_num)
			rule_dst = NULL;
	} else {
		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
			flow_act.action |=
				MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		if (is_egress)
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		else if (dest_num)
			flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
			     spec->flow_context.flow_tag, flow_attr->type);
		err = -EINVAL;
		goto free;
	}

	handler->rule = mlx5_add_flow_rules(ft, spec,
					    &flow_act,
					    rule_dst, dest_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;
	handler->dev = dev;

	ft_prio->flow_table = ft;
free:
	if (err && handler) {
		mlx5_ib_counters_clear_description(handler->ibcounters);
		kfree(handler);
	}
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
}

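/*
 * Leftovers rules catch multicast/unicast traffic that matched nothing
 * else: create_leftovers_rule() below installs a multicast rule and,
 * for IB_FLOW_ATTR_ALL_DEFAULT, a linked unicast rule as well.
 */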
enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};

static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val =  {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			mlx5_del_flow_rules(handler->rule);
			ft_prio->refcount--;
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}

static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
							struct mlx5_ib_flow_prio *ft_rx,
							struct mlx5_ib_flow_prio *ft_tx,
							struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_rx;
	struct mlx5_ib_flow_handler *handler_tx;
	int err;
	static const struct ib_flow_attr flow_attr  = {
		.num_of_specs = 0,
		.type = IB_FLOW_ATTR_SNIFFER,
		.size = sizeof(flow_attr)
	};

	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
	if (IS_ERR(handler_rx)) {
		err = PTR_ERR(handler_rx);
		goto err;
	}

	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
	if (IS_ERR(handler_tx)) {
		err = PTR_ERR(handler_tx);
		goto err_tx;
	}

	list_add(&handler_tx->list, &handler_rx->list);

	return handler_rx;

err_tx:
	mlx5_del_flow_rules(handler_rx->rule);
	ft_rx->refcount--;
	kfree(handler_rx);
err:
	return ERR_PTR(err);
}

static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
	struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
	size_t min_ucmd_sz, required_ucmd_sz;
	int err;
	int underlay_qpn;

	if (udata && udata->inlen) {
		min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
		if (udata->inlen < min_ucmd_sz)
			return ERR_PTR(-EOPNOTSUPP);

		err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
		if (err)
			return ERR_PTR(err);

		/* currently supports only one counters data */
		if (ucmd_hdr.ncounters_data > 1)
			return ERR_PTR(-EINVAL);

		required_ucmd_sz = min_ucmd_sz +
			sizeof(struct mlx5_ib_flow_counters_data) *
			ucmd_hdr.ncounters_data;
		if (udata->inlen > required_ucmd_sz &&
		    !ib_is_udata_cleared(udata, required_ucmd_sz,
					 udata->inlen - required_ucmd_sz))
			return ERR_PTR(-EOPNOTSUPP);

		ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
		if (!ucmd)
			return ERR_PTR(-ENOMEM);

		err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
		if (err)
			goto free_ucmd;
	}

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	if (flow_attr->port > dev->num_ports ||
	    (flow_attr->flags &
	     ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) {
		err = -EINVAL;
		goto free_ucmd;
	}

	if (is_egress &&
	    (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
	     flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
		err = -EINVAL;
		goto free_ucmd;
	}

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst) {
		err = -ENOMEM;
		goto free_ucmd;
	}

	mutex_lock(&dev->flow_db->lock);

	ft_prio = get_flow_table(dev, flow_attr,
				 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}
	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
		if (IS_ERR(ft_prio_tx)) {
			err = PTR_ERR(ft_prio_tx);
			ft_prio_tx = NULL;
			goto destroy_ft;
		}
	}

	if (is_egress) {
		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
	} else {
		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		if (mqp->is_rss)
			dst->tir_num = mqp->rss_qp.tirn;
		else
			dst->tir_num = mqp->raw_packet_qp.rq.tirn;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
				       mqp->underlay_qpn :
				       0;
		handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
					    underlay_qpn, ucmd);
		break;
	case IB_FLOW_ATTR_ALL_DEFAULT:
	case IB_FLOW_ATTR_MC_DEFAULT:
		handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst);
		break;
	case IB_FLOW_ATTR_SNIFFER:
		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
		break;
	default:
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);
	kfree(ucmd);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
	if (ft_prio_tx)
		put_flow_table(dev, ft_prio_tx, false);
unlock:
	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);
free_ucmd:
	kfree(ucmd);
	return ERR_PTR(err);
}

static struct mlx5_ib_flow_prio *
_get_flow_table(struct mlx5_ib_dev *dev,
		struct mlx5_ib_flow_matcher *fs_matcher,
		bool mcast)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio = NULL;
	int max_table_size = 0;
	bool esw_encap;
	u32 flags = 0;
	int priority;

	if (mcast)
		priority = MLX5_IB_FLOW_MCAST_PRIO;
	else
		priority = ib_prio_to_core_prio(fs_matcher->priority, false);

	esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
	switch (fs_matcher->ns_type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					      reformat_l3_tunnel_to_l2) &&
		    !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) &&
		    !esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		max_table_size = BIT(
			MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev,
					       reformat_l3_tunnel_to_l2) &&
		    esw_encap)
			flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
		priority = FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
		priority = fs_matcher->priority;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_table_size = BIT(
			MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
		priority = fs_matcher->priority;
		break;
	default:
		break;
	}

	max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);

	ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	switch (fs_matcher->ns_type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
		prio = &dev->flow_db->prios[priority];
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		prio = &dev->flow_db->egress_prios[priority];
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		prio = &dev->flow_db->fdb;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		prio = &dev->flow_db->rdma_rx[priority];
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		prio = &dev->flow_db->rdma_tx[priority];
		break;
	default: return ERR_PTR(-EINVAL);
	}

	if (!prio)
		return ERR_PTR(-EINVAL);

	if (prio->flow_table)
		return prio;

	return _get_prio(ns, prio, priority, max_table_size,
			 MLX5_FS_MAX_TYPES, flags);
}

static struct mlx5_ib_flow_handler *
_create_raw_flow_rule(struct mlx5_ib_dev *dev,
		      struct mlx5_ib_flow_prio *ft_prio,
		      struct mlx5_flow_destination *dst,
		      struct mlx5_ib_flow_matcher  *fs_matcher,
		      struct mlx5_flow_context *flow_context,
		      struct mlx5_flow_act *flow_act,
		      void *cmd_in, int inlen,
		      int dst_num)
{
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	memcpy(spec->match_value, cmd_in, inlen);
	memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
	       fs_matcher->mask_len);
	spec->match_criteria_enable = fs_matcher->match_criteria_enable;
	spec->flow_context = *flow_context;

	handler->rule = mlx5_add_flow_rules(ft, spec,
					    flow_act, dst, dst_num);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;
	handler->dev = dev;

	ft_prio->flow_table = ft;

free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}

static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
				void *match_v)
{
	void *match_c;
	void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
	void *dmac, *dmac_mask;
	void *ipv4, *ipv4_mask;

	if (!(fs_matcher->match_criteria_enable &
	      (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
		return false;

	match_c = fs_matcher->matcher_mask.match_params;
	match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
					   outer_headers);
	match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);

	dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
			    dmac_47_16);
	dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
				 dmac_47_16);

	if (is_multicast_ether_addr(dmac) &&
	    is_multicast_ether_addr(dmac_mask))
		return true;

	ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
			    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

	ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

	if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
	    ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
		return true;

	return false;
}

static struct mlx5_ib_flow_handler *raw_fs_rule_add(
	struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
	struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
	u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
{
	struct mlx5_flow_destination *dst;
	struct mlx5_ib_flow_prio *ft_prio;
	struct mlx5_ib_flow_handler *handler;
	int dst_num = 0;
	bool mcast;
	int err;

	if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
		return ERR_PTR(-EOPNOTSUPP);

	if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOMEM);

	dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
	mutex_lock(&dev->flow_db->lock);

	ft_prio = _get_flow_table(dev, fs_matcher, mcast);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}

	switch (dest_type) {
	case MLX5_FLOW_DESTINATION_TYPE_TIR:
		dst[dst_num].type = dest_type;
		dst[dst_num++].tir_num = dest_id;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
		dst[dst_num++].ft_num = dest_id;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case MLX5_FLOW_DESTINATION_TYPE_PORT:
		dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		break;
	}

	if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dst[dst_num].counter_id = counter_id;
		dst_num++;
	}

	handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
					flow_context, flow_act,
					cmd_in, inlen, dst_num);

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		goto destroy_ft;
	}

	mutex_unlock(&dev->flow_db->lock);
	atomic_inc(&fs_matcher->usecnt);
	handler->flow_matcher = fs_matcher;

	kfree(dst);

	return handler;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
unlock:
	mutex_unlock(&dev->flow_db->lock);
	kfree(dst);

	return ERR_PTR(err);
}

static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
{
	u32 flags = 0;

	if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
		flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;

	return flags;
}

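/*
 * IPsec ESP flow actions: the user-supplied AES-GCM keymat is translated
 * into an mlx5 accel xfrm context. Only a narrow subset of the uverbs
 * ESP attributes is accepted; everything else returns -EOPNOTSUPP.
 */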
#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED	\
	MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
static struct ib_flow_action *
mlx5_ib_create_flow_action_esp(struct ib_device *device,
			       const struct ib_flow_action_attrs_esp *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dev *mdev = to_mdev(device);
	struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
	struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
	struct mlx5_ib_flow_action *action;
	u64 action_flags;
	u64 flags;
	int err = 0;

	err = uverbs_get_flags64(
		&action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
		((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
	if (err)
		return ERR_PTR(err);

	flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);

	/* We currently only support a subset of the standard features. Only a
	 * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
	 * (with overlap). Full offload mode isn't supported.
	 */
	if (!attr->keymat || attr->replay || attr->encap ||
	    attr->spi || attr->seq || attr->tfc_pad ||
	    attr->hard_limit_pkts ||
	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->keymat->protocol !=
	    IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
		return ERR_PTR(-EOPNOTSUPP);

	aes_gcm = &attr->keymat->keymat.aes_gcm;

	if (aes_gcm->icv_len != 16 ||
	    aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
		return ERR_PTR(-EOPNOTSUPP);

	action = kmalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return ERR_PTR(-ENOMEM);

	action->esp_aes_gcm.ib_flags = attr->flags;
	memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
	       sizeof(accel_attrs.keymat.aes_gcm.aes_key));
	accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
	memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
	       sizeof(accel_attrs.keymat.aes_gcm.salt));
	memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
	       sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
	accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
	accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
	accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

	accel_attrs.esn = attr->esn;
	if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
		accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;

	action->esp_aes_gcm.ctx =
		mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
	if (IS_ERR(action->esp_aes_gcm.ctx)) {
		err = PTR_ERR(action->esp_aes_gcm.ctx);
		goto err_parse;
	}

	action->esp_aes_gcm.ib_flags = attr->flags;

	return &action->ib_action;

err_parse:
	kfree(action);
	return ERR_PTR(err);
}

static int
mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
			       const struct ib_flow_action_attrs_esp *attr,
			       struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_flow_action *maction = to_mflow_act(action);
	struct mlx5_accel_esp_xfrm_attrs accel_attrs;
	int err = 0;

	if (attr->keymat || attr->replay || attr->encap ||
	    attr->spi || attr->seq || attr->tfc_pad ||
	    attr->hard_limit_pkts ||
	    (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			     IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
			     IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
		return -EOPNOTSUPP;

	/* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
	 * be modified.
	 */
	if (!(maction->esp_aes_gcm.ib_flags &
	      IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
	    attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
			   IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
		return -EINVAL;

	memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
	       sizeof(accel_attrs));

	accel_attrs.esn = attr->esn;
	if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
		accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
	else
		accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;

	err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
					 &accel_attrs);
	if (err)
		return err;

	maction->esp_aes_gcm.ib_flags &=
		~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
	maction->esp_aes_gcm.ib_flags |=
		attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;

	return 0;
}

static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
{
	switch (maction->flow_action_raw.sub_type) {
	case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
		mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
					   maction->flow_action_raw.modify_hdr);
		break;
	case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
		mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
					     maction->flow_action_raw.pkt_reformat);
		break;
	case MLX5_IB_FLOW_ACTION_DECAP:
		break;
	default:
		break;
	}
}

static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
{
	struct mlx5_ib_flow_action *maction = to_mflow_act(action);

	switch (action->type) {
	case IB_FLOW_ACTION_ESP:
		/*
		 * We only support aes_gcm by now, so we implicitly know this
		 * is the underlying crypto.
		 */
		mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
		break;
	case IB_FLOW_ACTION_UNSPECIFIED:
		destroy_flow_action_raw(maction);
		break;
	default:
		WARN_ON(true);
		break;
	}

	kfree(maction);
	return 0;
}

static int
mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
			     enum mlx5_flow_namespace_type *namespace)
{
	switch (table_type) {
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
		*namespace = MLX5_FLOW_NAMESPACE_BYPASS;
		break;
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
		*namespace = MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
		*namespace = MLX5_FLOW_NAMESPACE_FDB;
		break;
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
		*namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
		break;
	case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
		*namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
	[MLX5_IB_FLOW_TYPE_NORMAL] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		.u.ptr = {
			.len = sizeof(u16), /* data is priority */
			.min_len = sizeof(u16),
		}
	},
	[MLX5_IB_FLOW_TYPE_SNIFFER] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
	[MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
	[MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
		.type = UVERBS_ATTR_TYPE_PTR_IN,
		UVERBS_ATTR_NO_DATA(),
	},
};

static bool is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

static int get_dests(struct uverbs_attr_bundle *attrs,
		     struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
		     int *dest_type, struct ib_qp **qp, u32 *flags)
{
	bool dest_devx, dest_qp;
	void *devx_obj;
	int err;

	dest_devx = uverbs_attr_is_valid(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
	dest_qp = uverbs_attr_is_valid(attrs,
				       MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);

	*flags = 0;
	err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
				 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
				 MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
	if (err)
		return err;

	/* Both flags are not allowed */
	if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
	    *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
		return -EINVAL;

	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
		if (dest_devx && (dest_qp || *flags))
			return -EINVAL;
		else if (dest_qp && *flags)
			return -EINVAL;
	}

	/* Allow only DEVX object, drop as dest for FDB */
	if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx ||
	    (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
		return -EINVAL;

	/* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
	if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
	    ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
		return -EINVAL;

	*qp = NULL;
	if (dest_devx) {
		devx_obj =
			uverbs_attr_get_obj(attrs,
					    MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
		if (IS_ERR(devx_obj))
			return PTR_ERR(devx_obj);

		/* Verify that the given DEVX object is a flow
		 * steering destination.
		 */
		if (!is_flow_dest(devx_obj, dest_id, dest_type))
			return -EINVAL;
		/* Allow only flow table as dest when inserting to FDB or RDMA_RX */
		if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB ||
		     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
		    *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			return -EINVAL;
	} else if (dest_qp) {
		struct mlx5_ib_qp *mqp;

		*qp = uverbs_attr_get_obj(attrs,
					  MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
		if (IS_ERR(*qp))
			return PTR_ERR(*qp);

		if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
			return -EINVAL;

		mqp = to_mqp(*qp);
		if (mqp->is_rss)
			*dest_id = mqp->rss_qp.tirn;
		else
			*dest_id = mqp->raw_packet_qp.rq.tirn;
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
		   fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
	}

	if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
	    (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
	     fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX))
		return -EINVAL;

	return 0;
}

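/*
 * A DEVX flow counter object may be a bulk allocation; validate that the
 * requested offset lies inside the bulk and compute the absolute counter
 * id from the base id plus the offset.
 */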
static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {

		if (offset && offset >= devx_obj->flow_counter_bulk_size)
			return false;

		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		*counter_id += offset;
		return true;
	}

	return false;
}

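/*
 * DEVX/raw flow creation ioctl: resolves the destination (DEVX object,
 * QP TIR or port), optional counter and flow actions from the bundle
 * attributes, then inserts the rule via raw_fs_rule_add().
 */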
#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_flow_context flow_context = {.flow_tag =
		MLX5_FS_DEFAULT_FLOW_TAG};
	u32 *offset_attr, offset = 0, counter_id = 0;
	int dest_id, dest_type = -1, inlen, len, ret, i;
	struct mlx5_ib_flow_handler *flow_handler;
	struct mlx5_ib_flow_matcher *fs_matcher;
	struct ib_uobject **arr_flow_actions;
	struct ib_uflow_resources *uflow_res;
	struct mlx5_flow_act flow_act = {};
	struct ib_qp *qp = NULL;
	void *devx_obj, *cmd_in;
	struct ib_uobject *uobj;
	struct mlx5_ib_dev *dev;
	u32 flags;

	if (!capable(CAP_NET_RAW))
		return -EPERM;

	fs_matcher = uverbs_attr_get_obj(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
	uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
	dev = mlx5_udata_to_mdev(&attrs->driver_udata);

	if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
		return -EINVAL;

	if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;

	if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;

	len = uverbs_attr_get_uobjs_arr(attrs,
		MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
	if (len) {
		devx_obj = arr_flow_actions[0]->object;

		if (uverbs_attr_is_valid(attrs,
					 MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {

			int num_offsets = uverbs_attr_ptr_get_array_size(
				attrs,
				MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
				sizeof(u32));

			if (num_offsets != 1)
				return -EINVAL;

			offset_attr = uverbs_attr_get_alloced_ptr(
				attrs,
				MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
			offset = *offset_attr;
		}

		if (!is_flow_counter(devx_obj, offset, &counter_id))
			return -EINVAL;

		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
	}

	cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
	inlen = uverbs_attr_get_len(attrs,
				    MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);

	uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
	if (!uflow_res)
		return -ENOMEM;

	len = uverbs_attr_get_uobjs_arr(attrs,
		MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
	for (i = 0; i < len; i++) {
		struct mlx5_ib_flow_action *maction =
			to_mflow_act(arr_flow_actions[i]->object);

		ret = parse_flow_flow_action(maction, false, &flow_act);
		if (ret)
			goto err_out;
		flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
				   arr_flow_actions[i]->object);
	}

	ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
			       MLX5_IB_ATTR_CREATE_FLOW_TAG);
	if (!ret) {
		if (flow_context.flow_tag >= BIT(24)) {
			ret = -EINVAL;
			goto err_out;
		}
		flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	}

	flow_handler =
		raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
				counter_id, cmd_in, inlen, dest_id, dest_type);
	if (IS_ERR(flow_handler)) {
		ret = PTR_ERR(flow_handler);
		goto err_out;
	}

	ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);

	return 0;
err_out:
	ib_uverbs_flow_resources_free(uflow_res);
	return ret;
}

static int flow_matcher_cleanup(struct ib_uobject *uobject,
				enum rdma_remove_reason why,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_flow_matcher *obj = uobject->object;

	if (atomic_read(&obj->usecnt))
		return -EBUSY;

	kfree(obj);
	return 0;
}

static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
			      struct mlx5_ib_flow_matcher *obj)
{
	enum mlx5_ib_uapi_flow_table_type ft_type =
		MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
	u32 flags;
	int err;

	/* New users should use MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE and older
	 * users should switch to it. We leave this to not break userspace.
	 */
	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
	    uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
		return -EINVAL;

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
		err = uverbs_get_const(&ft_type, attrs,
				       MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
		if (err)
			return err;

		err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
		if (err)
			return err;

		return 0;
	}

	if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
		err = uverbs_get_flags32(&flags, attrs,
					 MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
					 IB_FLOW_ATTR_FLAGS_EGRESS);
		if (err)
			return err;

		if (flags) {
			mlx5_ib_ft_type_to_namespace(
				MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
				&obj->ns_type);
			return 0;
		}
	}

	obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	struct mlx5_ib_flow_matcher *obj;
	int err;

	obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->mask_len = uverbs_attr_get_len(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
	err = uverbs_copy_from(&obj->matcher_mask,
			       attrs,
			       MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
	if (err)
		goto end;

	obj->flow_type = uverbs_attr_get_enum_id(
		attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);

	if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
		err = uverbs_copy_from(&obj->priority,
				       attrs,
				       MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
		if (err)
			goto end;
	}

	err = uverbs_copy_from(&obj->match_criteria_enable,
			       attrs,
			       MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
	if (err)
		goto end;

	err = mlx5_ib_matcher_ns(attrs, obj);
	if (err)
		goto end;

	uobj->object = obj;
	obj->mdev = dev->mdev;
	atomic_set(&obj->usecnt, 0);
	return 0;

end:
	kfree(obj);
	return err;
}

static struct ib_flow_action *
mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
			     enum mlx5_ib_uapi_flow_table_type ft_type,
			     u8 num_actions, void *in)
{
	enum mlx5_flow_namespace_type namespace;
	struct mlx5_ib_flow_action *maction;
	int ret;

	ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
	if (ret)
		return ERR_PTR(-EINVAL);

	maction = kzalloc(sizeof(*maction), GFP_KERNEL);
	if (!maction)
		return ERR_PTR(-ENOMEM);

	maction->flow_action_raw.modify_hdr =
		mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);

	if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
		ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
		kfree(maction);
		return ERR_PTR(ret);
	}

	maction->flow_action_raw.sub_type =
		MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
	maction->flow_action_raw.dev = dev;

	return &maction->ib_action;
}
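
/* Modify-header is usable if any of the NIC RX/TX or RDMA TX flow tables
 * advertises a non-zero max_modify_header_actions capability.
 */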
static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
					 max_modify_header_actions) ||
	       MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
					 max_modify_header_actions) ||
	       MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
					  max_modify_header_actions);
}
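
/* MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER: validate device support,
 * size the user-supplied PRM action array, and create the action.
 */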
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
	enum mlx5_ib_uapi_flow_table_type ft_type;
	struct ib_flow_action *action;
	int num_actions;
	void *in;
	int ret;

	if (!mlx5_ib_modify_header_supported(mdev))
		return -EOPNOTSUPP;

	in = uverbs_attr_get_alloced_ptr(attrs,
		MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);

	num_actions = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
		MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
	if (num_actions < 0)
		return num_actions;

	ret = uverbs_get_const(&ft_type, attrs,
			       MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
	if (ret)
		return ret;
	action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
	if (IS_ERR(action))
		return PTR_ERR(action);

	uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);

	return 0;
}
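
/* Check that the requested packet-reformat type is legal for the given flow
 * table type and is advertised by the device's flow-table capabilities.
 */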
static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
						      u8 packet_reformat_type,
						      u8 ft_type)
{
	switch (packet_reformat_type) {
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
			return MLX5_CAP_FLOWTABLE(ibdev->mdev,
						  encap_general_header);
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
			return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
				reformat_l2_to_l3_tunnel);
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
			return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
				reformat_l3_tunnel_to_l2);
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
		if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
			return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
		break;
	default:
		break;
	}

	return false;
}
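
/* Translate the DV (user-facing) packet-reformat type to the PRM value
 * programmed into the device.
 */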
static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
{
	switch (dv_prt) {
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
		*prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
		*prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
		break;
	case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
		*prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
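
/* Allocate the device packet-reformat context and attach it to the flow
 * action.
 */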
static int mlx5_ib_flow_action_create_packet_reformat_ctx(
	struct mlx5_ib_dev *dev,
	struct mlx5_ib_flow_action *maction,
	u8 ft_type, u8 dv_prt,
	void *in, size_t len)
{
	enum mlx5_flow_namespace_type namespace;
	u8 prm_prt;
	int ret;

	ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
	if (ret)
		return ret;

	ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
	if (ret)
		return ret;

	maction->flow_action_raw.pkt_reformat =
		mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
					   in, namespace);
	if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
		ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
		return ret;
	}

	maction->flow_action_raw.sub_type =
		MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
	maction->flow_action_raw.dev = dev;

	return 0;
}
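
/* MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT: L2_TUNNEL_TO_L2 (decap)
 * needs no reformat context; all other types take a user data buffer that is
 * programmed into the device.
 */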
static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
		MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
	enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
	enum mlx5_ib_uapi_flow_table_type ft_type;
	struct mlx5_ib_flow_action *maction;
	int ret;

	ret = uverbs_get_const(&ft_type, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
	if (ret)
		return ret;

	ret = uverbs_get_const(&dv_prt, attrs,
			       MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
	if (ret)
		return ret;

	if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
		return -EOPNOTSUPP;

	maction = kzalloc(sizeof(*maction), GFP_KERNEL);
	if (!maction)
		return -ENOMEM;

	if (dv_prt ==
	    MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
		maction->flow_action_raw.sub_type =
			MLX5_IB_FLOW_ACTION_DECAP;
		maction->flow_action_raw.dev = mdev;
	} else {
		void *in;
		int len;

		in = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
		if (IS_ERR(in)) {
			ret = PTR_ERR(in);
			goto free_maction;
		}

		len = uverbs_attr_get_len(attrs,
			MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);

		ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
			maction, ft_type, dv_prt, in, len);
		if (ret)
			goto free_maction;
	}

	uverbs_flow_action_fill_action(&maction->ib_action, uobj,
				       &mdev->ib_dev,
				       IB_FLOW_ACTION_UNSPECIFIED);
	return 0;

free_maction:
	kfree(maction);
	return ret;
}
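
/* uverbs method/attribute trees describing the mlx5 flow UAPI; chained into
 * the uapi via mlx5_ib_flow_defs below.
 */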
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_CREATE_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
			UVERBS_OBJECT_QP,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ),
	UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
			     UVERBS_OBJECT_FLOW_ACTION,
			     UVERBS_ACCESS_READ, 1,
			     MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL),
	UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
			     MLX5_IB_OBJECT_DEVX_OBJ,
			     UVERBS_ACCESS_READ, 1, 1,
			     UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
			   UA_OPTIONAL,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
			     enum mlx5_ib_create_flow_flags,
			     UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DESTROY_FLOW,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
			UVERBS_OBJECT_FLOW,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

ADD_UVERBS_METHODS(mlx5_ib_fs,
		   UVERBS_OBJECT_FLOW,
		   &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
		   &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
			   UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
				   set_add_copy_action_in_auto)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
			UVERBS_OBJECT_FLOW_ACTION,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
			   UVERBS_ATTR_MIN_SIZE(1),
			   UA_ALLOC_AND_COPY,
			   UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
			     enum mlx5_ib_uapi_flow_action_packet_reformat_type,
			     UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_MANDATORY));

ADD_UVERBS_METHODS(
	mlx5_ib_flow_actions,
	UVERBS_OBJECT_FLOW_ACTION,
	&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
	&UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
		UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
		UA_MANDATORY),
	UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
			    mlx5_ib_flow_type,
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
			     enum ib_flow_flags,
			     UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
			     enum mlx5_ib_uapi_flow_table_type,
			     UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
			MLX5_IB_OBJECT_FLOW_MATCHER,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
			    UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));

const struct uapi_definition mlx5_ib_flow_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_FLOW_MATCHER),
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_FLOW,
		&mlx5_ib_fs),
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
				&mlx5_ib_flow_actions),
	{},
};

static const struct ib_device_ops flow_ops = {
	.create_flow = mlx5_ib_create_flow,
	.destroy_flow = mlx5_ib_destroy_flow,
	.destroy_flow_action = mlx5_ib_destroy_flow_action,
};

static const struct ib_device_ops flow_ipsec_ops = {
	.create_flow_action_esp = mlx5_ib_create_flow_action_esp,
	.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
};
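
/* Called at device init: allocate the flow-steering bookkeeping and register
 * the flow ops (plus the ESP ops when the device advertises IPsec
 * acceleration).
 */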
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
	if (!dev->flow_db)
		return -ENOMEM;

	mutex_init(&dev->flow_db->lock);

	ib_set_device_ops(&dev->ib_dev, &flow_ops);
	if (mlx5_accel_ipsec_device_caps(dev->mdev) &
	    MLX5_ACCEL_IPSEC_CAP_DEVICE)
		ib_set_device_ops(&dev->ib_dev, &flow_ipsec_ops);

	return 0;
}