/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"
#define BNXT_FID_INVALID			0xffff
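/* Build an 802.1Q TCI from a VLAN ID and priority: the 12-bit VID sits in
 * the low bits and the 3-bit PCP is shifted up by VLAN_PRIO_SHIFT (13),
 * e.g. VLAN_TCI(100, 3) == 0x6064.
 */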
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))
/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	/* dev is the PF itself */
	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}
static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	struct net_device *dev = tcf_mirred_dev(tc_act);

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action");
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_dev = dev;
	return 0;
}
static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}
static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	LIST_HEAD(tc_actions);
	int rc;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_to_list(tc_exts, &tc_actions);
	list_for_each_entry(tc_act, &tc_actions, list) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
		}
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
			/* dst_fid is PF's fid */
			actions->dst_fid = bp->pf.fw_fid;
		} else {
			/* find the FID from dst_dev */
			actions->dst_fid =
				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
			if (actions->dst_fid == BNXT_FID_INVALID)
				return -EINVAL;
		}
	}

	return 0;
}
#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)
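/* GET_KEY/GET_MASK pull the key and mask portion of a given dissector key
 * type out of the flower offload command; each match section below copies
 * the fields into the driver's bnxt_tc_flow key/mask structs via these
 * helpers.
 */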
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(mask->vlan_id, mask->vlan_priority));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd,
				 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);
	return rc;
}
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}
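/* Returns true when every byte of @mask is zero, i.e. the field is not
 * being matched on at all.
 */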
static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);

	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;

	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
		(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
		(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}
static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}
static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node)
			return NULL;

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}
/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}
/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}

	return true;
}
/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	}
	return tunnel_node->refcount;
}
/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d", rc);
	return NULL;
}
static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}
static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 sharing flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}
static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}
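/* Resolve the outer headers needed for a VXLAN tunnel: route the tunnel
 * destination through the PF netdev (optionally via a VLAN upper device),
 * look up the next-hop neighbour, and fill in the outer dmac/smac, source
 * IP and TTL in @tun_key/@l2_info.
 */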
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info)
{
	struct net_device *real_dst_dev = bp->dev;
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
#endif
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
}
static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *decap_filter_handle)
{
	struct ip_tunnel_key *decap_key = &flow->tun_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_l2_key l2_info = { {0} };
	struct bnxt_tc_tunnel_node *decap_node;
	struct ip_tunnel_key tun_key = { 0 };
	struct bnxt_tc_l2_key *decap_l2_info;
	__le32 ref_decap_handle;
	int rc;

	/* Check if there's another flow using the same tunnel decap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields.
	 */
	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
					     &tc_info->decap_ht_params,
					     decap_key);
	if (!decap_node)
		return -ENOMEM;

	flow_node->decap_node = decap_node;

	if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	/* Resolve the L2 fields for tunnel decap
	 * Resolve the route for remote vtep (saddr) of the decap key
	 * Find its next-hop mac addrs
	 */
	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
	tun_key.tp_dst = flow->tun_key.tp_dst;
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
	if (rc)
		goto put_decap;

	decap_l2_info = &decap_node->l2_info;
	/* decap smac is wildcarded */
	ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
	if (l2_info.num_vlans) {
		decap_l2_info->num_vlans = l2_info.num_vlans;
		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
		decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
	}
	flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

	/* For getting a decap_filter_handle we first need to check if
	 * there are any other decap flows that share the same tunnel L2
	 * key and if so, pass that flow's decap_filter_handle as the
	 * ref_decap_handle for this flow.
	 */
	rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
					  &ref_decap_handle);
	if (rc)
		goto put_decap;

	/* Issue the hwrm cmd to allocate a decap filter handle */
	rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
					 ref_decap_handle,
					 &decap_node->tunnel_handle);
	if (rc)
		goto put_decap_l2;

done:
	*decap_filter_handle = decap_node->tunnel_handle;
	return 0;

put_decap_l2:
	bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				&tc_info->decap_ht_params,
				flow_node->decap_node);
	return rc;
}
static void bnxt_tc_put_encap_handle(struct bnxt *bp,
				     struct bnxt_tc_tunnel_node *encap_node)
{
	__le32 encap_handle = encap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, encap_node);
	if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_encap_record_free(bp, encap_handle);
}
/* Lookup the tunnel encap table and check if there's an encap_handle
 * allocated already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *encap_handle)
{
	struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_tunnel_node *encap_node;
	int rc;

	/* Check if there's another flow using the same tunnel encap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields.
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}
static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}
static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}
*bp
,
1130 struct bnxt_tc_flow_node
*flow_node
)
1132 struct bnxt_tc_info
*tc_info
= bp
->tc_info
;
1135 /* send HWRM cmd to free the flow-id */
1136 bnxt_hwrm_cfa_flow_free(bp
, flow_node
->flow_handle
);
1138 mutex_lock(&tc_info
->lock
);
1140 /* release references to any tunnel encap/decap nodes */
1141 bnxt_tc_put_tunnel_handle(bp
, &flow_node
->flow
, flow_node
);
1143 /* release reference to l2 node */
1144 bnxt_tc_put_l2_node(bp
, flow_node
);
1146 mutex_unlock(&tc_info
->lock
);
1148 rc
= rhashtable_remove_fast(&tc_info
->flow_table
, &flow_node
->node
,
1149 tc_info
->flow_ht_params
);
1151 netdev_err(bp
->dev
, "Error: %s: rhashtable_remove_fast rc=%d",
1154 kfree_rcu(flow_node
, rcu
);
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
				u16 src_fid)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		flow->src_fid = bp->pf.fw_fid;
	else
		flow->src_fid = src_fid;
}
/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;

	bnxt_tc_set_src_fid(bp, flow, src_fid);

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -EOPNOTSUPP;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}
static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	return __bnxt_tc_del_flow(bp, flow_node);
}
static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -1;
	}

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and will wrap-around beyond that width.
 */
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)	((x) & (mask))
#define high_bits(x, mask)	((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}
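/* Example: with a 28-bit packet counter (mask = 0xfffffff), an accumulated
 * value of 0x1ffffff0 and a new HW reading of 0x10 imply the counter wrapped
 * once, so the accumulated value becomes 0x20000010.
 */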
/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}
static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}
static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rhashtable_walk_start(iter);

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			break;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}
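/* Walk the flow table in batches of BNXT_FLOW_STATS_BATCH_MAX, query the HW
 * counters for each batch and fold them into the per-flow accumulated stats
 * (assumption: invoked periodically from the driver's slow-path task,
 * outside this file).
 */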
void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	int rc = 0;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
		break;

	case TC_CLSFLOWER_DESTROY:
		rc = bnxt_tc_del_flow(bp, cls_flower);
		break;

	case TC_CLSFLOWER_STATS:
		rc = bnxt_tc_get_flow_stats(bp, cls_flower);
		break;
	}
	return rc;
}
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};
/* convert counter width in bits to a mask */
#define mask(width)		((u64)~0 >> (64 - (width)))
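/* e.g. mask(36) == 0xfffffffff (36 one bits), used below for the FW byte
 * counter width.
 */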
int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}

	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
	if (!tc_info)
		return -ENOMEM;
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		goto free_tc_info;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	bp->tc_info = tc_info;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
	kfree(tc_info);
	return rc;
}
void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;

	if (!bnxt_tc_flower_enabled(bp))
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
	kfree(tc_info);
	bp->tc_info = NULL;
}