// SPDX-License-Identifier: GPL-2.0
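
/* enic_clsf.c - Receive-side classifier (aRFS) filter management for the
 * Cisco VIC (enic) driver: adds and deletes IPv4 5-tuple steering filters
 * in hardware and maintains the per-adapter flow table used by the
 * rx-flow-steering path.
 */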
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

#include "enic_clsf.h"

/* enic_addfltr_5t - Add an IPv4 5-tuple filter
 *	@enic: enic struct of vnic
 *	@keys: flow_keys of the IPv4 5-tuple
 *	@rq: rq number to steer to
 *
 * This function returns the filter_id (hardware id) of the filter
 * added. In case of error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
	int res;
	struct filter data;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP:
		data.u.ipv4.protocol = PROTO_TCP;
		break;
	case IPPROTO_UDP:
		data.u.ipv4.protocol = PROTO_UDP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	data.type = FILTER_IPV4_5TUPLE;
	data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
	data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
	data.u.ipv4.src_port = ntohs(keys->ports.src);
	data.u.ipv4.dst_port = ntohs(keys->ports.dst);
	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
	spin_unlock_bh(&enic->devcmd_lock);
	res = (res == 0) ? rq : res;

	return res;
}

/* enic_delfltr - Delete a clsf filter
 *	@enic: enic struct of vnic
 *	@filter_id: filter_id (hardware id) of the filter to be deleted
 *
 * This function returns zero in case of success, a negative number in case
 * of error.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
	int ret;

	spin_lock_bh(&enic->devcmd_lock);
	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
	spin_unlock_bh(&enic->devcmd_lock);

	return ret;
}

/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
 *	@enic: enic data
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
	int i;

	spin_lock_init(&enic->rfs_h.lock);
	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
	enic->rfs_h.max = enic->config.num_arfs;
	enic->rfs_h.free = enic->rfs_h.max;
	enic->rfs_h.toclean = 0;
}

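/* enic_rfs_flw_tbl_free - Free the flow table
 *	@enic: enic struct of vnic
 *
 * Stops the expiry timer, deletes every remaining filter from hardware and
 * frees all flow table entries.
 */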
void enic_rfs_flw_tbl_free(struct enic *enic)
{
	int i;

	enic_rfs_timer_stop(enic);
	spin_lock_bh(&enic->rfs_h.lock);
	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			enic_delfltr(enic, n->fltr_id);
			hlist_del(&n->node);
			kfree(n);
			enic->rfs_h.free++;
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
}

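/* htbl_fltr_search - Look up a flow table entry by hardware filter id
 *	@enic: enic struct of vnic
 *	@fltr_id: filter_id (hardware id) to search for
 *
 * Walks every hash bucket and returns the matching enic_rfs_fltr_node,
 * or NULL if no entry with that filter id exists.
 */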
struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
{
	int i;

	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node)
			if (n->fltr_id == fltr_id)
				return n;
	}

	return NULL;
}

#ifdef CONFIG_RFS_ACCEL
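/* enic_flow_may_expire - Periodically expire stale aRFS filters
 *	@t: the rfs_may_expire timer embedded in enic->rfs_h
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run, asks the RFS core via
 * rps_may_expire_flow() whether each flow may be expired, and if so deletes
 * the hardware filter and frees the table entry. Re-arms itself to run
 * again HZ/4 jiffies later.
 */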
void enic_flow_may_expire(struct timer_list *t)
{
	struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

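/* htbl_key_search - Find a flow table entry matching a set of flow keys
 *	@h: hash bucket to search
 *	@k: IPv4 5-tuple flow keys to match
 *
 * Returns the entry whose saved keys match @k (addresses, ports, ip_proto
 * and n_proto), or NULL if the bucket holds no such entry.
 */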
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
						  struct flow_keys *k)
{
	struct enic_rfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
		    tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
		    tpos->keys.ports.ports == k->ports.ports &&
		    tpos->keys.basic.ip_proto == k->basic.ip_proto &&
		    tpos->keys.basic.n_proto == k->basic.n_proto)
			return tpos;

	return NULL;
}

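/* enic_rx_flow_steer - ndo_rx_flow_steer callback (aRFS)
 *	@dev: net device
 *	@skb: packet that triggered the steering request
 *	@rxq_index: desired receive queue for this flow
 *	@flow_id: flow id assigned by the RFS core
 *
 * Dissects the skb and, for IPv4 TCP/UDP flows, programs (or re-programs) a
 * hardware 5-tuple filter steering the flow to @rxq_index. Returns the
 * hardware filter id on success or a negative errno.
 */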
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
	    (keys.basic.ip_proto != IPPROTO_TCP &&
	     keys.basic.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* desired rq changed for the flow, we need to delete the
		 * old fltr and add a new one
		 *
		 * The moment we delete the fltr, the upcoming pkts
		 * are put in the default rq based on rss. When we add the
		 * new filter, upcoming pkts are put in the desired queue.
		 * This could cause ooo pkts.
		 *
		 * Let's first try adding the new fltr and then delete the
		 * old one.
		 */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del old fltr first */
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				hlist_del(&n->node);
				enic->rfs_h.free++;
				kfree(n);
				goto ret_unlock;
			}
		/* add new fltr 1st then del old fltr */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* deleting old fltr failed. Add old fltr to list.
			 * enic_flow_may_expire() will try to delete it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	/* entry not present */
	} else {
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		memcpy(&n->keys, &keys, sizeof(n->keys));
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}

#endif /* CONFIG_RFS_ACCEL */