/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
/* Fill ch_filter_specification with parsed match value/mask pair. */
static int fill_match_fields(struct adapter *adap,
                             struct ch_filter_specification *fs,
                             struct tc_cls_u32_offload *cls,
                             const struct cxgb4_match_field *entry,
                             bool next_header)
{
        unsigned int i, j;
        __be32 val, mask;
        int off, err;
        bool found;

        for (i = 0; i < cls->knode.sel->nkeys; i++) {
                off = cls->knode.sel->keys[i].off;
                val = cls->knode.sel->keys[i].val;
                mask = cls->knode.sel->keys[i].mask;

                if (next_header) {
                        /* For next headers, parse only keys with offmask */
                        if (!cls->knode.sel->keys[i].offmask)
                                continue;
                } else {
                        /* For the remaining, parse only keys without offmask */
                        if (cls->knode.sel->keys[i].offmask)
                                continue;
                }

                found = false;

                for (j = 0; entry[j].val; j++) {
                        if (off == entry[j].off) {
                                found = true;
                                err = entry[j].val(fs, val, mask);
                                if (err)
                                        return err;
                                break;
                        }
                }

                /* Bad offset, didn't find the field. */
                if (!found)
                        return -EINVAL;
        }

        return 0;
}
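/* For reference, each cxgb4_match_field entry pairs a u32 byte offset with
 * a handler that translates the raw val/mask pair into the matching
 * ch_filter_specification fields. A minimal sketch of such a table, modeled
 * on cxgb4_tc_u32_parse.h (treat the exact offsets and handler names as
 * illustrative rather than authoritative):
 *
 *      static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
 *              { .off = 0,  .val = cxgb4_fill_ipv4_tos },
 *              { .off = 8,  .val = cxgb4_fill_ipv4_proto },
 *              { .off = 12, .val = cxgb4_fill_ipv4_src_ip },
 *              { .off = 16, .val = cxgb4_fill_ipv4_dst_ip },
 *              { .val = NULL },  (sentinel that terminates the scan above)
 *      };
 */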
/* Fill ch_filter_specification with parsed action. */
static int fill_action_fields(struct adapter *adap,
                              struct ch_filter_specification *fs,
                              struct tc_cls_u32_offload *cls)
{
        unsigned int num_actions = 0;
        const struct tc_action *a;
        struct tcf_exts *exts;
        int i;

        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;

        tcf_exts_for_each_action(i, a, exts) {
                /* Don't allow more than one action per rule. */
                if (num_actions)
                        return -EINVAL;

                /* Drop in hardware. */
                if (is_tcf_gact_shot(a)) {
                        fs->action = FILTER_DROP;
                        num_actions++;
                        continue;
                }

                /* Re-direct to specified port in hardware. */
                if (is_tcf_mirred_egress_redirect(a)) {
                        struct net_device *n_dev, *target_dev;
                        bool found = false;
                        unsigned int i;

                        target_dev = tcf_mirred_dev(a);
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (target_dev == n_dev) {
                                        fs->action = FILTER_SWITCH;
                                        fs->eport = i;
                                        found = true;
                                        break;
                                }
                        }

                        /* Interface doesn't belong to any port of
                         * the underlying hardware.
                         */
                        if (!found)
                                return -EINVAL;

                        num_actions++;
                        continue;
                }

                /* Un-supported action. */
                return -EINVAL;
        }

        return 0;
}
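/* Examples of rules whose actions the parser above accepts (illustrative
 * tc(8) invocations; device names and addresses are placeholders):
 *
 *      # gact drop -> FILTER_DROP
 *      tc filter add dev eth0 ingress protocol ip prio 1 u32 \
 *              match ip dst 192.168.1.1/32 action drop
 *
 *      # mirred egress redirect to another port of the same adapter
 *      # -> FILTER_SWITCH
 *      tc filter add dev eth0 ingress protocol ip prio 2 u32 \
 *              match ip dst 192.168.1.2/32 \
 *              action mirred egress redirect dev eth1
 *
 * Redirects to a netdev that is not one of the adapter's own ports are
 * rejected, as are rules carrying more than one action.
 */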
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
        const struct cxgb4_match_field *start, *link_start = NULL;
        struct netlink_ext_ack *extack = cls->common.extack;
        struct adapter *adapter = netdev2adap(dev);
        __be16 protocol = cls->common.protocol;
        struct ch_filter_specification fs;
        struct cxgb4_tc_u32_table *t;
        struct cxgb4_link *link;
        u32 uhtid, link_uhtid;
        bool is_ipv6 = false;
        u8 inet_family;
        int filter_id;
        int ret;

        if (!can_tc_u32_offload(dev))
                return -EOPNOTSUPP;

        if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
                return -EOPNOTSUPP;

        inet_family = (protocol == htons(ETH_P_IPV6)) ? PF_INET6 : PF_INET;

        /* Get a free filter entry TID, where we can insert this new
         * rule. Only insert rule if its prio doesn't conflict with
         * existing rules.
         */
        filter_id = cxgb4_get_free_ftid(dev, inet_family, false,
                                        TC_U32_NODE(cls->knode.handle));
        if (filter_id < 0) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "No free LETCAM index available");
                return -ENOMEM;
        }

        t = adapter->tc_u32;
        uhtid = TC_U32_USERHTID(cls->knode.handle);
        link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

        /* Ensure that uhtid is either root u32 (i.e. 0x800)
         * or a valid linked bucket.
         */
        if (uhtid != 0x800 && uhtid >= t->size)
                return -EINVAL;

        /* Ensure link handle uhtid is sane, if specified. */
        if (link_uhtid >= t->size)
                return -EINVAL;

        memset(&fs, 0, sizeof(fs));

        if (filter_id < adapter->tids.nhpftids)
                fs.prio = 1;
        fs.tc_prio = cls->common.prio;
        fs.tc_cookie = cls->knode.handle;

        if (protocol == htons(ETH_P_IPV6)) {
                start = cxgb4_ipv6_fields;
                is_ipv6 = true;
        } else {
                start = cxgb4_ipv4_fields;
                is_ipv6 = false;
        }

        if (uhtid != 0x800) {
                /* Link must exist from root node before insertion. */
                if (!t->table[uhtid - 1].link_handle)
                        return -EINVAL;

                /* Link must have a valid supported next header. */
                link_start = t->table[uhtid - 1].match_field;
                if (!link_start)
                        return -EINVAL;
        }

        /* Parse links and record them for subsequent jumps to valid
         * next headers.
         */
        if (link_uhtid) {
                const struct cxgb4_next_header *next;
                bool found = false;
                unsigned int i, j;
                __be32 val, mask;
                int off;

                if (t->table[link_uhtid - 1].link_handle) {
                        dev_err(adapter->pdev_dev,
                                "Link handle exists for: 0x%x\n",
                                link_uhtid);
                        return -EINVAL;
                }

                next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

                /* Try to find matches that allow jumps to next header. */
                for (i = 0; next[i].jump; i++) {
                        if (next[i].sel.offoff != cls->knode.sel->offoff ||
                            next[i].sel.offshift != cls->knode.sel->offshift ||
                            next[i].sel.offmask != cls->knode.sel->offmask ||
                            next[i].sel.off != cls->knode.sel->off)
                                continue;

                        /* Found a possible candidate. Find a key that
                         * matches the corresponding offset, value, and
                         * mask to jump to next header.
                         */
                        for (j = 0; j < cls->knode.sel->nkeys; j++) {
                                off = cls->knode.sel->keys[j].off;
                                val = cls->knode.sel->keys[j].val;
                                mask = cls->knode.sel->keys[j].mask;

                                if (next[i].key.off == off &&
                                    next[i].key.val == val &&
                                    next[i].key.mask == mask) {
                                        found = true;
                                        break;
                                }
                        }

                        if (!found)
                                continue; /* Try next candidate. */

                        /* Candidate to jump to next header found.
                         * Translate all keys to internal specification
                         * and store them in jump table. This spec is copied
                         * later to set the actual filters.
                         */
                        ret = fill_match_fields(adapter, &fs, cls,
                                                start, false);
                        if (ret)
                                return ret;

                        link = &t->table[link_uhtid - 1];
                        link->match_field = next[i].jump;
                        link->link_handle = cls->knode.handle;
                        memcpy(&link->fs, &fs, sizeof(fs));
                        break;
                }

                /* No candidate found to jump to next header. */
                if (!found)
                        return -EINVAL;

                return 0;
        }

        /* Fill ch_filter_specification match fields to be shipped to hardware.
         * Copy the linked spec (if any) first. And then update the spec as
         * needed.
         */
        if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
                /* Copy linked ch_filter_specification */
                memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
                ret = fill_match_fields(adapter, &fs, cls,
                                        link_start, true);
                if (ret)
                        return ret;
        }

        ret = fill_match_fields(adapter, &fs, cls, start, false);
        if (ret)
                return ret;

        /* Fill ch_filter_specification action fields to be shipped to
         * hardware.
         */
        ret = fill_action_fields(adapter, &fs, cls);
        if (ret)
                return ret;

        /* The filter spec has been completely built from the info
         * provided from u32. We now set some default fields in the
         * spec for sanity.
         */

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs.val.iport = netdev2pinfo(dev)->port_id;
        fs.mask.iport = ~0;

        /* Enable filter hit counts. */
        fs.hitcnts = 1;

        /* Set type of filter - IPv6 or IPv4 */
        fs.type = is_ipv6 ? 1 : 0;

        /* Set the filter */
        ret = cxgb4_set_filter(dev, filter_id, &fs);
        if (ret)
                return ret;

        /* If this is a linked bucket, then set the corresponding
         * entry in the bitmap to mark it as belonging to this linked
         * bucket.
         */
        if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
                set_bit(filter_id, t->table[uhtid - 1].tid_map);

        return 0;
}
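/* The uhtid/link_uhtid handling above mirrors u32's linked hash tables. A
 * sketch of how such links are created from userspace (illustrative tc(8)
 * commands; handle values are the u32 defaults, device names placeholders):
 *
 *      # create a child hash table (bucket) for the next header
 *      tc filter add dev eth0 ingress prio 1 handle 1: protocol ip \
 *              u32 divisor 1
 *
 *      # from the root table (0x800), jump to bucket 1: for TCP packets,
 *      # deriving the next-header offset from the IPv4 header length
 *      tc filter add dev eth0 ingress prio 1 protocol ip u32 ht 800: \
 *              match ip protocol 6 0xff \
 *              link 1: offset at 0 mask 0x0f00 shift 6
 *
 *      # insert a rule into the linked bucket
 *      tc filter add dev eth0 ingress prio 1 protocol ip u32 ht 1: \
 *              match tcp dst 80 0xffff action drop
 *
 * The offset spec (off/offoff/offshift/offmask) of the link rule must match
 * an entry in cxgb4_ipv4_jumps/cxgb4_ipv6_jumps for the jump to be offloaded.
 */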
int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
        struct adapter *adapter = netdev2adap(dev);
        unsigned int filter_id, max_tids, i, j;
        struct cxgb4_link *link = NULL;
        struct cxgb4_tc_u32_table *t;
        struct filter_entry *f;
        bool found = false;
        u32 handle, uhtid;
        u8 nslots;
        int ret;

        if (!can_tc_u32_offload(dev))
                return -EOPNOTSUPP;

        /* Fetch the location to delete the filter. */
        max_tids = adapter->tids.nhpftids + adapter->tids.nftids;

        spin_lock_bh(&adapter->tids.ftid_lock);
        filter_id = 0;
        while (filter_id < max_tids) {
                if (filter_id < adapter->tids.nhpftids) {
                        i = filter_id;
                        f = &adapter->tids.hpftid_tab[i];
                        if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
                                found = true;
                                break;
                        }

                        i = find_next_bit(adapter->tids.hpftid_bmap,
                                          adapter->tids.nhpftids, i + 1);
                        if (i >= adapter->tids.nhpftids) {
                                filter_id = adapter->tids.nhpftids;
                                continue;
                        }

                        filter_id = i;
                } else {
                        i = filter_id - adapter->tids.nhpftids;
                        f = &adapter->tids.ftid_tab[i];
                        if (f->valid && f->fs.tc_cookie == cls->knode.handle) {
                                found = true;
                                break;
                        }

                        i = find_next_bit(adapter->tids.ftid_bmap,
                                          adapter->tids.nftids, i + 1);
                        if (i >= adapter->tids.nftids)
                                break;

                        filter_id = i + adapter->tids.nhpftids;
                }

                /* IPv6 filters span multiple LE-TCAM slots (two on T6,
                 * four on earlier chips), so skip past the extra slots.
                 */
                nslots = 0;
                if (f->fs.type) {
                        nslots++;
                        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <
                            CHELSIO_T6)
                                nslots += 2;
                }

                filter_id += nslots;
        }
        spin_unlock_bh(&adapter->tids.ftid_lock);

        if (!found)
                return -ERANGE;

        t = adapter->tc_u32;
        handle = cls->knode.handle;
        uhtid = TC_U32_USERHTID(cls->knode.handle);

        /* Ensure that uhtid is either root u32 (i.e. 0x800)
         * or a valid linked bucket.
         */
        if (uhtid != 0x800 && uhtid >= t->size)
                return -EINVAL;

        /* Delete the specified filter */
        if (uhtid != 0x800) {
                link = &t->table[uhtid - 1];
                if (!link->link_handle)
                        return -EINVAL;

                if (!test_bit(filter_id, link->tid_map))
                        return -EINVAL;
        }

        ret = cxgb4_del_filter(dev, filter_id, NULL);
        if (ret)
                goto out;

        if (link)
                clear_bit(filter_id, link->tid_map);

        /* If a link is being deleted, then delete all filters
         * associated with the link.
         */
        for (i = 0; i < t->size; i++) {
                link = &t->table[i];

                if (link->link_handle == handle) {
                        for (j = 0; j < max_tids; j++) {
                                if (!test_bit(j, link->tid_map))
                                        continue;

                                ret = __cxgb4_del_filter(dev, j, NULL, NULL);
                                if (ret)
                                        goto out;

                                clear_bit(j, link->tid_map);
                        }

                        /* Clear the link state */
                        link->match_field = NULL;
                        link->link_handle = 0;
                        memset(&link->fs, 0, sizeof(link->fs));
                        break;
                }
        }

out:
        return ret;
}
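/* Both entry points are invoked from the driver's TC block callback. A
 * minimal sketch of the expected dispatch (the actual hook lives in
 * cxgb4_main.c; treat this as an outline rather than the verbatim
 * implementation):
 *
 *      static int cxgb4_setup_tc_cls_u32(struct net_device *dev,
 *                                        struct tc_cls_u32_offload *cls_u32)
 *      {
 *              switch (cls_u32->command) {
 *              case TC_CLSU32_NEW_KNODE:
 *              case TC_CLSU32_REPLACE_KNODE:
 *                      return cxgb4_config_knode(dev, cls_u32);
 *              case TC_CLSU32_DELETE_KNODE:
 *                      return cxgb4_delete_knode(dev, cls_u32);
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }
 */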
void cxgb4_cleanup_tc_u32(struct adapter *adap)
{
        struct cxgb4_tc_u32_table *t;
        unsigned int i;

        if (!adap->tc_u32)
                return;

        /* Free up all allocated memory. */
        t = adap->tc_u32;
        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];

                kvfree(link->tid_map);
        }
        kvfree(adap->tc_u32);
}
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
{
        unsigned int max_tids = adap->tids.nftids + adap->tids.nhpftids;
        struct cxgb4_tc_u32_table *t;
        unsigned int i;

        if (!max_tids)
                return NULL;

        t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
        if (!t)
                return NULL;

        t->size = max_tids;

        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];
                unsigned int bmap_size;

                bmap_size = BITS_TO_LONGS(max_tids);
                link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
                                         GFP_KERNEL);
                if (!link->tid_map)
                        goto out_no_mem;
                bitmap_zero(link->tid_map, max_tids);
        }

        return t;

out_no_mem:
        /* kvfree() tolerates NULL, so free whatever was allocated. */
        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];

                kvfree(link->tid_map);
        }
        kvfree(t);

        return NULL;
}
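/* Usage sketch for the table lifetime (hypothetical call sites; in the
 * driver these run during adapter setup and teardown):
 *
 *      adap->tc_u32 = cxgb4_init_tc_u32(adap);  one jump-table slot per TID
 *      ...
 *      cxgb4_cleanup_tc_u32(adap);              frees per-link bitmaps + table
 *
 * struct_size(t, table, max_tids) sizes the allocation for the structure
 * plus its trailing flexible array of max_tids cxgb4_link entries with
 * overflow checking, so t->table[i] is valid for 0 <= i < max_tids.
 */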