/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
42 /* Fill ch_filter_specification with parsed match value/mask pair. */
43 static int fill_match_fields(struct adapter
*adap
,
44 struct ch_filter_specification
*fs
,
45 struct tc_cls_u32_offload
*cls
,
46 const struct cxgb4_match_field
*entry
,
54 for (i
= 0; i
< cls
->knode
.sel
->nkeys
; i
++) {
55 off
= cls
->knode
.sel
->keys
[i
].off
;
56 val
= cls
->knode
.sel
->keys
[i
].val
;
57 mask
= cls
->knode
.sel
->keys
[i
].mask
;
60 /* For next headers, parse only keys with offmask */
61 if (!cls
->knode
.sel
->keys
[i
].offmask
)
64 /* For the remaining, parse only keys without offmask */
65 if (cls
->knode
.sel
->keys
[i
].offmask
)
71 for (j
= 0; entry
[j
].val
; j
++) {
72 if (off
== entry
[j
].off
) {
74 err
= entry
[j
].val(fs
, val
, mask
);
88 /* Fill ch_filter_specification with parsed action. */
89 static int fill_action_fields(struct adapter
*adap
,
90 struct ch_filter_specification
*fs
,
91 struct tc_cls_u32_offload
*cls
)
93 unsigned int num_actions
= 0;
94 const struct tc_action
*a
;
95 struct tcf_exts
*exts
;
98 exts
= cls
->knode
.exts
;
99 if (!tcf_exts_has_actions(exts
))
102 tcf_exts_to_list(exts
, &actions
);
103 list_for_each_entry(a
, &actions
, list
) {
104 /* Don't allow more than one action per rule. */
108 /* Drop in hardware. */
109 if (is_tcf_gact_shot(a
)) {
110 fs
->action
= FILTER_DROP
;
115 /* Re-direct to specified port in hardware. */
116 if (is_tcf_mirred_egress_redirect(a
)) {
117 struct net_device
*n_dev
, *target_dev
;
121 target_dev
= tcf_mirred_dev(a
);
122 for_each_port(adap
, i
) {
123 n_dev
= adap
->port
[i
];
124 if (target_dev
== n_dev
) {
125 fs
->action
= FILTER_SWITCH
;
132 /* Interface doesn't belong to any port of
133 * the underlying hardware.
142 /* Un-supported action. */
149 int cxgb4_config_knode(struct net_device
*dev
, struct tc_cls_u32_offload
*cls
)
151 const struct cxgb4_match_field
*start
, *link_start
= NULL
;
152 struct adapter
*adapter
= netdev2adap(dev
);
153 __be16 protocol
= cls
->common
.protocol
;
154 struct ch_filter_specification fs
;
155 struct cxgb4_tc_u32_table
*t
;
156 struct cxgb4_link
*link
;
157 unsigned int filter_id
;
158 u32 uhtid
, link_uhtid
;
159 bool is_ipv6
= false;
162 if (!can_tc_u32_offload(dev
))
165 if (protocol
!= htons(ETH_P_IP
) && protocol
!= htons(ETH_P_IPV6
))
168 /* Fetch the location to insert the filter. */
169 filter_id
= cls
->knode
.handle
& 0xFFFFF;
171 if (filter_id
> adapter
->tids
.nftids
) {
172 dev_err(adapter
->pdev_dev
,
173 "Location %d out of range for insertion. Max: %d\n",
174 filter_id
, adapter
->tids
.nftids
);
179 uhtid
= TC_U32_USERHTID(cls
->knode
.handle
);
180 link_uhtid
= TC_U32_USERHTID(cls
->knode
.link_handle
);
182 /* Ensure that uhtid is either root u32 (i.e. 0x800)
183 * or a a valid linked bucket.
185 if (uhtid
!= 0x800 && uhtid
>= t
->size
)
188 /* Ensure link handle uhtid is sane, if specified. */
189 if (link_uhtid
>= t
->size
)
192 memset(&fs
, 0, sizeof(fs
));
194 if (protocol
== htons(ETH_P_IPV6
)) {
195 start
= cxgb4_ipv6_fields
;
198 start
= cxgb4_ipv4_fields
;
202 if (uhtid
!= 0x800) {
203 /* Link must exist from root node before insertion. */
204 if (!t
->table
[uhtid
- 1].link_handle
)
207 /* Link must have a valid supported next header. */
208 link_start
= t
->table
[uhtid
- 1].match_field
;
213 /* Parse links and record them for subsequent jumps to valid
217 const struct cxgb4_next_header
*next
;
223 if (t
->table
[link_uhtid
- 1].link_handle
) {
224 dev_err(adapter
->pdev_dev
,
225 "Link handle exists for: 0x%x\n",
230 next
= is_ipv6
? cxgb4_ipv6_jumps
: cxgb4_ipv4_jumps
;
232 /* Try to find matches that allow jumps to next header. */
233 for (i
= 0; next
[i
].jump
; i
++) {
234 if (next
[i
].offoff
!= cls
->knode
.sel
->offoff
||
235 next
[i
].shift
!= cls
->knode
.sel
->offshift
||
236 next
[i
].mask
!= cls
->knode
.sel
->offmask
||
237 next
[i
].offset
!= cls
->knode
.sel
->off
)
240 /* Found a possible candidate. Find a key that
241 * matches the corresponding offset, value, and
242 * mask to jump to next header.
244 for (j
= 0; j
< cls
->knode
.sel
->nkeys
; j
++) {
245 off
= cls
->knode
.sel
->keys
[j
].off
;
246 val
= cls
->knode
.sel
->keys
[j
].val
;
247 mask
= cls
->knode
.sel
->keys
[j
].mask
;
249 if (next
[i
].match_off
== off
&&
250 next
[i
].match_val
== val
&&
251 next
[i
].match_mask
== mask
) {
258 continue; /* Try next candidate. */
260 /* Candidate to jump to next header found.
261 * Translate all keys to internal specification
262 * and store them in jump table. This spec is copied
263 * later to set the actual filters.
265 ret
= fill_match_fields(adapter
, &fs
, cls
,
270 link
= &t
->table
[link_uhtid
- 1];
271 link
->match_field
= next
[i
].jump
;
272 link
->link_handle
= cls
->knode
.handle
;
273 memcpy(&link
->fs
, &fs
, sizeof(fs
));
277 /* No candidate found to jump to next header. */
284 /* Fill ch_filter_specification match fields to be shipped to hardware.
285 * Copy the linked spec (if any) first. And then update the spec as
288 if (uhtid
!= 0x800 && t
->table
[uhtid
- 1].link_handle
) {
289 /* Copy linked ch_filter_specification */
290 memcpy(&fs
, &t
->table
[uhtid
- 1].fs
, sizeof(fs
));
291 ret
= fill_match_fields(adapter
, &fs
, cls
,
297 ret
= fill_match_fields(adapter
, &fs
, cls
, start
, false);
301 /* Fill ch_filter_specification action fields to be shipped to
304 ret
= fill_action_fields(adapter
, &fs
, cls
);
308 /* The filter spec has been completely built from the info
309 * provided from u32. We now set some default fields in the
313 /* Match only packets coming from the ingress port where this
314 * filter will be created.
316 fs
.val
.iport
= netdev2pinfo(dev
)->port_id
;
319 /* Enable filter hit counts. */
322 /* Set type of filter - IPv6 or IPv4 */
323 fs
.type
= is_ipv6
? 1 : 0;
326 ret
= cxgb4_set_filter(dev
, filter_id
, &fs
);
330 /* If this is a linked bucket, then set the corresponding
331 * entry in the bitmap to mark it as belonging to this linked
334 if (uhtid
!= 0x800 && t
->table
[uhtid
- 1].link_handle
)
335 set_bit(filter_id
, t
->table
[uhtid
- 1].tid_map
);
341 int cxgb4_delete_knode(struct net_device
*dev
, struct tc_cls_u32_offload
*cls
)
343 struct adapter
*adapter
= netdev2adap(dev
);
344 unsigned int filter_id
, max_tids
, i
, j
;
345 struct cxgb4_link
*link
= NULL
;
346 struct cxgb4_tc_u32_table
*t
;
350 if (!can_tc_u32_offload(dev
))
353 /* Fetch the location to delete the filter. */
354 filter_id
= cls
->knode
.handle
& 0xFFFFF;
356 if (filter_id
> adapter
->tids
.nftids
) {
357 dev_err(adapter
->pdev_dev
,
358 "Location %d out of range for deletion. Max: %d\n",
359 filter_id
, adapter
->tids
.nftids
);
364 handle
= cls
->knode
.handle
;
365 uhtid
= TC_U32_USERHTID(cls
->knode
.handle
);
367 /* Ensure that uhtid is either root u32 (i.e. 0x800)
368 * or a a valid linked bucket.
370 if (uhtid
!= 0x800 && uhtid
>= t
->size
)
373 /* Delete the specified filter */
374 if (uhtid
!= 0x800) {
375 link
= &t
->table
[uhtid
- 1];
376 if (!link
->link_handle
)
379 if (!test_bit(filter_id
, link
->tid_map
))
383 ret
= cxgb4_del_filter(dev
, filter_id
, NULL
);
388 clear_bit(filter_id
, link
->tid_map
);
390 /* If a link is being deleted, then delete all filters
391 * associated with the link.
393 max_tids
= adapter
->tids
.nftids
;
394 for (i
= 0; i
< t
->size
; i
++) {
397 if (link
->link_handle
== handle
) {
398 for (j
= 0; j
< max_tids
; j
++) {
399 if (!test_bit(j
, link
->tid_map
))
402 ret
= __cxgb4_del_filter(dev
, j
, NULL
, NULL
);
406 clear_bit(j
, link
->tid_map
);
409 /* Clear the link state */
410 link
->match_field
= NULL
;
411 link
->link_handle
= 0;
412 memset(&link
->fs
, 0, sizeof(link
->fs
));
421 void cxgb4_cleanup_tc_u32(struct adapter
*adap
)
423 struct cxgb4_tc_u32_table
*t
;
429 /* Free up all allocated memory. */
431 for (i
= 0; i
< t
->size
; i
++) {
432 struct cxgb4_link
*link
= &t
->table
[i
];
434 kvfree(link
->tid_map
);
436 kvfree(adap
->tc_u32
);
439 struct cxgb4_tc_u32_table
*cxgb4_init_tc_u32(struct adapter
*adap
)
441 unsigned int max_tids
= adap
->tids
.nftids
;
442 struct cxgb4_tc_u32_table
*t
;
448 t
= kvzalloc(sizeof(*t
) +
449 (max_tids
* sizeof(struct cxgb4_link
)), GFP_KERNEL
);
455 for (i
= 0; i
< t
->size
; i
++) {
456 struct cxgb4_link
*link
= &t
->table
[i
];
457 unsigned int bmap_size
;
459 bmap_size
= BITS_TO_LONGS(max_tids
);
460 link
->tid_map
= kvzalloc(sizeof(unsigned long) * bmap_size
, GFP_KERNEL
);
463 bitmap_zero(link
->tid_map
, max_tids
);
469 for (i
= 0; i
< t
->size
; i
++) {
470 struct cxgb4_link
*link
= &t
->table
[i
];
473 kvfree(link
->tid_map
);