// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>
static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;
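
/* Datapath handler: applies the configured edits (priority, queue
 * mapping, mark, packet type) to each skb. It runs in the softirq
 * path, so the parameter block is read under RCU rather than the
 * action spinlock.
 */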
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
			   struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	int action;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	params = rcu_dereference_bh(d->params);
	action = READ_ONCE(d->tcf_action);

	if (params->flags & SKBEDIT_F_PRIORITY)
		skb->priority = params->priority;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
			break;
		}
	}
	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > params->queue_mapping)
		skb_set_queue_mapping(skb, params->queue_mapping);
	if (params->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~params->mask;
		skb->mark |= params->mark & params->mask;
	}
	if (params->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = params->ptype;
	return action;

err:
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}
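
/* Called by drivers that offload this action to hardware, to fold the
 * hardware-reported byte/packet/drop counters back into the software
 * stats and refresh the last-use timestamp.
 */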
static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
				     u64 packets, u64 drops,
				     u64 lastuse, bool hw)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_t *tm = &d->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}
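
/* Netlink policy: expected payload size for each TCA_SKBEDIT_*
 * attribute accepted from userspace.
 */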
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
};
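
/* Control-path setup: parses the TCA_SKBEDIT_* attributes, creates or
 * looks up the action instance, and publishes a new parameter block.
 *
 * For illustration only (exact syntax depends on the iproute2
 * version), a command along these lines reaches this path:
 *
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *      match ip dport 80 0xffff action skbedit queue_mapping 3
 */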
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    int ovr, int bind, bool rtnl_held,
			    struct tcf_proto *tp, u32 act_flags,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
	struct tcf_skbedit_params *params_new;
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
					  skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbedit_ops, bind, true, 0);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	params_new->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		params_new->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		params_new->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		params_new->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		params_new->ptype = *ptype;
	/* default behaviour is to use all the bits */
	params_new->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		params_new->mask = *mask;
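
	/* Swap in the new parameter block under the action lock; datapath
	 * readers see either the old or the new block via RCU, and the old
	 * one is freed only after a grace period.
	 */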
	spin_lock_bh(&d->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(d->params, params_new,
					 lockdep_is_held(&d->tcf_lock));
	spin_unlock_bh(&d->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}
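
/* Dumps the action configuration back to userspace as netlink
 * attributes, mirroring what tcf_skbedit_init() accepts.
 */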
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
	};
	u64 pure_flags = 0;
	struct tcf_t t;

	spin_lock_bh(&d->tcf_lock);
	params = rcu_dereference_protected(d->params,
					   lockdep_is_held(&d->tcf_lock));
	opt.action = d->tcf_action;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
		goto nla_put_failure;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (pure_flags != 0 &&
	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&d->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
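
/* Teardown: release the parameter block once RCU readers are done. */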
static void tcf_skbedit_cleanup(struct tc_action *a)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;

	params = rcu_dereference_protected(d->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}
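
/* The walker and lookup callbacks below simply delegate to the generic
 * per-netns action table helpers.
 */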
static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      const struct tc_action_ops *ops,
			      struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}
static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_idr_search(tn, a, index);
}
static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_skbedit))
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
		+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}
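
/* Callback table tying this action into the tc action framework. */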
static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.id		=	TCA_ID_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit_act,
	.stats_update	=	tcf_skbedit_stats_update,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.cleanup	=	tcf_skbedit_cleanup,
	.walk		=	tcf_skbedit_walker,
	.get_fill_size	=	tcf_skbedit_get_fill_size,
	.lookup		=	tcf_skbedit_search,
	.size		=	sizeof(struct tcf_skbedit),
};
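
/* Per-network-namespace registration of the action table. */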
static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(net, tn, &act_skbedit_ops);
}
static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, skbedit_net_id);
}
static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};
MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");
static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}
static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}
module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);