// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_skbmod.c  skb data modifier
 *
 * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
 */

#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/inet_ecn.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

#include <linux/tc_act/tc_skbmod.h>
#include <net/tc_act/tc_skbmod.h>

static struct tc_action_ops act_skbmod_ops;
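
/* The skbmod action rewrites the Ethernet header of a matched packet: it can
 * overwrite the destination MAC, source MAC and ethertype, swap the two MAC
 * addresses in place, or mark IPv4/IPv6 packets with ECN CE.
 *
 * Illustrative usage from user space (syntax per tc-skbmod(8)):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 matchall \
 *      action skbmod set dmac 02:12:13:14:15:16
 */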

TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb,
				     const struct tc_action *a,
				     struct tcf_result *res)
{
	struct tcf_skbmod *d = to_skbmod(a);
	int action, max_edit_len, err;
	struct tcf_skbmod_params *p;
	u64 flags;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	action = READ_ONCE(d->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	max_edit_len = skb_mac_header_len(skb);
	p = rcu_dereference_bh(d->skbmod_p);
	flags = p->flags;

	/* tcf_skbmod_init() guarantees "flags" to be one of the following:
	 * 1. a combination of SKBMOD_F_{DMAC,SMAC,ETYPE}
	 * 2. SKBMOD_F_SWAPMAC
	 * 3. SKBMOD_F_ECN
	 * SKBMOD_F_ECN only works with IP packets; all other flags only work
	 * with Ethernet packets.
	 */
	if (flags == SKBMOD_F_ECN) {
		switch (skb_protocol(skb, true)) {
		case cpu_to_be16(ETH_P_IP):
		case cpu_to_be16(ETH_P_IPV6):
			max_edit_len += skb_network_header_len(skb);
			break;
		default:
			goto out;
		}
	} else if (!skb->dev || skb->dev->type != ARPHRD_ETHER) {
		goto out;
	}

	err = skb_ensure_writable(skb, max_edit_len);
	if (unlikely(err)) /* best policy is to drop on the floor */
		goto drop;

	if (flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	if (flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
	if (flags & SKBMOD_F_ETYPE)
		eth_hdr(skb)->h_proto = p->eth_type;

	if (flags & SKBMOD_F_SWAPMAC) {
		u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
		/*XXX: I am sure we can come up with more efficient swapping*/
		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
	}

	if (flags & SKBMOD_F_ECN)
		INET_ECN_set_ce(skb);

out:
	return action;

drop:
	qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}
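
/* Netlink policy for the TCA_SKBMOD_* attributes accepted by the control
 * path: PARMS carries struct tc_skbmod, DMAC/SMAC carry raw 6-byte MAC
 * addresses, and ETYPE carries the replacement ethertype in host byte order.
 */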

static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
	[TCA_SKBMOD_PARMS]	= { .len = sizeof(struct tc_skbmod) },
	[TCA_SKBMOD_DMAC]	= { .len = ETH_ALEN },
	[TCA_SKBMOD_SMAC]	= { .len = ETH_ALEN },
	[TCA_SKBMOD_ETYPE]	= { .type = NLA_U16 },
};
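
/* Control path: parse the netlink attributes, build a fresh parameter block
 * and publish it with rcu_assign_pointer() so that tcf_skbmod_act() can read
 * it locklessly under the RCU BH read side.
 */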

static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id);
	bool ovr = flags & TCA_ACT_FLAGS_REPLACE;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbmod *parm;
	u32 lflags = 0, index;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla,
					  skbmod_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	index = parm->index;
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;
	if (parm->flags & SKBMOD_F_ECN)
		lflags = SKBMOD_F_ECN;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	if (!lflags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbmod_ops, bind, true, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	d = to_skbmod(*a);

	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->flags = lflags;

	if (ovr)
		spin_lock_bh(&d->tcf_lock);
	/* Protected by tcf_lock if overwriting existing action. */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p_old = rcu_dereference_protected(d->skbmod_p, 1);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	if (p_old)
		kfree_rcu(p_old, rcu);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_skbmod_cleanup(struct tc_action *a)
{
	struct tcf_skbmod *d = to_skbmod(a);
	struct tcf_skbmod_params *p;

	p = rcu_dereference_protected(d->skbmod_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}
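
/* Dump the installed configuration back to user space; tcf_lock keeps the
 * dumped action, flags and addresses consistent with each other.
 */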

static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	struct tcf_skbmod *d = to_skbmod(a);
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbmod_params *p;
	struct tc_skbmod opt;
	struct tcf_t t;

	memset(&opt, 0, sizeof(opt));
	opt.index   = d->tcf_index;
	opt.refcnt  = refcount_read(&d->tcf_refcnt) - ref;
	opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;
	spin_lock_bh(&d->tcf_lock);
	opt.action = d->tcf_action;
	p = rcu_dereference_protected(d->skbmod_p,
				      lockdep_is_held(&d->tcf_lock));
	opt.flags  = p->flags;
	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_DMAC) &&
	    nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_SMAC) &&
	    nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_ETYPE) &&
	    nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&d->tcf_lock);
	return skb->len;
nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_skbmod_ops = {
	.kind		=	"skbmod",
	.id		=	TCA_ACT_SKBMOD,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbmod_act,
	.dump		=	tcf_skbmod_dump,
	.init		=	tcf_skbmod_init,
	.cleanup	=	tcf_skbmod_cleanup,
	.size		=	sizeof(struct tcf_skbmod),
};
MODULE_ALIAS_NET_ACT("skbmod");

static __net_init int skbmod_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id);

	return tc_action_net_init(net, tn, &act_skbmod_ops);
}

static void __net_exit skbmod_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_skbmod_ops.net_id);
}
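
/* One tc_action_net instance exists per network namespace; the ->net_id slot
 * in act_skbmod_ops is filled in when the pernet ops are registered, via the
 * .id pointer below.
 */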

static struct pernet_operations skbmod_net_ops = {
	.init = skbmod_init_net,
	.exit_batch = skbmod_exit_net,
	.id   = &act_skbmod_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
MODULE_DESCRIPTION("SKB data mod-ing");
MODULE_LICENSE("GPL");

static int __init skbmod_init_module(void)
{
	return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
}

static void __exit skbmod_cleanup_module(void)
{
	tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
}

module_init(skbmod_init_module);
module_exit(skbmod_cleanup_module);