// SPDX-License-Identifier: GPL-2.0+
/* net/sched/act_ctinfo.c  netfilter ctinfo connmark actions
 *
 * Copyright (c) 2019 Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
 */

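/* The ctinfo action reads a connmark previously stored by conntrack and
 * either restores a DSCP value into the IP header or copies the connmark
 * into skb->mark. As an illustrative sketch only (the device name, priority
 * and mask values are arbitrary examples, not anything this module mandates),
 * a DSCP-restoring instance might be attached on ingress roughly like:
 *
 *   tc filter add dev eth0 parent ffff: protocol all prio 10 \
 *      u32 match u32 0 0 flowid 1:1 \
 *      action ctinfo dscp 0xfc000000 0x01000000
 *
 * where the upper six bits of the connmark carry the DSCP and bit
 * 0x01000000 acts as a "DSCP valid" statemask, consistent with the
 * validation rules enforced in tcf_ctinfo_init() below.
 */
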
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <uapi/linux/tc_act/tc_ctinfo.h>
#include <net/tc_act/tc_ctinfo.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct tc_action_ops act_ctinfo_ops;
static unsigned int ctinfo_net_id;

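/* Write the DSCP bits recovered from the conntrack mark into the IPv4/IPv6
 * dsfield. The six DSCP bits are extracted from ct->mark using cp->dscpmask,
 * shifted down to bit zero and then left by two so they sit above the ECN
 * field, which is deliberately left untouched.
 */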
static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
                                struct tcf_ctinfo_params *cp,
                                struct sk_buff *skb, int wlen, int proto)
{
        u8 dscp, newdscp;

        newdscp = (((ct->mark & cp->dscpmask) >> cp->dscpmaskshift) << 2) &
                  ~INET_ECN_MASK;

        switch (proto) {
        case NFPROTO_IPV4:
                dscp = ipv4_get_dsfield(ip_hdr(skb)) & ~INET_ECN_MASK;
                if (dscp != newdscp) {
                        if (likely(!skb_try_make_writable(skb, wlen))) {
                                ipv4_change_dsfield(ip_hdr(skb),
                                                    INET_ECN_MASK,
                                                    newdscp);
                                ca->stats_dscp_set++;
                        } else {
                                ca->stats_dscp_error++;
                        }
                }
                break;
        case NFPROTO_IPV6:
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK;
                if (dscp != newdscp) {
                        if (likely(!skb_try_make_writable(skb, wlen))) {
                                ipv6_change_dsfield(ipv6_hdr(skb),
                                                    INET_ECN_MASK,
                                                    newdscp);
                                ca->stats_dscp_set++;
                        } else {
                                ca->stats_dscp_error++;
                        }
                }
                break;
        default:
                break;
        }
}

static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
                                  struct tcf_ctinfo_params *cp,
                                  struct sk_buff *skb)
{
        ca->stats_cpmark_set++;
        skb->mark = ct->mark & cp->cpmarkmask;
}

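/* Per-packet entry point. Finds the conntrack entry for the skb (falling
 * back to a tuple lookup for early ingress packets that have not been
 * through conntrack yet) and then applies whichever of the DSCP-restore
 * and cpmark-copy modes were configured.
 */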
static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
{
        const struct nf_conntrack_tuple_hash *thash = NULL;
        struct tcf_ctinfo *ca = to_ctinfo(a);
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_zone zone;
        enum ip_conntrack_info ctinfo;
        struct tcf_ctinfo_params *cp;
        struct nf_conn *ct;
        int proto, wlen;
        int action;

        cp = rcu_dereference_bh(ca->params);

        tcf_lastuse_update(&ca->tcf_tm);
        bstats_update(&ca->tcf_bstats, skb);
        action = READ_ONCE(ca->tcf_action);

        wlen = skb_network_offset(skb);
        switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                wlen += sizeof(struct iphdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;

                proto = NFPROTO_IPV4;
                break;
        case htons(ETH_P_IPV6):
                wlen += sizeof(struct ipv6hdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;

                proto = NFPROTO_IPV6;
                break;
        default:
                goto out;
        }

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct) { /* look harder, usually ingress */
                if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                                       proto, cp->net, &tuple))
                        goto out;
                zone.id = cp->zone;
                zone.dir = NF_CT_DEFAULT_ZONE_DIR;

                thash = nf_conntrack_find_get(cp->net, &zone, &tuple);
                if (!thash)
                        goto out;

                ct = nf_ct_tuplehash_to_ctrack(thash);
        }

        if (cp->mode & CTINFO_MODE_DSCP)
                if (!cp->dscpstatemask || (ct->mark & cp->dscpstatemask))
                        tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);

        if (cp->mode & CTINFO_MODE_CPMARK)
                tcf_ctinfo_cpmark_set(ct, ca, cp, skb);

        if (thash)
                nf_ct_put(ct);
out:
        return action;
}

static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
        [TCA_CTINFO_ACT] =
                NLA_POLICY_EXACT_LEN(sizeof(struct tc_ctinfo)),
        [TCA_CTINFO_ZONE] = { .type = NLA_U16 },
        [TCA_CTINFO_PARMS_DSCP_MASK] = { .type = NLA_U32 },
        [TCA_CTINFO_PARMS_DSCP_STATEMASK] = { .type = NLA_U32 },
        [TCA_CTINFO_PARMS_CPMARK_MASK] = { .type = NLA_U32 },
};

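/* Netlink setup path: validates the DSCP mask (it must be six contiguous
 * bits and must not overlap the optional statemask) before anything is
 * allocated, then creates or updates the action instance and swaps the new
 * parameter block in under tcf_lock via RCU.
 */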
static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
                           struct tcf_proto *tp, u32 flags,
                           struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
        u32 dscpmask = 0, dscpstatemask, index;
        struct nlattr *tb[TCA_CTINFO_MAX + 1];
        struct tcf_ctinfo_params *cp_new;
        struct tcf_chain *goto_ch = NULL;
        struct tc_ctinfo *actparm;
        struct tcf_ctinfo *ci;
        u8 dscpmaskshift;
        int ret = 0, err;

        if (!nla) {
                NL_SET_ERR_MSG_MOD(extack, "ctinfo requires attributes to be passed");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_CTINFO_MAX, nla, ctinfo_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_CTINFO_ACT]) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Missing required TCA_CTINFO_ACT attribute");
                return -EINVAL;
        }
        actparm = nla_data(tb[TCA_CTINFO_ACT]);

        /* do some basic validation here before dynamically allocating things
         * that we would otherwise have to clean up.
         */
        if (tb[TCA_CTINFO_PARMS_DSCP_MASK]) {
                dscpmask = nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_MASK]);
                /* need contiguous 6 bit mask */
                dscpmaskshift = dscpmask ? __ffs(dscpmask) : 0;
                if ((~0 & (dscpmask >> dscpmaskshift)) != 0x3f) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_CTINFO_PARMS_DSCP_MASK],
                                            "dscp mask must be 6 contiguous bits");
                        return -EINVAL;
                }
                dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
                        nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
                /* mask & statemask must not overlap */
                if (dscpmask & dscpstatemask) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
                                            "dscp statemask must not overlap dscp mask");
                        return -EINVAL;
                }
        }

        /* done the validation: now to the actual action allocation */
        index = actparm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (!err) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_ctinfo_ops, bind, false, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }
                ret = ACT_P_CREATED;
        } else if (err > 0) {
                if (bind) /* don't override defaults */
                        return 0;
                if (!ovr) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        } else {
                return err;
        }

        err = tcf_action_check_ctrlact(actparm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        ci = to_ctinfo(*a);

        cp_new = kzalloc(sizeof(*cp_new), GFP_KERNEL);
        if (unlikely(!cp_new)) {
                err = -ENOMEM;
                goto put_chain;
        }

        cp_new->net = net;
        cp_new->zone = tb[TCA_CTINFO_ZONE] ?
                        nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
        if (dscpmask) {
                cp_new->dscpmask = dscpmask;
                cp_new->dscpmaskshift = dscpmaskshift;
                cp_new->dscpstatemask = dscpstatemask;
                cp_new->mode |= CTINFO_MODE_DSCP;
        }

        if (tb[TCA_CTINFO_PARMS_CPMARK_MASK]) {
                cp_new->cpmarkmask =
                        nla_get_u32(tb[TCA_CTINFO_PARMS_CPMARK_MASK]);
                cp_new->mode |= CTINFO_MODE_CPMARK;
        }

        spin_lock_bh(&ci->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, actparm->action, goto_ch);
        cp_new = rcu_replace_pointer(ci->params, cp_new,
                                     lockdep_is_held(&ci->tcf_lock));
        spin_unlock_bh(&ci->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (cp_new)
                kfree_rcu(cp_new, rcu);

        return ret;

put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

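/* Dump the current configuration, timestamps and the DSCP/cpmark hit
 * counters back to userspace, all under tcf_lock.
 */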
static int tcf_ctinfo_dump(struct sk_buff *skb, struct tc_action *a,
                           int bind, int ref)
{
        struct tcf_ctinfo *ci = to_ctinfo(a);
        struct tc_ctinfo opt = {
                .index = ci->tcf_index,
                .refcnt = refcount_read(&ci->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&ci->tcf_bindcnt) - bind,
        };
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_ctinfo_params *cp;
        struct tcf_t t;

        spin_lock_bh(&ci->tcf_lock);
        cp = rcu_dereference_protected(ci->params,
                                       lockdep_is_held(&ci->tcf_lock));

        tcf_tm_dump(&t, &ci->tcf_tm);
        if (nla_put_64bit(skb, TCA_CTINFO_TM, sizeof(t), &t, TCA_CTINFO_PAD))
                goto nla_put_failure;

        opt.action = ci->tcf_action;
        if (nla_put(skb, TCA_CTINFO_ACT, sizeof(opt), &opt))
                goto nla_put_failure;

        if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
                goto nla_put_failure;

        if (cp->mode & CTINFO_MODE_DSCP) {
                if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_MASK,
                                cp->dscpmask))
                        goto nla_put_failure;
                if (nla_put_u32(skb, TCA_CTINFO_PARMS_DSCP_STATEMASK,
                                cp->dscpstatemask))
                        goto nla_put_failure;
        }

        if (cp->mode & CTINFO_MODE_CPMARK) {
                if (nla_put_u32(skb, TCA_CTINFO_PARMS_CPMARK_MASK,
                                cp->cpmarkmask))
                        goto nla_put_failure;
        }

        if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_SET,
                              ci->stats_dscp_set, TCA_CTINFO_PAD))
                goto nla_put_failure;

        if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_DSCP_ERROR,
                              ci->stats_dscp_error, TCA_CTINFO_PAD))
                goto nla_put_failure;

        if (nla_put_u64_64bit(skb, TCA_CTINFO_STATS_CPMARK_SET,
                              ci->stats_cpmark_set, TCA_CTINFO_PAD))
                goto nla_put_failure;

        spin_unlock_bh(&ci->tcf_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&ci->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

static int tcf_ctinfo_walker(struct net *net, struct sk_buff *skb,
                             struct netlink_callback *cb, int type,
                             const struct tc_action_ops *ops,
                             struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, ctinfo_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ctinfo_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, ctinfo_net_id);

        return tcf_idr_search(tn, a, index);
}

static void tcf_ctinfo_cleanup(struct tc_action *a)
{
        struct tcf_ctinfo *ci = to_ctinfo(a);
        struct tcf_ctinfo_params *cp;

        cp = rcu_dereference_protected(ci->params, 1);
        if (cp)
                kfree_rcu(cp, rcu);
}

static struct tc_action_ops act_ctinfo_ops = {
        .kind    = "ctinfo",
        .id      = TCA_ID_CTINFO,
        .owner   = THIS_MODULE,
        .act     = tcf_ctinfo_act,
        .dump    = tcf_ctinfo_dump,
        .init    = tcf_ctinfo_init,
        .cleanup = tcf_ctinfo_cleanup,
        .walk    = tcf_ctinfo_walker,
        .lookup  = tcf_ctinfo_search,
        .size    = sizeof(struct tcf_ctinfo),
};

static __net_init int ctinfo_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, ctinfo_net_id);

        return tc_action_net_init(net, tn, &act_ctinfo_ops);
}

static void __net_exit ctinfo_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, ctinfo_net_id);
}

static struct pernet_operations ctinfo_net_ops = {
        .init       = ctinfo_init_net,
        .exit_batch = ctinfo_exit_net,
        .id         = &ctinfo_net_id,
        .size       = sizeof(struct tc_action_net),
};

static int __init ctinfo_init_module(void)
{
        return tcf_register_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

static void __exit ctinfo_cleanup_module(void)
{
        tcf_unregister_action(&act_ctinfo_ops, &ctinfo_net_ops);
}

module_init(ctinfo_init_module);
module_exit(ctinfo_cleanup_module);
MODULE_AUTHOR("Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>");
MODULE_DESCRIPTION("Connection tracking mark actions");
MODULE_LICENSE("GPL");