/*
 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_vlan.h>
#include <net/tc_act/tc_vlan.h>

static unsigned int vlan_net_id;
static struct tc_action_ops act_vlan_ops;
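
/* Datapath handler: invoked per packet from the RCU-protected software
 * fast path.  It pops, pushes, or rewrites the skb's VLAN tag according
 * to the parameter block published in v->vlan_p.
 */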
static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	int action;
	int err;
	u16 tci;

	tcf_lastuse_update(&v->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(v->common.cpu_bstats), skb);

	/* Ensure 'data' points at mac_header prior calling vlan manipulating
	 * functions.
	 */
	if (skb_at_tc_ingress(skb))
		skb_push_rcsum(skb, skb->mac_len);

	action = READ_ONCE(v->tcf_action);

	p = rcu_dereference_bh(v->vlan_p);

	switch (p->tcfv_action) {
	case TCA_VLAN_ACT_POP:
		err = skb_vlan_pop(skb);
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_PUSH:
		err = skb_vlan_push(skb, p->tcfv_push_proto, p->tcfv_push_vid |
				    (p->tcfv_push_prio << VLAN_PRIO_SHIFT));
		if (err)
			goto drop;
		break;
	case TCA_VLAN_ACT_MODIFY:
		/* No-op if no vlan tag (either hw-accel or in-payload) */
		if (!skb_vlan_tagged(skb))
			goto out;
		/* extract existing tag (and guarantee no hw-accel tag) */
		if (skb_vlan_tag_present(skb)) {
			tci = skb_vlan_tag_get(skb);
			__vlan_hwaccel_clear_tag(skb);
		} else {
			/* in-payload vlan tag, pop it */
			err = __skb_vlan_pop(skb, &tci);
			if (err)
				goto drop;
		}
		/* replace the vid */
		tci = (tci & ~VLAN_VID_MASK) | p->tcfv_push_vid;
		/* replace prio bits, if tcfv_push_prio specified */
		if (p->tcfv_push_prio) {
			tci &= ~VLAN_PRIO_MASK;
			tci |= p->tcfv_push_prio << VLAN_PRIO_SHIFT;
		}
		/* put updated tci as hwaccel tag */
		__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
		break;
	default:
		BUG();
	}

out:
	if (skb_at_tc_ingress(skb))
		skb_pull_rcsum(skb, skb->mac_len);

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(v->common.cpu_qstats));
	return TC_ACT_SHOT;
}
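
/* Netlink policy for the TCA_VLAN_* configuration attributes: the
 * tc_vlan parameter block plus the optional push VLAN id, protocol,
 * and priority.
 */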
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
	[TCA_VLAN_PARMS]		= { .len = sizeof(struct tc_vlan) },
	[TCA_VLAN_PUSH_VLAN_ID]		= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[TCA_VLAN_PUSH_VLAN_PRIORITY]	= { .type = NLA_U8 },
};
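
/* Control path: parse and validate the netlink attributes, then create
 * or update the action instance and publish a new parameter block via
 * RCU.  Illustrative iproute2 usage (not part of this file):
 *
 *   tc filter add dev eth0 ingress ... \
 *	action vlan push id 5 protocol 802.1q priority 3
 *   tc filter add dev eth0 ingress ... action vlan pop
 */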
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 int ovr, int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tcf_vlan_params *p;
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	u16 push_vid = 0;
	__be16 push_proto = 0;
	u8 push_prio = 0;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_VLAN_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
	case TCA_VLAN_ACT_MODIFY:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_idr_release(*a, bind);
			else
				tcf_idr_cleanup(tn, index);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				if (exists)
					tcf_idr_release(*a, bind);
				else
					tcf_idr_cleanup(tn, index);
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_vlan_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	v = to_vlan(*a);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}

	p->tcfv_action = action;
	p->tcfv_push_vid = push_vid;
	p->tcfv_push_prio = push_prio;
	p->tcfv_push_proto = push_proto;

	spin_lock_bh(&v->tcf_lock);
	v->tcf_action = parm->action;
	rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
	spin_unlock_bh(&v->tcf_lock);

	if (p)
		kfree_rcu(p, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
}
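
/* Called when the action instance is destroyed; release the parameter
 * block after an RCU grace period.
 */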
static void tcf_vlan_cleanup(struct tc_action *a)
{
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;

	p = rcu_dereference_protected(v->vlan_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}
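
/* Dump the current configuration back to user space as TCA_VLAN_*
 * attributes, under the action lock so the tc_vlan header and the
 * parameter block stay consistent.
 */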
static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_vlan *v = to_vlan(a);
	struct tcf_vlan_params *p;
	struct tc_vlan opt = {
		.index    = v->tcf_index,
		.refcnt   = refcount_read(&v->tcf_refcnt) - ref,
		.bindcnt  = atomic_read(&v->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&v->tcf_lock);
	opt.action = v->tcf_action;
	p = rcu_dereference_protected(v->vlan_p, lockdep_is_held(&v->tcf_lock));
	opt.v_action = p->tcfv_action;
	if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((p->tcfv_action == TCA_VLAN_ACT_PUSH ||
	     p->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, p->tcfv_push_vid) ||
	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
			  p->tcfv_push_proto) ||
	     nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
			p->tcfv_push_prio)))
		goto nla_put_failure;

	tcf_tm_dump(&t, &v->tcf_tm);
	if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&v->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&v->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
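
/* The walker and lookup callbacks simply delegate to the generic
 * per-netns action index (IDR) helpers.
 */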
static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_vlan))
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
		+ nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
		+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}

static struct tc_action_ops act_vlan_ops = {
	.kind		=	"vlan",
	.type		=	TCA_ACT_VLAN,
	.owner		=	THIS_MODULE,
	.act		=	tcf_vlan_act,
	.dump		=	tcf_vlan_dump,
	.init		=	tcf_vlan_init,
	.cleanup	=	tcf_vlan_cleanup,
	.walk		=	tcf_vlan_walker,
	.get_fill_size	=	tcf_vlan_get_fill_size,
	.lookup		=	tcf_vlan_search,
	.size		=	sizeof(struct tcf_vlan),
};

static __net_init int vlan_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);

	return tc_action_net_init(net, tn, &act_vlan_ops);
}

static void __net_exit vlan_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, vlan_net_id);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit_batch = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init vlan_init_module(void)
{
	return tcf_register_action(&act_vlan_ops, &vlan_net_ops);
}

static void __exit vlan_cleanup_module(void)
{
	tcf_unregister_action(&act_vlan_ops, &vlan_net_ops);
}

module_init(vlan_init_module);
module_exit(vlan_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("vlan manipulation actions");
MODULE_LICENSE("GPL v2");