/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

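/*
 * tcf_skbedit() - per-packet action handler.
 *
 * Applies whichever edits were configured (priority, queue mapping,
 * mark and mask, packet type) to the skb under the action lock, then
 * returns the configured control verdict.
 */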
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);

	spin_lock(&d->tcf_lock);
	tcf_lastuse_update(&d->tcf_tm);
	bstats_update(&d->tcf_bstats, skb);

	if (d->flags & SKBEDIT_F_PRIORITY)
		skb->priority = d->priority;
	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > d->queue_mapping)
		skb_set_queue_mapping(skb, d->queue_mapping);
	if (d->flags & SKBEDIT_F_MARK) {
		skb->mark &= ~d->mask;
		skb->mark |= d->mark & d->mask;
	}
	if (d->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = d->ptype;

	spin_unlock(&d->tcf_lock);
	return d->tcf_action;
}

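/* Netlink attribute policy: every skbedit attribute carries a fixed-size value. */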
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
};

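/*
 * tcf_skbedit_init() - parse the netlink attributes and create a new
 * skbedit action instance or update an existing one.
 */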
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	bool exists = false;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(*ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(tb[TCA_SKBEDIT_MASK]);
	}

	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);

	exists = tcf_idr_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (!flags) {
		if (exists)
			tcf_idr_release(*a, bind);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_skbedit_ops, bind, false);
		if (ret)
			return ret;

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		tcf_idr_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	spin_lock_bh(&d->tcf_lock);

	d->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		d->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING)
		d->queue_mapping = *queue_mapping;
	if (flags & SKBEDIT_F_MARK)
		d->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		d->ptype = *ptype;
	/* default behaviour is to use all the bits */
	d->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		d->mask = *mask;

	d->tcf_action = parm->action;

	spin_unlock_bh(&d->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
}

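/* Dump the current configuration of the action back to user space via netlink. */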
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbedit *d = to_skbedit(a);
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = d->tcf_refcnt - ref,
		.bindcnt = d->tcf_bindcnt - bind,
		.action  = d->tcf_action,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
		goto nla_put_failure;
	if ((d->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

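/* Hooks into the generic per-netns action table: dump all instances, look one up by index. */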
static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
			      struct netlink_callback *cb, int type,
			      const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit",
	.type		=	TCA_ACT_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.walk		=	tcf_skbedit_walker,
	.lookup		=	tcf_skbedit_search,
	.size		=	sizeof(struct tcf_skbedit),
};

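/* Per-network-namespace setup and teardown of the action table used above. */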
static __net_init int skbedit_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbedit_net_id);

	return tc_action_net_init(tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, skbedit_net_id);
}

static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &skbedit_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
	return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);