/*
 * net/sched/em_meta.c	Metadata ematch
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 *	The metadata ematch compares two meta objects where each object
 *	represents either a meta value stored in the kernel or a static
 *	value provided by userspace. The objects are not provided by
 *	userspace itself but rather a definition providing the information
 *	to build them. Every object is of a certain type which must be
 *	equal to the object it is being compared to.
 *
 *	The definition of an object consists of the type (meta type), an
 *	identifier (meta id) and additional type specific information.
 *	The meta id is either TCF_META_ID_VALUE for values provided by
 *	userspace or an index into the meta operations table consisting of
 *	function pointers to type specific meta data collectors returning
 *	the value of the requested meta value.
 *
 *	         lvalue                                   rvalue
 *	      +-----------+                           +-----------+
 *	      | type: INT |                           | type: INT |
 *	 def  | id: DEV   |                           | id: VALUE |
 *	      | data:     |                           | data: 3   |
 *	      +-----------+                           +-----------+
 *	            |                                       |
 *	            ---> meta_ops[INT][DEV](...)            |
 *	                      |                             |
 *	            -----------                             |
 *	            V                                       V
 *	      +-----------+                           +-----------+
 *	      | type: INT |                           | type: INT |
 *	 obj  | id: DEV   |                           | id: VALUE |
 *	      | data: 2   |<--data got filled out     | data: 3   |
 *	      +-----------+                           +-----------+
 *	            |                                       |
 *	            --------------> 2  equals  3 <--------------
 *
 *	This is a simplified schema, the complexity varies depending
 *	on the meta type. Obviously, the length of the data must also
 *	be provided for non-numeric types.
 *
 *	Additionally, type dependent modifiers such as shift operators
 *	or masks may be applied to extend the functionality. As of now,
 *	the variable length type supports shifting the byte string to
 *	the right, eating up any number of octets and thus supporting
 *	wildcard interface name comparisons such as "ppp%" matching
 *	ppp0..9.
 *
 * NOTE: Certain meta values depend on other subsystems and are
 *	 only available if that subsystem is enabled in the kernel.
 */
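
/* Illustrative usage only (exact keywords belong to the tc userspace ematch
 * syntax, not to this file): a basic classifier could attach this ematch
 * with something like
 *
 *	tc filter add dev eth0 parent 1: basic \
 *		match 'meta(priority eq 6)' classid 1:6
 *
 * where the left object is the kernel collected skb->priority (meta id
 * PRIORITY, type INT) and the right object is the static value 6
 * (meta id VALUE).
 */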

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

struct meta_obj {
	unsigned long		value;
	unsigned int		len;
};

struct meta_value {
	struct tcf_meta_val	hdr;
	unsigned long		val;
	unsigned int		len;
};

struct meta_match {
	struct meta_value	lvalue;
	struct meta_value	rvalue;
};

static inline int meta_id(struct meta_value *v)
{
	return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
	return TCF_META_TYPE(v->hdr.kind);
}

#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
	struct tcf_pkt_info *info, struct meta_value *v, \
	struct meta_obj *dst, int *err)
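
/* For reference, a collector declared below such as META_COLLECTOR(int_priority)
 * expands to the following signature (expansion shown purely for illustration):
 *
 *	static void meta_int_priority(struct sk_buff *skb,
 *				      struct tcf_pkt_info *info,
 *				      struct meta_value *v,
 *				      struct meta_obj *dst, int *err)
 *
 * Each collector fills dst->value (and dst->len for variable length types)
 * or sets *err on failure.
 */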

/**************************************************************************
 * System status & misc
 **************************************************************************/

META_COLLECTOR(int_random)
{
	get_random_bytes(&dst->value, sizeof(dst->value));
}

static inline unsigned long fixed_loadavg(int load)
{
	int rnd_load = load + (FIXED_1/200);
	int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

	return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}

META_COLLECTOR(int_loadavg_0)
{
	dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
	dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
	dst->value = fixed_loadavg(avenrun[2]);
}

/**************************************************************************
 * Device names & indices
 **************************************************************************/

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = dev->ifindex;
	return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = (unsigned long) dev->name;
	dst->len = strlen(dev->name);
	return 0;
}

META_COLLECTOR(int_dev)
{
	*err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
	*err = var_dev(skb->dev, dst);
}

/**************************************************************************
 * vlan tag
 **************************************************************************/

META_COLLECTOR(int_vlan_tag)
{
	unsigned short tag;

	tag = vlan_tx_tag_get(skb);
	if (!tag && __vlan_get_tag(skb, &tag))
		*err = -1;
	else
		dst->value = tag;
}

/**************************************************************************
 * skb attributes
 **************************************************************************/

META_COLLECTOR(int_priority)
{
	dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
	/* Let userspace take care of the byte ordering */
	dst->value = skb->protocol;
}

META_COLLECTOR(int_pkttype)
{
	dst->value = skb->pkt_type;
}

META_COLLECTOR(int_pktlen)
{
	dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
	dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
	dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
	dst->value = skb_get_hash(skb);
}

/**************************************************************************
 * Netfilter
 **************************************************************************/

META_COLLECTOR(int_mark)
{
	dst->value = skb->mark;
}

/**************************************************************************
 * Traffic Control
 **************************************************************************/

META_COLLECTOR(int_tcindex)
{
	dst->value = skb->tc_index;
}

/**************************************************************************
 * Routing
 **************************************************************************/

META_COLLECTOR(int_rtclassid)
{
	if (unlikely(skb_dst(skb) == NULL))
		*err = -1;
	else
#ifdef CONFIG_IP_ROUTE_CLASSID
		dst->value = skb_dst(skb)->tclassid;
#else
		dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
	if (unlikely(skb_rtable(skb) == NULL))
		*err = -1;
	else
		dst->value = inet_iif(skb);
}

/**************************************************************************
 * Socket Attributes
 **************************************************************************/

#define skip_nonlocal(skb) \
	(unlikely(skb->sk == NULL))
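
/* Socket based meta values are only meaningful for packets with a socket
 * attached (locally originated or terminated traffic); skip_nonlocal()
 * guards every collector below so that other packets simply fail the
 * collection with *err = -1.
 */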

META_COLLECTOR(int_sk_family)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	/* No error if bound_dev_if is 0, legal userspace check */
	dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}

	if (skb->sk->sk_bound_dev_if == 0) {
		dst->value = (unsigned long) "any";
		dst->len = 3;
	} else {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(skb->sk),
					   skb->sk->sk_bound_dev_if);
		*err = var_dev(dev, dst);
		rcu_read_unlock();
	}
}

META_COLLECTOR(int_sk_refcnt)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = atomic_read(&skb->sk->sk_refcnt);
}

META_COLLECTOR(int_sk_rcvbuf)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = sk_rmem_alloc_get(skb->sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = sk_wmem_alloc_get(skb->sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = atomic_read(&skb->sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_wmem_queued;
}

META_COLLECTOR(int_sk_fwd_alloc)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_forward_alloc;
}

META_COLLECTOR(int_sk_sndbuf)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = (__force int) skb->sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_lingertime / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_ack_backlog;
}

META_COLLECTOR(int_sk_max_ack_bl)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_max_ack_backlog;
}

META_COLLECTOR(int_sk_prio)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_priority;
}

META_COLLECTOR(int_sk_rcvlowat)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_rcvlowat;
}

META_COLLECTOR(int_sk_rcvtimeo)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_rcvtimeo / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_sndtimeo / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_write_pending;
}

/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/

struct meta_ops {
	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
			       struct meta_value *, struct meta_obj *, int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

/* Meta value operations table listing all meta value collectors and
 * assigning them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		[META_ID(DEV)]			= META_FUNC(var_dev),
		[META_ID(SK_BOUND_IF)] 		= META_FUNC(var_sk_bound_if),
	},
	[TCF_META_TYPE_INT] = {
		[META_ID(RANDOM)]		= META_FUNC(int_random),
		[META_ID(LOADAVG_0)]		= META_FUNC(int_loadavg_0),
		[META_ID(LOADAVG_1)]		= META_FUNC(int_loadavg_1),
		[META_ID(LOADAVG_2)]		= META_FUNC(int_loadavg_2),
		[META_ID(DEV)]			= META_FUNC(int_dev),
		[META_ID(PRIORITY)]		= META_FUNC(int_priority),
		[META_ID(PROTOCOL)]		= META_FUNC(int_protocol),
		[META_ID(PKTTYPE)]		= META_FUNC(int_pkttype),
		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
		[META_ID(NFMARK)]		= META_FUNC(int_mark),
		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
		[META_ID(SK_FAMILY)]		= META_FUNC(int_sk_family),
		[META_ID(SK_STATE)]		= META_FUNC(int_sk_state),
		[META_ID(SK_REUSE)]		= META_FUNC(int_sk_reuse),
		[META_ID(SK_BOUND_IF)] 		= META_FUNC(int_sk_bound_if),
		[META_ID(SK_REFCNT)]		= META_FUNC(int_sk_refcnt),
		[META_ID(SK_RCVBUF)]		= META_FUNC(int_sk_rcvbuf),
		[META_ID(SK_SNDBUF)]		= META_FUNC(int_sk_sndbuf),
		[META_ID(SK_SHUTDOWN)]		= META_FUNC(int_sk_shutdown),
		[META_ID(SK_PROTO)]		= META_FUNC(int_sk_proto),
		[META_ID(SK_TYPE)]		= META_FUNC(int_sk_type),
		[META_ID(SK_RMEM_ALLOC)]	= META_FUNC(int_sk_rmem_alloc),
		[META_ID(SK_WMEM_ALLOC)]	= META_FUNC(int_sk_wmem_alloc),
		[META_ID(SK_OMEM_ALLOC)]	= META_FUNC(int_sk_omem_alloc),
		[META_ID(SK_WMEM_QUEUED)]	= META_FUNC(int_sk_wmem_queued),
		[META_ID(SK_RCV_QLEN)]		= META_FUNC(int_sk_rcv_qlen),
		[META_ID(SK_SND_QLEN)]		= META_FUNC(int_sk_snd_qlen),
		[META_ID(SK_ERR_QLEN)]		= META_FUNC(int_sk_err_qlen),
		[META_ID(SK_FORWARD_ALLOCS)]	= META_FUNC(int_sk_fwd_alloc),
		[META_ID(SK_ALLOCS)]		= META_FUNC(int_sk_alloc),
		[META_ID(SK_HASH)]		= META_FUNC(int_sk_hash),
		[META_ID(SK_LINGERTIME)]	= META_FUNC(int_sk_lingertime),
		[META_ID(SK_ACK_BACKLOG)]	= META_FUNC(int_sk_ack_bl),
		[META_ID(SK_MAX_ACK_BACKLOG)]	= META_FUNC(int_sk_max_ack_bl),
		[META_ID(SK_PRIO)]		= META_FUNC(int_sk_prio),
		[META_ID(SK_RCVLOWAT)]		= META_FUNC(int_sk_rcvlowat),
		[META_ID(SK_RCVTIMEO)]		= META_FUNC(int_sk_rcvtimeo),
		[META_ID(SK_SNDTIMEO)]		= META_FUNC(int_sk_sndtimeo),
		[META_ID(SK_SENDMSG_OFF)]	= META_FUNC(int_sk_sendmsg_off),
		[META_ID(SK_WRITE_PENDING)]	= META_FUNC(int_sk_write_pend),
		[META_ID(VLAN_TAG)]		= META_FUNC(int_vlan_tag),
		[META_ID(RXHASH)]		= META_FUNC(int_rxhash),
	}
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
	return &__meta_ops[meta_type(val)][meta_id(val)];
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
	int r = a->len - b->len;

	if (r == 0)
		r = memcmp((void *) a->value, (void *) b->value, a->len);

	return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
	int len = nla_len(nla);

	dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
	if (dst->val == 0UL)
		return -ENOMEM;
	dst->len = len;
	return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
	kfree((void *) v->val);
}

static void meta_var_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	int shift = v->hdr.shift;

	if (shift && shift < dst->len)
		dst->len -= shift;
}
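
/* Illustrative reading of the function above, not additional behaviour:
 * shortening the length is what implements the "ppp%" style wildcard from
 * the header comment. With a shift of 1, a collected device name such as
 * "ppp0" (length 4) is compared as a 3 octet string by meta_var_compare()
 * and therefore matches the static rvalue "ppp".
 */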

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->val && v->len &&
	    nla_put(skb, tlv, v->len, (void *) v->val))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
	/* Let gcc optimize it, the unlikely is not really based on
	 * some numbers but jump free code for mismatches seems
	 * more logical. */
	if (unlikely(a->value == b->value))
		return 0;
	else if (a->value < b->value)
		return -1;
	else
		return 1;
}

static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(unsigned long)) {
		dst->val = *(unsigned long *) nla_data(nla);
		dst->len = sizeof(unsigned long);
	} else if (nla_len(nla) == sizeof(u32)) {
		dst->val = nla_get_u32(nla);
		dst->len = sizeof(u32);
	} else
		return -EINVAL;

	return 0;
}

static void meta_int_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	if (v->hdr.shift)
		dst->value >>= v->hdr.shift;

	if (v->val)
		dst->value &= v->val;
}

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->len == sizeof(unsigned long)) {
		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
			goto nla_put_failure;
	} else if (v->len == sizeof(u32)) {
		if (nla_put_u32(skb, tlv, v->val))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations table
 **************************************************************************/

struct meta_type_ops {
	void	(*destroy)(struct meta_value *);
	int	(*compare)(struct meta_obj *, struct meta_obj *);
	int	(*change)(struct meta_value *, struct nlattr *);
	void	(*apply_extras)(struct meta_value *, struct meta_obj *);
	int	(*dump)(struct sk_buff *, struct meta_value *, int);
};

static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		.destroy = meta_var_destroy,
		.compare = meta_var_compare,
		.change = meta_var_change,
		.apply_extras = meta_var_apply_extras,
		.dump = meta_var_dump
	},
	[TCF_META_TYPE_INT] = {
		.compare = meta_int_compare,
		.change = meta_int_change,
		.apply_extras = meta_int_apply_extras,
		.dump = meta_int_dump
	}
};

static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
	return &__meta_type_ops[meta_type(v)];
}

/**************************************************************************
 * Core
 **************************************************************************/

static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
		    struct meta_value *v, struct meta_obj *dst)
{
	int err = 0;

	if (meta_id(v) == TCF_META_ID_VALUE) {
		dst->value = v->val;
		dst->len = v->len;
		return 0;
	}

	meta_ops(v)->get(skb, info, v, dst, &err);
	if (err < 0)
		return err;

	if (meta_type_ops(v)->apply_extras)
		meta_type_ops(v)->apply_extras(v, dst);

	return 0;
}

static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
			 struct tcf_pkt_info *info)
{
	int r;
	struct meta_match *meta = (struct meta_match *) m->data;
	struct meta_obj l_value, r_value;

	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
		return 0;

	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

	switch (meta->lvalue.hdr.op) {
	case TCF_EM_OPND_EQ:
		return !r;
	case TCF_EM_OPND_LT:
		return r < 0;
	case TCF_EM_OPND_GT:
		return r > 0;
	}

	return 0;
}

static void meta_delete(struct meta_match *meta)
{
	if (meta) {
		struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

		if (ops && ops->destroy) {
			ops->destroy(&meta->lvalue);
			ops->destroy(&meta->rvalue);
		}
	}

	kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
	if (nla) {
		if (nla_len(nla) == 0)
			return -EINVAL;

		return meta_type_ops(dst)->change(dst, nla);
	}

	return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
	return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
};

static int em_meta_change(struct tcf_proto *tp, void *data, int len,
			  struct tcf_ematch *m)
{
	int err;
	struct nlattr *tb[TCA_EM_META_MAX + 1];
	struct tcf_meta_hdr *hdr;
	struct meta_match *meta = NULL;

	err = nla_parse(tb, TCA_EM_META_MAX, data, len, meta_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_EM_META_HDR] == NULL)
		goto errout;
	hdr = nla_data(tb[TCA_EM_META_HDR]);

	if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
	    TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
	    TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
	    TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
		goto errout;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (meta == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

	if (!meta_is_supported(&meta->lvalue) ||
	    !meta_is_supported(&meta->rvalue)) {
		err = -EOPNOTSUPP;
		goto errout;
	}

	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
		goto errout;

	m->datalen = sizeof(*meta);
	m->data = (unsigned long) meta;

	err = 0;
errout:
	if (err && meta)
		meta_delete(meta);
	return err;
}

static void em_meta_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
{
	if (m)
		meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
	struct meta_match *meta = (struct meta_match *) em->data;
	struct tcf_meta_hdr hdr;
	struct meta_type_ops *ops;

	memset(&hdr, 0, sizeof(hdr));
	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
		goto nla_put_failure;

	ops = meta_type_ops(&meta->lvalue);
	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
	.kind	  = TCF_EM_META,
	.change	  = em_meta_change,
	.match	  = em_meta_match,
	.destroy  = em_meta_destroy,
	.dump	  = em_meta_dump,
	.owner	  = THIS_MODULE,
	.link	  = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
	return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
	tcf_em_unregister(&em_meta_ops);
}

MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);