/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>		/* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The kernel
 * keeps the VC open at least until all classes using it are removed.
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in the
 * rest of net/sched.
 *
 * Known bugs:
 *  - sometimes messes up the IP stack
 *  - any manipulations besides the few operations described in the README are
 *    untested and likely to crash the system
 *  - should lock the flow while there is data in the queue (?)
 */
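/*
 * Illustrative sketch (an assumption about typical userspace usage, not part
 * of this file): the descriptor carried in the TCA_ATM_FD attribute is just
 * an ordinary open ATM socket. A process might set one up roughly like this
 * before issuing the RTM_NEWTCLASS request that ends up in atm_tc_change():
 *
 *	int fd = socket(PF_ATMPVC, SOCK_DGRAM, 0);
 *	struct atm_qos qos = {};
 *	struct sockaddr_atmpvc addr = {};
 *
 *	qos.aal = ATM_AAL5;
 *	qos.txtp.traffic_class = ATM_UBR;	(example QoS values)
 *	qos.txtp.max_sdu = 1524;
 *	setsockopt(fd, SOL_ATM, SO_ATMQOS, &qos, sizeof(qos));
 *
 *	addr.sap_family = AF_ATMPVC;
 *	addr.sap_addr.itf = 0;			(interface/VPI/VCI are made up)
 *	addr.sap_addr.vpi = 0;
 *	addr.sap_addr.vci = 100;
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * "fd" is then passed as TCA_ATM_FD for the new class; the kernel takes its
 * own reference on the socket via sockfd_lookup() and keeps the VC open until
 * the last class using it is removed.
 */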
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
struct atm_flow_data {
	struct Qdisc_class_common common;
	struct Qdisc		*q;		/* FIFO, TBF, etc. */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;
	struct atm_vcc		*vcc;		/* VCC; NULL if VCC is closed */
	void			(*old_pop)(struct atm_vcc *vcc,
					   struct sk_buff *skb); /* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue	qstats;
	struct list_head	list;
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;
	unsigned char		hdr[0];		/* header data; MUST BE LAST */
};
struct atm_qdisc_data {
	struct atm_flow_data	link;		/* unclassified skbs go here */
	struct list_head	flows;		/* NB: "link" is also on this
						   list */
	struct tasklet_struct	task;		/* dequeue tasklet */
};
/* ------------------------- Class/flow operations ------------------------- */
static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	list_for_each_entry(flow, &p->flows, list) {
		if (flow->common.classid == classid)
			return flow;
	}
	return NULL;
}
static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		 sch, p, flow, new, old);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}
static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
	return flow ? flow->q : NULL;
}
static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	flow = lookup_flow(sch, classid);
	pr_debug("%s: flow %p\n", __func__, flow);
	return (unsigned long)flow;
}
static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	flow = lookup_flow(sch, classid);
	if (flow)
		flow->ref++;
	pr_debug("%s: flow %p\n", __func__, flow);
	return (unsigned long)flow;
}
/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_destroy(flow->q);
	tcf_block_put(flow->block);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}
static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
	VCC2FLOW(vcc)->old_pop(vcc, skb);
	tasklet_schedule(&p->task);
}
static const u8 llc_oui_ip[] = {
	0xaa,		/* DSAP: non-ISO */
	0xaa,		/* SSAP: non-ISO */
	0x03,		/* Ctrl: Unnumbered Information Command PDU */
	0x00,		/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};			/* Ethertype IP (0800) */
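/*
 * For reference: the eight bytes above are the RFC 1483 LLC/SNAP
 * encapsulation for routed IPv4 traffic - LLC header AA-AA-03, SNAP OUI
 * 00-00-00, and EtherType 0x0800. This is the default header prepended in
 * sch_atm_dequeue() when a class is created without an explicit TCA_ATM_HDR.
 */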
static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API). In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy, NULL);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		excess = (struct atm_flow_data *)
			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

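		/*
		 * No classid was supplied: scan the 0x8000..0xffff minor
		 * range under this qdisc's major handle for an unused id.
		 */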
		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_find(sch, classid);
			if (!cl)
				break;
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}

	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
			      extack);
	if (error) {
		kfree(flow);
		goto err_out;
	}

	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				    extack);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->common.classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	sockfd_put(sock);
	return error;
}
static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	atm_tc_put(sch, arg);
	return 0;
}
static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
	if (walker->stop)
		return;
	list_for_each_entry(flow, &p->flows, list) {
		if (walker->count >= walker->skip &&
		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
			walker->stop = 1;
			break;
		}
		walker->count++;
	}
}
static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	return flow ? flow->block : p->link.block;
}
/* --------------------------- Qdisc operations ---------------------------- */
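/*
 * Classification note (illustrative): if skb->priority already carries a
 * classid whose major number equals this qdisc's handle (e.g. priority
 * 0x00010001 under handle 1:), atm_tc_enqueue() below resolves the class
 * directly via atm_tc_find() and the filter chain is not consulted.
 */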
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_ACT_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
		struct tcf_proto *fl;

		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tcf_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow) {
		flow = &p->link;
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			__qdisc_drop(skb, to_free);
			goto drop;
		case TC_ACT_RECLASSIFY:
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qlen needs to reflect whether
	 * there is a packet eligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */
static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			refcount_add(skb->truesize,
				     &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}
static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
	tasklet_schedule(&p->task);
	skb = qdisc_dequeue_peeked(p->link.q);
	if (skb)
		sch->q.qlen--;
	return skb;
}
static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->link.q->ops->peek(p->link.q);
}
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle, extack);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);

	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
			    extack);
	if (err)
		return err;

	p->link.common.classid = sch->handle;
	p->link.ref = 1;
	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}
static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
	sch->q.qlen = 0;
}
static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		tcf_block_put(flow->block);
		flow->block = NULL;
	}

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}
static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		 sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->common.classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
		goto nla_put_failure;
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		memset(&pvc, 0, sizeof(pvc));
		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
			goto nla_put_failure;
		state = ATM_VF2VS(flow->vcc->flags);
		if (nla_put_u32(skb, TCA_ATM_STATE, state))
			goto nla_put_failure;
	}
	if (flow->excess) {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
			goto nla_put_failure;
	} else {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
		return -1;

	return 0;
}
static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}
static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.find		= atm_tc_find,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_block	= atm_tc_tcf_block,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};
static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};
static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");