iio: magnetometer: ak8974: Fix runtime PM imbalance on error
[linux/fpc-iii.git] / net / sched / sch_atm.c
blob 481e4f12aeb4c05e17f6985764cd901c9fa59331
1 /* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */
3 /* Written 1998-2000 by Werner Almesberger, EPFL ICA */
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/string.h>
10 #include <linux/errno.h>
11 #include <linux/skbuff.h>
12 #include <linux/atmdev.h>
13 #include <linux/atmclip.h>
14 #include <linux/rtnetlink.h>
15 #include <linux/file.h> /* for fput */
16 #include <net/netlink.h>
17 #include <net/pkt_sched.h>
20 * The ATM queuing discipline provides a framework for invoking classifiers
21 * (aka "filters"), which in turn select classes of this queuing discipline.
22 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
23 * may share the same VC.
25 * When creating a class, VCs are specified by passing the number of the open
26 * socket descriptor by which the calling process references the VC. The kernel
27 * keeps the VC open at least until all classes using it are removed.
29 * In this file, most functions are named atm_tc_* to avoid confusion with all
30 * the atm_* in net/atm. This naming convention differs from what's used in the
31 * rest of net/sched.
33 * Known bugs:
34 * - sometimes messes up the IP stack
35 * - any manipulations besides the few operations described in the README, are
36 * untested and likely to crash the system
37 * - should lock the flow while there is data in the queue (?)
40 #define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
42 struct atm_flow_data {
43 struct Qdisc *q; /* FIFO, TBF, etc. */
44 struct tcf_proto __rcu *filter_list;
45 struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
46 void (*old_pop)(struct atm_vcc *vcc,
47 struct sk_buff *skb); /* chaining */
48 struct atm_qdisc_data *parent; /* parent qdisc */
49 struct socket *sock; /* for closing */
50 u32 classid; /* x:y type ID */
51 int ref; /* reference count */
52 struct gnet_stats_basic_packed bstats;
53 struct gnet_stats_queue qstats;
54 struct list_head list;
55 struct atm_flow_data *excess; /* flow for excess traffic;
56 NULL to set CLP instead */
57 int hdr_len;
58 unsigned char hdr[0]; /* header data; MUST BE LAST */
/*
 * Per-qdisc private data (returned by qdisc_priv()).  The embedded "link"
 * flow receives all traffic that no filter assigned to a class.
 */
struct atm_qdisc_data {
	struct atm_flow_data	link;	/* unclassified skbs go here */
	struct list_head	flows;	/* NB: "link" is also on this list */
	struct tasklet_struct	task;	/* dequeue tasklet */
};
68 /* ------------------------- Class/flow operations ------------------------- */
70 static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
72 struct atm_qdisc_data *p = qdisc_priv(sch);
73 struct atm_flow_data *flow;
75 list_for_each_entry(flow, &p->flows, list) {
76 if (flow->classid == classid)
77 return flow;
79 return NULL;
/*
 * Replace the inner qdisc of class 'arg' with 'new', handing the previous
 * one back through '*old'.  A NULL 'new' installs the no-op qdisc.  The
 * old qdisc is reset (queue flushed) but not destroyed here; the caller
 * owns *old afterwards.
 */
static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		 sch, p, flow, new, old);
	/* an empty list node means the class was already unlinked */
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}
101 static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
103 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
105 pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
106 return flow ? flow->q : NULL;
109 static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
111 struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
112 struct atm_flow_data *flow;
114 pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
115 flow = lookup_flow(sch, classid);
116 if (flow)
117 flow->ref++;
118 pr_debug("atm_tc_get: flow %p\n", flow);
119 return (unsigned long)flow;
122 static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
123 unsigned long parent, u32 classid)
125 return atm_tc_get(sch, classid);
/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	/* last reference gone: tear the class down */
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_destroy(flow->q);
	tcf_destroy_chain(&flow->filter_list);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		/* restore the pop handler we chained in atm_tc_change() */
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	/* drop the reference we hold on the excess flow (may recurse) */
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}
162 static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
164 struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
166 pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
167 VCC2FLOW(vcc)->old_pop(vcc, skb);
168 tasklet_schedule(&p->task);
/* Default RFC 1483 LLC/SNAP header used when the user supplies no
 * TCA_ATM_HDR attribute; encapsulates IPv4 (EtherType 0x0800). */
static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */
/* Netlink attribute policy for atm_tc_change().  NOTE(review): TCA_ATM_HDR
 * is read by atm_tc_change() but has no entry here, so its length is not
 * policy-validated — confirm that is intentional. */
static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};
/*
 * Create a new class bound to an open ATM socket (classes cannot be
 * modified once created).  TCA_ATM_FD carries the caller's socket fd for
 * the VC; TCA_ATM_HDR optionally supplies the link-layer header to
 * prepend (default: LLC/SNAP for IP); TCA_ATM_EXCESS optionally names an
 * existing class to receive excess traffic.  On success the new flow is
 * returned through *arg with ref == 1 and the VCC's pop handler chained
 * to sch_atm_pop().
 */
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API. In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;
	error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy);
	if (error < 0)
		return error;
	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		/* takes a reference on the excess class; dropped on error */
		excess = (struct atm_flow_data *)
			atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	/* holds a file reference until sockfd_put() */
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		/* user-chosen classid must live under this qdisc's major */
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		/* auto-allocate the first free minor in 0x8001..0xffff */
		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_get(sch, classid);
			if (!cl)
				break;
			atm_tc_put(sch, cl);
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	/* hdr_len bytes of trailing storage for the flexible hdr[] member */
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}
	RCU_INIT_POINTER(flow->filter_list, NULL);
	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	/* chain our pop handler in front of the VCC's original one */
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	if (excess)
		atm_tc_put(sch, (unsigned long)excess);
	sockfd_put(sock);
	return error;
}
/*
 * Delete a class.  Refuses the built-in link flow, any class that still
 * has filters attached, and any class held by additional references
 * (e.g. as another class's excess flow).
 */
static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	/* drops the keepalive reference; caller's put destroys the flow */
	atm_tc_put(sch, arg);
	return 0;
}
330 static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
332 struct atm_qdisc_data *p = qdisc_priv(sch);
333 struct atm_flow_data *flow;
335 pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
336 if (walker->stop)
337 return;
338 list_for_each_entry(flow, &p->flows, list) {
339 if (walker->count >= walker->skip &&
340 walker->fn(sch, (unsigned long)flow, walker) < 0) {
341 walker->stop = 1;
342 break;
344 walker->count++;
348 static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
349 unsigned long cl)
351 struct atm_qdisc_data *p = qdisc_priv(sch);
352 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
354 pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
355 return flow ? &flow->filter_list : &p->link.filter_list;
358 /* --------------------------- Qdisc operations ---------------------------- */
/*
 * Classify skb to a flow (first via skb->priority as a direct classid,
 * then through the per-flow filter chains) and enqueue it on that flow's
 * inner qdisc.  Packets destined for a VC are reported as dropped
 * (__NET_XMIT_BYPASS) to the outer qdisc — see the comment near the end.
 */
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_ACT_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
		struct tcf_proto *fl;

		/* first flow with a filter chain decides the verdict */
		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tc_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow) {
		/* unclassified traffic goes out the plain link */
		flow = &p->link;
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			__qdisc_drop(skb, to_free);
			goto drop;
		case TC_ACT_RECLASSIFY:
			/* divert to the excess flow, or mark CLP instead */
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qdisc needs to reflect whether
	 * there is a packet egligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */
static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		/* the link flow is drained by atm_tc_dequeue(), not here */
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			/* peek first: only dequeue if the VC can take it */
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;
			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;
			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			/* prepend the class's link-layer header */
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			/* account the skb against the ATM socket's send buffer */
			atomic_add(skb->truesize,
				   &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}
501 static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
503 struct atm_qdisc_data *p = qdisc_priv(sch);
504 struct sk_buff *skb;
506 pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
507 tasklet_schedule(&p->task);
508 skb = qdisc_dequeue_peeked(p->link.q);
509 if (skb)
510 sch->q.qlen--;
511 return skb;
514 static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
516 struct atm_qdisc_data *p = qdisc_priv(sch);
518 pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
520 return p->link.q->ops->peek(p->link.q);
/*
 * Initialize the qdisc: set up the flow list with the built-in link flow
 * on it, give the link flow a default pfifo (noop on allocation failure),
 * and arm the dequeue tasklet.  'opt' is ignored — this qdisc takes no
 * init-time options.
 */
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
	RCU_INIT_POINTER(p->link.filter_list, NULL);
	p->link.vcc = NULL;
	p->link.sock = NULL;
	p->link.classid = sch->handle;
	p->link.ref = 1;	/* keepalive reference, dropped in destroy */
	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}
545 static void atm_tc_reset(struct Qdisc *sch)
547 struct atm_qdisc_data *p = qdisc_priv(sch);
548 struct atm_flow_data *flow;
550 pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
551 list_for_each_entry(flow, &p->flows, list)
552 qdisc_reset(flow->q);
553 sch->q.qlen = 0;
/*
 * Tear the qdisc down.  Filters are destroyed first (in a separate pass,
 * since atm_tc_put() refuses nothing here), then every flow — including
 * the embedded link flow — is released; finally the dequeue tasklet is
 * killed.
 */
static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		tcf_destroy_chain(&flow->filter_list);

	/* _safe: atm_tc_put() unlinks and may free each flow */
	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}
573 static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
574 struct sk_buff *skb, struct tcmsg *tcm)
576 struct atm_qdisc_data *p = qdisc_priv(sch);
577 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
578 struct nlattr *nest;
580 pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
581 sch, p, flow, skb, tcm);
582 if (list_empty(&flow->list))
583 return -EINVAL;
584 tcm->tcm_handle = flow->classid;
585 tcm->tcm_info = flow->q->handle;
587 nest = nla_nest_start(skb, TCA_OPTIONS);
588 if (nest == NULL)
589 goto nla_put_failure;
591 if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
592 goto nla_put_failure;
593 if (flow->vcc) {
594 struct sockaddr_atmpvc pvc;
595 int state;
597 memset(&pvc, 0, sizeof(pvc));
598 pvc.sap_family = AF_ATMPVC;
599 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
600 pvc.sap_addr.vpi = flow->vcc->vpi;
601 pvc.sap_addr.vci = flow->vcc->vci;
602 if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
603 goto nla_put_failure;
604 state = ATM_VF2VS(flow->vcc->flags);
605 if (nla_put_u32(skb, TCA_ATM_STATE, state))
606 goto nla_put_failure;
608 if (flow->excess) {
609 if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
610 goto nla_put_failure;
611 } else {
612 if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
613 goto nla_put_failure;
615 return nla_nest_end(skb, nest);
617 nla_put_failure:
618 nla_nest_cancel(skb, nest);
619 return -1;
/*
 * Copy a class's byte/packet counters and queue statistics (including the
 * inner qdisc's current backlog) into the dump buffer.  Returns -1 on
 * copy failure, 0 on success.
 */
static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
		return -1;

	return 0;
}
/* Qdisc-level dump: this qdisc has no options of its own to report. */
static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}
/* Class operations table wired into atm_qdisc_ops below. */
static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.get		= atm_tc_get,
	.put		= atm_tc_put,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_chain	= atm_tc_find_tcf,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,	/* unbinding drops the bind reference */
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};
/* The "atm" qdisc, registered at module init. */
static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};
/* Module entry point: register the "atm" qdisc with the tc framework. */
static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

/* Module exit: unregister the qdisc. */
static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");