// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* 1 band FIFO pseudo-"scheduler" */
19 static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
20 struct sk_buff **to_free)
22 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
23 READ_ONCE(sch->limit)))
24 return qdisc_enqueue_tail(skb, sch);
26 return qdisc_drop(skb, sch, to_free);
29 static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
30 struct sk_buff **to_free)
32 if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
33 return qdisc_enqueue_tail(skb, sch);
35 return qdisc_drop(skb, sch, to_free);
38 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
39 struct sk_buff **to_free)
41 unsigned int prev_backlog;
43 if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
44 return qdisc_enqueue_tail(skb, sch);
46 prev_backlog = sch->qstats.backlog;
47 /* queue full, remove one skb to fulfill the limit */
48 __qdisc_queue_drop_head(sch, &sch->q, to_free);
49 qdisc_qstats_drop(sch);
50 qdisc_enqueue_tail(skb, sch);
52 qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
53 return NET_XMIT_CN;
56 static void fifo_offload_init(struct Qdisc *sch)
58 struct net_device *dev = qdisc_dev(sch);
59 struct tc_fifo_qopt_offload qopt;
61 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
62 return;
64 qopt.command = TC_FIFO_REPLACE;
65 qopt.handle = sch->handle;
66 qopt.parent = sch->parent;
67 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
70 static void fifo_offload_destroy(struct Qdisc *sch)
72 struct net_device *dev = qdisc_dev(sch);
73 struct tc_fifo_qopt_offload qopt;
75 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
76 return;
78 qopt.command = TC_FIFO_DESTROY;
79 qopt.handle = sch->handle;
80 qopt.parent = sch->parent;
81 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
84 static int fifo_offload_dump(struct Qdisc *sch)
86 struct tc_fifo_qopt_offload qopt;
88 qopt.command = TC_FIFO_STATS;
89 qopt.handle = sch->handle;
90 qopt.parent = sch->parent;
91 qopt.stats.bstats = &sch->bstats;
92 qopt.stats.qstats = &sch->qstats;
94 return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
97 static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
98 struct netlink_ext_ack *extack)
100 bool bypass;
101 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
103 if (opt == NULL) {
104 u32 limit = qdisc_dev(sch)->tx_queue_len;
106 if (is_bfifo)
107 limit *= psched_mtu(qdisc_dev(sch));
109 WRITE_ONCE(sch->limit, limit);
110 } else {
111 struct tc_fifo_qopt *ctl = nla_data(opt);
113 if (nla_len(opt) < sizeof(*ctl))
114 return -EINVAL;
116 WRITE_ONCE(sch->limit, ctl->limit);
119 if (is_bfifo)
120 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
121 else
122 bypass = sch->limit >= 1;
124 if (bypass)
125 sch->flags |= TCQ_F_CAN_BYPASS;
126 else
127 sch->flags &= ~TCQ_F_CAN_BYPASS;
129 return 0;
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err = __fifo_init(sch, opt, extack);

	if (err)
		return err;

	/* Mirror the software configuration into hardware, if supported. */
	fifo_offload_init(sch);
	return 0;
}
/* pfifo_head_drop has no offload support, so init is software-only. */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
static void fifo_destroy(struct Qdisc *sch)
{
	/* Tear down any hardware-offloaded instance of this FIFO. */
	fifo_offload_destroy(sch);
}
156 static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
158 struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };
160 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
161 goto nla_put_failure;
162 return skb->len;
164 nla_put_failure:
165 return -1;
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err = fifo_offload_dump(sch);

	/* Pull hardware stats before dumping the configuration. */
	if (err)
		return err;

	return __fifo_dump(sch, skb);
}
/* pfifo_head_drop has no offload support, so dump is software-only. */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
184 struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
185 .id = "pfifo",
186 .priv_size = 0,
187 .enqueue = pfifo_enqueue,
188 .dequeue = qdisc_dequeue_head,
189 .peek = qdisc_peek_head,
190 .init = fifo_init,
191 .destroy = fifo_destroy,
192 .reset = qdisc_reset_queue,
193 .change = fifo_init,
194 .dump = fifo_dump,
195 .owner = THIS_MODULE,
197 EXPORT_SYMBOL(pfifo_qdisc_ops);
199 struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
200 .id = "bfifo",
201 .priv_size = 0,
202 .enqueue = bfifo_enqueue,
203 .dequeue = qdisc_dequeue_head,
204 .peek = qdisc_peek_head,
205 .init = fifo_init,
206 .destroy = fifo_destroy,
207 .reset = qdisc_reset_queue,
208 .change = fifo_init,
209 .dump = fifo_dump,
210 .owner = THIS_MODULE,
212 EXPORT_SYMBOL(bfifo_qdisc_ops);
214 struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
215 .id = "pfifo_head_drop",
216 .priv_size = 0,
217 .enqueue = pfifo_tail_enqueue,
218 .dequeue = qdisc_dequeue_head,
219 .peek = qdisc_peek_head,
220 .init = fifo_hd_init,
221 .reset = qdisc_reset_queue,
222 .change = fifo_hd_init,
223 .dump = fifo_hd_dump,
224 .owner = THIS_MODULE,
227 /* Pass size change message down to embedded FIFO */
228 int fifo_set_limit(struct Qdisc *q, unsigned int limit)
230 struct nlattr *nla;
231 int ret = -ENOMEM;
233 /* Hack to avoid sending change message to non-FIFO */
234 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
235 return 0;
237 if (!q->ops->change)
238 return 0;
240 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
241 if (nla) {
242 nla->nla_type = RTM_NEWQDISC;
243 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
244 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
246 ret = q->ops->change(q, nla, NULL);
247 kfree(nla);
249 return ret;
251 EXPORT_SYMBOL(fifo_set_limit);
253 struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
254 unsigned int limit,
255 struct netlink_ext_ack *extack)
257 struct Qdisc *q;
258 int err = -ENOMEM;
260 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
261 extack);
262 if (q) {
263 err = fifo_set_limit(q, limit);
264 if (err < 0) {
265 qdisc_put(q);
266 q = NULL;
270 return q ? : ERR_PTR(err);
272 EXPORT_SYMBOL(fifo_create_dflt);
273 MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");