// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/types.h>
11 #include <linux/kernel.h>
12 #include <linux/errno.h>
13 #include <linux/skbuff.h>
14 #include <net/pkt_sched.h>
15 #include <net/pkt_cls.h>
17 /* 1 band FIFO pseudo-"scheduler" */
19 static int bfifo_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
20 struct sk_buff
**to_free
)
22 if (likely(sch
->qstats
.backlog
+ qdisc_pkt_len(skb
) <=
23 READ_ONCE(sch
->limit
)))
24 return qdisc_enqueue_tail(skb
, sch
);
26 return qdisc_drop(skb
, sch
, to_free
);
29 static int pfifo_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
30 struct sk_buff
**to_free
)
32 if (likely(sch
->q
.qlen
< READ_ONCE(sch
->limit
)))
33 return qdisc_enqueue_tail(skb
, sch
);
35 return qdisc_drop(skb
, sch
, to_free
);
38 static int pfifo_tail_enqueue(struct sk_buff
*skb
, struct Qdisc
*sch
,
39 struct sk_buff
**to_free
)
41 unsigned int prev_backlog
;
43 if (likely(sch
->q
.qlen
< READ_ONCE(sch
->limit
)))
44 return qdisc_enqueue_tail(skb
, sch
);
46 prev_backlog
= sch
->qstats
.backlog
;
47 /* queue full, remove one skb to fulfill the limit */
48 __qdisc_queue_drop_head(sch
, &sch
->q
, to_free
);
49 qdisc_qstats_drop(sch
);
50 qdisc_enqueue_tail(skb
, sch
);
52 qdisc_tree_reduce_backlog(sch
, 0, prev_backlog
- sch
->qstats
.backlog
);
56 static void fifo_offload_init(struct Qdisc
*sch
)
58 struct net_device
*dev
= qdisc_dev(sch
);
59 struct tc_fifo_qopt_offload qopt
;
61 if (!tc_can_offload(dev
) || !dev
->netdev_ops
->ndo_setup_tc
)
64 qopt
.command
= TC_FIFO_REPLACE
;
65 qopt
.handle
= sch
->handle
;
66 qopt
.parent
= sch
->parent
;
67 dev
->netdev_ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_FIFO
, &qopt
);
70 static void fifo_offload_destroy(struct Qdisc
*sch
)
72 struct net_device
*dev
= qdisc_dev(sch
);
73 struct tc_fifo_qopt_offload qopt
;
75 if (!tc_can_offload(dev
) || !dev
->netdev_ops
->ndo_setup_tc
)
78 qopt
.command
= TC_FIFO_DESTROY
;
79 qopt
.handle
= sch
->handle
;
80 qopt
.parent
= sch
->parent
;
81 dev
->netdev_ops
->ndo_setup_tc(dev
, TC_SETUP_QDISC_FIFO
, &qopt
);
84 static int fifo_offload_dump(struct Qdisc
*sch
)
86 struct tc_fifo_qopt_offload qopt
;
88 qopt
.command
= TC_FIFO_STATS
;
89 qopt
.handle
= sch
->handle
;
90 qopt
.parent
= sch
->parent
;
91 qopt
.stats
.bstats
= &sch
->bstats
;
92 qopt
.stats
.qstats
= &sch
->qstats
;
94 return qdisc_offload_dump_helper(sch
, TC_SETUP_QDISC_FIFO
, &qopt
);
97 static int __fifo_init(struct Qdisc
*sch
, struct nlattr
*opt
,
98 struct netlink_ext_ack
*extack
)
101 bool is_bfifo
= sch
->ops
== &bfifo_qdisc_ops
;
104 u32 limit
= qdisc_dev(sch
)->tx_queue_len
;
107 limit
*= psched_mtu(qdisc_dev(sch
));
109 WRITE_ONCE(sch
->limit
, limit
);
111 struct tc_fifo_qopt
*ctl
= nla_data(opt
);
113 if (nla_len(opt
) < sizeof(*ctl
))
116 WRITE_ONCE(sch
->limit
, ctl
->limit
);
120 bypass
= sch
->limit
>= psched_mtu(qdisc_dev(sch
));
122 bypass
= sch
->limit
>= 1;
125 sch
->flags
|= TCQ_F_CAN_BYPASS
;
127 sch
->flags
&= ~TCQ_F_CAN_BYPASS
;
/* init/change hook for pfifo/bfifo: shared setup, then notify offload.
 * Offload is only attempted after the software setup succeeded.
 */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err;

	err = __fifo_init(sch, opt, extack);
	if (err)
		return err;

	fifo_offload_init(sch);

	return 0;
}
/* init/change hook for pfifo_head_drop: software setup only, no offload */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
/* destroy hook for pfifo/bfifo: only offload teardown is needed here */
static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}
156 static int __fifo_dump(struct Qdisc
*sch
, struct sk_buff
*skb
)
158 struct tc_fifo_qopt opt
= { .limit
= READ_ONCE(sch
->limit
) };
160 if (nla_put(skb
, TCA_OPTIONS
, sizeof(opt
), &opt
))
161 goto nla_put_failure
;
/* dump hook for pfifo/bfifo: refresh offloaded stats first, then dump */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err;

	err = fifo_offload_dump(sch);
	if (err)
		return err;

	return __fifo_dump(sch, skb);
}
/* dump hook for pfifo_head_drop: no offload stats to refresh */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
184 struct Qdisc_ops pfifo_qdisc_ops __read_mostly
= {
187 .enqueue
= pfifo_enqueue
,
188 .dequeue
= qdisc_dequeue_head
,
189 .peek
= qdisc_peek_head
,
191 .destroy
= fifo_destroy
,
192 .reset
= qdisc_reset_queue
,
195 .owner
= THIS_MODULE
,
197 EXPORT_SYMBOL(pfifo_qdisc_ops
);
199 struct Qdisc_ops bfifo_qdisc_ops __read_mostly
= {
202 .enqueue
= bfifo_enqueue
,
203 .dequeue
= qdisc_dequeue_head
,
204 .peek
= qdisc_peek_head
,
206 .destroy
= fifo_destroy
,
207 .reset
= qdisc_reset_queue
,
210 .owner
= THIS_MODULE
,
212 EXPORT_SYMBOL(bfifo_qdisc_ops
);
214 struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly
= {
215 .id
= "pfifo_head_drop",
217 .enqueue
= pfifo_tail_enqueue
,
218 .dequeue
= qdisc_dequeue_head
,
219 .peek
= qdisc_peek_head
,
220 .init
= fifo_hd_init
,
221 .reset
= qdisc_reset_queue
,
222 .change
= fifo_hd_init
,
223 .dump
= fifo_hd_dump
,
224 .owner
= THIS_MODULE
,
227 /* Pass size change message down to embedded FIFO */
228 int fifo_set_limit(struct Qdisc
*q
, unsigned int limit
)
233 /* Hack to avoid sending change message to non-FIFO */
234 if (strncmp(q
->ops
->id
+ 1, "fifo", 4) != 0)
240 nla
= kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt
)), GFP_KERNEL
);
242 nla
->nla_type
= RTM_NEWQDISC
;
243 nla
->nla_len
= nla_attr_size(sizeof(struct tc_fifo_qopt
));
244 ((struct tc_fifo_qopt
*)nla_data(nla
))->limit
= limit
;
246 ret
= q
->ops
->change(q
, nla
, NULL
);
251 EXPORT_SYMBOL(fifo_set_limit
);
253 struct Qdisc
*fifo_create_dflt(struct Qdisc
*sch
, struct Qdisc_ops
*ops
,
255 struct netlink_ext_ack
*extack
)
260 q
= qdisc_create_dflt(sch
->dev_queue
, ops
, TC_H_MAKE(sch
->handle
, 1),
263 err
= fifo_set_limit(q
, limit
);
270 return q
? : ERR_PTR(err
);
272 EXPORT_SYMBOL(fifo_create_dflt
);
273 MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");