// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
 * Copyright (C) 2014-2017 Pengutronix,
 * Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

/* Per-skb control block, stored in skb->cb, holding the reception
 * timestamp used for sorting.
 */
struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

/* Compare mailbox indices in the walking direction given by offload->inc. */
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

/* Insert @new into @head so that the queue stays sorted by @compare
 * (oldest timestamp at the head). The queue is walked from the tail,
 * as new frames usually carry the most recent timestamps.
 */
static void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return the result as int, to keep the
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

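/* Worked example (illustration only; values invented): with
 * cb_a->timestamp = 0xfffffff0 (just before the u32 counter wraps) and
 * cb_b->timestamp = 0x00000010 (just after the wrap), the unsigned
 * subtraction 0x00000010 - 0xfffffff0 wraps to 0x00000020; read as a
 * signed int this is +32, so b still sorts after a although its raw
 * counter value is smaller. This holds as long as the true distance
 * between two queued timestamps stays below 2^31 counter ticks.
 */
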
/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of the mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n
 * from the device and return the mailbox's content as a struct
 * sk_buff.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox content is discarded by reading it into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to the skb containing the CAN frame on success.
 *
 *         NULL if mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error.
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct can_rx_offload_cb *cb;
	struct sk_buff *skb;
	bool drop = false;
	u32 timestamp;

	/* If the queue is full, drop the frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate the
	 * error value.
	 */
	if (unlikely(IS_ERR(skb))) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}

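/* A minimal sketch (not part of this file) of the mailbox_read()
 * contract documented above, for a hypothetical device "foo" with
 * memory-mapped mailboxes. struct foo_priv and all foo_mb_*() helpers
 * are invented for illustration only; a real driver follows its own
 * hardware manual. The block is compiled out on purpose.
 */
#if 0
static struct sk_buff *
foo_mailbox_read(struct can_rx_offload *offload, unsigned int mb,
		 u32 *timestamp, bool drop)
{
	struct foo_priv *priv = container_of(offload, struct foo_priv,
					     offload);
	struct can_frame *cf;
	struct sk_buff *skb;

	if (!foo_mb_readable(priv, mb))
		return NULL;			/* mailbox empty */

	if (drop) {
		/* free the mailbox but discard its content */
		foo_mb_release(priv, mb);
		return ERR_PTR(-ENOBUFS);
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (!skb) {
		foo_mb_release(priv, mb);
		return ERR_PTR(-ENOMEM);
	}

	*timestamp = foo_mb_timestamp(priv, mb);
	cf->can_id = foo_mb_id(priv, mb);
	cf->can_dlc = foo_mb_dlc(priv, mb);
	foo_mb_copy_data(priv, mb, cf->data);
	foo_mb_release(priv, mb);

	return skb;
}
#endif
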
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		queue_len = skb_queue_len(&offload->skb_queue);
		if (queue_len > offload->skb_queue_len_max / 8)
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

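/* Usage sketch (illustration only): a driver's RX interrupt handler
 * typically reads a "one bit per pending mailbox" word from hardware
 * and hands it to can_rx_offload_irq_offload_timestamp(). struct
 * foo_priv and foo_read_pending() are invented for illustration; the
 * block is compiled out on purpose.
 */
#if 0
static irqreturn_t foo_rx_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	u64 pending;

	pending = foo_read_pending(priv);
	if (pending)
		can_rx_offload_irq_offload_timestamp(&priv->offload, pending);

	return IRQ_HANDLED;
}
#endif
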
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
				struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;
	unsigned long flags;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);

unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
					 unsigned int idx, u32 timestamp)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	u8 len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);

int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);

static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

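/* Worked example (illustration only): for a NAPI weight of 32,
 * fls(32) = 6, so skb_queue_len_max = (2 << 6) * 4 = 512; for a
 * weight of 64, fls(64) = 7 and the limit becomes 1024.
 */
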
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);