/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
	u32 timestamp;
};

static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}

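/*
 * The timestamp is stashed in skb->cb, the 48-byte scratch area that the
 * current owner of an skb may use freely. Usage sketch (hypothetical
 * caller, for illustration only):
 *
 *	struct can_rx_offload_cb *cb = can_rx_offload_get_cb(skb);
 *	u32 ts = cb->timestamp;
 *
 * The BUILD_BUG_ON() above turns an oversized cb struct into a
 * compile-time error instead of silent corruption of neighboring skb
 * fields.
 */
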
static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}

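/*
 * Both helpers make the mailbox walk direction-agnostic: with inc == true
 * the scan runs mb_first -> mb_last upwards, with inc == false it runs
 * downwards. Worked example: mb_first = 11, mb_last = 8 gives inc == false,
 * so can_rx_offload_le(offload, i, 8) means "i >= 8" and the loop visits
 * mailboxes 11, 10, 9, 8 in that order.
 */
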
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	can_led_event(offload->dev, CAN_LED_EVENT_RX);

	return work_done;
}

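/*
 * Note on the re-check after napi_complete_done(): an interrupt may queue
 * new skbs between the last skb_dequeue() and napi_complete_done().
 * Testing the queue again and calling napi_reschedule() closes that
 * window, so no frame can get stranded in skb_queue while NAPI sits idle.
 */
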
static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
					int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = (struct sk_buff *)head;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}

	__skb_queue_after(head, insert, new);
}

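/*
 * This is an insertion sort from the tail: because mailboxes are scanned
 * roughly in reception order, the new skb usually belongs at (or near) the
 * end of the queue, so walking backwards finds the insertion point after a
 * comparison or two in the common case. If every queued skb compares
 * greater, insert stays at the list head and the new skb becomes the first
 * element.
 */
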
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}

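/*
 * Worked example of the wraparound-safe comparison: with a->timestamp =
 * 0xfffffff0 and b->timestamp = 0x00000010 (the counter wrapped between
 * the two frames), the u32 subtraction yields 0x00000020, i.e. +32 as an
 * int, so b still correctly sorts as the later frame. A plain "a < b"
 * comparison would get this wrong at every counter overflow.
 */
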
static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb = NULL;
	struct can_rx_offload_cb *cb;
	struct can_frame *cf;
	int ret;

	/* If queue is full or skb not available, read to discard mailbox */
	if (likely(skb_queue_len(&offload->skb_queue) <=
		   offload->skb_queue_len_max))
		skb = alloc_can_skb(offload->dev, &cf);

	if (!skb) {
		struct can_frame cf_overflow;
		u32 timestamp;

		ret = offload->mailbox_read(offload, &cf_overflow,
					    &timestamp, n);
		if (ret)
			offload->dev->stats.rx_dropped++;

		return NULL;
	}

	cb = can_rx_offload_get_cb(skb);
	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
	if (!ret) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

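/*
 * Even when no skb can be queued, the mailbox is still read (into the
 * on-stack cf_overflow) and only then counted as rx_dropped: on typical
 * controllers the read is what frees the mailbox in hardware, so skipping
 * it would stall reception instead of merely dropping one frame.
 */
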
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
	struct sk_buff_head skb_queue;
	unsigned int i;

	__skb_queue_head_init(&skb_queue);

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (!skb)
			break;

		__skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
	}

	if (!skb_queue_empty(&skb_queue)) {
		unsigned long flags;
		u32 queue_len;

		spin_lock_irqsave(&offload->skb_queue.lock, flags);
		skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
		spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

		if ((queue_len = skb_queue_len(&offload->skb_queue)) >
		    (offload->skb_queue_len_max / 8))
			netdev_dbg(offload->dev, "%s: queue_len=%d\n",
				   __func__, queue_len);

		can_rx_offload_schedule(offload);
	}

	return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

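/*
 * Usage sketch (hypothetical driver, illustrative names only): a controller
 * with one interrupt flag bit per mailbox would collect the pending bits in
 * its IRQ handler and hand them to this function, e.g.:
 *
 *	u64 pending = priv->read_iflags(priv);
 *	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
 *
 * The locally sorted frames are spliced under the queue lock and then
 * delivered in timestamp order from NAPI context by
 * can_rx_offload_napi_poll().
 */
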
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while ((skb = can_rx_offload_offload_one(offload, 0))) {
		skb_queue_tail(&offload->skb_queue, skb);
		received++;
	}

	if (received)
		can_rx_offload_schedule(offload);

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max)
		return -ENOMEM;

	skb_queue_tail(&offload->skb_queue, skb);
	can_rx_offload_schedule(offload);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);

static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);

	can_rx_offload_reset(offload);
	netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}

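/*
 * Worked example of the queue limit: for weight = 64, fls(64) = 7, so
 * 2 << 7 = 256 and 256 * 4 = 1024 queued skbs at most. The result is
 * always a power of two at or above the nominal "4x weight" target,
 * because 2 << fls(weight) already rounds up past the weight itself.
 */
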
int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
	unsigned int weight;

	/* BIT_ULL(i) below is only defined for i < BITS_PER_LONG_LONG */
	if (offload->mb_first >= BITS_PER_LONG_LONG ||
	    offload->mb_last >= BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

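/*
 * Setup sketch (hypothetical driver, illustrative names only): the driver
 * embeds struct can_rx_offload in its private data, fills in the mailbox
 * window and read callback, then registers it:
 *
 *	priv->offload.mb_first = 8;
 *	priv->offload.mb_last = 63;
 *	priv->offload.mailbox_read = my_mailbox_read;
 *	err = can_rx_offload_add_timestamp(dev, &priv->offload);
 *
 * mb_first > mb_last is equally valid and simply makes the scan (via
 * offload->inc == false) run downwards.
 */
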
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
	can_rx_offload_reset(offload);
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
	/* Intentionally empty: there is no per-offload state to reset yet */
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);