/*
 * Intel Wireless WiMAX Connection 2400m
 * USB RX handling
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path on USB.
 *
 * When a notification is received that says 'there is RX data ready',
 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
 * reads a buffer from USB and passes it to i2400m_rx() in the generic
 * handling code. The RX buffer has a specific format that is
 * described in rx.c.
 *
 * We use a kernel thread in a loop because:
 *
 *  - we want to be able to call the USB power management get/put
 *    functions (blocking) before each transaction.
 *
 *  - We might get a lot of notifications and we don't want to submit
 *    a zillion reads; by serializing, we are throttling.
 *
 *  - RX data processing can get heavy enough so that it is not
 *    appropriate for doing it in the USB callback; thus we run it in
 *    a process context.
 *
 * We provide a read buffer of an arbitrary size (short of a page); if
 * the callback reports -EOVERFLOW, it means it was too small, so we
 * just double the size and retry (being careful to append, as
 * sometimes the device provided some data). Every now and then we
 * check if the average packet size is smaller than the current packet
 * size and if so, we halve it. At the end, the size of the
 * preallocated buffer should be following the average received
 * transaction size, adapting dynamically to it.
 *
 * ROADMAP
 *
 * i2400mu_rx_kick()		Called from notif.c when we get a
 *				'data ready' notification
 * i2400mu_rxd()		Kernel RX daemon
 *   i2400mu_rx()		Receive USB data
 *   i2400m_rx()		Send data to generic i2400m RX handling
 *
 * i2400mu_rx_setup()		called from i2400mu_bus_dev_start()
 *
 * i2400mu_rx_release()		called from i2400mu_bus_dev_stop()
 */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"


#define D_SUBMODULE rx
#include "usb-debug-levels.h"
/*
 * We can't let the rx_size be a multiple of 512 bytes (the RX
 * endpoint's max packet size). On some USB host controllers (we
 * haven't been able to fully characterize which), if the device is
 * about to send (for example) X bytes and we only post a buffer to
 * receive n*512, it will fail to mark that as babble (so that
 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 * rest).
 *
 * So on growing or shrinking, if it is a multiple of the
 * maxpacketsize, we remove some (instead of increasing some, so in a
 * buddy allocator we try to waste less space).
 *
 * Note we also need a hook for this on i2400mu_rx() -- when we do the
 * first read, we are sure we won't hit this spot because
 * i2400mu->rx_size has been set properly. However, if we have to
 * double because of -EOVERFLOW, when we launch the read to get the
 * rest of the data, we *have* to make sure that also is not a
 * multiple of the max_pkt_size.
 */
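/*
 * Illustrative numbers (not from the driver itself): doubling a
 * 520-byte buffer gives 1040, which is not a multiple of 512 and can
 * be used as is; doubling a 512-byte buffer would give 1024 = 2*512,
 * so 8 bytes are shaved off and 1016 is used instead.
 */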
static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;
	size_t rx_size;
	const size_t max_pkt_size = 512;

	rx_size = 2 * i2400mu->rx_size;
	if (rx_size % max_pkt_size == 0) {
		rx_size -= 8;
		d_printf(1, dev,
			 "RX: expected size grew to %zu [adjusted -8] "
			 "from %zu\n",
			 rx_size, i2400mu->rx_size);
	} else
		d_printf(1, dev,
			 "RX: expected size grew to %zu from %zu\n",
			 rx_size, i2400mu->rx_size);
	return rx_size;
}
static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;

	if (unlikely(i2400mu->rx_size_cnt >= 100
		     && i2400mu->rx_size_auto_shrink)) {
		size_t avg_rx_size =
			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
		size_t new_rx_size = i2400mu->rx_size / 2;
		if (avg_rx_size < new_rx_size) {
			if (new_rx_size % max_pkt_size == 0) {
				new_rx_size -= 8;
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "[adjusted -8] from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			} else
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			i2400mu->rx_size = new_rx_size;
			i2400mu->rx_size_cnt = 0;
			i2400mu->rx_size_acc = i2400mu->rx_size;
		}
	}
}
/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (e.g. skb reallocation), we just drop it
 * and hope for the next invocation to solve it.
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) skb_end_offset(new_skb));
		goto retry;
	}
	case -ETIMEDOUT:
		/* In most cases, it happens due to the hardware scheduling a
		 * read when there was no data - unfortunately, we have no way
		 * to tell this timeout from a USB timeout. So we just ignore
		 * it. */
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n",
			result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify if
 * the last 100 messages received were smaller than half of the
 * current RX buffer size. In that case, the RX buffer size is
 * halved. This will help lowering the pressure on the memory
 * allocator.
 *
 * Hard errors force the thread to exit.
 */
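/*
 * Worked example (illustrative numbers only): with a 16384-byte RX
 * buffer and an average of ~5000 bytes over the last 100 messages,
 * 5000 < 16384/2, so i2400mu_rx_size_maybe_shrink() halves the buffer
 * to 8192 and, as that is a multiple of 512, adjusts it to 8184.
 */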
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}
/*
 * Start reading from the device
 *
 * @i2400m: device instance
 *
 * Notify the RX thread that there is data pending.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
			      wimax_dev->name);
	/* the kthread function sets i2400mu->rx_kthread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "RX: cannot start thread: %d\n", result);
	}
	return result;
}
void i2400mu_rx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kthread = i2400mu->rx_kthread;
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "RX: kthread had already exited\n");
}