/*
 * WUSB Wire Adapter: WLP interface
 * Deal with TX (massaging data to transmit, handling it)
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * Transmission engine. Get an skb, create from that a WLP transmit
 * context, add a WLP TX header (which we keep prefilled in the
 * device's instance), fill out the target-specific fields and
 * put it rolling down the road.
 *
 * ROADMAP:
 *
 *   i1480u_tx_release(): called by i1480u_disconnect() to release
 *                        pending tx contexts.
 *
 *   i1480u_tx_cb(): callback for TX contexts (USB URBs)
 *     i1480u_tx_destroy()
 *
 *   i1480u_tx_timeout(): called for timeout handling from the
 *                        network stack.
 *
 *   i1480u_hard_start_xmit(): called for transmitting an skb from
 *                             the network stack. Will interact with the
 *                             WLP substack to verify and prepare the frame.
 *     i1480u_xmit_frame(): actual transmission on hardware
 *
 *       i1480u_tx_create()       Creates TX context
 *         i1480u_tx_create_1()   For packets in 1 fragment
 *         i1480u_tx_create_n()   For packets in >1 fragments
 *
 * TODO:
 *
 * - FIXME: rewrite using usb_sg_*(), add async support to
 *          usb_sg_*(). It might not make too much sense as most of
 *          the time the MTU will be smaller than one page...
 */

#include "i1480u-wlp.h"

enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};

/* Free resources allocated to a i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}

static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}

static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}

/*
 * Callback for a completed tx USB URB.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)... so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded. "
				"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
	return;
}
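
/*
 * Flow control note: the restart logic above pairs with the inflight
 * check in i1480u_xmit_frame(). With illustrative numbers (the real
 * defaults are set elsewhere in the driver), say tx_inflight.max = 100
 * and tx_inflight.threshold = 25: the queue is stopped once 100 URBs
 * are in flight and restarted only when completions bring the count
 * back down to 25, giving hysteresis instead of a stop/start cycle per
 * packet.
 */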

/*
 * Given a buffer that doesn't fit in a single fragment, create a
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx:      tx descriptor
 * @skb:      skb holding the payload to send
 * @gfp_mask: gfp allocation mask
 * @returns:  0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
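
/*
 * Worked example with made-up sizes (the real ones come from the
 * untd_hdr_* structs in i1480u-wlp.h): say i1480u_MAX_FRG_SIZE were 512
 * and the headers 16 (untd_hdr_1st), 8 (wlp_tx_hdr) and 4 (untd_hdr_rst)
 * bytes. For a 1500 byte payload:
 *
 *   pl_size_1st        = 512 - 16 - 8 = 488
 *   pl_size_left       = 1500 - 488 = 1012
 *   i1480u_MAX_PL_SIZE = 512 - 4 = 508
 *   frgs               = DIV_ROUND_UP(1012, 508) = 2
 *
 * giving a 1ST fragment (488 bytes of payload), one NXT (508) and one
 * LST (504), in a single 16 + 8 + 2*4 + 1500 = 1532 byte buffer.
 */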
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;
	size_t pl_size = skb->len;

	wtx->skb = NULL;
	pl_itr = skb->data;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;

	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;
	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
	}
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}

/*
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx:      tx descriptor
 * @skb:      skb holding the payload
 * @gfp_mask: gfp allocation mask
 * @returns:  0 if ok, < 0 errno code on error.
 *
 * This function does not consume the @skb.
 */
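
/*
 * Sketch of the headroom trick mentioned above (layout illustrative
 * only): the netdev setup must have reserved enough skb headroom for
 * both headers, so the two __skb_push() calls below cannot fail:
 *
 *   before:  [ ....headroom.... ][ ETH hdr | payload ]
 *   after:   [ .. ][ untd_hdr_cmp | wlp_tx_hdr | ETH hdr | payload ]
 */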
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	/* Single fragment: no private buffer needed, we transmit
	 * straight from the skb. */
	wtx->buf = NULL;
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}

/*
 * Given a skb to transmit, massage it to become palatable for the TX pipe
 *
 * @returns: Pointer to the new tx context if ok, NULL on error.
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
 *
 * If the first fragment is smaller than i1480u_MAX_FRG_SIZE, then the
 * following is composed:
 *
 *   complete header      \
 *   i1480 tx header      |  single fragment
 *   packet data          /
 *
 * We were going to use s/g support, but because the interface is
 * synchronous and in the end there is plenty of overhead in doing it,
 * it didn't seem worth it for data that is going to be smaller than
 * one page...
 */
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;
	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);
	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}
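
/*
 * For reference, a minimal sketch of the s/g rewrite mentioned in the
 * comment above i1480u_tx_create() (and in the TODO at the top of the
 * file), never compiled: the point would be to send the headers and the
 * untouched skb data in one request instead of memcpy()ing everything
 * into wtx->buf as i1480u_tx_create_n() does. usb_sg_init() and
 * usb_sg_wait() are the real (synchronous) USB core primitives; the
 * function name, the two-entry table and the parameters are purely
 * illustrative.
 */
#if 0
static int i1480u_tx_sg_sketch(struct i1480u *i1480u, unsigned usb_pipe,
			       void *hdrs, size_t hdrs_size,
			       struct sk_buff *skb)
{
	struct scatterlist sg[2];
	struct usb_sg_request io;
	int result;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdrs, hdrs_size);		/* untd + WLP TX headers */
	sg_set_buf(&sg[1], skb->data, skb->len);	/* payload, not copied */
	result = usb_sg_init(&io, i1480u->usb_dev, usb_pipe, 0,
			     sg, 2, 0, GFP_KERNEL);
	if (result < 0)
		return result;
	usb_sg_wait(&io);	/* blocks: this is the "synchronous" drawback */
	return io.status;
}
#endif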

/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp:  WLP substack data structure
 * @skb:  To be transmitted
 * @dst:  Device address of destination
 * @returns: 0 on success, < 0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up
 * before sending anything.
 */
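
/*
 * Besides the direct call from i1480u_hard_start_xmit() below, the WLP
 * substack can reach this function through its wlp->xmit_frame hook
 * (which this driver points at i1480u_xmit_frame() during setup); that
 * is the "called directly" case the comment above refers to.
 */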
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };
	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;

	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->options.def_tx_hdr is read
	 * without locking. We do so because its fields are kind of
	 * orthogonal to each other (and thus not changed in an atomic
	 * batch). The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA instead. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr,
					i1480u->options.pca_base_priority);
	}
	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);	/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	return result;
}

/*
 * Transmit an skb
 *
 * Called when an skbuf has to be transmitted
 *
 * The skb is first passed to WLP substack to ensure this is a valid
 * frame. If valid the device address of destination will be filled and
 * the WLP header prepended to the skb. If this step fails we fake sending
 * the frame; if we return an error the network stack will just keep trying.
 *
 * Broadcast frames inside a WSS need special treatment as multicast is
 * not supported. A broadcast frame is sent as unicast to each member of
 * the WSS - this is done by the WLP substack when it finds a broadcast
 * frame. So, we test if the WLP substack took over the skb and only
 * transmit it if it has not been taken over.
 *
 * @net_dev->xmit_lock is held
 */
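
/*
 * Return convention of wlp_prepare_tx_frame() as used below: < 0 means
 * the frame is invalid and we drop it, 1 means the WLP substack took
 * the skb over (the broadcast-inside-a-WSS case above), 0 means it is
 * ours to transmit via i1480u_xmit_frame().
 */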
netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
				   struct net_device *net_dev)
{
	int result;
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;
	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		/* trans_start time will be set when WLP actually transmits
		 * the frame */
		goto out;
	}
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	return NETDEV_TX_OK;
error:
	dev_kfree_skb_any(skb);
	net_dev->stats.tx_dropped++;
out:
	return NETDEV_TX_OK;
}

/*
 * Called when a pkt transmission doesn't complete in a reasonable period
 * Device reset may sleep - do it outside of interrupt context (delayed)
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}

void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count * 10;	/* wait 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list, so we
	 * can wait for it to become empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);
		msleep(20);
	}
}