/*
 * This file is part of wl1251
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include "wl1251.h"
#include "reg.h"
#include "tx.h"
#include "ps.h"
#include "io.h"
#include "event.h"
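/*
 * Check whether both chunks of the firmware's double-buffered TX packet
 * ring are in use, by comparing the driver's data_in counter with the
 * firmware's data_out counter (modulo the counter mask).
 */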
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
	int used, data_in_count;

	data_in_count = wl->data_in_count;

	if (data_in_count < data_out_count)
		/* data_in_count has wrapped */
		data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

	used = data_in_count - data_out_count;

	WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);

	if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
		return true;
	else
		return false;
}
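/*
 * Read the firmware TX control/status word and return -EBUSY when the
 * double buffer has no free chunk for another frame.
 */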
static int wl1251_tx_path_status(struct wl1251 *wl)
{
	u32 status, addr, data_out_count;
	bool busy;

	addr = wl->data_path->tx_control_addr;
	status = wl1251_mem_read32(wl, addr);
	data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
	busy = wl1251_tx_double_buffer_busy(wl, data_out_count);

	if (busy)
		return -EBUSY;

	return 0;
}
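/*
 * Reserve a free slot in the tx_frames table for this skb and return its
 * index, or a negative error when every slot is taken.
 */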
static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
{
	int i;

	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		if (wl->tx_frames[i] == NULL) {
			wl->tx_frames[i] = skb;
			return i;
		}

	return -EBUSY;
}
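/*
 * Fill in the control field of the TX descriptor: rate and ACK policy,
 * TX-complete reporting and the QoS bit derived from the 802.11 frame
 * control field.
 */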
static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
			      struct ieee80211_tx_info *control, u16 fc)
{
	*(u16 *)&tx_hdr->control = 0;

	tx_hdr->control.rate_policy = 0;

	tx_hdr->control.packet_type = 0;

	/* Also disable retry and ACK policy for injected packets */
	if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
	    (control->flags & IEEE80211_TX_CTL_INJECTED)) {
		tx_hdr->control.rate_policy = 1;
		tx_hdr->control.ack_policy = 1;
	}

	tx_hdr->control.tx_complete = 1;

	if ((fc & IEEE80211_FTYPE_DATA) &&
	    ((fc & IEEE80211_STYPE_QOS_DATA) ||
	     (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
		tx_hdr->control.qos = 1;
}
/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH      16
#define MAX_MPDU_SECURITY_LENGTH      16
#define WLAN_QOS_HDR_LEN              26
#define MAX_MPDU_HEADER_AND_SECURITY  (MAX_MPDU_SECURITY_LENGTH + \
				       WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE                 252
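/*
 * Estimate how many HW_BLOCK_SIZE memory blocks the firmware will need
 * for this frame, including fragmentation and worst-case security
 * overhead, and store the result in the descriptor.
 */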
static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
	u16 payload_len, frag_threshold, mem_blocks;
	u16 num_mpdus, mem_blocks_per_frag;

	frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
	tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

	payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;

	if (payload_len > frag_threshold) {
		mem_blocks_per_frag =
			((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
			 HW_BLOCK_SIZE) + 1;
		num_mpdus = payload_len / frag_threshold;
		mem_blocks = num_mpdus * mem_blocks_per_frag;
		payload_len -= num_mpdus * frag_threshold;
		num_mpdus++;
	} else {
		mem_blocks_per_frag = 0;
		mem_blocks = 0;
		num_mpdus = 1;
	}

	mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

	if (num_mpdus > 1)
		mem_blocks += min(num_mpdus, mem_blocks_per_frag);

	tx_hdr->num_mem_blocks = mem_blocks;
}
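/*
 * Reserve a tx_frames slot and prepend the double-buffer descriptor to
 * the frame, filling in length, rate, expiry, queue and control fields.
 */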
static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
			      struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	struct ieee80211_rate *rate;
	int id;
	u16 fc;

	if (!skb)
		return -EINVAL;

	id = wl1251_tx_id(wl, skb);
	if (id < 0)
		return id;

	fc = *(u16 *)skb->data;
	tx_hdr = skb_push(skb, sizeof(*tx_hdr));

	tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
	rate = ieee80211_get_tx_rate(wl->hw, control);
	tx_hdr->rate = cpu_to_le16(rate->hw_value);
	tx_hdr->expiry_time = cpu_to_le32(1 << 16);
	tx_hdr->id = id;

	tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));

	wl1251_tx_control(tx_hdr, control, fc);
	wl1251_tx_frag_block_num(tx_hdr);

	return 0;
}
/* We copy the packet to the target */
static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
				 struct ieee80211_tx_info *control)
{
	struct tx_double_buffer_desc *tx_hdr;
	int len;
	u32 addr;

	if (!skb)
		return -EINVAL;

	tx_hdr = (struct tx_double_buffer_desc *) skb->data;

	if (control->control.hw_key &&
	    control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen;
		__le16 fc;
		u16 length;
		u8 *pos;

		fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
		length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
		tx_hdr->length = cpu_to_le16(length);

		hdrlen = ieee80211_hdrlen(fc);

		pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
		memmove(pos, pos + WL1251_TKIP_IV_SPACE,
			sizeof(*tx_hdr) + hdrlen);
	}

	/* Revisit. This is a workaround for getting non-aligned packets.
	   This happens at least with EAPOL packets from the user space.
	   Our DMA requires packets to be aligned on a 4-byte boundary.
	*/
	if (unlikely((long)skb->data & 0x03)) {
		int offset = (4 - (long)skb->data) & 0x03;
		wl1251_debug(DEBUG_TX, "skb offset %d", offset);

		/* check whether the current skb can be used */
		if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
			struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
								 GFP_KERNEL);

			if (unlikely(newskb == NULL)) {
				wl1251_error("Can't allocate skb!");
				return -EINVAL;
			}

			tx_hdr = (struct tx_double_buffer_desc *) newskb->data;

			dev_kfree_skb_any(skb);
			wl->tx_frames[tx_hdr->id] = skb = newskb;

			offset = (4 - (long)skb->data) & 0x03;
			wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
		}

		/* align the buffer on a 4-byte boundary */
		if (offset) {
			unsigned char *src = skb->data;
			skb_reserve(skb, offset);
			memmove(skb->data, src, skb->len);
			tx_hdr = (struct tx_double_buffer_desc *) skb->data;
		}
	}

	/* Our skb->data at this point includes the HW header */
	len = WL1251_TX_ALIGN(skb->len);

	if (wl->data_in_count & 0x1)
		addr = wl->data_path->tx_packet_ring_addr +
			wl->data_path->tx_packet_ring_chunk_size;
	else
		addr = wl->data_path->tx_packet_ring_addr;

	wl1251_mem_write(wl, addr, skb->data, len);

	wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
		     "queue %d", tx_hdr->id, skb, tx_hdr->length,
		     tx_hdr->rate, tx_hdr->xmit_queue);

	return 0;
}
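/*
 * Trigger firmware processing of the chunk that was just written and
 * advance data_in_count to the other half of the double buffer.
 */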
static void wl1251_tx_trigger(struct wl1251 *wl)
{
	u32 data, addr;

	if (wl->data_in_count & 0x1) {
		addr = ACX_REG_INTERRUPT_TRIG_H;
		data = INTR_TRIG_TX_PROC1;
	} else {
		addr = ACX_REG_INTERRUPT_TRIG;
		data = INTR_TRIG_TX_PROC0;
	}

	wl1251_reg_write32(wl, addr, data);

	/* Bumping data in */
	wl->data_in_count = (wl->data_in_count + 1) &
		TX_STATUS_DATA_OUT_COUNT_MASK;
}
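/*
 * With no interface present (monitor mode), issue a join and wait for its
 * completion so the firmware opens the TX path for injected frames.
 */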
static void enable_tx_for_packet_injection(struct wl1251 *wl)
{
	int ret;

	ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
			      wl->beacon_int, wl->dtim_period);
	if (ret < 0) {
		wl1251_warning("join failed");
		return;
	}

	ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
	if (ret < 0) {
		wl1251_warning("join timeout");
		return;
	}

	wl->joined = true;
}
/* caller must hold wl->mutex */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info;
	int ret = 0;
	u8 idx;

	info = IEEE80211_SKB_CB(skb);

	if (info->control.hw_key) {
		if (unlikely(wl->monitor_present))
			return -EINVAL;

		idx = info->control.hw_key->hw_key_idx;
		if (unlikely(wl->default_key != idx)) {
			ret = wl1251_acx_default_key(wl, idx);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable tx path in monitor mode for packet injection */
	if ((wl->vif == NULL) && !wl->joined)
		enable_tx_for_packet_injection(wl);

	ret = wl1251_tx_path_status(wl);
	if (ret < 0)
		return ret;

	ret = wl1251_tx_fill_hdr(wl, skb, info);
	if (ret < 0)
		return ret;

	ret = wl1251_tx_send_packet(wl, skb, info);
	if (ret < 0)
		return ret;

	wl1251_tx_trigger(wl);

	return ret;
}
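/*
 * Worker that drains wl->tx_queue: wake the chip from ELP once, push
 * frames to the firmware and requeue the current frame when the firmware
 * double buffer is busy.
 */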
void wl1251_tx_work(struct work_struct *work)
{
	struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
	struct sk_buff *skb;
	bool woken_up = false;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1251_STATE_OFF))
		goto out;

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		if (!woken_up) {
			ret = wl1251_ps_elp_wakeup(wl);
			if (ret < 0)
				goto out;
			woken_up = true;
		}

		ret = wl1251_tx_frame(wl, skb);
		if (ret == -EBUSY) {
			skb_queue_head(&wl->tx_queue, skb);
			goto out;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out;
		}
	}

out:
	if (woken_up)
		wl1251_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
}
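/*
 * Decode the 8-bit firmware TX status into a short string, one character
 * per error bit, for debug logging only.
 */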
static const char *wl1251_tx_parse_status(u8 status)
{
	/* 8 bit status field, one character per bit plus null */
	static char buf[9];
	int i = 0;

	memset(buf, 0, sizeof(buf));

	if (status & TX_DMA_ERROR)
		buf[i++] = 'm';
	if (status & TX_DISABLED)
		buf[i++] = 'd';
	if (status & TX_RETRY_EXCEEDED)
		buf[i++] = 'r';
	if (status & TX_TIMEOUT)
		buf[i++] = 't';
	if (status & TX_KEY_NOT_FOUND)
		buf[i++] = 'k';
	if (status & TX_ENCRYPT_FAIL)
		buf[i++] = 'e';
	if (status & TX_UNAVAILABLE_PRIORITY)
		buf[i++] = 'p';

	/* bit 0 is unused apparently */

	return buf;
}
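/*
 * Hand a completed frame back to mac80211: convert the firmware result
 * into TX status flags, strip the private descriptor (and the TKIP IV
 * space if used) and release the tx_frames slot.
 */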
static void wl1251_tx_packet_cb(struct wl1251 *wl,
				struct tx_result *result)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int hdrlen;
	u8 *frame;

	skb = wl->tx_frames[result->id];
	if (skb == NULL) {
		wl1251_error("SKB for packet %d is NULL", result->id);
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
	    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
	    (result->status == TX_SUCCESS))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.rates[0].count = result->ack_failures + 1;
	wl->stats.retry_count += result->ack_failures;

	/*
	 * We have to remove our private TX header before pushing
	 * the skb back to mac80211.
	 */
	frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
		skb_pull(skb, WL1251_TKIP_IV_SPACE);
	}

	wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x (%s)",
		     result->id, skb, result->ack_failures, result->rate,
		     result->status, wl1251_tx_parse_status(result->status));

	ieee80211_tx_status(wl->hw, skb);

	wl->tx_frames[result->id] = NULL;
}
/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
	int i, result_index, num_complete = 0, queue_len;
	struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
	unsigned long flags;

	if (unlikely(wl->state != WL1251_STATE_ON))
		return;

	/* First we read the result */
	wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
			result, sizeof(result));

	result_index = wl->next_tx_complete;

	for (i = 0; i < ARRAY_SIZE(result); i++) {
		result_ptr = &result[result_index];

		if (result_ptr->done_1 == 1 &&
		    result_ptr->done_2 == 1) {
			wl1251_tx_packet_cb(wl, result_ptr);

			result_ptr->done_1 = 0;
			result_ptr->done_2 = 0;

			result_index = (result_index + 1) &
				(FW_TX_CMPLT_BLOCK_SIZE - 1);
			num_complete++;
		} else {
			break;
		}
	}

	queue_len = skb_queue_len(&wl->tx_queue);

	if ((num_complete > 0) && (queue_len > 0)) {
		/* firmware buffer has space, reschedule tx_work */
		wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	}

	if (wl->tx_queue_stopped &&
	    queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
		/* tx_queue has space, restart queues */
		wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_wake_queues(wl->hw);
		wl->tx_queue_stopped = false;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	/* Every completed frame needs to be acknowledged */
	if (num_complete) {
		/*
		 * If we've wrapped, we have to clear
		 * the results in 2 steps.
		 */
		if (result_index > wl->next_tx_complete) {
			/* Only 1 write is needed */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr +
					 (wl->next_tx_complete *
					  sizeof(struct tx_result)),
					 &result[wl->next_tx_complete],
					 num_complete *
					 sizeof(struct tx_result));

		} else if (result_index < wl->next_tx_complete) {
			/* 2 writes are needed */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr +
					 (wl->next_tx_complete *
					  sizeof(struct tx_result)),
					 &result[wl->next_tx_complete],
					 (FW_TX_CMPLT_BLOCK_SIZE -
					  wl->next_tx_complete) *
					 sizeof(struct tx_result));

			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr,
					 result,
					 (num_complete -
					  FW_TX_CMPLT_BLOCK_SIZE +
					  wl->next_tx_complete) *
					 sizeof(struct tx_result));

		} else {
			/* We have to write the whole array */
			wl1251_mem_write(wl,
					 wl->data_path->tx_complete_addr,
					 result,
					 FW_TX_CMPLT_BLOCK_SIZE *
					 sizeof(struct tx_result));
		}
	}

	wl->next_tx_complete = result_index;
}
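/*
 * Drop every frame still sitting in the TX queue or in the tx_frames
 * table, reporting status to mac80211 only for frames that requested it.
 */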
/* caller must hold wl->mutex */
void wl1251_tx_flush(struct wl1251 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

/*	control->flags = 0; FIXME */

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		info = IEEE80211_SKB_CB(skb);

		wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);

		if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
			continue;

		ieee80211_tx_status(wl->hw, skb);
	}

	for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
		if (wl->tx_frames[i] != NULL) {
			skb = wl->tx_frames[i];
			info = IEEE80211_SKB_CB(skb);

			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
				continue;

			ieee80211_tx_status(wl->hw, skb);
			wl->tx_frames[i] = NULL;
		}
}