/*
 * This file is part of wl1251
 *
 * Copyright (c) 1998-2007 Texas Instruments Incorporated
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include "wl1251.h"
#include "reg.h"
#include "tx.h"
#include "ps.h"
#include "io.h"
#include "event.h"
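
/*
 * The firmware exposes a double-buffered TX packet ring. The host keeps a
 * count of the chunks it has pushed (wl->data_in_count) while the firmware
 * reports how many it has consumed (data_out_count); comparing the two,
 * modulo the counter mask, tells us whether both chunks are still in use.
 */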
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
        int used, data_in_count;

        data_in_count = wl->data_in_count;

        if (data_in_count < data_out_count)
                /* data_in_count has wrapped */
                data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;

        used = data_in_count - data_out_count;

        WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);

        if (used >= DP_TX_PACKET_RING_CHUNK_NUM)
                return true;
        else
                return false;
}
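
/*
 * Read the TX control word from the firmware data path and check whether
 * the double buffer can accept another packet. Returns 0 when there is
 * room and -EBUSY otherwise, so the caller can requeue the frame.
 */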
static int wl1251_tx_path_status(struct wl1251 *wl)
{
        u32 status, addr, data_out_count;
        bool busy;

        addr = wl->data_path->tx_control_addr;
        status = wl1251_mem_read32(wl, addr);
        data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
        busy = wl1251_tx_double_buffer_busy(wl, data_out_count);

        if (busy)
                return -EBUSY;

        return 0;
}
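
/*
 * Reserve a free slot in wl->tx_frames for this skb and return its index;
 * the index doubles as the descriptor id reported back in the TX result.
 */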
static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
{
        int i;

        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                if (wl->tx_frames[i] == NULL) {
                        wl->tx_frames[i] = skb;
                        return i;
                }

        return -EBUSY;
}
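
/*
 * Fill the control field of the double-buffer descriptor: request a TX
 * completion, mark QoS data frames, and disable the rate and ACK policies
 * for frames that must not be retried (no-ACK or injected frames).
 */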
static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
                              struct ieee80211_tx_info *control, u16 fc)
{
        *(u16 *)&tx_hdr->control = 0;

        tx_hdr->control.rate_policy = 0;

        tx_hdr->control.packet_type = 0;

        /* Also disable retry and ACK policy for injected packets */
        if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
            (control->flags & IEEE80211_TX_CTL_INJECTED)) {
                tx_hdr->control.rate_policy = 1;
                tx_hdr->control.ack_policy = 1;
        }

        tx_hdr->control.tx_complete = 1;

        if ((fc & IEEE80211_FTYPE_DATA) &&
            ((fc & IEEE80211_STYPE_QOS_DATA) ||
             (fc & IEEE80211_STYPE_QOS_NULLFUNC)))
                tx_hdr->control.qos = 1;
}
/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH        16
#define MAX_MPDU_SECURITY_LENGTH        16
#define WLAN_QOS_HDR_LEN                26
#define MAX_MPDU_HEADER_AND_SECURITY    (MAX_MPDU_SECURITY_LENGTH + \
                                         WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE                   252
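
/*
 * Estimate how many HW_BLOCK_SIZE memory blocks the firmware needs for
 * this frame, accounting for worst-case security overhead and for
 * fragmentation when the payload exceeds the fragmentation threshold.
 */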
static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
        u16 payload_len, frag_threshold, mem_blocks;
        u16 num_mpdus, mem_blocks_per_frag;

        frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
        tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);

        payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;

        if (payload_len > frag_threshold) {
                mem_blocks_per_frag =
                        ((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
                         HW_BLOCK_SIZE) + 1;
                num_mpdus = payload_len / frag_threshold;
                mem_blocks = num_mpdus * mem_blocks_per_frag;
                payload_len -= num_mpdus * frag_threshold;
                num_mpdus++;
        } else {
                mem_blocks_per_frag = 0;
                mem_blocks = 0;
                num_mpdus = 1;
        }

        mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;

        if (num_mpdus > 1)
                mem_blocks += min(num_mpdus, mem_blocks_per_frag);

        tx_hdr->num_mem_blocks = mem_blocks;
}
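
/*
 * Push the driver's tx_double_buffer_desc in front of the 802.11 frame and
 * fill in length, rate, expiry time, queue and control bits before the
 * packet is copied out to the firmware.
 */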
static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
                              struct ieee80211_tx_info *control)
{
        struct tx_double_buffer_desc *tx_hdr;
        struct ieee80211_rate *rate;
        int id;
        u16 fc;

        if (!skb)
                return -EINVAL;

        id = wl1251_tx_id(wl, skb);
        if (id < 0)
                return id;

        fc = *(u16 *)skb->data;
        tx_hdr = (struct tx_double_buffer_desc *) skb_push(skb,
                                                           sizeof(*tx_hdr));

        tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
        rate = ieee80211_get_tx_rate(wl->hw, control);
        tx_hdr->rate = cpu_to_le16(rate->hw_value);
        tx_hdr->expiry_time = cpu_to_le32(1 << 16);
        tx_hdr->id = id;

        tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));

        wl1251_tx_control(tx_hdr, control, fc);
        wl1251_tx_frag_block_num(tx_hdr);

        return 0;
}
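
/*
 * For TKIP the firmware expects room for the IV between the 802.11 header
 * and the payload, so the descriptor and header are shifted forward by
 * WL1251_TKIP_IV_SPACE before the copy. The buffer is also re-aligned to a
 * 4-byte boundary, which the DMA engine requires.
 */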
/* We copy the packet to the target */
static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
                                 struct ieee80211_tx_info *control)
{
        struct tx_double_buffer_desc *tx_hdr;
        int len;
        u32 addr;

        if (!skb)
                return -EINVAL;

        tx_hdr = (struct tx_double_buffer_desc *) skb->data;

        if (control->control.hw_key &&
            control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                int hdrlen;
                __le16 fc;
                u16 length;
                u8 *pos;

                fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
                length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
                tx_hdr->length = cpu_to_le16(length);

                hdrlen = ieee80211_hdrlen(fc);

                pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
                memmove(pos, pos + WL1251_TKIP_IV_SPACE,
                        sizeof(*tx_hdr) + hdrlen);
        }

        /* Revisit. This is a workaround for getting non-aligned packets.
           This happens at least with EAPOL packets from the user space.
           Our DMA requires packets to be aligned on a 4-byte boundary.
        */
        if (unlikely((long)skb->data & 0x03)) {
                int offset = (4 - (long)skb->data) & 0x03;
                wl1251_debug(DEBUG_TX, "skb offset %d", offset);

                /* check whether the current skb can be used */
                if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
                        struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
                                                                 GFP_KERNEL);

                        if (unlikely(newskb == NULL)) {
                                wl1251_error("Can't allocate skb!");
                                return -EINVAL;
                        }

                        tx_hdr = (struct tx_double_buffer_desc *) newskb->data;

                        dev_kfree_skb_any(skb);
                        wl->tx_frames[tx_hdr->id] = skb = newskb;

                        offset = (4 - (long)skb->data) & 0x03;
                        wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
                }

                /* align the buffer on a 4-byte boundary */
                if (offset) {
                        unsigned char *src = skb->data;

                        skb_reserve(skb, offset);
                        memmove(skb->data, src, skb->len);
                        tx_hdr = (struct tx_double_buffer_desc *) skb->data;
                }
        }

        /* Our skb->data at this point includes the HW header */
        len = WL1251_TX_ALIGN(skb->len);

        if (wl->data_in_count & 0x1)
                addr = wl->data_path->tx_packet_ring_addr +
                       wl->data_path->tx_packet_ring_chunk_size;
        else
                addr = wl->data_path->tx_packet_ring_addr;

        wl1251_mem_write(wl, addr, skb->data, len);

        wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
                     "queue %d", tx_hdr->id, skb, tx_hdr->length,
                     tx_hdr->rate, tx_hdr->xmit_queue);

        return 0;
}
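
/*
 * Kick the firmware: the two chunks of the double buffer are triggered
 * through different interrupt trigger registers, selected by the parity of
 * data_in_count, which is then advanced.
 */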
static void wl1251_tx_trigger(struct wl1251 *wl)
{
        u32 data, addr;

        if (wl->data_in_count & 0x1) {
                addr = ACX_REG_INTERRUPT_TRIG_H;
                data = INTR_TRIG_TX_PROC1;
        } else {
                addr = ACX_REG_INTERRUPT_TRIG;
                data = INTR_TRIG_TX_PROC0;
        }

        wl1251_reg_write32(wl, addr, data);

        /* Bumping data in */
        wl->data_in_count = (wl->data_in_count + 1) &
                TX_STATUS_DATA_OUT_COUNT_MASK;
}
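
/*
 * Injected frames can arrive without an associated interface; issue a join
 * command so the firmware opens the TX path and wait for the join event to
 * complete before transmitting.
 */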
static void enable_tx_for_packet_injection(struct wl1251 *wl)
{
        int ret;

        ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
                              wl->beacon_int, wl->dtim_period);
        if (ret < 0) {
                wl1251_warning("join failed");
                return;
        }

        ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
        if (ret < 0) {
                wl1251_warning("join timeout");
                return;
        }

        wl->joined = true;
}
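
/*
 * Transmit one frame: program the default key if needed, make sure the TX
 * path has room, prepend the HW descriptor, copy the frame to the firmware
 * and trigger the transfer.
 */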
/* caller must hold wl->mutex */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info;
        int ret = 0;
        u8 idx;

        info = IEEE80211_SKB_CB(skb);

        if (info->control.hw_key) {
                if (unlikely(wl->monitor_present))
                        return -EINVAL;

                idx = info->control.hw_key->hw_key_idx;
                if (unlikely(wl->default_key != idx)) {
                        ret = wl1251_acx_default_key(wl, idx);
                        if (ret < 0)
                                return ret;
                }
        }

        /* Enable tx path in monitor mode for packet injection */
        if ((wl->vif == NULL) && !wl->joined)
                enable_tx_for_packet_injection(wl);

        ret = wl1251_tx_path_status(wl);
        if (ret < 0)
                return ret;

        ret = wl1251_tx_fill_hdr(wl, skb, info);
        if (ret < 0)
                return ret;

        ret = wl1251_tx_send_packet(wl, skb, info);
        if (ret < 0)
                return ret;

        wl1251_tx_trigger(wl);

        return ret;
}
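
/*
 * Worker that drains wl->tx_queue. The chip is woken from ELP once per run;
 * frames rejected with -EBUSY are put back at the head of the queue so they
 * are retried once the firmware frees a buffer.
 */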
void wl1251_tx_work(struct work_struct *work)
{
        struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
        struct sk_buff *skb;
        bool woken_up = false;
        int ret;

        mutex_lock(&wl->mutex);

        if (unlikely(wl->state == WL1251_STATE_OFF))
                goto out;

        while ((skb = skb_dequeue(&wl->tx_queue))) {
                if (!woken_up) {
                        ret = wl1251_ps_elp_wakeup(wl);
                        if (ret < 0)
                                goto out;
                        woken_up = true;
                }

                ret = wl1251_tx_frame(wl, skb);
                if (ret == -EBUSY) {
                        skb_queue_head(&wl->tx_queue, skb);
                        goto out;
                } else if (ret < 0) {
                        dev_kfree_skb(skb);
                        goto out;
                }
        }

out:
        if (woken_up)
                wl1251_ps_elp_sleep(wl);

        mutex_unlock(&wl->mutex);
}
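
/*
 * Build a short human-readable string for the TX result status bitmask,
 * one marker character per error bit, for debug logging.
 */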
static const char *wl1251_tx_parse_status(u8 status)
{
        /* 8 bit status field, one character per bit plus null */
        static char buf[9];
        int i = 0;

        memset(buf, 0, sizeof(buf));

        if (status & TX_DMA_ERROR)
                buf[i++] = 'm';
        if (status & TX_DISABLED)
                buf[i++] = 'd';
        if (status & TX_RETRY_EXCEEDED)
                buf[i++] = 'r';
        if (status & TX_TIMEOUT)
                buf[i++] = 't';
        if (status & TX_KEY_NOT_FOUND)
                buf[i++] = 'k';
        if (status & TX_ENCRYPT_FAIL)
                buf[i++] = 'e';
        if (status & TX_UNAVAILABLE_PRIORITY)
                buf[i++] = 'p';

        /* bit 0 is unused apparently */

        return buf;
}
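
/*
 * Handle a single TX result: translate the firmware status into mac80211
 * flags, strip our private descriptor (and the TKIP IV space) again and
 * hand the skb back via ieee80211_tx_status().
 */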
static void wl1251_tx_packet_cb(struct wl1251 *wl,
                                struct tx_result *result)
{
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int hdrlen;
        u8 *frame;

        skb = wl->tx_frames[result->id];
        if (skb == NULL) {
                wl1251_error("SKB for packet %d is NULL", result->id);
                return;
        }

        info = IEEE80211_SKB_CB(skb);

        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
            !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
            (result->status == TX_SUCCESS))
                info->flags |= IEEE80211_TX_STAT_ACK;

        info->status.rates[0].count = result->ack_failures + 1;
        wl->stats.retry_count += result->ack_failures;

        /*
         * We have to remove our private TX header before pushing
         * the skb back to mac80211.
         */
        frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
        if (info->control.hw_key &&
            info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
                hdrlen = ieee80211_get_hdrlen_from_skb(skb);
                memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
                skb_pull(skb, WL1251_TKIP_IV_SPACE);
        }

        wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
                     " status 0x%x (%s)",
                     result->id, skb, result->ack_failures, result->rate,
                     result->status, wl1251_tx_parse_status(result->status));

        ieee80211_tx_status(wl->hw, skb);

        wl->tx_frames[result->id] = NULL;
}
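
/*
 * The firmware keeps FW_TX_CMPLT_BLOCK_SIZE tx_result entries in a circular
 * buffer. Entries are processed starting at next_tx_complete until one with
 * unset done flags is found; the consumed entries are then cleared back in
 * firmware memory, in one or two writes depending on wrap-around.
 */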
/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
        int i, result_index, num_complete = 0, queue_len;
        struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
        unsigned long flags;

        if (unlikely(wl->state != WL1251_STATE_ON))
                return;

        /* First we read the result */
        wl1251_mem_read(wl, wl->data_path->tx_complete_addr,
                        result, sizeof(result));

        result_index = wl->next_tx_complete;

        for (i = 0; i < ARRAY_SIZE(result); i++) {
                result_ptr = &result[result_index];

                if (result_ptr->done_1 == 1 &&
                    result_ptr->done_2 == 1) {
                        wl1251_tx_packet_cb(wl, result_ptr);

                        result_ptr->done_1 = 0;
                        result_ptr->done_2 = 0;

                        result_index = (result_index + 1) &
                                (FW_TX_CMPLT_BLOCK_SIZE - 1);
                        num_complete++;
                } else {
                        break;
                }
        }

        queue_len = skb_queue_len(&wl->tx_queue);

        if ((num_complete > 0) && (queue_len > 0)) {
                /* firmware buffer has space, reschedule tx_work */
                wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
                ieee80211_queue_work(wl->hw, &wl->tx_work);
        }

        if (wl->tx_queue_stopped &&
            queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
                /* tx_queue has space, restart queues */
                wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
                spin_lock_irqsave(&wl->wl_lock, flags);
                ieee80211_wake_queues(wl->hw);
                wl->tx_queue_stopped = false;
                spin_unlock_irqrestore(&wl->wl_lock, flags);
        }

        /* Every completed frame needs to be acknowledged */
        if (num_complete) {
                /*
                 * If we've wrapped, we have to clear
                 * the results in 2 steps.
                 */
                if (result_index > wl->next_tx_complete) {
                        /* Only 1 write is needed */
                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr +
                                         (wl->next_tx_complete *
                                          sizeof(struct tx_result)),
                                         &result[wl->next_tx_complete],
                                         num_complete *
                                         sizeof(struct tx_result));
                } else if (result_index < wl->next_tx_complete) {
                        /* 2 writes are needed */
                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr +
                                         (wl->next_tx_complete *
                                          sizeof(struct tx_result)),
                                         &result[wl->next_tx_complete],
                                         (FW_TX_CMPLT_BLOCK_SIZE -
                                          wl->next_tx_complete) *
                                         sizeof(struct tx_result));

                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr,
                                         result,
                                         (num_complete -
                                          FW_TX_CMPLT_BLOCK_SIZE +
                                          wl->next_tx_complete) *
                                         sizeof(struct tx_result));
                } else {
                        /* We have to write the whole array */
                        wl1251_mem_write(wl,
                                         wl->data_path->tx_complete_addr,
                                         result,
                                         FW_TX_CMPLT_BLOCK_SIZE *
                                         sizeof(struct tx_result));
                }
        }

        wl->next_tx_complete = result_index;
}
/* caller must hold wl->mutex */
void wl1251_tx_flush(struct wl1251 *wl)
{
        int i;
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;

/*      control->flags = 0; FIXME */

        while ((skb = skb_dequeue(&wl->tx_queue))) {
                info = IEEE80211_SKB_CB(skb);

                wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);

                if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                        continue;

                ieee80211_tx_status(wl->hw, skb);
        }

        for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
                if (wl->tx_frames[i] != NULL) {
                        skb = wl->tx_frames[i];
                        info = IEEE80211_SKB_CB(skb);

                        if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
                                continue;

                        ieee80211_tx_status(wl->hw, skb);
                        wl->tx_frames[i] = NULL;
                }
}