/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-4965-hw.h"
#include "iwl-4965.h"
/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *      VO      0
 *      VI      1
 *      BE      2
 *      BK      3
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination), such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c), the AC->hw queue mapping is the identity
 * mapping.
 */

static const u8 tid_to_ac[] = {
        /* this matches the mac80211 numbers */
        2, 3, 3, 2, 1, 1, 0, 0
};
static inline int iwl4965_get_ac_from_tid(u16 tid)
{
        if (likely(tid < ARRAY_SIZE(tid_to_ac)))
                return tid_to_ac[tid];

        /* no support for TIDs 8-15 yet */
        return -EINVAL;
}
static inline int
iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
        if (likely(tid < ARRAY_SIZE(tid_to_ac)))
                return ctx->ac_to_fifo[tid_to_ac[tid]];

        /* no support for TIDs 8-15 yet */
        return -EINVAL;
}
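/*
 * Illustrative sketch (not used by the driver itself): how the two helpers
 * above combine.  A voice frame carries TID 6; tid_to_ac[6] is 0 (mac80211's
 * VO queue), so the frame goes out through the FIFO that the context
 * assigned to that AC.
 */
static inline int __maybe_unused
iwl4965_example_voice_fifo(struct iwl_rxon_context *ctx)
{
        /* tid_to_ac[6] == 0 (VO), hence ctx->ac_to_fifo[0] */
        return iwl4965_get_fifo_from_tid(ctx, 6);
}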
/*
 * handle build REPLY_TX command notification.
 */
static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
                                        struct sk_buff *skb,
                                        struct iwl_tx_cmd *tx_cmd,
                                        struct ieee80211_tx_info *info,
                                        struct ieee80211_hdr *hdr,
                                        u8 std_id)
{
        __le16 fc = hdr->frame_control;
        __le32 tx_flags = tx_cmd->tx_flags;

        tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
                tx_flags |= TX_CMD_FLG_ACK_MSK;
                if (ieee80211_is_mgmt(fc))
                        tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
                if (ieee80211_is_probe_resp(fc) &&
                    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
                        tx_flags |= TX_CMD_FLG_TSF_MSK;
        } else {
                tx_flags &= (~TX_CMD_FLG_ACK_MSK);
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;

        tx_cmd->sta_id = std_id;
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

        if (ieee80211_is_data_qos(fc)) {
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
                tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
        }

        iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);

        tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
                else
                        tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->timeout.pm_frame_timeout = 0;
        }

        tx_cmd->driver_txop = 0;
        tx_cmd->tx_flags = tx_flags;
        tx_cmd->next_frame_len = 0;
}
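/*
 * Example (illustrative summary, based on the flag descriptions in
 * iwl-commands.h): for a unicast QoS data frame that expects an ACK, the
 * helper above sets TX_CMD_FLG_ACK_MSK, copies the TID into tid_tspec and
 * clears TX_CMD_FLG_SEQ_CTL_MSK, because the driver maintains per-TID
 * sequence numbers itself; for management and other non-QoS frames the
 * flag is set so the uCode assigns the sequence control field instead.
 */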
#define RTS_DEFAULT_RETRY_LIMIT         60

static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
                              struct iwl_tx_cmd *tx_cmd,
                              struct ieee80211_tx_info *info,
                              __le16 fc)
{
        u32 rate_flags;
        int rate_idx;
        u8 rts_retry_limit;
        u8 data_retry_limit;
        u8 rate_plcp;

        /* Set retry limit on DATA packets and Probe Responses */
        if (ieee80211_is_probe_resp(fc))
                data_retry_limit = 3;
        else
                data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
        tx_cmd->data_retry_limit = data_retry_limit;

        /* Set retry limit on RTS packets */
        rts_retry_limit = RTS_DEFAULT_RETRY_LIMIT;
        if (data_retry_limit < rts_retry_limit)
                rts_retry_limit = data_retry_limit;
        tx_cmd->rts_retry_limit = rts_retry_limit;

        /* DATA packets will use the uCode station table for rate/antenna
         * selection */
        if (ieee80211_is_data(fc)) {
                tx_cmd->initial_rate_index = 0;
                tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
                return;
        }

        /*
         * If the current TX rate stored in mac80211 has the MCS bit set, it's
         * not really a TX rate.  Thus, we use the lowest supported rate for
         * this band.  Also use the lowest supported rate if the stored rate
         * index is invalid.
         */
        rate_idx = info->control.rates[0].idx;
        if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
                        (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
                rate_idx = rate_lowest_index(&priv->bands[info->band],
                                info->control.sta);
        /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
        if (info->band == IEEE80211_BAND_5GHZ)
                rate_idx += IWL_FIRST_OFDM_RATE;
        /* Get PLCP rate for tx_cmd->rate_n_flags */
        rate_plcp = iwlegacy_rates[rate_idx].plcp;
        /* Zero out flags for this packet */
        rate_flags = 0;

        /* Set CCK flag as needed */
        if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
                rate_flags |= RATE_MCS_CCK_MSK;

        /* Set up antennas */
        priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
                                        priv->hw_params.valid_tx_ant);

        rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);

        /* Set the rate in the TX cmd */
        tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
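/*
 * Worked example (illustrative): mac80211 hands us control.rates[0].idx
 * relative to the current band, so on 5 GHz index 0 means the lowest OFDM
 * rate (6 Mb/s).  The driver's iwlegacy_rates[] table lists the CCK rates
 * first, hence the IWL_FIRST_OFDM_RATE offset above; only remapped indices
 * in the IWL_FIRST_CCK_RATE..IWL_LAST_CCK_RATE range get RATE_MCS_CCK_MSK.
 */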
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
                                      struct ieee80211_tx_info *info,
                                      struct iwl_tx_cmd *tx_cmd,
                                      struct sk_buff *skb_frag,
                                      int sta_id)
{
        struct ieee80211_key_conf *keyconf = info->control.hw_key;

        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
                tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
                memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
                if (info->flags & IEEE80211_TX_CTL_AMPDU)
                        tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
                IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
                break;

        case WLAN_CIPHER_SUITE_TKIP:
                tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
                ieee80211_get_tkip_key(keyconf, skb_frag,
                        IEEE80211_TKIP_P2_KEY, tx_cmd->key);
                IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
                break;

        case WLAN_CIPHER_SUITE_WEP104:
                tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
                /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
                tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
                        (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

                memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

                IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
                             "with key %d\n", keyconf->keyidx);
                break;

        default:
                IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
                break;
        }
}
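/*
 * Example (illustrative): for a WEP-104 key at hardware index 2, the two
 * WEP cases above combine to sec_ctl = TX_CMD_SEC_KEY128 | TX_CMD_SEC_WEP |
 * (2 << TX_CMD_SEC_SHIFT), and the key material is copied to
 * &tx_cmd->key[3], the offset this command format uses for WEP keys.
 */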
/*
 * start REPLY_TX command process
 */
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = info->control.sta;
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_tx_queue *txq;
        struct iwl_queue *q;
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        struct iwl_tx_cmd *tx_cmd;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        int txq_id;
        dma_addr_t phys_addr;
        dma_addr_t txcmd_phys;
        dma_addr_t scratch_phys;
        u16 len, firstlen, secondlen;
        u16 seq_number = 0;
        __le16 fc;
        u8 hdr_len;
        u8 sta_id;
        u8 wait_write_ptr = 0;
        u8 tid = 0;
        u8 *qc = NULL;
        unsigned long flags;
        bool is_agg = false;

        if (info->control.vif)
                ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);

        spin_lock_irqsave(&priv->lock, flags);
        if (iwl_legacy_is_rfkill(priv)) {
                IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
                goto drop_unlock_priv;
        }

        fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
        if (ieee80211_is_auth(fc))
                IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
        else if (ieee80211_is_assoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
        else if (ieee80211_is_reassoc_req(fc))
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
        hdr_len = ieee80211_hdrlen(fc);

        /* For management frames use broadcast id so as not to break aggregation */
        if (!ieee80211_is_data(fc))
                sta_id = ctx->bcast_sta_id;
        else {
                /* Find index into station table for destination station */
                sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx,
                                                        info->control.sta);

                if (sta_id == IWL_INVALID_STATION) {
                        IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
                                       hdr->addr1);
                        goto drop_unlock_priv;
                }
        }

        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

        if (sta)
                sta_priv = (void *)sta->drv_priv;

        if (sta_priv && sta_priv->asleep &&
            (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
                /*
                 * This sends an asynchronous command to the device,
                 * but we can rely on it being processed before the
                 * next frame is processed -- and the next frame to
                 * this station is the one that will consume this
                 * counter.
                 * For now set the counter to just 1 since we do not
                 * support uAPSD yet.
                 */
                iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
        }

        /*
         * Send this frame after DTIM -- there's a special queue
         * reserved for this for contexts that support AP mode.
         */
        if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
                txq_id = ctx->mcast_queue;
                /*
                 * The microcode will clear the more data
                 * bit in the last frame it transmits.
                 */
                hdr->frame_control |=
                        cpu_to_le16(IEEE80211_FCTL_MOREDATA);
        } else
                txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
        /* irqs already disabled/saved above when locking priv->lock */
        spin_lock(&priv->sta_lock);

        if (ieee80211_is_data_qos(fc)) {
                qc = ieee80211_get_qos_ctl(hdr);
                tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
                if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
                        spin_unlock(&priv->sta_lock);
                        goto drop_unlock_priv;
                }
                seq_number = priv->stations[sta_id].tid[tid].seq_number;
                seq_number &= IEEE80211_SCTL_SEQ;
                hdr->seq_ctrl = hdr->seq_ctrl &
                                cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
                seq_number += 0x10;
                /* aggregation is on for this <sta,tid> */
                if (info->flags & IEEE80211_TX_CTL_AMPDU &&
                    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
                        txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
                        is_agg = true;
                }
        }

        txq = &priv->txq[txq_id];
        q = &txq->q;

        if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
                spin_unlock(&priv->sta_lock);
                goto drop_unlock_priv;
        }

        if (ieee80211_is_data_qos(fc)) {
                priv->stations[sta_id].tid[tid].tfds_in_queue++;
                if (!ieee80211_has_morefrags(fc))
                        priv->stations[sta_id].tid[tid].seq_number = seq_number;
        }

        spin_unlock(&priv->sta_lock);
408 memset(&(txq
->txb
[q
->write_ptr
]), 0, sizeof(struct iwl_tx_info
));
409 txq
->txb
[q
->write_ptr
].skb
= skb
;
410 txq
->txb
[q
->write_ptr
].ctx
= ctx
;
412 /* Set up first empty entry in queue's array of Tx/cmd buffers */
413 out_cmd
= txq
->cmd
[q
->write_ptr
];
414 out_meta
= &txq
->meta
[q
->write_ptr
];
415 tx_cmd
= &out_cmd
->cmd
.tx
;
416 memset(&out_cmd
->hdr
, 0, sizeof(out_cmd
->hdr
));
417 memset(tx_cmd
, 0, sizeof(struct iwl_tx_cmd
));
420 * Set up the Tx-command (not MAC!) header.
421 * Store the chosen Tx queue and TFD index within the sequence field;
422 * after Tx, uCode's Tx response will return this value so driver can
423 * locate the frame within the tx queue and do post-tx processing.
425 out_cmd
->hdr
.cmd
= REPLY_TX
;
426 out_cmd
->hdr
.sequence
= cpu_to_le16((u16
)(QUEUE_TO_SEQ(txq_id
) |
427 INDEX_TO_SEQ(q
->write_ptr
)));
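        /*
         * Example (illustrative): for txq_id 2 and write_ptr 5 the sequence
         * field becomes QUEUE_TO_SEQ(2) | INDEX_TO_SEQ(5); when the Tx
         * response comes back, SEQ_TO_QUEUE() and SEQ_TO_INDEX() recover
         * the queue and TFD index for post-tx processing.
         */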
        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);

        /* Total # bytes to be transmitted */
        len = (u16)skb->len;
        tx_cmd->len = cpu_to_le16(len);

        if (info->control.hw_key)
                iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

        /* TODO need this for burst mode later on */
        iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
        iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);

        iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);

        iwl_legacy_update_stats(priv, true, fc, len);
        /*
         * Use the first empty entry in this queue's command buffer array
         * to contain the Tx command and MAC header concatenated together
         * (payload data will be in another buffer).
         * Size of this varies, due to varying MAC header length.
         * If end is not dword aligned, we'll have 2 extra bytes at the end
         * of the MAC header (device reads on dword boundaries).
         * We'll tell device about this padding later.
         */
        len = sizeof(struct iwl_tx_cmd) +
                sizeof(struct iwl_cmd_header) + hdr_len;
        firstlen = (len + 3) & ~3;
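        /*
         * Worked example (illustrative): (len + 3) & ~3 rounds len up to
         * the next multiple of 4, so len = 50 gives firstlen = 52 and the
         * two padding bytes are reported via TX_CMD_FLG_MH_PAD_MSK below;
         * len = 52 would give firstlen = 52 and no padding flag.
         */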
        /* Tell NIC about any 2-byte padding after MAC header */
        if (firstlen != len)
                tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys = pci_map_single(priv->pci_dev,
                                    &out_cmd->hdr, firstlen,
                                    PCI_DMA_BIDIRECTIONAL);
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
        dma_unmap_len_set(out_meta, len, firstlen);
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
        priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                   txcmd_phys, firstlen, 1, 0);

        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
        } else {
                wait_write_ptr = 1;
                txq->need_update = 0;
        }

        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
        secondlen = skb->len - hdr_len;
        if (secondlen > 0) {
                phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
                                           secondlen, PCI_DMA_TODEVICE);
                priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
                                                           phys_addr, secondlen,
                                                           0, 0);
        }

        scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
                                offsetof(struct iwl_tx_cmd, scratch);
        /* take back ownership of DMA buffer to enable update */
        pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
                                    firstlen, PCI_DMA_BIDIRECTIONAL);
        tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
        tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);

        IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
                     le16_to_cpu(out_cmd->hdr.sequence));
        IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
        iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

        /* Set up entry for this TFD in Tx byte-count array */
        if (info->flags & IEEE80211_TX_CTL_AMPDU)
                priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
                                                le16_to_cpu(tx_cmd->len));

        pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
                                       firstlen, PCI_DMA_BIDIRECTIONAL);

        trace_iwlwifi_legacy_dev_tx(priv,
                        &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
                        sizeof(struct iwl_tfd),
                        &out_cmd->hdr, firstlen,
                        skb->data + hdr_len, secondlen);

        /* Tell device the write index *just past* this latest filled TFD */
        q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
        iwl_legacy_txq_update_write_ptr(priv, txq);
        spin_unlock_irqrestore(&priv->lock, flags);

        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually; the only
         * remaining question is whether to update the write pointer right
         * away or to defer that until the queue has room again.
         */
        /*
         * Avoid atomic ops if it isn't an associated client.
         * Also, if this is a packet for aggregation, don't
         * increase the counter because the ucode will stop
         * aggregation queues when their respective station
         * goes to sleep.
         */
        if (sta_priv && sta_priv->client && !is_agg)
                atomic_inc(&sta_priv->pending_frames);

        if ((iwl_legacy_queue_space(q) < q->high_mark) &&
                        priv->mac80211_registered) {
                if (wait_write_ptr) {
                        spin_lock_irqsave(&priv->lock, flags);
                        txq->need_update = 1;
                        iwl_legacy_txq_update_write_ptr(priv, txq);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else {
                        iwl_legacy_stop_queue(priv, txq);
                }
        }

        return 0;

drop_unlock_priv:
        spin_unlock_irqrestore(&priv->lock, flags);
        return -1;
}
static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr, size_t size)
{
        ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
                                       GFP_KERNEL);
        if (!ptr->addr)
                return -ENOMEM;
        ptr->size = size;
        return 0;
}

static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
                                    struct iwl_dma_ptr *ptr)
{
        if (unlikely(!ptr->addr))
                return;

        dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));
}
/**
 * iwl4965_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
{
        int txq_id;

        /* Tx queues */
        if (priv->txq) {
                for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                        if (txq_id == priv->cmd_queue)
                                iwl_legacy_cmd_queue_free(priv);
                        else
                                iwl_legacy_tx_queue_free(priv, txq_id);
        }
        iwl4965_free_dma_ptr(priv, &priv->kw);

        iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);

        /* free tx queue structure */
        iwl_legacy_txq_mem(priv);
}
/**
 * iwl4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
{
        int ret;
        int txq_id, slots_num;
        unsigned long flags;

        /* Free all tx/cmd queues and keep-warm buffer */
        iwl4965_hw_txq_ctx_free(priv);

        ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
                                priv->hw_params.scd_bc_tbls_size);
        if (ret) {
                IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
                goto error_bc_tbls;
        }
        /* Alloc keep-warm buffer */
        ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
        if (ret) {
                IWL_ERR(priv, "Keep Warm allocation failed\n");
                goto error_kw;
        }

        /* allocate tx queue structure */
        ret = iwl_legacy_alloc_txq_mem(priv);
        if (ret)
                goto error;

        spin_lock_irqsave(&priv->lock, flags);

        /* Turn off all Tx DMA fifos */
        iwl4965_txq_set_sched(priv, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = (txq_id == priv->cmd_queue) ?
                                TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                ret = iwl_legacy_tx_queue_init(priv,
                                        &priv->txq[txq_id], slots_num,
                                        txq_id);
                if (ret) {
                        IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
                        goto error;
                }
        }

        return ret;

 error:
        iwl4965_hw_txq_ctx_free(priv);
        iwl4965_free_dma_ptr(priv, &priv->kw);
 error_kw:
        iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
 error_bc_tbls:
        return ret;
}
void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
{
        int txq_id, slots_num;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        /* Turn off all Tx DMA fifos */
        iwl4965_txq_set_sched(priv, 0);

        /* Tell NIC where to find the "keep warm" buffer */
        iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

        spin_unlock_irqrestore(&priv->lock, flags);

        /* Alloc and init all Tx queues, including the command queue (#4) */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
                slots_num = txq_id == priv->cmd_queue ?
                                TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
                iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
                                                slots_num, txq_id);
        }
}
/**
 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
{
        int ch, txq_id;
        unsigned long flags;

        /* Turn off all Tx DMA fifos */
        spin_lock_irqsave(&priv->lock, flags);

        iwl4965_txq_set_sched(priv, 0);

        /* Stop each Tx DMA channel, and wait for it to be idle */
        for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
                iwl_legacy_write_direct32(priv,
                                FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
                if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
                                FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
                                1000))
                        IWL_ERR(priv, "Failing on timeout while stopping"
                            " DMA channel %d [0x%08x]", ch,
                                iwl_legacy_read_direct32(priv,
                                        FH_TSSR_TX_STATUS_REG));
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        if (!priv->txq)
                return;

        /* Unmap DMA from host system and free skb's */
        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (txq_id == priv->cmd_queue)
                        iwl_legacy_cmd_queue_unmap(priv);
                else
                        iwl_legacy_tx_queue_unmap(priv, txq_id);
}
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
{
        int txq_id;

        for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
                if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
                        return txq_id;
        return -1;
}
/**
 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 */
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
                                            u16 txq_id)
{
        /* Simply stop the queue, but don't change any configuration;
         * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
        iwl_legacy_write_prph(priv,
                IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
                (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
                (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
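/*
 * Example (illustrative): because SCD_ACT_EN acts as a write enable for
 * the ACTIVE bit, the write above clears only ACTIVE and leaves the rest
 * of the queue configuration untouched; re-activation happens later
 * through iwl4965_tx_queue_set_status().
 */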
/**
 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
 */
static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
                                        u16 txq_id)
{
        u32 tbl_dw_addr;
        u32 tbl_dw;
        u16 scd_q2ratid;

        scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

        tbl_dw_addr = priv->scd_base_addr +
                        IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

        tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);

        if (txq_id & 0x1)
                tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
        else
                tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

        iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

        return 0;
}
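/*
 * Example (illustrative): the translate table packs two queues per 32-bit
 * word, so for an odd txq_id (say 7) the RA/TID halfword is written to
 * bits 31:16 of its word, while an even txq_id (say 8, in the next word)
 * lands in bits 15:0; the read-modify-write preserves the neighbour entry.
 */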
/**
 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE:  txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
 *        i.e. it must be one of the higher queues used for aggregation
 */
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
                                  int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
        unsigned long flags;
        u16 ra_tid;
        int ret;

        if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
            (IWL49_FIRST_AMPDU_QUEUE +
                priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
                IWL_WARN(priv,
                        "queue number out of range: %d, must be %d to %d\n",
                        txq_id, IWL49_FIRST_AMPDU_QUEUE,
                        IWL49_FIRST_AMPDU_QUEUE +
                        priv->cfg->base_params->num_of_ampdu_queues - 1);
                return -EINVAL;
        }

        ra_tid = BUILD_RAxTID(sta_id, tid);

        /* Modify device's station table to Tx this TID */
        ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
        if (ret)
                return ret;

        spin_lock_irqsave(&priv->lock, flags);

        /* Stop this Tx queue before configuring it */
        iwl4965_tx_queue_stop_scheduler(priv, txq_id);

        /* Map receiver-address / traffic-ID to this queue */
        iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

        /* Set this queue as a chain-building queue */
        iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

        /* Place first TFD at index corresponding to start sequence number.
         * Assumes that ssn_idx is valid (!= 0xFFF) */
        priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
        priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
        iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

        /* Set up Tx window size and frame limit for this queue */
        iwl_legacy_write_targ_mem(priv,
                priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
                (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
                IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

        iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
                IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
                (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
                & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

        iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

        /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
        iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}
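/*
 * Example (illustrative): if the ADDBA handshake starts the session at
 * SSN 0x11A, both ring pointers above become 0x1A (ssn_idx & 0xff), since
 * the TFD ring wraps at 256 entries while 802.11 sequence numbers are
 * 12 bits wide.
 */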
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        int sta_id;
        int tx_fifo;
        int txq_id;
        int ret;
        unsigned long flags;
        struct iwl_tid_data *tid_data;

        tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
        if (unlikely(tx_fifo < 0))
                return tx_fifo;

        IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
                        __func__, sta->addr, tid);

        sta_id = iwl_legacy_sta_id(sta);
        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Start AGG on invalid station\n");
                return -ENXIO;
        }
        if (unlikely(tid >= MAX_TID_COUNT))
                return -EINVAL;

        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
                IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
                return -ENXIO;
        }

        txq_id = iwl4965_txq_ctx_activate_free(priv);
        if (txq_id == -1) {
                IWL_ERR(priv, "No free aggregation queue available\n");
                return -ENXIO;
        }

        spin_lock_irqsave(&priv->sta_lock, flags);
        tid_data = &priv->stations[sta_id].tid[tid];
        *ssn = SEQ_TO_SN(tid_data->seq_number);
        tid_data->agg.txq_id = txq_id;
        iwl_legacy_set_swq_id(&priv->txq[txq_id],
                                iwl4965_get_ac_from_tid(tid), txq_id);
        spin_unlock_irqrestore(&priv->sta_lock, flags);

        ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
                                                sta_id, tid, *ssn);
        if (ret)
                return ret;

        spin_lock_irqsave(&priv->sta_lock, flags);
        tid_data = &priv->stations[sta_id].tid[tid];
        if (tid_data->tfds_in_queue == 0) {
                IWL_DEBUG_HT(priv, "HW queue is empty\n");
                tid_data->agg.state = IWL_AGG_ON;
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
        } else {
                IWL_DEBUG_HT(priv,
                        "HW queue is NOT empty: %d packets in HW queue\n",
                        tid_data->tfds_in_queue);
                tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
        }
        spin_unlock_irqrestore(&priv->sta_lock, flags);
        return ret;
}
/*
 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
 * priv->lock must be held by the caller
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
                                   u16 ssn_idx, u8 tx_fifo)
{
        if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
            (IWL49_FIRST_AMPDU_QUEUE +
                priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
                IWL_WARN(priv,
                        "queue number out of range: %d, must be %d to %d\n",
                        txq_id, IWL49_FIRST_AMPDU_QUEUE,
                        IWL49_FIRST_AMPDU_QUEUE +
                        priv->cfg->base_params->num_of_ampdu_queues - 1);
                return -EINVAL;
        }

        iwl4965_tx_queue_stop_scheduler(priv, txq_id);

        iwl_legacy_clear_bits_prph(priv,
                        IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

        priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
        priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
        /* supposes that ssn_idx is valid (!= 0xFFF) */
        iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

        iwl_legacy_clear_bits_prph(priv,
                        IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
        iwl_txq_ctx_deactivate(priv, txq_id);
        iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

        return 0;
}
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, u16 tid)
{
        int tx_fifo_id, txq_id, sta_id, ssn;
        struct iwl_tid_data *tid_data;
        int write_ptr, read_ptr;
        unsigned long flags;

        tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
        if (unlikely(tx_fifo_id < 0))
                return tx_fifo_id;

        sta_id = iwl_legacy_sta_id(sta);

        if (sta_id == IWL_INVALID_STATION) {
                IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
                return -ENXIO;
        }

        spin_lock_irqsave(&priv->sta_lock, flags);

        tid_data = &priv->stations[sta_id].tid[tid];
        ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
        txq_id = tid_data->agg.txq_id;

        switch (priv->stations[sta_id].tid[tid].agg.state) {
        case IWL_EMPTYING_HW_QUEUE_ADDBA:
                /*
                 * This can happen if the peer stops aggregation
                 * again before we've had a chance to drain the
                 * queue we selected previously, i.e. before the
                 * session was really started completely.
                 */
                IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
                goto turn_off;
        case IWL_AGG_ON:
                break;
        default:
                IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
        }

        write_ptr = priv->txq[txq_id].q.write_ptr;
        read_ptr = priv->txq[txq_id].q.read_ptr;

        /* The queue is not empty */
        if (write_ptr != read_ptr) {
                IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
                priv->stations[sta_id].tid[tid].agg.state =
                                IWL_EMPTYING_HW_QUEUE_DELBA;
                spin_unlock_irqrestore(&priv->sta_lock, flags);
                return 0;
        }

        IWL_DEBUG_HT(priv, "HW queue is empty\n");
turn_off:
        priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

        /* do not restore/save irqs */
        spin_unlock(&priv->sta_lock);
        spin_lock(&priv->lock);

        /*
         * The only reason this call can fail is a queue number out of range,
         * which can happen if uCode is reloaded and all the station
         * information is lost.  If it is outside the range, there is no need
         * to deactivate the uCode queue; just return "success" to allow
         * mac80211 to clean up its own data.
         */
        iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
        spin_unlock_irqrestore(&priv->lock, flags);

        ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

        return 0;
}
*priv
,
1045 int sta_id
, u8 tid
, int txq_id
)
1047 struct iwl_queue
*q
= &priv
->txq
[txq_id
].q
;
1048 u8
*addr
= priv
->stations
[sta_id
].sta
.sta
.addr
;
1049 struct iwl_tid_data
*tid_data
= &priv
->stations
[sta_id
].tid
[tid
];
1050 struct iwl_rxon_context
*ctx
;
1052 ctx
= &priv
->contexts
[priv
->stations
[sta_id
].ctxid
];
1054 lockdep_assert_held(&priv
->sta_lock
);
1056 switch (priv
->stations
[sta_id
].tid
[tid
].agg
.state
) {
1057 case IWL_EMPTYING_HW_QUEUE_DELBA
:
1058 /* We are reclaiming the last packet of the */
1059 /* aggregated HW queue */
1060 if ((txq_id
== tid_data
->agg
.txq_id
) &&
1061 (q
->read_ptr
== q
->write_ptr
)) {
1062 u16 ssn
= SEQ_TO_SN(tid_data
->seq_number
);
1063 int tx_fifo
= iwl4965_get_fifo_from_tid(ctx
, tid
);
1065 "HW queue empty: continue DELBA flow\n");
1066 iwl4965_txq_agg_disable(priv
, txq_id
, ssn
, tx_fifo
);
1067 tid_data
->agg
.state
= IWL_AGG_OFF
;
1068 ieee80211_stop_tx_ba_cb_irqsafe(ctx
->vif
, addr
, tid
);
1071 case IWL_EMPTYING_HW_QUEUE_ADDBA
:
1072 /* We are reclaiming the last packet of the queue */
1073 if (tid_data
->tfds_in_queue
== 0) {
1075 "HW queue empty: continue ADDBA flow\n");
1076 tid_data
->agg
.state
= IWL_AGG_ON
;
1077 ieee80211_start_tx_ba_cb_irqsafe(ctx
->vif
, addr
, tid
);
static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
                                     struct iwl_rxon_context *ctx,
                                     const u8 *addr1)
{
        struct ieee80211_sta *sta;
        struct iwl_station_priv *sta_priv;

        rcu_read_lock();
        sta = ieee80211_find_sta(ctx->vif, addr1);
        if (sta) {
                sta_priv = (void *)sta->drv_priv;
                /* avoid atomic ops if this isn't a client */
                if (sta_priv->client &&
                    atomic_dec_return(&sta_priv->pending_frames) == 0)
                        ieee80211_sta_block_awake(priv->hw, sta, false);
        }
        rcu_read_unlock();
}
static void
iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
                        bool is_agg)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;

        if (!is_agg)
                iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);

        ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
        struct iwl_tx_queue *txq = &priv->txq[txq_id];
        struct iwl_queue *q = &txq->q;
        struct iwl_tx_info *tx_info;
        int nfreed = 0;
        struct ieee80211_hdr *hdr;

        if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
                IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
                          "is out of range [0-%d] %d %d.\n", txq_id,
                          index, q->n_bd, q->write_ptr, q->read_ptr);
                return 0;
        }

        for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
             q->read_ptr != index;
             q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

                tx_info = &txq->txb[txq->q.read_ptr];

                if (WARN_ON_ONCE(tx_info->skb == NULL))
                        continue;

                hdr = (struct ieee80211_hdr *)tx_info->skb->data;
                if (ieee80211_is_data_qos(hdr->frame_control))
                        nfreed++;

                iwl4965_tx_status(priv, tx_info,
                                txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
                tx_info->skb = NULL;

                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
        }
        return nfreed;
}
/**
 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
                                 struct iwl_ht_agg *agg,
                                 struct iwl_compressed_ba_resp *ba_resp)
{
        int i, sh, ack;
        u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
        int successes = 0;
        struct ieee80211_tx_info *info;
        u64 bitmap, sent_bitmap;

        if (unlikely(!agg->wait_for_ba)) {
                if (unlikely(ba_resp->bitmap))
                        IWL_ERR(priv, "Received BA when not expected\n");
                return -EINVAL;
        }

        /* Mark that the expected block-ack response arrived */
        agg->wait_for_ba = 0;
        IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
                                                ba_resp->seq_ctl);

        /* Calculate shift to align block-ack bits with our Tx window bits */
        sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
        if (sh < 0) /* btw: something is wrong with indices */
                sh += 0x100;

        if (agg->frame_count > (64 - sh)) {
                IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
                return -1;
        }

        /* don't use 64-bit values for now */
        bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

        /* check for success or failure according to the
         * transmitted bitmap and block-ack bitmap */
        sent_bitmap = bitmap & agg->bitmap;

        /* For each frame attempted in aggregation,
         * update driver's record of tx frame's status. */
        i = 0;
        while (sent_bitmap) {
                ack = sent_bitmap & 1ULL;
                successes += ack;
                IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
                                ack ? "ACK" : "NACK", i,
                                (agg->start_idx + i) & 0xff,
                                agg->start_idx + i);
                sent_bitmap >>= 1;
                ++i;
        }

        IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
                                (unsigned long long)bitmap);

        info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
        memset(&info->status, 0, sizeof(info->status));
        info->flags |= IEEE80211_TX_STAT_ACK;
        info->flags |= IEEE80211_TX_STAT_AMPDU;
        info->status.ampdu_ack_len = successes;
        info->status.ampdu_len = agg->frame_count;
        iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

        return 0;
}
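/*
 * Worked example (illustrative): if the aggregation window started at Tx
 * ring index agg->start_idx = 10 while the BA's seq_ctl maps to index 8,
 * then sh = 2 and the device bitmap is shifted right by two so that bit 0
 * lines up with the first frame of our window; ANDing with agg->bitmap
 * then restricts the ACK count to frames we actually attempted.
 */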
/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
                                  struct ieee80211_tx_info *info)
{
        struct ieee80211_tx_rate *r = &info->control.rates[0];

        info->antenna_sel_tx =
                ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
        if (rate_n_flags & RATE_MCS_HT_MSK)
                r->flags |= IEEE80211_TX_RC_MCS;
        if (rate_n_flags & RATE_MCS_GF_MSK)
                r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
        if (rate_n_flags & RATE_MCS_HT40_MSK)
                r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
        if (rate_n_flags & RATE_MCS_DUP_MSK)
                r->flags |= IEEE80211_TX_RC_DUP_DATA;
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                r->flags |= IEEE80211_TX_RC_SHORT_GI;
        r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
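/*
 * Example (illustrative): a uCode rate_n_flags word with RATE_MCS_HT_MSK
 * and RATE_MCS_SGI_MSK set decodes to r->flags containing
 * IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_SHORT_GI, with r->idx carrying
 * the index that iwl4965_hwrate_to_mac80211_idx() extracts for the band.
 */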
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
                                        struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
        struct iwl_tx_queue *txq = NULL;
        struct iwl_ht_agg *agg;
        int index;
        int sta_id;
        int tid;
        unsigned long flags;

        /* "flow" corresponds to Tx queue */
        u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

        /* "ssn" is start of block-ack Tx window, corresponds to index
         * (in Tx queue's circular buffer) of first TFD/frame in window */
        u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

        if (scd_flow >= priv->hw_params.max_txq_num) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
                return;
        }

        txq = &priv->txq[scd_flow];
        sta_id = ba_resp->sta_id;
        tid = ba_resp->tid;
        agg = &priv->stations[sta_id].tid[tid].agg;
        if (unlikely(agg->txq_id != scd_flow)) {
                /*
                 * FIXME: this is a uCode bug which needs to be addressed;
                 * log the information and return for now.
                 * Since it can happen very often, and in order not to fill
                 * the syslog, don't enable the logging by default.
                 */
                IWL_DEBUG_TX_REPLY(priv,
                        "BA scd_flow %d does not match txq_id %d\n",
                        scd_flow, agg->txq_id);
                return;
        }

        /* Find index just before block-ack window */
        index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

        spin_lock_irqsave(&priv->sta_lock, flags);

        IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
                        "sta_id = %d\n",
                        agg->wait_for_ba,
                        (u8 *) &ba_resp->sta_addr_lo32,
                        ba_resp->sta_id);
        IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
                        "scd_flow = "
                        "%d, scd_ssn = %d\n",
                        ba_resp->tid,
                        ba_resp->seq_ctl,
                        (unsigned long long)le64_to_cpu(ba_resp->bitmap),
                        ba_resp->scd_flow,
                        ba_resp->scd_ssn);
        IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
                        agg->start_idx,
                        (unsigned long long)agg->bitmap);

        /* Update driver's record of ACK vs. not for each frame in window */
        iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

        /* Release all TFDs before the SSN, i.e. all TFDs in front of
         * block-ack window (we assume that they've been successfully
         * transmitted ... if not, it's too late anyway). */
        if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
                /* calculate mac80211 ampdu sw queue to wake */
                int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
                iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);

                if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
                    priv->mac80211_registered &&
                    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
                        iwl_legacy_wake_queue(priv, txq);

                iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
        }

        spin_unlock_irqrestore(&priv->sta_lock, flags);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
const char *iwl4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

        switch (status & TX_STATUS_MSK) {
        case TX_STATUS_SUCCESS:
                return "SUCCESS";
        TX_STATUS_POSTPONE(DELAY);
        TX_STATUS_POSTPONE(FEW_BYTES);
        TX_STATUS_POSTPONE(QUIET_PERIOD);
        TX_STATUS_POSTPONE(CALC_TTAK);
        TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
        TX_STATUS_FAIL(SHORT_LIMIT);
        TX_STATUS_FAIL(LONG_LIMIT);
        TX_STATUS_FAIL(FIFO_UNDERRUN);
        TX_STATUS_FAIL(DRAIN_FLOW);
        TX_STATUS_FAIL(RFKILL_FLUSH);
        TX_STATUS_FAIL(LIFE_EXPIRE);
        TX_STATUS_FAIL(DEST_PS);
        TX_STATUS_FAIL(HOST_ABORTED);
        TX_STATUS_FAIL(BT_RETRY);
        TX_STATUS_FAIL(STA_INVALID);
        TX_STATUS_FAIL(FRAG_DROPPED);
        TX_STATUS_FAIL(TID_DISABLE);
        TX_STATUS_FAIL(FIFO_FLUSHED);
        TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
        TX_STATUS_FAIL(PASSIVE_NO_RX);
        TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
        }

        return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */