/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-helpers.h"
#include "iwl-4965-hw.h"
/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
 *
 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
 *
 * Mac80211 uses the following numbers, which we get from it
 * by way of skb_get_queue_mapping(skb):
 *
 *	VO	0
 *	VI	1
 *	BE	2
 *	BK	3
 *
 * Regular (not A-MPDU) frames are put into hardware queues corresponding
 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
 * own queue per aggregation session (RA/TID combination); such queues are
 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
 * order to map frames to the right queue, we also need an AC->hw queue
 * mapping. This is implemented here.
 *
 * Due to the way hw queues are set up (by the hw specific modules like
 * iwl-4965.c), the AC->hw queue mapping is the identity mapping.
 */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO
};
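/*
 * Example (informal, for illustration): a voice frame carries TID 6 or 7
 * in its QoS control field; tid_to_ac[6] is IEEE80211_AC_VO, and the
 * context's ac_to_fifo[] table then selects the matching hardware FIFO.
 */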
static inline int iwl4965_get_ac_from_tid(u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return tid_to_ac[tid];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
static inline int
iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ctx->ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}
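/*
 * Note: both helpers above return -EINVAL for TIDs 8-15, so callers
 * (e.g. iwl4965_tx_agg_start() below) must check for a negative return
 * before using the result as an AC or FIFO number.
 */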
/*
 * Build the basic part of the REPLY_TX host command.
 */
static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
				       struct sk_buff *skb,
				       struct iwl_tx_cmd *tx_cmd,
				       struct ieee80211_tx_info *info,
				       struct ieee80211_hdr *hdr,
				       u8 std_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	tx_cmd->sta_id = std_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
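/*
 * Default limit on RTS retransmissions; iwl4965_tx_cmd_build_rate() below
 * clamps it to the data retry limit so an RTS exchange never outlives the
 * data frame it protects.
 */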
#define RTS_DFAULT_RETRY_LIMIT 60
static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	/* Set retry limit on RTS packets */
	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	}
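	/*
	 * Note: TX_CMD_FLG_STA_RATE_MSK tells the uCode to take the rate
	 * (and antenna) for this frame from its per-station rate-scaling
	 * table instead of rate_n_flags in this command, so the rest of
	 * this function only applies to non-data (e.g. management) frames.
	 */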
	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(&priv->bands[info->band],
				info->control.sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwlegacy_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					priv->hw_params.valid_tx_ant);

	rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
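/*
 * Copy hardware-crypto key material from mac80211's key configuration
 * (info->control.hw_key) into the TX command; which fields are filled
 * depends on the cipher suite.
 */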
static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					  struct ieee80211_tx_info *info,
					  struct iwl_tx_cmd *tx_cmd,
					  struct sk_buff *skb_frag,
					  int sta_id)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}
/*
 * start REPLY_TX command process
 */
int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int txq_id;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;
	if (info->control.vif)
		ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_legacy_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}
	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx,
							info->control.sta);

		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}
	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}
	/*
	 * Send this frame after DTIM -- there's a special queue
	 * reserved for this for contexts that support AP mode.
	 */
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		txq_id = ctx->mcast_queue;
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	/* irqs already disabled/saved above when locking priv->lock */
	spin_lock(&priv->sta_lock);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
			spin_unlock(&priv->sta_lock);
			goto drop_unlock_priv;
		}
		seq_number = priv->stations[sta_id].tid[tid].seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = hdr->seq_ctrl &
				cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
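		/*
		 * The sequence number lives in the top 12 bits of seq_ctrl
		 * (IEEE80211_SCTL_SEQ is 0xfff0), so the driver-kept counter
		 * advances in steps of 0x10 to reach the next SN.
		 */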
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
			is_agg = true;
		}
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;

	if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
		spin_unlock(&priv->sta_lock);
		goto drop_unlock_priv;
	}
	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
		if (!ieee80211_has_morefrags(fc))
			priv->stations[sta_id].tid[tid].seq_number =
								seq_number;
	}

	spin_unlock(&priv->sta_lock);
	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;
	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);
	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	if (info->control.hw_key)
		iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);

	iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_legacy_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;
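	/*
	 * (len + 3) & ~3 rounds the command+header length up to the next
	 * dword boundary; since 802.11 MAC header lengths are even, the
	 * only possible padding is the 2 bytes the comment above mentions.
	 */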
	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = pci_map_single(priv->pci_dev,
				    &out_cmd->hdr, firstlen,
				    PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   txcmd_phys, firstlen, 1, 0);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}
	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   secondlen, PCI_DMA_TODEVICE);
		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
							   phys_addr, secondlen,
							   0, 0);
	}
	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch);
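	/*
	 * The scratch area lies inside the Tx command itself; the device
	 * writes back through dram_lsb_ptr/dram_msb_ptr, which is why the
	 * command buffer was mapped bidirectionally and is synced for the
	 * CPU below before these two fields are patched in.
	 */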
	/* take back ownership of DMA buffer to enable update */
	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
				    firstlen, PCI_DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		     le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
						le16_to_cpu(tx_cmd->len));

	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       firstlen, PCI_DMA_BIDIRECTIONAL);
	trace_iwlwifi_legacy_dev_tx(priv,
			&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			sizeof(struct iwl_tfd),
			&out_cmd->hdr, firstlen,
			skb->data + hdr_len, secondlen);
	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_legacy_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);
	if ((iwl_legacy_queue_space(q) < q->high_mark) &&
	    priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_legacy_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_legacy_stop_queue(priv, txq);
		}
	}

	return 0;

drop_unlock_priv:
	spin_unlock_irqrestore(&priv->lock, flags);
	return -1;
}
static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr, size_t size)
{
	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}
static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
					struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}
/**
 * iwl4965_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq) {
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
			if (txq_id == priv->cmd_queue)
				iwl_legacy_cmd_queue_free(priv);
			else
				iwl_legacy_tx_queue_free(priv, txq_id);
	}
	iwl4965_free_dma_ptr(priv, &priv->kw);

	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);

	/* free tx queue structure */
	iwl_legacy_txq_mem(priv);
}
/**
 * iwl4965_txq_ctx_alloc - allocate TX queue context
 * Allocate all Tx DMA structures and initialize them
 */
int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl4965_hw_txq_ctx_free(priv);

	ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
				    priv->hw_params.scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
		goto error_bc_tbls;
	}
	/* Alloc keep-warm buffer */
	ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* allocate tx queue structure */
	ret = iwl_legacy_alloc_txq_mem(priv);
	if (ret)
		goto error;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl4965_txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == priv->cmd_queue) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_legacy_tx_queue_init(priv,
					&priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

error:
	iwl4965_hw_txq_ctx_free(priv);
	iwl4965_free_dma_ptr(priv, &priv->kw);
error_kw:
	iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
error_bc_tbls:
	return ret;
}
void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
{
	int txq_id, slots_num;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl4965_txq_set_sched(priv, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = txq_id == priv->cmd_queue ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
					  slots_num, txq_id);
	}
}
/**
 * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
 */
void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
{
	int ch, txq_id;
	unsigned long flags;

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&priv->lock, flags);

	iwl4965_txq_set_sched(priv, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				1000))
			IWL_ERR(priv, "Failing on timeout while stopping"
				" DMA channel %d [0x%08x]", ch,
				iwl_legacy_read_direct32(priv,
					FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!priv->txq)
		return;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (txq_id == priv->cmd_queue)
			iwl_legacy_cmd_queue_unmap(priv);
		else
			iwl_legacy_tx_queue_unmap(priv, txq_id);
}
/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
{
	int txq_id;

	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
			return txq_id;
	return -1;
}
/**
 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
 */
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
					    u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_legacy_write_prph(priv,
		IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
/**
 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
 */
static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);

	/* Two queues share each dword of the translate table:
	 * odd queues use the upper 16 bits, even queues the lower. */
	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}
797 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
799 * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
800 * i.e. it must be one of the higher queues used for aggregation
802 static int iwl4965_txq_agg_enable(struct iwl_priv
*priv
, int txq_id
,
803 int tx_fifo
, int sta_id
, int tid
, u16 ssn_idx
)
809 if ((IWL49_FIRST_AMPDU_QUEUE
> txq_id
) ||
810 (IWL49_FIRST_AMPDU_QUEUE
+
811 priv
->cfg
->base_params
->num_of_ampdu_queues
<= txq_id
)) {
813 "queue number out of range: %d, must be %d to %d\n",
814 txq_id
, IWL49_FIRST_AMPDU_QUEUE
,
815 IWL49_FIRST_AMPDU_QUEUE
+
816 priv
->cfg
->base_params
->num_of_ampdu_queues
- 1);
820 ra_tid
= BUILD_RAxTID(sta_id
, tid
);
822 /* Modify device's station table to Tx this TID */
823 ret
= iwl4965_sta_tx_modify_enable_tid(priv
, sta_id
, tid
);
827 spin_lock_irqsave(&priv
->lock
, flags
);
829 /* Stop this Tx queue before configuring it */
830 iwl4965_tx_queue_stop_scheduler(priv
, txq_id
);
832 /* Map receiver-address / traffic-ID to this queue */
833 iwl4965_tx_queue_set_q2ratid(priv
, ra_tid
, txq_id
);
835 /* Set this queue as a chain-building queue */
836 iwl_legacy_set_bits_prph(priv
, IWL49_SCD_QUEUECHAIN_SEL
, (1 << txq_id
));
838 /* Place first TFD at index corresponding to start sequence number.
839 * Assumes that ssn_idx is valid (!= 0xFFF) */
840 priv
->txq
[txq_id
].q
.read_ptr
= (ssn_idx
& 0xff);
841 priv
->txq
[txq_id
].q
.write_ptr
= (ssn_idx
& 0xff);
842 iwl4965_set_wr_ptrs(priv
, txq_id
, ssn_idx
);
844 /* Set up Tx window size and frame limit for this queue */
845 iwl_legacy_write_targ_mem(priv
,
846 priv
->scd_base_addr
+ IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id
),
847 (SCD_WIN_SIZE
<< IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS
) &
848 IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK
);
850 iwl_legacy_write_targ_mem(priv
, priv
->scd_base_addr
+
851 IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id
) + sizeof(u32
),
852 (SCD_FRAME_LIMIT
<< IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS
)
853 & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK
);
855 iwl_legacy_set_bits_prph(priv
, IWL49_SCD_INTERRUPT_MASK
, (1 << txq_id
));
857 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
858 iwl4965_tx_queue_set_status(priv
, &priv
->txq
[txq_id
], tx_fifo
, 1);
860 spin_unlock_irqrestore(&priv
->lock
, flags
);
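/*
 * ADDBA flow in outline: mac80211 calls iwl4965_tx_agg_start(), which
 * reserves a free aggregation queue and configures it through
 * iwl4965_txq_agg_enable() above; once any frames still pending for this
 * TID have drained (see iwl4965_txq_check_empty()), the session state
 * becomes IWL_AGG_ON and mac80211 is notified.
 */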
int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	int sta_id;
	int tx_fifo;
	int txq_id;
	int ret;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo < 0))
		return tx_fifo;

	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
			__func__, sta->addr, tid);

	sta_id = iwl_legacy_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwl4965_txq_ctx_activate_free(priv);
	if (txq_id == -1) {
		IWL_ERR(priv, "No free aggregation queue available\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	iwl_legacy_set_swq_id(&priv->txq[txq_id],
				iwl4965_get_ac_from_tid(tid), txq_id);
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
					sta_id, tid, *ssn);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_HT(priv,
			"HW queue is NOT empty: %d packets in HW queue\n",
			tid_data->tfds_in_queue);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
}
/*
 * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE
 * priv->lock must be held by the caller
 */
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				   u16 ssn_idx, u8 tx_fifo)
{
	if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWL49_FIRST_AMPDU_QUEUE +
		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_WARN(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWL49_FIRST_AMPDU_QUEUE,
			IWL49_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_legacy_clear_bits_prph(priv,
			IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}
int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	int tx_fifo_id, txq_id, sta_id, ssn;
	struct iwl_tid_data *tid_data;
	int write_ptr, read_ptr;
	unsigned long flags;

	tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
	if (unlikely(tx_fifo_id < 0))
		return tx_fifo_id;

	sta_id = iwl_legacy_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->sta_lock, flags);

	tid_data = &priv->stations[sta_id].tid[tid];
	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
	txq_id = tid_data->agg.txq_id;

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
	}

	write_ptr = priv->txq[txq_id].q.write_ptr;
	read_ptr = priv->txq[txq_id].q.read_ptr;

	/* The queue is not empty */
	if (write_ptr != read_ptr) {
		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
		priv->stations[sta_id].tid[tid].agg.state =
				IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_HT(priv, "HW queue is empty\n");
turn_off:
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->sta_lock);
	spin_lock(&priv->lock);

	/*
	 * The only reason this call can fail is a queue number out of range,
	 * which can happen if the uCode is reloaded and all the station
	 * information is lost. If it is outside the range, there is no need
	 * to deactivate the uCode queue; just return "success" to allow
	 * mac80211 to clean up its own data.
	 */
	iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
int iwl4965_txq_check_empty(struct iwl_priv *priv,
			    int sta_id, u8 tid, int txq_id)
{
	struct iwl_queue *q = &priv->txq[txq_id].q;
	u8 *addr = priv->stations[sta_id].sta.sta.addr;
	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
	struct iwl_rxon_context *ctx;

	ctx = &priv->contexts[priv->stations[sta_id].ctxid];

	lockdep_assert_held(&priv->sta_lock);

	switch (priv->stations[sta_id].tid[tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* We are reclaiming the last packet of the
		 * aggregated HW queue */
		if ((txq_id == tid_data->agg.txq_id) &&
		    (q->read_ptr == q->write_ptr)) {
			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
			int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue DELBA flow\n");
			iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* We are reclaiming the last packet of the queue */
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv,
				"HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
		}
		break;
	}

	return 0;
}
static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
				      struct iwl_rxon_context *ctx,
				      const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}
static void
iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
		  bool is_agg)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;

	if (!is_agg)
		iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);

	ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
	struct ieee80211_hdr *hdr;

	if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			"is out of range [0-%d] %d %d.\n", txq_id,
			index, q->n_bd, q->write_ptr, q->read_ptr);
		return 0;
	}

	for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
	     q->read_ptr != index;
	     q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];

		if (WARN_ON_ONCE(tx_info->skb == NULL))
			continue;

		hdr = (struct ieee80211_hdr *)tx_info->skb->data;
		if (ieee80211_is_data_qos(hdr->frame_control))
			nfreed++;

		iwl4965_tx_status(priv, tx_info,
				  txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
		tx_info->skb = NULL;

		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}
	return nfreed;
}
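/*
 * Note: the value returned above counts only QoS data frames; the
 * compressed-BA handler below feeds it to iwl4965_free_tfds_in_queue()
 * to decrement the per-TID tfds_in_queue bookkeeping.
 */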
/**
 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
 *
 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
 */
static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_ht_agg *agg,
				struct iwl_compressed_ba_resp *ba_resp)
{
	int i, sh, ack;
	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
	int successes = 0;
	struct ieee80211_tx_info *info;
	u64 bitmap, sent_bitmap;

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		return -EINVAL;
	}

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = 0;
	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
			   ba_resp->seq_ctl);

	/* Calculate shift to align block-ack bits with our Tx window bits */
	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
	if (sh < 0) /* tbd: something is wrong with indices */
		sh += 0x100;

	if (agg->frame_count > (64 - sh)) {
		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
		return -1;
	}

	/* don't use 64-bit values for now */
	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;

	/* check for success or failure according to the
	 * transmitted bitmap and block-ack bitmap */
	sent_bitmap = bitmap & agg->bitmap;
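	/*
	 * Example: if agg->bitmap is 0x0f (four frames attempted) and the
	 * shifted BA bitmap is 0x0b, then sent_bitmap is 0x0b: frames 0, 1
	 * and 3 were ACKed, frame 2 was not.
	 */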
	/* For each frame attempted in aggregation,
	 * update driver's record of tx frame's status. */
	i = 0;
	while (sent_bitmap) {
		ack = sent_bitmap & 1ULL;
		successes += ack;
		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
				ack ? "ACK" : "NACK", i,
				(agg->start_idx + i) & 0xff,
				agg->start_idx + i);
		sent_bitmap >>= 1;
		++i;
	}

	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
			(unsigned long long)bitmap);

	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
	memset(&info->status, 0, sizeof(info->status));
	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_len = successes;
	info->status.ampdu_len = agg->frame_count;
	iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);

	return 0;
}
/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	int index;
	int sta_id;
	int tid;
	unsigned long flags;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->stations[sta_id].tid[tid].agg;
	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen very often, and in order not to fill
		 * the syslog, don't enable the logging by default.
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		return;
	}

	/* Find index just before block-ack window */
	index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	spin_lock_irqsave(&priv->sta_lock, flags);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			"sta_id = %d\n",
			agg->wait_for_ba,
			(u8 *) &ba_resp->sta_addr_lo32,
			ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			"scd_flow = %d, scd_ssn = %d\n",
			ba_resp->tid,
			ba_resp->seq_ctl,
			(unsigned long long)le64_to_cpu(ba_resp->bitmap),
			ba_resp->scd_flow,
			ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
			agg->start_idx,
			(unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
		iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);

		if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
			iwl_legacy_wake_queue(priv, txq);

		iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
	}

	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
const char *iwl4965_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */