/*
 * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
 * Copyright (c) 2002-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: ar5212_xmit.c,v 1.1.1.1 2008/12/11 04:46:43 alc Exp $
 */
22 #include "ah_internal.h"
24 #include "ar5212/ar5212.h"
25 #include "ar5212/ar5212reg.h"
26 #include "ar5212/ar5212desc.h"
27 #include "ar5212/ar5212phy.h"
28 #ifdef AH_SUPPORT_5311
29 #include "ar5212/ar5311reg.h"
32 #ifdef AH_NEED_DESC_SWAP
33 static void ar5212SwapTxDesc(struct ath_desc
*ds
);
/*
 * Update Tx FIFO trigger level.
 *
 * Set bIncTrigLevel to TRUE to increase the trigger level.
 * Set bIncTrigLevel to FALSE to decrease the trigger level.
 *
 * Returns TRUE if the trigger level was updated.
 */
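/*
 * NB: the trigger level controls how much of a frame must be DMA'd
 *     into the tx FIFO before the MAC begins transmitting it; drivers
 *     typically raise it after a tx FIFO underrun and may lower it
 *     again later to reduce latency.
 */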
HAL_BOOL
ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	uint32_t txcfg, curLevel, newLevel;
	HAL_INT omask;

	/*
	 * Disable interrupts while futzing with the fifo level.
	 */
	omask = ar5212SetInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);

	txcfg = OS_REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {		/* increase the trigger level */
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		/* Update the trigger level */
		OS_REG_WRITE(ah, AR_TXCFG,
			(txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));

	/* re-enable chip interrupts */
	ar5212SetInterrupts(ah, omask);

	return (newLevel != curLevel);
}
/*
 * Set the properties of the tx queue with the parameters
 * from qInfo.
 */
HAL_BOOL
ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
}
/*
 * Return the properties for the specified tx queue.
 */
HAL_BOOL
ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
}
/*
 * Allocate and initialize a tx DCU/QCU combination.
 */
int
ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
			| HAL_TXQ_CBR_DIS_QEMPTY
			| HAL_TXQ_ARB_LOCKOUT_GLOBAL
			| HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
			| HAL_TXQ_CBR_DIS_QEMPTY
			| HAL_TXQ_CBR_DIS_BEMPTY
			| HAL_TXQ_ARB_LOCKOUT_GLOBAL
			| HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* third highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
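/*
 * NB: a rough sketch of the expected call sequence from a driver
 *     (error handling elided); the parameters shown are illustrative:
 *
 *	q = ar5212SetupTxQueue(ah, HAL_TX_QUEUE_DATA, AH_NULL);
 *	if (q != -1)
 *		ar5212ResetTxQueue(ah, q);
 */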
/*
 * Update the h/w interrupt registers to reflect a tx q's configuration.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	OS_REG_WRITE(ah, AR_IMR_S0,
	      SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
	    | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
	OS_REG_WRITE(ah, AR_IMR_S1,
	      SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
	    | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
	    AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
/*
 * Free a tx DCU/QCU combination.
 */
HAL_BOOL
ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	HAL_TX_QUEUE_INFO *qi;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_FALSE;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);

	qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
	ahp->ah_txOkInterruptMask &= ~(1 << q);
	ahp->ah_txErrInterruptMask &= ~(1 << q);
	ahp->ah_txDescInterruptMask &= ~(1 << q);
	ahp->ah_txEolInterruptMask &= ~(1 << q);
	ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue.
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
HAL_BOOL
ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	HAL_CHANNEL_INTERNAL *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, value, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		| SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		| SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		| SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

#ifdef AH_SUPPORT_5311
	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
		/* Configure DCU to use the global sequence count */
		dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
	}
#endif
	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}
	if (qi->tqi_readyTime) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fill in type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB frames */
		/*
		 * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY;
		 * there is an issue with the CAB queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;

		if (!qi->tqi_readyTime) {
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			value = (ahp->ah_beaconInterval
				- (ath_hal_sw_beacon_response_time -
					ath_hal_dma_beacon_response_time)
				- ath_hal_additional_swba_backoff) * 1024;
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q), value | AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC, HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg.
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
/*
 * Get the TXDP for the specified queue
 */
uint32_t
ar5212GetTxDP(struct ath_hal *ah, u_int q)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	return OS_REG_READ(ah, AR_QTXDP(q));
}
/*
 * Set the TxDP for the specified queue
 */
HAL_BOOL
ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	/*
	 * Make sure that TXE is deasserted before setting the TXDP.  If TXE
	 * is still asserted, setting TXDP will have no effect.
	 */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_QTXDP(q), txdp);

	return AH_TRUE;
}
/*
 * Set Transmit Enable bits for the specified queue
 */
HAL_BOOL
ar5212StartTxDma(struct ath_hal *ah, u_int q)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	/* Check to be sure we're not enabling a q that has its TXD bit set. */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);

	return AH_TRUE;
}
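/*
 * NB: a rough sketch of how the two calls above are normally paired in
 *     a driver's transmit start path (the descriptor address shown is
 *     illustrative); TXDP must be written while TXE is clear:
 *
 *	ar5212SetTxDP(ah, q, first_desc_paddr);
 *	ar5212StartTxDma(ah, q);
 */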
/*
 * Return the number of pending frames or 0 if the specified
 * queue is stopped.
 */
uint32_t
ar5212NumTxPending(struct ath_hal *ah, u_int q)
{
	uint32_t npend;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/*
		 * Pending frame count (PFC) can momentarily go to zero
		 * while TXE remains asserted.  In other words a PFC of
		 * zero is not sufficient to say that the queue has stopped.
		 */
		if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;	/* arbitrarily return 1 */
	}
	return npend;
}
/*
 * Stop transmit on the specified queue
 */
HAL_BOOL
ar5212StopTxDma(struct ath_hal *ah, u_int q)
{
	u_int i, wait;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
	for (i = 1000; i != 0; i--) {
		if (ar5212NumTxPending(ah, q) == 0)
			break;
		OS_DELAY(100);		/* XXX get actual value */
	}
#ifdef AH_DEBUG
	if (i == 0) {
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
	}
#endif /* AH_DEBUG */

	/* 2413+ and up can kill packets at the PCU level */
	if (ar5212NumTxPending(ah, q) &&
	    (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
		uint32_t tsfLow, j;

		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ar5212NumTxPending(ah, q), q);

		/* Kill last PCU Tx Frame */
		/* TODO - save off and restore current values of Q1/Q2? */
		for (j = 0; j < 2; j++) {
			tsfLow = OS_REG_READ(ah, AR_TSF_L32);
			OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
			    SM(10, AR_QUIET2_QUIET_DUR));
			OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
			    SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
			if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: TSF moved while trying to set quiet time "
			    "TSF: 0x%08x\n", __func__, tsfLow);
			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
		}

		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

		/* Allow the quiet mechanism to do its work */
		OS_DELAY(200);

		OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);

		/* Give at least 1 millisec more to wait */
		wait = 100;

		/* Verify all transmit is dead */
		while (ar5212NumTxPending(ah, q)) {
			if ((--wait) == 0) {
				HALDEBUG(ah, HAL_DEBUG_ANY,
				    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
				    __func__, wait);
				break;
			}
			OS_DELAY(10);
		}

		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
	}

	OS_REG_WRITE(ah, AR_Q_TXD, 0);
	return (i != 0);
}
/*
 * Descriptor Access Functions
 */

#define	VALID_PKT_TYPES \
	((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
	 (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
	 (1<<HAL_PKT_TYPE_BEACON))
#define	isValidPktType(_t)	((1<<(_t)) & VALID_PKT_TYPES)
#define	VALID_TX_RATES \
	((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
	 (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c))
#define	isValidTxRate(_r)	((1<<(_r)) & VALID_TX_RATES)
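/*
 * NB: the values in VALID_TX_RATES are 5-bit hardware rate codes from
 *     the chip rate tables (e.g. 0x0b is 6 Mb/s OFDM, 0x0c is 54 Mb/s,
 *     0x1b/0x18 are 1/11 Mb/s CCK), not 802.11 rates in 500 kb/s units;
 *     see the per-chip rate table source for the authoritative mapping.
 */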
HAL_BOOL
ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5212_desc *ads = AR5212DESC(ds);
	struct ath_hal_5212 *ahp = AH5212(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + ahp->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
		     | SM(antMode, AR_AntModeXmit)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrmType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
		     | (comp << AR_CompProc_S)
		     | (compicvLen << AR_CompICVLen_S)
		     | (compivLen << AR_CompIVLen_S)
		     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
			     ;
		ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
		ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
	}
	return AH_TRUE;
#undef RTSCTS
}
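/*
 * NB: ar5212SetupTxDesc programs only rate series 0; a driver doing
 *     multi-rate retry follows it with ar5212SetupXTxDesc (below) to
 *     fill in series 1-3, and ar5212ProcTxDesc later reads those try
 *     counts back from ds_ctl2 to compute the total retry count.
 */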
HAL_BOOL
ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int txRate1, u_int txTries1,
	u_int txRate2, u_int txTries2,
	u_int txRate3, u_int txTries3)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	if (txTries1) {
		HALASSERT(isValidTxRate(txRate1));
		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
	}
	if (txTries2) {
		HALASSERT(isValidTxRate(txRate2));
		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
	}
	if (txTries3) {
		HALASSERT(isValidTxRate(txRate3));
		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
	}
	return AH_TRUE;
}
void
ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
#else
	ads->ds_ctl0 |= AR_TxInterReq;
#endif
}
HAL_BOOL
ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	HALASSERT((segLen &~ AR_BufLen) == 0);

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_More;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	return AH_TRUE;
}
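/*
 * NB: in ar5212FillTxDesc above, the AR_More bit in ds_ctl1 is what
 *     chains the segments of a multi-descriptor frame together, and
 *     clearing the tx status words ensures the Done bit starts out
 *     unset so ar5212ProcTxDesc won't see a stale completion.
 */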
#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
static void
ar5212SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif
/*
 * Processing of HW TX descriptor.
 */
HAL_STATUS
ar5212ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
		return HAL_EINPROGRESS;

	ar5212SwapTxDesc(ds);
#else
	if ((ads->ds_txstatus1 & AR_Done) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
	ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
	ts->ts_status = 0;
	if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
		if (ads->ds_txstatus0 & AR_ExcessiveRetries)
			ts->ts_status |= HAL_TXERR_XRETRY;
		if (ads->ds_txstatus0 & AR_Filtered)
			ts->ts_status |= HAL_TXERR_FILT;
		if (ads->ds_txstatus0 & AR_FIFOUnderrun)
			ts->ts_status |= HAL_TXERR_FIFO;
	}
	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1) |
			HAL_TXSTAT_ALTRATE;
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2) |
			HAL_TXSTAT_ALTRATE;
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3) |
			HAL_TXSTAT_ALTRATE;
		break;
	}
	ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
	ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}
	ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
	ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);

	return HAL_OK;
}
/*
 * Determine which tx queues need interrupt servicing.
 */
void
ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	*txqs &= ahp->ah_intrTxqs;
	ahp->ah_intrTxqs &= ~(*txqs);
}