drivers/net/wireless/ath/ath5k/qcu.c
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"


/******************\
* Helper functions *
\******************/
/*
 * Get number of pending frames
 * for a specific queue [5211+]
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
	u32 pending;
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return false;

	/* XXX: How about AR5K_CFG_TXCNT ? */
	if (ah->ah_version == AR5K_AR5210)
		return false;

	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
	pending &= AR5K_QCU_STS_FRMPENDCNT;

	/* It's possible to have no frames pending even if TXE
	 * is set. To indicate that q has not stopped return
	 * true */
	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
		return true;

	return pending;
}
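
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a caller draining a tx queue can poll this until nothing is left, e.g.
 *
 *	for (i = 100; ath5k_hw_num_tx_pending(ah, queue) && i; i--)
 *		udelay(100);
 *
 * Any non-zero return means the queue is not idle yet: either frames are
 * still pending or TXE is still asserted with a zero pending count.
 */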
/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/*For SIMR setup*/
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
/*
 * Make sure cw is a power of 2 minus 1 and smaller than 1024
 */
static u16 ath5k_cw_validate(u16 cw_req)
{
	u32 cw = 1;
	cw_req = min(cw_req, (u16)1023);

	while (cw < cw_req)
		cw = (cw << 1) | 1;

	return cw;
}
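
/*
 * Illustrative values (added for clarity): a requested cw of 200 is
 * rounded up to 255 (2^8 - 1), 1023 is returned unchanged, anything
 * larger is first clamped to 1023, and a request of 0 yields 1.
 */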
/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
	return 0;
}
/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
				const struct ath5k_txq_info *qinfo)
{
	struct ath5k_txq_info *qi;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	qi = &ah->ah_txq[queue];

	if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	/* copy and validate values */
	qi->tqi_type = qinfo->tqi_type;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_flags = qinfo->tqi_flags;

	/*
	 * According to the docs: Although the AIFS field is 8 bit wide,
	 * the maximum supported value is 0xFC. Setting it higher than that
	 * will cause the DCU to hang.
	 */
	qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
	qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
	qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
	qi->tqi_cbr_period = qinfo->tqi_cbr_period;
	qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
	qi->tqi_burst_time = qinfo->tqi_burst_time;
	qi->tqi_ready_time = qinfo->tqi_ready_time;

	/*XXX: Is this supported on 5210 ?*/
	/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
	if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
		((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
		 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
		 qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
		qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}
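
/*
 * Usage sketch (added for illustration, not part of the original file):
 * callers typically do a get/modify/set round trip, e.g.
 *
 *	struct ath5k_txq_info qi;
 *
 *	ath5k_hw_get_tx_queueprops(ah, queue, &qi);
 *	qi.tqi_aifs = 0;
 *	qi.tqi_cw_min = 0;
 *	ath5k_hw_set_tx_queueprops(ah, queue, &qi);
 *
 * Note that this only updates the software state in ah->ah_txq; the new
 * values reach the hardware on the next ath5k_hw_reset_tx_queue() call.
 */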
/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	/*
	 * Get queue by type
	 */
	/* 5210 only has 2 queues */
	if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
				ah->ah_txq[queue].tqi_type !=
				AR5K_TX_QUEUE_INACTIVE; queue++) {

				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		case AR5K_TX_QUEUE_XR_DATA:
			if (ah->ah_version != AR5K_AR5212)
				ATH5K_ERR(ah->ah_sc,
					"XR data queues only supported in"
					" 5212!\n");
			queue = AR5K_TX_QUEUE_ID_XR_DATA;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}
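
/*
 * Usage sketch (added for illustration, not part of the original file;
 * the AR5K_TUNE_* values below are assumed defaults from ath5k.h): the
 * driver fills an ath5k_txq_info and lets this function pick the hw
 * queue number, e.g.
 *
 *	struct ath5k_txq_info qi = {
 *		.tqi_aifs = AR5K_TUNE_AIFS,
 *		.tqi_cw_min = AR5K_TUNE_CWMIN,
 *		.tqi_cw_max = AR5K_TUNE_CWMAX,
 *	};
 *	int qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *
 * A negative return value is an error; otherwise it is the queue number
 * to pass to ath5k_hw_reset_tx_queue() and the rest of the queue API.
 */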
/*******************************\
* Single QCU/DCU initialization *
\*******************************/

/*
 * Set tx retry limits on DCU
 */
static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
					unsigned int queue)
{
	u32 retry_lg, retry_sh;

	/*
	 * Calculate and set retry limits
	 */
	if (ah->ah_software_retry) {
		/* XXX Need to test this */
		retry_lg = ah->ah_limit_tx_retries;
		retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
			AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
	} else {
		retry_lg = AR5K_INIT_LG_RETRY;
		retry_sh = AR5K_INIT_SH_RETRY;
	}
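
	/*
	 * Note (added for clarity, not in the original): the assignment
	 * chain in the software-retry branch above is equivalent to
	 *
	 *	retry_sh = retry_lg = min(ah->ah_limit_tx_retries,
	 *				  AR5K_DCU_RETRY_LMT_SH_RETRY);
	 *
	 * i.e. both limits get clamped to the short retry field's maximum.
	 */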

	/* Single data queue on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		struct ath5k_txq_info *tq = &ah->ah_txq[queue];

		if (queue > 0)
			return;

		ath5k_hw_reg_write(ah,
			(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	/* DCU on AR5211+ */
	} else {
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_DCU_RETRY_LMT_SLG_RETRY) |
			AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_DCU_RETRY_LMT_SSH_RETRY) |
			AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
			AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
	}
	return;
}
/**
 * ath5k_hw_reset_tx_queue - Initialize a single hw queue
 *
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Sets DFS properties for the given transmit queue on DCU
 * and configures all queue-specific parameters.
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	struct ath5k_txq_info *tq = &ah->ah_txq[queue];

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	tq = &ah->ah_txq[queue];

	/* Skip if queue inactive or if we are on AR5210
	 * that doesn't have QCU/DCU */
	if ((ah->ah_version == AR5K_AR5210) ||
	    (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
		return 0;

	/*
	 * Set contention window (cw_min/cw_max)
	 * and arbitrated interframe space (aifs)...
	 */
	ath5k_hw_reg_write(ah,
		AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
		AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
		AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
		AR5K_QUEUE_DFS_LOCAL_IFS(queue));

	/*
	 * Set tx retry limits for this queue
	 */
	ath5k_hw_set_tx_retry_limits(ah, queue);

	/*
	 * Set misc registers
	 */

	/* Enable DCU to wait for next fragment from QCU */
	AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				AR5K_DCU_MISC_FRAG_WAIT);

	/* On Maui and Spirit use the global seqnum on DCU */
	if (ah->ah_mac_version < AR5K_SREV_AR5211)
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
					AR5K_DCU_MISC_SEQNUM_CTL);

	/* Constant bit rate period */
	if (tq->tqi_cbr_period) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
					AR5K_QCU_CBRCFG_INTVAL) |
					AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
					AR5K_QCU_CBRCFG_ORN_THRES),
					AR5K_QUEUE_CBRCFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_FRSHED_CBR);

		if (tq->tqi_cbr_overflow_limit)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBR_THRES_ENABLE);
	}

	/* Ready time interval */
	if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
					AR5K_QCU_RDYTIMECFG_INTVAL) |
					AR5K_QCU_RDYTIMECFG_ENABLE,
					AR5K_QUEUE_RDYTIMECFG(queue));

	if (tq->tqi_burst_time) {
		ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
					AR5K_DCU_CHAN_TIME_DUR) |
					AR5K_DCU_CHAN_TIME_ENABLE,
					AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_RDY_VEOL_POLICY);
	}

	/* Enable/disable Post frame backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
					AR5K_QUEUE_DFS_MISC(queue));

	/* Enable/disable fragmentation burst backoff */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
					AR5K_QUEUE_DFS_MISC(queue));

	/*
	 * Set registers by queue type
	 */
	switch (tq->tqi_type) {
	case AR5K_TX_QUEUE_BEACON:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_BCN_DIS |
				AR5K_QCU_MISC_BCN_ENABLE);

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				AR5K_DCU_MISC_ARBLOCK_IGNORE |
				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				AR5K_DCU_MISC_BCN_ENABLE);
		break;

	case AR5K_TX_QUEUE_CAB:
		/* XXX: use BCN_SENT_GT, if we can figure out how */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_DIS |
				AR5K_QCU_MISC_CBREXP_BCN_DIS);

		ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
				(AR5K_TUNE_SW_BEACON_RESP -
				AR5K_TUNE_DMA_BEACON_RESP) -
				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S));
		break;

	case AR5K_TX_QUEUE_UAPSD:
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_CBREXP_DIS);
		break;

	case AR5K_TX_QUEUE_DATA:
	default:
		break;
	}

	/* TODO: Handle frame compression */

	/*
	 * Enable interrupts for this tx queue
	 * in the secondary interrupt mask registers
	 */
	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

	if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
		AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

	/* Update secondary interrupt mask registers */

	/* Filter out inactive queues */
	ah->ah_txq_imr_txok &= ah->ah_txq_status;
	ah->ah_txq_imr_txerr &= ah->ah_txq_status;
	ah->ah_txq_imr_txurn &= ah->ah_txq_status;
	ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
	ah->ah_txq_imr_txeol &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
	ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
	ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
	ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
					AR5K_SIMR0_QCU_TXOK) |
					AR5K_REG_SM(ah->ah_txq_imr_txdesc,
					AR5K_SIMR0_QCU_TXDESC),
					AR5K_SIMR0);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
					AR5K_SIMR1_QCU_TXERR) |
					AR5K_REG_SM(ah->ah_txq_imr_txeol,
					AR5K_SIMR1_QCU_TXEOL),
					AR5K_SIMR1);

	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
	AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
	AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
				AR5K_REG_SM(ah->ah_txq_imr_txurn,
				AR5K_SIMR2_QCU_TXURN));

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
				AR5K_SIMR3_QCBRORN) |
				AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
				AR5K_SIMR3_QCBRURN),
				AR5K_SIMR3);

	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
				AR5K_SIMR4_QTRIG), AR5K_SIMR4);

	/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
	ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
				AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);

	/* If no queue has TXNOFRM enabled, disable the interrupt
	 * by setting AR5K_TXNOFRM to zero */
	if (ah->ah_txq_imr_nofrm == 0)
		ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

	/* Set QCU mask for this DCU to save power */
	AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);

	return 0;
}

/**************************\
* Global QCU/DCU functions *
\**************************/

/**
 * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
 *
 * @ah: The &struct ath5k_hw
 * @slot_time: Slot time in us
 *
 * Sets the global IFS intervals on DCU (also works on AR5210) for
 * the given slot time and the current bwmode.
 */
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
	struct ieee80211_channel *channel = ah->ah_current_channel;
	struct ath5k_softc *sc = ah->ah_sc;
	struct ieee80211_rate *rate;
	u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
	u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);

	if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	sifs = ath5k_hw_get_default_sifs(ah);
	sifs_clock = ath5k_hw_htoclock(ah, sifs);

	/* EIFS
	 * Txtime of ack at lowest rate + SIFS + DIFS
	 * (DIFS = SIFS + 2 * Slot time)
	 *
	 * Note: HAL has some predefined values for EIFS
	 * Turbo:   (37 + 2 * 6)
	 * Default: (74 + 2 * 9)
	 * Half:    (149 + 2 * 13)
	 * Quarter: (298 + 2 * 21)
	 *
	 * (74 + 2 * 6) for AR5210 default and turbo !
	 *
	 * According to the formula we have
	 * ack_tx_time = 25 for turbo and
	 * ack_tx_time = 42.5 * clock multiplier
	 * for default/half/quarter.
	 *
	 * This can't be right, 42 is what we would get
	 * from ath5k_hw_get_frame_dur_for_bwmode or
	 * ieee80211_generic_frame_duration for zero frame
	 * length and without SIFS !
	 *
	 * Also we have different lowest rate for 802.11a
	 */
	if (channel->hw_value & CHANNEL_5GHZ)
		rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
	else
		rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];

	ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);

	/* ack_tx_time includes an SIFS already */
	eifs = ack_tx_time + sifs + 2 * slot_time;
	eifs_clock = ath5k_hw_htoclock(ah, eifs);

	/* Set IFS settings on AR5210 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 pifs, pifs_clock, difs, difs_clock;

		/* Set slot time */
		ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);

		/* Set EIFS */
		eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);

		/* PIFS = Slot time + SIFS */
		pifs = slot_time + sifs;
		pifs_clock = ath5k_hw_htoclock(ah, pifs);
		pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);

		/* DIFS = SIFS + 2 * Slot time */
		difs = sifs + 2 * slot_time;
		difs_clock = ath5k_hw_htoclock(ah, difs);
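
		/*
		 * Illustrative arithmetic (assumed example values, not from
		 * this file): with sifs = 10us and slot_time = 9us the above
		 * gives pifs = 9 + 10 = 19us and difs = 10 + 2 * 9 = 28us,
		 * both before the usec-to-clock conversion.
		 */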

		/* Set SIFS/DIFS */
		ath5k_hw_reg_write(ah, (difs_clock <<
				AR5K_IFS0_DIFS_S) | sifs_clock,
				AR5K_IFS0);

		/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
		ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
				(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
				AR5K_IFS1);

		return 0;
	}

	/* Set IFS slot time */
	ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);

	/* Set EIFS interval */
	ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);

	/* Set SIFS interval in usecs */
	AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
				AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
				sifs);

	/* Set SIFS interval in clock cycles */
	ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);

	return 0;
}

int ath5k_hw_init_queues(struct ath5k_hw *ah)
{
	int i, ret;

	/* TODO: HW Compression support for data queues */
	/* TODO: Burst prefetch for data queues */

	/*
	 * Reset queues and start beacon timers at the end of the reset routine
	 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
	 * Note: If we want we can assign multiple qcus on one dcu.
	 */
	if (ah->ah_version != AR5K_AR5210)
		for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
			ret = ath5k_hw_reset_tx_queue(ah, i);
			if (ret) {
				ATH5K_ERR(ah->ah_sc,
					"failed to reset TX queue #%d\n", i);
				return ret;
			}
		}
	else
		/* No QCU/DCU on AR5210, just set tx
		 * retry limits. We set IFS parameters
		 * in ath5k_hw_set_ifs_intervals */
		ath5k_hw_set_tx_retry_limits(ah, 0);

	/* Set the turbo flag when operating on 40MHz */
	if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
		AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
				AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);

	/* If we didn't set IFS timings through
	 * ath5k_hw_set_coverage_class make sure
	 * we set them here */
	if (!ah->ah_coverage_class) {
		unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
		ath5k_hw_set_ifs_intervals(ah, slot_time);
	}

	return 0;
}