/*
	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"
struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there is
	 * at least 8 bytes available in headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Reserve the requested headroom and set the frame length,
	 * so the requested bytes remain available in head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
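/*
 * Resulting RX skb layout (illustrative sketch based on the sizes
 * computed above):
 *
 *   | head_size headroom | desc_size + data_size frame | tail_size tailroom |
 *
 * The headroom covers 4-byte payload alignment and, with hardware
 * crypto, room to reassemble the IV/EIV in front of the payload;
 * the tailroom leaves space for the ICV.
 */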
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure that
	 * headroom is also mapped to the DMA so it can be used for
	 * transferring additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore data pointer to original location again.
	 */
	skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add headroom to the skb length, it has been removed
		 * by the driver, but it was actually mapped to DMA.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->hw->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}
void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
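/*
 * L2 padding in a nutshell (illustrative): padding bytes are inserted
 * between the 802.11 header and the payload so that both start on a
 * 4-byte boundary. A 26-byte QoS data header, for instance, gets 2
 * padding bytes so the payload starts at offset 28. The two functions
 * below insert and remove that padding.
 */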
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int frame_length = skb->len;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = 4 - (payload_align - header_align);

	if (header_align == payload_align) {
		/*
		 * Both header and payload must be moved the same
		 * amount of bytes to align them properly. This means
		 * we don't use the L2 padding but just move the entire
		 * frame.
		 */
		rt2x00queue_align_frame(skb);
	} else if (!payload_align) {
		/*
		 * Simple L2 padding, only the header needs to be moved,
		 * the payload is already properly aligned.
		 */
		skb_push(skb, header_align);
		memmove(skb->data, skb->data + header_align, frame_length);
		skbdesc->flags |= SKBDESC_L2_PADDED;
	} else {
		/*
		 * Complicated L2 padding, both header and payload need
		 * to be moved. By default we only move to the start
		 * of the buffer, so our header alignment needs to be
		 * increased if there is not enough room for the header
		 * to be moved.
		 */
		if (payload_align > header_align)
			header_align += 4;

		skb_push(skb, header_align);
		memmove(skb->data, skb->data + header_align, header_length);
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + header_align,
			frame_length - header_length);
		skbdesc->flags |= SKBDESC_L2_PADDED;
	}
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	unsigned int l2pad = 4 - (header_length & 3);

	if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						   struct txentry_desc *txdesc,
						   const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
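/*
 * Worked CCK example (illustrative, assuming GET_DURATION() and
 * GET_DURATION_RES() yield the quotient and remainder of
 * (data_length * 8 * 10) / bitrate, with the bitrate in 100 kbit/s
 * units): a 124-byte frame at 11 Mbit/s (bitrate 110) gives
 * 9920 / 110 = 90 with remainder 20, so the duration is rounded up
 * to 91 microseconds and, since the residual is <= 30, the Length
 * Extension bit (0x80) is set in the SERVICE field.
 */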
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and alignment information.
	 */
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
	txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick the beacon queue.
	 *	2) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	3) Rule 2 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}
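/*
 * TX path in a nutshell (sketch): mac80211 hands the frame to the
 * driver, which calls rt2x00queue_write_tx_frame() above. The frame is
 * described by rt2x00queue_create_tx_descriptor(), aligned or L2 padded
 * as the driver requires, passed to the driver through write_tx_data(),
 * DMA mapped when needed, and finally announced to the hardware via
 * rt2x00queue_write_tx_descriptor() and kick_tx_queue().
 */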
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	__le32 desc[16];

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * For the descriptor we use a local array from where the
	 * driver can move it to the correct location required for
	 * the hardware.
	 */
	memset(desc, 0, sizeof(desc));

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->desc = desc;
	skbdesc->desc_len = intf->beacon->queue->desc_size;
	skbdesc->entry = intf->beacon;

	/*
	 * Write TX descriptor into reserved room in front of the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send beacon to hardware.
	 * Also enable beacon generation, which might have been disabled
	 * by the driver during the config_beacon() callback function.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon);
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
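/*
 * Lookup summary (illustrative): QID_RX maps to rt2x00dev->rx, TX qids
 * below ops->tx_queues index rt2x00dev->tx[], QID_BEACON maps to bcn[0]
 * and QID_ATIM to bcn[1] when an atim queue exists; anything else
 * yields NULL.
 */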
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}
static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}
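/*
 * Buffer layout produced by rt2x00queue_alloc_entries() (illustrative):
 *
 *   | entry 0 | entry 1 | ... | entry N-1 | priv 0 | priv 1 | ... | priv N-1 |
 *
 * The single kzalloc() holds all queue_entry structures followed by the
 * per-entry driver private data; QUEUE_ENTRY_PRIV_OFFSET() computes the
 * address of the private block belonging to a given entry index.
 */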
static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}
static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}
int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}
void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}
static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}
int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}
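/*
 * Layout of the data_queue array set up above (illustrative):
 *
 *   queue[0]                    : RX
 *   queue[1 .. tx_queues]       : TX, qid = QID_AC_BE + index
 *   queue[1 + tx_queues]        : Beacon
 *   queue[2 + tx_queues]        : Atim (only when the device requires it)
 */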
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}