/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <net/tso.h>
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"

/*
 * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
 */
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int txq_id;

        /*
         * This function can be called before the op_mode disabled the
         * queues. This happens when we have an rfkill interrupt.
         * Since we stop Tx altogether - mark the queues as stopped.
         */
        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

        /* Unmap DMA from host system and free skb's */
        for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
                if (!trans_pcie->txq[txq_id])
                        continue;
                iwl_pcie_gen2_txq_unmap(trans, txq_id);
        }
}
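
/*
 * Note on the byte-count table written by iwl_pcie_gen2_update_byte_tbl()
 * below: each entry packs the frame length together with the number of
 * 64-byte chunks the device has to fetch for the TFD.  For example, a TFD
 * with three TBs fills offsetof(struct iwl_tfh_tfd, tbs) plus
 * 3 * sizeof(struct iwl_tfh_tb) bytes (32 bytes), so
 * DIV_ROUND_UP(32, 64) - 1 == 0, i.e. a single fetch chunk.  On AX210 and
 * later devices the length is stored in bytes with the chunk count in the
 * top two bits; earlier devices store the length in dwords with the chunk
 * count shifted by 12.
 */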

/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
                                          struct iwl_txq *txq, u16 byte_cnt,
                                          int num_tbs)
{
        struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
        struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        u8 filled_tfd_size, num_fetch_chunks;
        u16 len = byte_cnt;
        __le16 bc_ent;

        if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
                return;

        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
                          num_tbs * sizeof(struct iwl_tfh_tb);
        /*
         * filled_tfd_size contains the number of filled bytes in the TFD.
         * Dividing it by 64 will give the number of chunks to fetch
         * to SRAM - 0 for one chunk, 1 for 2 and so on.
         * If, for example, TFD contains only 3 TBs then 32 bytes
         * of the TFD are used, and only one chunk of 64 bytes should
         * be fetched
         */
        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                /* Starting from AX210, the HW expects bytes */
                WARN_ON(trans_pcie->bc_table_dword);
                WARN_ON(len > 0x3FFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
                scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
        } else {
                /* Before AX210, the HW expects DW */
                WARN_ON(!trans_pcie->bc_table_dword);
                len = DIV_ROUND_UP(len, 4);
                WARN_ON(len > 0xFFF);
                bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
                scd_bc_tbl->tfd_offset[idx] = bc_ent;
        }
}

/*
 * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
 */
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
                                  struct iwl_txq *txq)
{
        lockdep_assert_held(&txq->lock);

        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

        /*
         * if not in power-save mode, uCode will never sleep when we're
         * trying to tx (during RFKILL, we're not trying to tx).
         */
        iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
                                    struct iwl_tfh_tfd *tfd)
{
        return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
                                    struct iwl_cmd_meta *meta,
                                    struct iwl_tfh_tfd *tfd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i, num_tbs;

        /* Sanity check on number of chunks */
        num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);

        if (num_tbs > trans_pcie->max_tbs) {
                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
                return;
        }

        /* first TB is never freed - it's the bidirectional DMA data */
        for (i = 1; i < num_tbs; i++) {
                if (meta->tbs & BIT(i))
                        dma_unmap_page(trans->dev,
                                       le64_to_cpu(tfd->tbs[i].addr),
                                       le16_to_cpu(tfd->tbs[i].tb_len),
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(trans->dev,
                                         le64_to_cpu(tfd->tbs[i].addr),
                                         le16_to_cpu(tfd->tbs[i].tb_len),
                                         DMA_TO_DEVICE);
        }

        tfd->num_tbs = 0;
}

static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
         * idx is bounded by n_window
         */
        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

        lockdep_assert_held(&txq->lock);

        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
                                iwl_pcie_get_tfd(trans, txq, idx));

        /* free SKB */
        if (txq->entries) {
                struct sk_buff *skb;

                skb = txq->entries[idx].skb;

                /* Can be called from irqs-disabled context
                 * If skb is not NULL, it means that the whole queue is being
                 * freed and that the queue is not empty - free the skb
                 */
                if (skb) {
                        iwl_op_mode_free_skb(trans->op_mode, skb);
                        txq->entries[idx].skb = NULL;
                }
        }
}

static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
                                struct iwl_tfh_tfd *tfd, dma_addr_t addr,
                                u16 len)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
        struct iwl_tfh_tb *tb;

        /*
         * Only WARN here so we know about the issue, but we mess up our
         * unmap path because not every place currently checks for errors
         * returned from this function - it can only return an error if
         * there's no more space, and so when we know there is enough we
         * don't always check ...
         */
        WARN(iwl_pcie_crosses_4g_boundary(addr, len),
             "possible DMA problem with iova:0x%llx, len:%d\n",
             (unsigned long long)addr, len);

        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
                return -EINVAL;
        tb = &tfd->tbs[idx];

        /* Each TFD can point to a maximum max_tbs Tx buffers */
        if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
                IWL_ERR(trans, "Error can not send more than %d chunks\n",
                        trans_pcie->max_tbs);
                return -EINVAL;
        }

        put_unaligned_le64(addr, &tb->addr);
        tb->tb_len = cpu_to_le16(len);

        tfd->num_tbs = cpu_to_le16(idx + 1);

        return idx;
}
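
/*
 * The two helpers below deal with a hardware limitation: when a TB crosses a
 * 2^32 boundary (iwl_pcie_crosses_4g_boundary()), the device may access the
 * following TB with a wrong address.  get_workaround_page() provides scratch
 * memory, chained through the last pointer-sized slot of each page so the
 * pages can be released later together with the skb, and
 * iwl_pcie_gen2_set_tb_with_wa() copies the offending chunk into that page
 * and maps the copy instead of the original buffer.
 */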

static struct page *get_workaround_page(struct iwl_trans *trans,
                                        struct sk_buff *skb)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct page **page_ptr;
        struct page *ret;

        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

        ret = alloc_page(GFP_ATOMIC);
        if (!ret)
                return NULL;

        /* set the chaining pointer to the previous page if there */
        *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
        *page_ptr = ret;

        return ret;
}

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
                                        struct sk_buff *skb,
                                        struct iwl_tfh_tfd *tfd,
                                        dma_addr_t phys, void *virt,
                                        u16 len, struct iwl_cmd_meta *meta)
{
        dma_addr_t oldphys = phys;
        struct page *page;
        int ret;

        if (unlikely(dma_mapping_error(trans->dev, phys)))
                return -ENOMEM;

        if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
                ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);

                if (ret < 0)
                        goto unmap;

                if (meta)
                        meta->tbs |= BIT(ret);

                ret = 0;
                goto trace;
        }

        /*
         * Work around a hardware bug. If (as expressed in the
         * condition above) the TB ends on a 32-bit boundary,
         * then the next TB may be accessed with the wrong
         * address.
         * To work around it, copy the data elsewhere and make
         * a new mapping for it so the device will not fail.
         */

        if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
                ret = -ENOBUFS;
                goto unmap;
        }

        page = get_workaround_page(trans, skb);
        if (!page) {
                ret = -ENOMEM;
                goto unmap;
        }

        memcpy(page_address(page), virt, len);

        phys = dma_map_single(trans->dev, page_address(page), len,
                              DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, phys)))
                return -ENOMEM;
        ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
        if (ret < 0) {
                /* unmap the new allocation as single */
                oldphys = phys;
                meta = NULL;
                goto unmap;
        }
        IWL_WARN(trans,
                 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
                 len, (unsigned long long)oldphys, (unsigned long long)phys);

        ret = 0;
unmap:
        if (meta)
                dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
        else
                dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
        trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

        return ret;
}
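
/*
 * A-MSDU handling: iwl_pcie_gen2_build_amsdu() below uses the TSO core to
 * split a GSO skb into subframes.  For each subframe it writes, into the
 * per-queue TSO header page, the 4-byte alignment padding, DA, SA, the
 * 2-byte subframe length and the SNAP/IP/TCP headers, maps that header block
 * as a single TB, and then maps the payload in tso.size-sized TBs through
 * the 4G-boundary-safe helper above.
 */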

static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                                     struct sk_buff *skb,
                                     struct iwl_tfh_tfd *tfd, int start_len,
                                     u8 hdr_len,
                                     struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        u16 length, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
        struct tso_t tso;

        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
                             &dev_cmd->hdr, start_len, 0);

        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
        total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
        amsdu_pad = 0;

        /* total amount of header we may need for this A-MSDU */
        hdr_room = DIV_ROUND_UP(total_len, mss) *
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

        /* Our device supports 9 segments at most, it will fit in 1 page */
        hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;

        start_hdr = hdr_page->pos;

        /*
         * Pull the ieee80211 header to be able to use TSO core,
         * we will restore it for the tx_status flow.
         */
        skb_pull(skb, hdr_len);

        /*
         * Remove the length of all the headers that we don't actually
         * have in the MPDU by themselves, but that we duplicate into
         * all the different MSDUs inside the A-MSDU.
         */
        le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

        tso_start(skb, &tso);

        while (total_len) {
                /* this is the data left for this subframe */
                unsigned int data_left = min_t(unsigned int, mss, total_len);
                struct sk_buff *csum_skb = NULL;
                unsigned int tb_len;
                dma_addr_t tb_phys;
                u8 *subf_hdrs_start = hdr_page->pos;

                total_len -= data_left;

                memset(hdr_page->pos, 0, amsdu_pad);
                hdr_page->pos += amsdu_pad;
                amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
                                  data_left)) & 0x3;
                ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
                hdr_page->pos += ETH_ALEN;
                ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
                hdr_page->pos += ETH_ALEN;

                length = snap_ip_tcp_hdrlen + data_left;
                *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
                hdr_page->pos += sizeof(length);

                /*
                 * This will copy the SNAP as well which will be considered
                 * as MAC header.
                 */
                tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

                hdr_page->pos += snap_ip_tcp_hdrlen;

                tb_len = hdr_page->pos - start_hdr;
                tb_phys = dma_map_single(trans->dev, start_hdr,
                                         tb_len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
                        dev_kfree_skb(csum_skb);
                        goto out_err;
                }
                /*
                 * No need for _with_wa, this is from the TSO page and
                 * we leave some space at the end of it so can't hit
                 * the buggy scenario.
                 */
                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
                                        tb_phys, tb_len);
                /* add this subframe's headers' length to the tx_cmd */
                le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

                /* prepare the start_hdr for the next subframe */
                start_hdr = hdr_page->pos;

                /* put the payload */
                while (data_left) {
                        int ret;

                        tb_len = min_t(unsigned int, tso.size, data_left);
                        tb_phys = dma_map_single(trans->dev, tso.data,
                                                 tb_len, DMA_TO_DEVICE);
                        ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
                                                           tb_phys, tso.data,
                                                           tb_len, NULL);
                        if (ret) {
                                dev_kfree_skb(csum_skb);
                                goto out_err;
                        }

                        data_left -= tb_len;
                        tso_build_data(skb, &tso, tb_len);
                }
        }

        /* re-add the WiFi header */
        skb_push(skb, hdr_len);

        return 0;

out_err:
#endif
        return -EINVAL;
}
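
/*
 * TFD layout used by the two builders below: TB0 always points to the first
 * IWL_FIRST_TB_SIZE bytes of the device command (the bi-directional DMA
 * scratch area), TB1 maps the remainder of the TX command plus the 802.11
 * header, and the remaining TBs carry the frame payload (skb head, paged
 * fragments and frag list, or the A-MSDU subframes built above).
 */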

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
                                          struct iwl_txq *txq,
                                          struct iwl_device_tx_cmd *dev_cmd,
                                          struct sk_buff *skb,
                                          struct iwl_cmd_meta *out_meta,
                                          int hdr_len,
                                          int tx_cmd_len)
{
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
        int len;
        void *tb1_addr;

        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

        /*
         * No need for _with_wa, the first TB allocation is aligned up
         * to a 64-byte boundary and thus can't be at the end or cross
         * a page boundary (much less a 2^32 boundary).
         */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

        /*
         * The second TB (tb1) points to the remainder of the TX command
         * and the 802.11 header - dword aligned size
         * (This calculation modifies the TX command, so do it before the
         * setup of the first TB)
         */
        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
              IWL_FIRST_TB_SIZE;

        /* do not align A-MSDU to dword as the subframe header aligns it */

        /* map the data for TB1 */
        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
        /*
         * No need for _with_wa(), we ensure (via alignment) that the data
         * here can never cross or end at a page boundary.
         */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);

        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
                                      len + IWL_FIRST_TB_SIZE,
                                      hdr_len, dev_cmd))
                goto out_err;

        /* building the A-MSDU might have changed this data, memcpy it now */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
        return tfd;

out_err:
        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
        return NULL;
}

static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
                                      struct sk_buff *skb,
                                      struct iwl_tfh_tfd *tfd,
                                      struct iwl_cmd_meta *out_meta)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t tb_phys;
                unsigned int fragsz = skb_frag_size(frag);
                int ret;

                if (!fragsz)
                        continue;

                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
                                           fragsz, DMA_TO_DEVICE);
                ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
                                                   skb_frag_address(frag),
                                                   fragsz, out_meta);
                if (ret)
                        return ret;
        }

        return 0;
}

static struct
iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                                    struct iwl_txq *txq,
                                    struct iwl_device_tx_cmd *dev_cmd,
                                    struct sk_buff *skb,
                                    struct iwl_cmd_meta *out_meta,
                                    int hdr_len,
                                    int tx_cmd_len,
                                    bool pad)
{
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
        dma_addr_t tb_phys;
        int len, tb1_len, tb2_len;
        void *tb1_addr;
        struct sk_buff *frag;

        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

        /* The first TB points to bi-directional DMA data */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

        /*
         * No need for _with_wa, the first TB allocation is aligned up
         * to a 64-byte boundary and thus can't be at the end or cross
         * a page boundary (much less a 2^32 boundary).
         */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

        /*
         * The second TB (tb1) points to the remainder of the TX command
         * and the 802.11 header - dword aligned size
         * (This calculation modifies the TX command, so do it before the
         * setup of the first TB)
         */
        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
              IWL_FIRST_TB_SIZE;

        if (pad)
                tb1_len = ALIGN(len, 4);
        else
                tb1_len = len;

        /* map the data for TB1 */
        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
        /*
         * No need for _with_wa(), we ensure (via alignment) that the data
         * here can never cross or end at a page boundary.
         */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

        /* set up TFD's third entry to point to remainder of skb's head */
        tb2_len = skb_headlen(skb) - hdr_len;

        if (tb2_len > 0) {
                int ret;

                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
                                         tb2_len, DMA_TO_DEVICE);
                ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
                                                   skb->data + hdr_len, tb2_len,
                                                   NULL);
                if (ret)
                        goto out_err;
        }

        if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
                goto out_err;

        skb_walk_frags(skb, frag) {
                int ret;

                tb_phys = dma_map_single(trans->dev, frag->data,
                                         skb_headlen(frag), DMA_TO_DEVICE);
                ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
                                                   frag->data,
                                                   skb_headlen(frag), NULL);
                if (ret)
                        goto out_err;
                if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
                        goto out_err;
        }

        return tfd;

out_err:
        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
        return NULL;
}

static
struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct iwl_txq *txq,
                                            struct iwl_device_tx_cmd *dev_cmd,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
        int len, hdr_len;
        bool amsdu;

        /* There must be data left over for TB1 or this code must be changed */
        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);

        memset(tfd, 0, sizeof(*tfd));

        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                len = sizeof(struct iwl_tx_cmd_gen2);
        else
                len = sizeof(struct iwl_tx_cmd_gen3);

        amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
                (*ieee80211_get_qos_ctl(hdr) &
                 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

        hdr_len = ieee80211_hdrlen(hdr->frame_control);

        /*
         * Only build A-MSDUs here if doing so by GSO, otherwise it may be
         * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
         * built in the higher layers already.
         */
        if (amsdu && skb_shinfo(skb)->gso_size)
                return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
                                                    out_meta, hdr_len, len);
        return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
                                      hdr_len, len, !amsdu);
}

int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
                           struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq = trans_pcie->txq[txq_id];
        u16 cmd_len;
        int idx;
        void *tfd;

        if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
                      "queue %d out of range", txq_id))
                return -EINVAL;

        if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;

        if (skb_is_nonlinear(skb) &&
            skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
            __skb_linearize(skb))
                return -ENOMEM;

        spin_lock(&txq->lock);

        if (iwl_queue_space(trans, txq) < txq->high_mark) {
                iwl_stop_queue(trans, txq);

                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
                        struct iwl_device_tx_cmd **dev_cmd_ptr;

                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);

                        *dev_cmd_ptr = dev_cmd;
                        __skb_queue_tail(&txq->overflow_q, skb);
                        spin_unlock(&txq->lock);
                        return 0;
                }
        }

        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);

        /* Set up driver data for this TFD */
        txq->entries[idx].skb = skb;
        txq->entries[idx].cmd = dev_cmd;

        dev_cmd->hdr.sequence =
                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
                            INDEX_TO_SEQ(idx)));

        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[idx].meta;
        out_meta->flags = 0;

        tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
        if (!tfd) {
                spin_unlock(&txq->lock);
                return -1;
        }

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
                        (void *)dev_cmd->payload;

                cmd_len = le16_to_cpu(tx_cmd_gen3->len);
        } else {
                struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
                        (void *)dev_cmd->payload;

                cmd_len = le16_to_cpu(tx_cmd_gen2->len);
        }

        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
                                      iwl_pcie_gen2_get_num_tbs(trans, tfd));

        /* start timer if queue currently empty */
        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

        /* Tell device the write index *just past* this latest filled TFD */
        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
        /*
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         */
        spin_unlock(&txq->lock);
        return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
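
/*
 * Host command chunks can be handed to iwl_pcie_gen2_enqueue_hcmd() in three
 * ways: plain chunks are copied into the command buffer, IWL_HCMD_DFL_NOCOPY
 * chunks are mapped in place as their own TB, and IWL_HCMD_DFL_DUP chunks are
 * duplicated into a temporary buffer (stored as the entry's free_buf and
 * released later) and mapped from there.  Once a NOCOPY/DUP chunk appears no
 * plain chunk may follow, and at least IWL_FIRST_TB_SIZE bytes must end up
 * copied so TB0 can be filled.
 */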

/*
 * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
                                      struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_device_cmd *out_cmd;
        struct iwl_cmd_meta *out_meta;
        unsigned long flags;
        void *dup_buf = NULL;
        dma_addr_t phys_addr;
        int i, cmd_pos, idx;
        u16 copy_size, cmd_size, tb0_size;
        bool had_nocopy = false;
        u8 group_id = iwl_cmd_groupid(cmd->id);
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_tfh_tfd *tfd;

        copy_size = sizeof(struct iwl_cmd_header_wide);
        cmd_size = sizeof(struct iwl_cmd_header_wide);

        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                cmddata[i] = cmd->data[i];
                cmdlen[i] = cmd->len[i];

                if (!cmd->len[i])
                        continue;

                /* need at least IWL_FIRST_TB_SIZE copied */
                if (copy_size < IWL_FIRST_TB_SIZE) {
                        int copy = IWL_FIRST_TB_SIZE - copy_size;

                        if (copy > cmdlen[i])
                                copy = cmdlen[i];
                        cmdlen[i] -= copy;
                        cmddata[i] += copy;
                        copy_size += copy;
                }

                if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
                        had_nocopy = true;
                        if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
                } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
                        /*
                         * This is also a chunk that isn't copied
                         * to the static buffer so set had_nocopy.
                         */
                        had_nocopy = true;

                        /* only allowed once */
                        if (WARN_ON(dup_buf)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }

                        dup_buf = kmemdup(cmddata[i], cmdlen[i],
                                          GFP_ATOMIC);
                        if (!dup_buf)
                                return -ENOMEM;
                } else {
                        /* NOCOPY must not be followed by normal! */
                        if (WARN_ON(had_nocopy)) {
                                idx = -EINVAL;
                                goto free_dup_buf;
                        }
                        copy_size += cmdlen[i];
                }
                cmd_size += cmd->len[i];
        }

        /*
         * If any of the command structures end up being larger than the
         * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
         * separate TFDs, then we will need to increase the size of the buffers
         */
        if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
                 "Command %s (%#x) is too large (%d bytes)\n",
                 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
                idx = -EINVAL;
                goto free_dup_buf;
        }

        spin_lock_bh(&txq->lock);

        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
        tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
        memset(tfd, 0, sizeof(*tfd));

        if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
                spin_unlock_bh(&txq->lock);

                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
                idx = -ENOSPC;
                goto free_dup_buf;
        }

        out_cmd = txq->entries[idx].cmd;
        out_meta = &txq->entries[idx].meta;

        /* re-initialize to NULL */
        memset(out_meta, 0, sizeof(*out_meta));
        if (cmd->flags & CMD_WANT_SKB)
                out_meta->source = cmd;

        /* set up the header */
        out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
        out_cmd->hdr_wide.group_id = group_id;
        out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
        out_cmd->hdr_wide.length =
                cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
        out_cmd->hdr_wide.reserved = 0;
        out_cmd->hdr_wide.sequence =
                cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
                            INDEX_TO_SEQ(txq->write_ptr));

        cmd_pos = sizeof(struct iwl_cmd_header_wide);
        copy_size = sizeof(struct iwl_cmd_header_wide);

        /* and copy the data that needs to be copied */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy;

                if (!cmd->len[i])
                        continue;

                /* copy everything if not nocopy/dup */
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP))) {
                        copy = cmd->len[i];

                        memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                        cmd_pos += copy;
                        copy_size += copy;
                        continue;
                }

                /*
                 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
                 * in total (for bi-directional DMA), but copy up to what
                 * we can fit into the payload for debug dump purposes.
                 */
                copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

                memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
                cmd_pos += copy;

                /* However, treat copy_size the proper way, we need it below */
                if (copy_size < IWL_FIRST_TB_SIZE) {
                        copy = IWL_FIRST_TB_SIZE - copy_size;

                        if (copy > cmd->len[i])
                                copy = cmd->len[i];
                        copy_size += copy;
                }
        }

        IWL_DEBUG_HC(trans,
                     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     iwl_get_cmd_string(trans, cmd->id), group_id,
                     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

        /* start the TFD with the minimum copy bytes */
        tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
        memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
        iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
                             tb0_size);

        /* map first command fragment, if any remains */
        if (copy_size > tb0_size) {
                phys_addr = dma_map_single(trans->dev,
                                           (u8 *)out_cmd + tb0_size,
                                           copy_size - tb0_size,
                                           DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
                        goto out;
                }
                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
                                     copy_size - tb0_size);
        }

        /* map the remaining (adjusted) nocopy/dup fragments */
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                const void *data = cmddata[i];

                if (!cmdlen[i])
                        continue;
                if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
                                           IWL_HCMD_DFL_DUP)))
                        continue;
                if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
                        data = dup_buf;
                phys_addr = dma_map_single(trans->dev, (void *)data,
                                           cmdlen[i], DMA_TO_DEVICE);
                if (dma_mapping_error(trans->dev, phys_addr)) {
                        idx = -ENOMEM;
                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
                        goto out;
                }
                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
        }

        BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kzfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;

        trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

        /* start timer if queue currently empty */
        if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
        /* Increment and update queue's write index */
        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
        spin_unlock_bh(&txq->lock);
free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
        return idx;
}
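
/*
 * Synchronous commands set STATUS_SYNC_HCMD_ACTIVE and then sleep on
 * trans_pcie->wait_command_queue until the bit is cleared by the response
 * handling path, for at most HOST_COMPLETE_TIMEOUT (defined below).  On
 * timeout the code clears the bit itself, fires iwl_trans_pcie_sync_nmi()
 * and cancels the command.
 */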

#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
                                        struct iwl_host_cmd *cmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
        int cmd_idx;
        int ret;

        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);

        if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
                                  &trans->status),
                 "Command %s: a command is already active!\n", cmd_str))
                return -EIO;

        IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);

        cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
        if (cmd_idx < 0) {
                ret = cmd_idx;
                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
                        cmd_str, ret);
                return ret;
        }

        ret = wait_event_timeout(trans_pcie->wait_command_queue,
                                 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
                                           &trans->status),
                                 HOST_COMPLETE_TIMEOUT);
        if (!ret) {
                IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
                        cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

                IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
                        txq->read_ptr, txq->write_ptr);

                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
                               cmd_str);
                ret = -ETIMEDOUT;

                iwl_trans_pcie_sync_nmi(trans);
                goto cancel;
        }

        if (test_bit(STATUS_FW_ERROR, &trans->status)) {
                IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
                dump_stack();
                ret = -EIO;
                goto cancel;
        }

        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
                ret = -ERFKILL;
                goto cancel;
        }

        if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
                IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
                ret = -EIO;
                goto cancel;
        }

        return 0;

cancel:
        if (cmd->flags & CMD_WANT_SKB) {
                /*
                 * Cancel the CMD_WANT_SKB flag for the cmd in the
                 * TX cmd queue. Otherwise in case the cmd comes
                 * in later, it will possibly set an invalid
                 * address (cmd->meta.source).
                 */
                txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
        }

        if (cmd->resp_pkt) {
                iwl_free_resp(cmd);
                cmd->resp_pkt = NULL;
        }

        return ret;
}

int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd)
{
        if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
            test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
                IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
                                  cmd->id);
                return -ERFKILL;
        }

        if (cmd->flags & CMD_ASYNC) {
                int ret;

                /* An asynchronous command can not expect an SKB to be set. */
                if (WARN_ON(cmd->flags & CMD_WANT_SKB))
                        return -EINVAL;

                ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
                if (ret < 0) {
                        IWL_ERR(trans,
                                "Error sending %s: enqueue_hcmd failed: %d\n",
                                iwl_get_cmd_string(trans, cmd->id), ret);
                        return ret;
                }
                return 0;
        }

        return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}

/*
 * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans_pcie->txq[txq_id];

        spin_lock_bh(&txq->lock);
        while (txq->write_ptr != txq->read_ptr) {
                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
                                   txq_id, txq->read_ptr);

                if (txq_id != trans_pcie->cmd_queue) {
                        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
                        struct sk_buff *skb = txq->entries[idx].skb;

                        if (WARN_ON_ONCE(!skb))
                                continue;

                        iwl_pcie_free_tso_page(trans_pcie, skb);
                }
                iwl_pcie_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
        }

        while (!skb_queue_empty(&txq->overflow_q)) {
                struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

                iwl_op_mode_free_skb(trans->op_mode, skb);
        }

        spin_unlock_bh(&txq->lock);

        /* just in case - this queue may have been stopped */
        iwl_wake_queue(trans, txq);
}

void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
                                   struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct device *dev = trans->dev;

        /* De-alloc circular buffer of TFDs */
        if (txq->tfds) {
                dma_free_coherent(dev,
                                  trans_pcie->tfd_size * txq->n_window,
                                  txq->tfds, txq->dma_addr);
                dma_free_coherent(dev,
                                  sizeof(*txq->first_tb_bufs) * txq->n_window,
                                  txq->first_tb_bufs, txq->first_tb_dma);
        }

        kfree(txq->entries);
        iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
        kfree(txq);
}

/*
 * iwl_pcie_gen2_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq;
        int i;

        if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
                      "queue %d out of range", txq_id))
                return;

        txq = trans_pcie->txq[txq_id];

        if (WARN_ON(!txq))
                return;

        iwl_pcie_gen2_txq_unmap(trans, txq_id);

        /* De-alloc array of command/tx buffers */
        if (txq_id == trans_pcie->cmd_queue)
                for (i = 0; i < txq->n_window; i++) {
                        kzfree(txq->entries[i].cmd);
                        kzfree(txq->entries[i].free_buf);
                }
        del_timer_sync(&txq->stuck_timer);

        iwl_pcie_gen2_txq_free_memory(trans, txq);

        trans_pcie->txq[txq_id] = NULL;

        clear_bit(txq_id, trans_pcie->queue_used);
}

int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
                                     struct iwl_txq **intxq, int size,
                                     unsigned int timeout)
{
        int ret;

        struct iwl_txq *txq;
        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
        if (!txq)
                return -ENOMEM;
        ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
                                     (trans->trans_cfg->device_family >=
                                      IWL_DEVICE_FAMILY_AX210) ?
                                     sizeof(struct iwl_gen3_bc_tbl) :
                                     sizeof(struct iwlagn_scd_bc_tbl));
        if (ret) {
                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
                kfree(txq);
                return ret;
        }

        ret = iwl_pcie_txq_alloc(trans, txq, size, false);
        if (ret) {
                IWL_ERR(trans, "Tx queue alloc failed\n");
                goto error;
        }
        ret = iwl_pcie_txq_init(trans, txq, size, false);
        if (ret) {
                IWL_ERR(trans, "Tx queue init failed\n");
                goto error;
        }

        txq->wd_timeout = msecs_to_jiffies(timeout);

        *intxq = txq;
        return 0;

error:
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
}
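
/*
 * Dynamic (TVQM) queue allocation is a three-step flow: allocate the DMA
 * resources with iwl_trans_pcie_dyn_txq_alloc_dma() above, send the queue
 * configuration host command (done in iwl_trans_pcie_dyn_txq_alloc() further
 * below), and then parse the firmware response in
 * iwl_trans_pcie_txq_alloc_response() to learn the queue id and initial
 * write pointer before hooking the queue into trans_pcie->txq[].
 */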

int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
                                      struct iwl_txq *txq,
                                      struct iwl_host_cmd *hcmd)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_queue_cfg_rsp *rsp;
        int ret, qid;
        u32 wr_ptr;

        if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
                    sizeof(*rsp))) {
                ret = -EINVAL;
                goto error_free_resp;
        }

        rsp = (void *)hcmd->resp_pkt->data;
        qid = le16_to_cpu(rsp->queue_number);
        wr_ptr = le16_to_cpu(rsp->write_pointer);

        if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
                WARN_ONCE(1, "queue index %d unsupported", qid);
                ret = -EIO;
                goto error_free_resp;
        }

        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d already used", qid);
                ret = -EIO;
                goto error_free_resp;
        }

        txq->id = qid;
        trans_pcie->txq[qid] = txq;
        wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);

        /* Place first TFD at index corresponding to start sequence number */
        txq->read_ptr = wr_ptr;
        txq->write_ptr = wr_ptr;

        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

        iwl_free_resp(hcmd);
        return qid;

error_free_resp:
        iwl_free_resp(hcmd);
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
}

int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 __le16 flags, u8 sta_id, u8 tid,
                                 int cmd_id, int size,
                                 unsigned int timeout)
{
        struct iwl_txq *txq = NULL;
        struct iwl_tx_queue_cfg_cmd cmd = {
                .flags = flags,
                .sta_id = sta_id,
                .tid = tid,
        };
        struct iwl_host_cmd hcmd = {
                .id = cmd_id,
                .len = { sizeof(cmd) },
                .data = { &cmd, },
                .flags = CMD_WANT_SKB,
        };
        int ret;

        ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
        if (ret)
                return ret;

        cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
        cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
        cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));

        ret = iwl_trans_send_cmd(trans, &hcmd);
        if (ret)
                goto error;

        return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);

error:
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
}

void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
                 "queue %d out of range", queue))
                return;

        /*
         * Upon HW Rfkill - we stop the device, and then stop the queues
         * in the op_mode. Just for the sake of the simplicity of the op_mode,
         * allow the op_mode to call txq_disable after it already called
         * stop_device.
         */
        if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
                WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
                          "queue %d not used", queue);
                return;
        }

        iwl_pcie_gen2_txq_unmap(trans, queue);

        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;

        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

        /* Free all TX queues */
        for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
                if (!trans_pcie->txq[i])
                        continue;

                iwl_pcie_gen2_txq_free(trans, i);
        }
}

int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *queue;
        int ret;

        /* alloc and init the tx queue */
        if (!trans_pcie->txq[txq_id]) {
                queue = kzalloc(sizeof(*queue), GFP_KERNEL);
                if (!queue) {
                        IWL_ERR(trans, "Not enough memory for tx queue\n");
                        return -ENOMEM;
                }
                trans_pcie->txq[txq_id] = queue;
                ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
                if (ret) {
                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
                        goto error;
                }
        } else {
                queue = trans_pcie->txq[txq_id];
        }

        ret = iwl_pcie_txq_init(trans, queue, queue_size,
                                (txq_id == trans_pcie->cmd_queue));
        if (ret) {
                IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
                goto error;
        }
        trans_pcie->txq[txq_id]->id = txq_id;
        set_bit(txq_id, trans_pcie->queue_used);

        return 0;

error:
        iwl_pcie_gen2_tx_free(trans);
        return ret;
}