[linux/fpc-iii.git] drivers/net/wireless/intel/iwlwifi/queue/tx.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2020 Intel Corporation
4 */
5 #include <net/tso.h>
6 #include <linux/tcp.h>
8 #include "iwl-debug.h"
9 #include "iwl-io.h"
10 #include "fw/api/tx.h"
11 #include "queue/tx.h"
12 #include "iwl-fh.h"
13 #include "iwl-scd.h"
14 #include <linux/dmapool.h>
17 * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
19 void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
21 int txq_id;
24 * This function can be called before the op_mode has disabled the
25 * queues. This happens when we have an rfkill interrupt.
26 * Since we stop Tx altogether, mark the queues as stopped.
28 memset(trans->txqs.queue_stopped, 0,
29 sizeof(trans->txqs.queue_stopped));
30 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
32 /* Unmap DMA from host system and free skb's */
33 for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
34 if (!trans->txqs.txq[txq_id])
35 continue;
36 iwl_txq_gen2_unmap(trans, txq_id);
41 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
43 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
44 struct iwl_txq *txq, u16 byte_cnt,
45 int num_tbs)
47 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
48 u8 filled_tfd_size, num_fetch_chunks;
49 u16 len = byte_cnt;
50 __le16 bc_ent;
52 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
53 return;
55 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
56 num_tbs * sizeof(struct iwl_tfh_tb);
58 * filled_tfd_size contains the number of filled bytes in the TFD.
59 * Dividing it by 64 will give the number of chunks to fetch
60 * to SRAM - 0 for one chunk, 1 for 2, and so on.
61 * If, for example, TFD contains only 3 TBs then 32 bytes
62 * of the TFD are used, and only one chunk of 64 bytes should
63 * be fetched
65 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
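/*
 * Worked example, using the numbers from the comment above: a TFD with
 * only 3 TBs occupies roughly 32 bytes, so DIV_ROUND_UP(32, 64) - 1 = 0,
 * which tells the hardware to fetch a single 64-byte chunk; a fully
 * populated TFD yields a larger value and therefore more chunks.
 */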
67 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
68 struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
70 /* Starting from AX210, the HW expects bytes */
71 WARN_ON(trans->txqs.bc_table_dword);
72 WARN_ON(len > 0x3FFF);
73 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
74 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
75 } else {
76 struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
78 /* Before AX210, the HW expects DW */
79 WARN_ON(!trans->txqs.bc_table_dword);
80 len = DIV_ROUND_UP(len, 4);
81 WARN_ON(len > 0xFFF);
82 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
83 scd_bc_tbl->tfd_offset[idx] = bc_ent;
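/*
 * In both layouts the low bits of bc_ent carry the length (bytes on
 * AX210 and later, dwords before that) and the upper bits carry
 * num_fetch_chunks, i.e. the number of 64-byte chunks minus one.
 */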
88 * iwl_txq_inc_wr_ptr - Send new write index to hardware
90 void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
92 lockdep_assert_held(&txq->lock);
94 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
97 * if not in power-save mode, uCode will never sleep when we're
98 * trying to tx (during RFKILL, we're not trying to tx).
100 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
103 static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
104 struct iwl_tfh_tfd *tfd)
106 return le16_to_cpu(tfd->num_tbs) & 0x1f;
109 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
110 struct iwl_tfh_tfd *tfd)
112 int i, num_tbs;
114 /* Sanity check on number of chunks */
115 num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
117 if (num_tbs > trans->txqs.tfd.max_tbs) {
118 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
119 return;
122 /* first TB is never freed - it's the bidirectional DMA data */
123 for (i = 1; i < num_tbs; i++) {
124 if (meta->tbs & BIT(i))
125 dma_unmap_page(trans->dev,
126 le64_to_cpu(tfd->tbs[i].addr),
127 le16_to_cpu(tfd->tbs[i].tb_len),
128 DMA_TO_DEVICE);
129 else
130 dma_unmap_single(trans->dev,
131 le64_to_cpu(tfd->tbs[i].addr),
132 le16_to_cpu(tfd->tbs[i].tb_len),
133 DMA_TO_DEVICE);
136 tfd->num_tbs = 0;
139 void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
141 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
142 * idx is bounded by n_window
144 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
146 lockdep_assert_held(&txq->lock);
148 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
149 iwl_txq_get_tfd(trans, txq, idx));
151 /* free SKB */
152 if (txq->entries) {
153 struct sk_buff *skb;
155 skb = txq->entries[idx].skb;
157 /* Can be called from irqs-disabled context
158 * If skb is not NULL, it means that the whole queue is being
159 * freed and that the queue is not empty - free the skb
161 if (skb) {
162 iwl_op_mode_free_skb(trans->op_mode, skb);
163 txq->entries[idx].skb = NULL;
168 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
169 dma_addr_t addr, u16 len)
171 int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
172 struct iwl_tfh_tb *tb;
175 * Only WARN here so we know about the issue, but we mess up our
176 * unmap path because not every place currently checks for errors
177 * returned from this function - it can only return an error if
178 * there's no more space, and so when we know there is enough we
179 * don't always check ...
181 WARN(iwl_txq_crosses_4g_boundary(addr, len),
182 "possible DMA problem with iova:0x%llx, len:%d\n",
183 (unsigned long long)addr, len);
185 if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
186 return -EINVAL;
187 tb = &tfd->tbs[idx];
189 /* Each TFD can point to a maximum max_tbs Tx buffers */
190 if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
191 IWL_ERR(trans, "Error can not send more than %d chunks\n",
192 trans->txqs.tfd.max_tbs);
193 return -EINVAL;
196 put_unaligned_le64(addr, &tb->addr);
197 tb->tb_len = cpu_to_le16(len);
199 tfd->num_tbs = cpu_to_le16(idx + 1);
201 return idx;
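/*
 * On success iwl_txq_gen2_set_tb() returns the index of the newly added
 * TB; callers such as iwl_txq_gen2_set_tb_with_wa() use that index to
 * set the matching bit in meta->tbs for page mappings.
 */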
204 static struct page *get_workaround_page(struct iwl_trans *trans,
205 struct sk_buff *skb)
207 struct page **page_ptr;
208 struct page *ret;
210 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
212 ret = alloc_page(GFP_ATOMIC);
213 if (!ret)
214 return NULL;
216 /* set the chaining pointer to the previous page if there */
217 *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
218 *page_ptr = ret;
220 return ret;
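/*
 * The pages allocated here form a singly linked list through the last
 * pointer-sized slot of each page; iwl_txq_free_tso_page() walks and
 * frees that list when the skb is reclaimed.
 */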
224 * Add a TB and if needed apply the FH HW bug workaround;
225 * meta != NULL indicates that it's a page mapping and we
226 * need to dma_unmap_page() and set the meta->tbs bit in
227 * this case.
229 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
230 struct sk_buff *skb,
231 struct iwl_tfh_tfd *tfd,
232 dma_addr_t phys, void *virt,
233 u16 len, struct iwl_cmd_meta *meta)
235 dma_addr_t oldphys = phys;
236 struct page *page;
237 int ret;
239 if (unlikely(dma_mapping_error(trans->dev, phys)))
240 return -ENOMEM;
242 if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
243 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
245 if (ret < 0)
246 goto unmap;
248 if (meta)
249 meta->tbs |= BIT(ret);
251 ret = 0;
252 goto trace;
256 * Work around a hardware bug. If (as expressed in the
257 * condition above) the TB ends on a 32-bit boundary,
258 * then the next TB may be accessed with the wrong
259 * address.
260 * To work around it, copy the data elsewhere and make
261 * a new mapping for it so the device will not fail.
264 if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
265 ret = -ENOBUFS;
266 goto unmap;
269 page = get_workaround_page(trans, skb);
270 if (!page) {
271 ret = -ENOMEM;
272 goto unmap;
275 memcpy(page_address(page), virt, len);
277 phys = dma_map_single(trans->dev, page_address(page), len,
278 DMA_TO_DEVICE);
279 if (unlikely(dma_mapping_error(trans->dev, phys)))
280 return -ENOMEM;
281 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
282 if (ret < 0) {
283 /* unmap the new allocation as single */
284 oldphys = phys;
285 meta = NULL;
286 goto unmap;
288 IWL_WARN(trans,
289 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
290 len, (unsigned long long)oldphys, (unsigned long long)phys);
292 ret = 0;
293 unmap:
294 if (meta)
295 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
296 else
297 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
298 trace:
299 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
301 return ret;
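/*
 * Note on the error handling in iwl_txq_gen2_set_tb_with_wa() above: the
 * unmap label releases the original mapping (as a page when meta is set,
 * as a single mapping otherwise); after the bounce copy, oldphys and meta
 * are redirected so the freshly created single mapping is released instead.
 */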
304 #ifdef CONFIG_INET
305 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
306 struct sk_buff *skb)
308 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
309 struct page **page_ptr;
311 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
313 if (WARN_ON(*page_ptr))
314 return NULL;
316 if (!p->page)
317 goto alloc;
320 * Check if there's enough room on this page
322 * Note that we put a page chaining pointer *last* in the
323 * page - we need it somewhere, and if it's there then we
324 * avoid DMA mapping the last bits of the page which may
325 * trigger the 32-bit boundary hardware bug.
327 * (see also get_workaround_page() in tx-gen2.c)
329 if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
330 sizeof(void *))
331 goto out;
333 /* We don't have enough room on this page, get a new one. */
334 __free_page(p->page);
336 alloc:
337 p->page = alloc_page(GFP_ATOMIC);
338 if (!p->page)
339 return NULL;
340 p->pos = page_address(p->page);
341 /* set the chaining pointer to NULL */
342 *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
343 out:
344 *page_ptr = p->page;
345 get_page(p->page);
346 return p;
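/*
 * get_page() in get_page_hdr() takes an extra reference for the skb
 * (stored via page_ptr and dropped in iwl_txq_free_tso_page()), while the
 * per-CPU iwl_tso_hdr_page keeps its own reference so the page can keep
 * serving header space for later skbs until it runs out of room.
 */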
348 #endif
350 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
351 struct sk_buff *skb,
352 struct iwl_tfh_tfd *tfd, int start_len,
353 u8 hdr_len,
354 struct iwl_device_tx_cmd *dev_cmd)
356 #ifdef CONFIG_INET
357 struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
358 struct ieee80211_hdr *hdr = (void *)skb->data;
359 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
360 unsigned int mss = skb_shinfo(skb)->gso_size;
361 u16 length, amsdu_pad;
362 u8 *start_hdr;
363 struct iwl_tso_hdr_page *hdr_page;
364 struct tso_t tso;
366 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
367 &dev_cmd->hdr, start_len, 0);
369 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
370 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
371 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
372 amsdu_pad = 0;
374 /* total amount of header we may need for this A-MSDU */
375 hdr_room = DIV_ROUND_UP(total_len, mss) *
376 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
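/*
 * Rough example: with an MSS of 1500 bytes and ~7500 bytes left after
 * the 802.11 header, this reserves room for 5 subframe headers, each
 * consisting of up to 3 bytes of padding, an ethernet-style subframe
 * header and the duplicated SNAP/IP/TCP headers.
 */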
378 /* Our device supports 9 segments at most, so it will fit in 1 page */
379 hdr_page = get_page_hdr(trans, hdr_room, skb);
380 if (!hdr_page)
381 return -ENOMEM;
383 start_hdr = hdr_page->pos;
386 * Pull the ieee80211 header to be able to use TSO core,
387 * we will restore it for the tx_status flow.
389 skb_pull(skb, hdr_len);
392 * Remove the length of all the headers that we don't actually
393 * have in the MPDU by themselves, but that we duplicate into
394 * all the different MSDUs inside the A-MSDU.
396 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
398 tso_start(skb, &tso);
400 while (total_len) {
401 /* this is the data left for this subframe */
402 unsigned int data_left = min_t(unsigned int, mss, total_len);
403 struct sk_buff *csum_skb = NULL;
404 unsigned int tb_len;
405 dma_addr_t tb_phys;
406 u8 *subf_hdrs_start = hdr_page->pos;
408 total_len -= data_left;
410 memset(hdr_page->pos, 0, amsdu_pad);
411 hdr_page->pos += amsdu_pad;
412 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
413 data_left)) & 0x3;
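/*
 * amsdu_pad is the 0-3 bytes needed to round this subframe up to a
 * 4-byte multiple; it is written as zero padding at the start of the
 * *next* subframe's header area by the memset above.
 */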
414 ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
415 hdr_page->pos += ETH_ALEN;
416 ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
417 hdr_page->pos += ETH_ALEN;
419 length = snap_ip_tcp_hdrlen + data_left;
420 *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
421 hdr_page->pos += sizeof(length);
424 * This will copy the SNAP as well which will be considered
425 * as MAC header.
427 tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
429 hdr_page->pos += snap_ip_tcp_hdrlen;
431 tb_len = hdr_page->pos - start_hdr;
432 tb_phys = dma_map_single(trans->dev, start_hdr,
433 tb_len, DMA_TO_DEVICE);
434 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
435 dev_kfree_skb(csum_skb);
436 goto out_err;
439 * No need for _with_wa, this is from the TSO page and
440 * we leave some space at the end of it so can't hit
441 * the buggy scenario.
443 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
444 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
445 tb_phys, tb_len);
446 /* add this subframe's headers' length to the tx_cmd */
447 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
449 /* prepare the start_hdr for the next subframe */
450 start_hdr = hdr_page->pos;
452 /* put the payload */
453 while (data_left) {
454 int ret;
456 tb_len = min_t(unsigned int, tso.size, data_left);
457 tb_phys = dma_map_single(trans->dev, tso.data,
458 tb_len, DMA_TO_DEVICE);
459 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
460 tb_phys, tso.data,
461 tb_len, NULL);
462 if (ret) {
463 dev_kfree_skb(csum_skb);
464 goto out_err;
467 data_left -= tb_len;
468 tso_build_data(skb, &tso, tb_len);
472 /* re-add the WiFi header */
473 skb_push(skb, hdr_len);
475 return 0;
477 out_err:
478 #endif
479 return -EINVAL;
482 static struct
483 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
484 struct iwl_txq *txq,
485 struct iwl_device_tx_cmd *dev_cmd,
486 struct sk_buff *skb,
487 struct iwl_cmd_meta *out_meta,
488 int hdr_len,
489 int tx_cmd_len)
491 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
492 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
493 dma_addr_t tb_phys;
494 int len;
495 void *tb1_addr;
497 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
500 * No need for _with_wa, the first TB allocation is aligned up
501 * to a 64-byte boundary and thus can't be at the end or cross
502 * a page boundary (much less a 2^32 boundary).
504 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
507 * The second TB (tb1) points to the remainder of the TX command
508 * and the 802.11 header - dword aligned size
509 * (This calculation modifies the TX command, so do it before the
510 * setup of the first TB)
512 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
513 IWL_FIRST_TB_SIZE;
515 /* do not align A-MSDU to dword as the subframe header aligns it */
517 /* map the data for TB1 */
518 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
519 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
520 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
521 goto out_err;
523 * No need for _with_wa(), we ensure (via alignment) that the data
524 * here can never cross or end at a page boundary.
526 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
528 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
529 hdr_len, dev_cmd))
530 goto out_err;
532 /* building the A-MSDU might have changed this data, memcpy it now */
533 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
534 return tfd;
536 out_err:
537 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
538 return NULL;
541 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
542 struct sk_buff *skb,
543 struct iwl_tfh_tfd *tfd,
544 struct iwl_cmd_meta *out_meta)
546 int i;
548 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
549 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
550 dma_addr_t tb_phys;
551 unsigned int fragsz = skb_frag_size(frag);
552 int ret;
554 if (!fragsz)
555 continue;
557 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
558 fragsz, DMA_TO_DEVICE);
559 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
560 skb_frag_address(frag),
561 fragsz, out_meta);
562 if (ret)
563 return ret;
566 return 0;
569 static struct
570 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
571 struct iwl_txq *txq,
572 struct iwl_device_tx_cmd *dev_cmd,
573 struct sk_buff *skb,
574 struct iwl_cmd_meta *out_meta,
575 int hdr_len,
576 int tx_cmd_len,
577 bool pad)
579 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
580 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
581 dma_addr_t tb_phys;
582 int len, tb1_len, tb2_len;
583 void *tb1_addr;
584 struct sk_buff *frag;
586 tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
588 /* The first TB points to bi-directional DMA data */
589 memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
592 * No need for _with_wa, the first TB allocation is aligned up
593 * to a 64-byte boundary and thus can't be at the end or cross
594 * a page boundary (much less a 2^32 boundary).
596 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
599 * The second TB (tb1) points to the remainder of the TX command
600 * and the 802.11 header - dword aligned size
601 * (This calculation modifies the TX command, so do it before the
602 * setup of the first TB)
604 len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
605 IWL_FIRST_TB_SIZE;
607 if (pad)
608 tb1_len = ALIGN(len, 4);
609 else
610 tb1_len = len;
612 /* map the data for TB1 */
613 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
614 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
615 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
616 goto out_err;
618 * No need for _with_wa(), we ensure (via alignment) that the data
619 * here can never cross or end at a page boundary.
621 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
622 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
623 IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
625 /* set up TFD's third entry to point to remainder of skb's head */
626 tb2_len = skb_headlen(skb) - hdr_len;
628 if (tb2_len > 0) {
629 int ret;
631 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
632 tb2_len, DMA_TO_DEVICE);
633 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
634 skb->data + hdr_len, tb2_len,
635 NULL);
636 if (ret)
637 goto out_err;
640 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
641 goto out_err;
643 skb_walk_frags(skb, frag) {
644 int ret;
646 tb_phys = dma_map_single(trans->dev, frag->data,
647 skb_headlen(frag), DMA_TO_DEVICE);
648 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
649 frag->data,
650 skb_headlen(frag), NULL);
651 if (ret)
652 goto out_err;
653 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
654 goto out_err;
657 return tfd;
659 out_err:
660 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
661 return NULL;
664 static
665 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
666 struct iwl_txq *txq,
667 struct iwl_device_tx_cmd *dev_cmd,
668 struct sk_buff *skb,
669 struct iwl_cmd_meta *out_meta)
671 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
672 int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
673 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
674 int len, hdr_len;
675 bool amsdu;
677 /* There must be data left over for TB1 or this code must be changed */
678 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
680 memset(tfd, 0, sizeof(*tfd));
682 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
683 len = sizeof(struct iwl_tx_cmd_gen2);
684 else
685 len = sizeof(struct iwl_tx_cmd_gen3);
687 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
688 (*ieee80211_get_qos_ctl(hdr) &
689 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
691 hdr_len = ieee80211_hdrlen(hdr->frame_control);
694 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
695 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
696 * built in the higher layers already.
698 if (amsdu && skb_shinfo(skb)->gso_size)
699 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
700 out_meta, hdr_len, len);
701 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
702 hdr_len, len, !amsdu);
705 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
707 unsigned int max;
708 unsigned int used;
711 * To avoid ambiguity between empty and completely full queues, there
712 * should always be less than max_tfd_queue_size elements in the queue.
713 * If q->n_window is smaller than max_tfd_queue_size, there is no need
714 * to reserve any queue entries for this purpose.
716 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
717 max = q->n_window;
718 else
719 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
722 * max_tfd_queue_size is a power of 2, so the following is equivalent to
723 * modulo by max_tfd_queue_size and is well defined.
725 used = (q->write_ptr - q->read_ptr) &
726 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
728 if (WARN_ON(used > max))
729 return 0;
731 return max - used;
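/*
 * Worked example for iwl_txq_space(), assuming n_window equals
 * max_tfd_queue_size = 256: with write_ptr = 10 and read_ptr = 250,
 * (10 - 250) & 255 = 16 entries are in use, so 255 - 16 = 239 slots
 * remain (one slot always stays free to distinguish a full queue from
 * an empty one).
 */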
734 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
735 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
737 struct iwl_cmd_meta *out_meta;
738 struct iwl_txq *txq = trans->txqs.txq[txq_id];
739 u16 cmd_len;
740 int idx;
741 void *tfd;
743 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
744 "queue %d out of range", txq_id))
745 return -EINVAL;
747 if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
748 "TX on unused queue %d\n", txq_id))
749 return -EINVAL;
751 if (skb_is_nonlinear(skb) &&
752 skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
753 __skb_linearize(skb))
754 return -ENOMEM;
756 spin_lock(&txq->lock);
758 if (iwl_txq_space(trans, txq) < txq->high_mark) {
759 iwl_txq_stop(trans, txq);
761 /* don't put the packet on the ring if there is no room */
762 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
763 struct iwl_device_tx_cmd **dev_cmd_ptr;
765 dev_cmd_ptr = (void *)((u8 *)skb->cb +
766 trans->txqs.dev_cmd_offs);
768 *dev_cmd_ptr = dev_cmd;
769 __skb_queue_tail(&txq->overflow_q, skb);
770 spin_unlock(&txq->lock);
771 return 0;
775 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
777 /* Set up driver data for this TFD */
778 txq->entries[idx].skb = skb;
779 txq->entries[idx].cmd = dev_cmd;
781 dev_cmd->hdr.sequence =
782 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
783 INDEX_TO_SEQ(idx)));
785 /* Set up first empty entry in queue's array of Tx/cmd buffers */
786 out_meta = &txq->entries[idx].meta;
787 out_meta->flags = 0;
789 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
790 if (!tfd) {
791 spin_unlock(&txq->lock);
792 return -1;
795 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
796 struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
797 (void *)dev_cmd->payload;
799 cmd_len = le16_to_cpu(tx_cmd_gen3->len);
800 } else {
801 struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
802 (void *)dev_cmd->payload;
804 cmd_len = le16_to_cpu(tx_cmd_gen2->len);
807 /* Set up entry for this TFD in Tx byte-count array */
808 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
809 iwl_txq_gen2_get_num_tbs(trans, tfd));
811 /* start timer if queue currently empty */
812 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
813 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
815 /* Tell device the write index *just past* this latest filled TFD */
816 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
817 iwl_txq_inc_wr_ptr(trans, txq);
819 * At this point the frame is "transmitted" successfully
820 * and we will get a TX status notification eventually.
822 spin_unlock(&txq->lock);
823 return 0;
826 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
829 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
831 void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
833 struct iwl_txq *txq = trans->txqs.txq[txq_id];
835 spin_lock_bh(&txq->lock);
836 while (txq->write_ptr != txq->read_ptr) {
837 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
838 txq_id, txq->read_ptr);
840 if (txq_id != trans->txqs.cmd.q_id) {
841 int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
842 struct sk_buff *skb = txq->entries[idx].skb;
844 if (WARN_ON_ONCE(!skb))
845 continue;
847 iwl_txq_free_tso_page(trans, skb);
849 iwl_txq_gen2_free_tfd(trans, txq);
850 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
853 while (!skb_queue_empty(&txq->overflow_q)) {
854 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
856 iwl_op_mode_free_skb(trans->op_mode, skb);
859 spin_unlock_bh(&txq->lock);
861 /* just in case - this queue may have been stopped */
862 iwl_wake_queue(trans, txq);
865 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
866 struct iwl_txq *txq)
868 struct device *dev = trans->dev;
870 /* De-alloc circular buffer of TFDs */
871 if (txq->tfds) {
872 dma_free_coherent(dev,
873 trans->txqs.tfd.size * txq->n_window,
874 txq->tfds, txq->dma_addr);
875 dma_free_coherent(dev,
876 sizeof(*txq->first_tb_bufs) * txq->n_window,
877 txq->first_tb_bufs, txq->first_tb_dma);
880 kfree(txq->entries);
881 if (txq->bc_tbl.addr)
882 dma_pool_free(trans->txqs.bc_pool,
883 txq->bc_tbl.addr, txq->bc_tbl.dma);
884 kfree(txq);
888 * iwl_txq_gen2_free - Deallocate DMA queue.
889 * @txq: Transmit queue to deallocate.
891 * Empty queue by removing and destroying all BD's.
892 * Free all buffers.
893 * 0-fill, but do not free "txq" descriptor structure.
895 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
897 struct iwl_txq *txq;
898 int i;
900 if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
901 "queue %d out of range", txq_id))
902 return;
904 txq = trans->txqs.txq[txq_id];
906 if (WARN_ON(!txq))
907 return;
909 iwl_txq_gen2_unmap(trans, txq_id);
911 /* De-alloc array of command/tx buffers */
912 if (txq_id == trans->txqs.cmd.q_id)
913 for (i = 0; i < txq->n_window; i++) {
914 kfree_sensitive(txq->entries[i].cmd);
915 kfree_sensitive(txq->entries[i].free_buf);
917 del_timer_sync(&txq->stuck_timer);
919 iwl_txq_gen2_free_memory(trans, txq);
921 trans->txqs.txq[txq_id] = NULL;
923 clear_bit(txq_id, trans->txqs.queue_used);
927 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
929 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
931 q->n_window = slots_num;
933 /* slots_num must be power-of-two size, otherwise
934 * iwl_txq_get_cmd_index is broken. */
935 if (WARN_ON(!is_power_of_2(slots_num)))
936 return -EINVAL;
938 q->low_mark = q->n_window / 4;
939 if (q->low_mark < 4)
940 q->low_mark = 4;
942 q->high_mark = q->n_window / 8;
943 if (q->high_mark < 2)
944 q->high_mark = 2;
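/*
 * For example, a 256-slot queue gets low_mark = 64 and high_mark = 32:
 * TX is stopped once fewer than 32 slots are free and the queue is
 * woken again when more than 64 slots become free.
 */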
946 q->write_ptr = 0;
947 q->read_ptr = 0;
949 return 0;
952 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
953 bool cmd_queue)
955 int ret;
956 u32 tfd_queue_max_size =
957 trans->trans_cfg->base_params->max_tfd_queue_size;
959 txq->need_update = false;
961 /* max_tfd_queue_size must be power-of-two size, otherwise
962 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
963 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
964 "Max tfd queue size must be a power of two, but is %d",
965 tfd_queue_max_size))
966 return -EINVAL;
968 /* Initialize queue's high/low-water marks, and head/tail indexes */
969 ret = iwl_queue_init(txq, slots_num);
970 if (ret)
971 return ret;
973 spin_lock_init(&txq->lock);
975 if (cmd_queue) {
976 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
978 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
981 __skb_queue_head_init(&txq->overflow_q);
983 return 0;
986 void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
988 struct page **page_ptr;
989 struct page *next;
991 page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
992 next = *page_ptr;
993 *page_ptr = NULL;
995 while (next) {
996 struct page *tmp = next;
998 next = *(void **)(page_address(next) + PAGE_SIZE -
999 sizeof(void *));
1000 __free_page(tmp);
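/*
 * This walks the chain built by get_page_hdr() and
 * get_workaround_page(): each page stores a pointer to the previously
 * attached page in its last pointer-sized slot, and NULL terminates
 * the list.
 */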
1004 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
1006 u32 txq_id = txq->id;
1007 u32 status;
1008 bool active;
1009 u8 fifo;
1011 if (trans->trans_cfg->use_tfh) {
1012 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
1013 txq->read_ptr, txq->write_ptr);
1014 /* TODO: access new SCD registers and dump them */
1015 return;
1018 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
1019 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
1020 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
1022 IWL_ERR(trans,
1023 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
1024 txq_id, active ? "" : "in", fifo,
1025 jiffies_to_msecs(txq->wd_timeout),
1026 txq->read_ptr, txq->write_ptr,
1027 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
1028 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1029 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
1030 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
1031 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
1034 static void iwl_txq_stuck_timer(struct timer_list *t)
1036 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
1037 struct iwl_trans *trans = txq->trans;
1039 spin_lock(&txq->lock);
1040 /* check if triggered erroneously */
1041 if (txq->read_ptr == txq->write_ptr) {
1042 spin_unlock(&txq->lock);
1043 return;
1045 spin_unlock(&txq->lock);
1047 iwl_txq_log_scd_error(trans, txq);
1049 iwl_force_nmi(trans);
1052 int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
1053 bool cmd_queue)
1055 size_t tfd_sz = trans->txqs.tfd.size *
1056 trans->trans_cfg->base_params->max_tfd_queue_size;
1057 size_t tb0_buf_sz;
1058 int i;
1060 if (WARN_ON(txq->entries || txq->tfds))
1061 return -EINVAL;
1063 if (trans->trans_cfg->use_tfh)
1064 tfd_sz = trans->txqs.tfd.size * slots_num;
1066 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
1067 txq->trans = trans;
1069 txq->n_window = slots_num;
1071 txq->entries = kcalloc(slots_num,
1072 sizeof(struct iwl_pcie_txq_entry),
1073 GFP_KERNEL);
1075 if (!txq->entries)
1076 goto error;
1078 if (cmd_queue)
1079 for (i = 0; i < slots_num; i++) {
1080 txq->entries[i].cmd =
1081 kmalloc(sizeof(struct iwl_device_cmd),
1082 GFP_KERNEL);
1083 if (!txq->entries[i].cmd)
1084 goto error;
1087 /* Circular buffer of transmit frame descriptors (TFDs),
1088 * shared with device */
1089 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
1090 &txq->dma_addr, GFP_KERNEL);
1091 if (!txq->tfds)
1092 goto error;
1094 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
1096 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
1098 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
1099 &txq->first_tb_dma,
1100 GFP_KERNEL);
1101 if (!txq->first_tb_bufs)
1102 goto err_free_tfds;
1104 return 0;
1105 err_free_tfds:
1106 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
1107 error:
1108 if (txq->entries && cmd_queue)
1109 for (i = 0; i < slots_num; i++)
1110 kfree(txq->entries[i].cmd);
1111 kfree(txq->entries);
1112 txq->entries = NULL;
1114 return -ENOMEM;
1117 static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
1118 struct iwl_txq **intxq, int size,
1119 unsigned int timeout)
1121 size_t bc_tbl_size, bc_tbl_entries;
1122 struct iwl_txq *txq;
1123 int ret;
1125 WARN_ON(!trans->txqs.bc_tbl_size);
1127 bc_tbl_size = trans->txqs.bc_tbl_size;
1128 bc_tbl_entries = bc_tbl_size / sizeof(u16);
1130 if (WARN_ON(size > bc_tbl_entries))
1131 return -EINVAL;
1133 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
1134 if (!txq)
1135 return -ENOMEM;
1137 txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
1138 &txq->bc_tbl.dma);
1139 if (!txq->bc_tbl.addr) {
1140 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
1141 kfree(txq);
1142 return -ENOMEM;
1145 ret = iwl_txq_alloc(trans, txq, size, false);
1146 if (ret) {
1147 IWL_ERR(trans, "Tx queue alloc failed\n");
1148 goto error;
1150 ret = iwl_txq_init(trans, txq, size, false);
1151 if (ret) {
1152 IWL_ERR(trans, "Tx queue init failed\n");
1153 goto error;
1156 txq->wd_timeout = msecs_to_jiffies(timeout);
1158 *intxq = txq;
1159 return 0;
1161 error:
1162 iwl_txq_gen2_free_memory(trans, txq);
1163 return ret;
1166 static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
1167 struct iwl_host_cmd *hcmd)
1169 struct iwl_tx_queue_cfg_rsp *rsp;
1170 int ret, qid;
1171 u32 wr_ptr;
1173 if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
1174 sizeof(*rsp))) {
1175 ret = -EINVAL;
1176 goto error_free_resp;
1179 rsp = (void *)hcmd->resp_pkt->data;
1180 qid = le16_to_cpu(rsp->queue_number);
1181 wr_ptr = le16_to_cpu(rsp->write_pointer);
1183 if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
1184 WARN_ONCE(1, "queue index %d unsupported", qid);
1185 ret = -EIO;
1186 goto error_free_resp;
1189 if (test_and_set_bit(qid, trans->txqs.queue_used)) {
1190 WARN_ONCE(1, "queue %d already used", qid);
1191 ret = -EIO;
1192 goto error_free_resp;
1195 txq->id = qid;
1196 trans->txqs.txq[qid] = txq;
1197 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1199 /* Place first TFD at index corresponding to start sequence number */
1200 txq->read_ptr = wr_ptr;
1201 txq->write_ptr = wr_ptr;
1203 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1205 iwl_free_resp(hcmd);
1206 return qid;
1208 error_free_resp:
1209 iwl_free_resp(hcmd);
1210 iwl_txq_gen2_free_memory(trans, txq);
1211 return ret;
1214 int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
1215 int cmd_id, int size, unsigned int timeout)
1217 struct iwl_txq *txq = NULL;
1218 struct iwl_tx_queue_cfg_cmd cmd = {
1219 .flags = flags,
1220 .sta_id = sta_id,
1221 .tid = tid,
1223 struct iwl_host_cmd hcmd = {
1224 .id = cmd_id,
1225 .len = { sizeof(cmd) },
1226 .data = { &cmd, },
1227 .flags = CMD_WANT_SKB,
1229 int ret;
1231 ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
1232 if (ret)
1233 return ret;
1235 cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
1236 cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1237 cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
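/*
 * The DMA ring and byte-count table are allocated first, their
 * addresses are handed to the firmware in the queue config command,
 * and the response (parsed in iwl_txq_alloc_response()) tells us which
 * hardware queue id and initial write pointer to use.
 */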
1239 ret = iwl_trans_send_cmd(trans, &hcmd);
1240 if (ret)
1241 goto error;
1243 return iwl_txq_alloc_response(trans, txq, &hcmd);
1245 error:
1246 iwl_txq_gen2_free_memory(trans, txq);
1247 return ret;
1250 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1252 if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1253 "queue %d out of range", queue))
1254 return;
1257 * Upon HW Rfkill - we stop the device, and then stop the queues
1258 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1259 * allow the op_mode to call txq_disable after it already called
1260 * stop_device.
1262 if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
1263 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1264 "queue %d not used", queue);
1265 return;
1268 iwl_txq_gen2_free(trans, queue);
1270 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1273 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1275 int i;
1277 memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
1279 /* Free all TX queues */
1280 for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
1281 if (!trans->txqs.txq[i])
1282 continue;
1284 iwl_txq_gen2_free(trans, i);
1288 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1290 struct iwl_txq *queue;
1291 int ret;
1293 /* alloc and init the tx queue */
1294 if (!trans->txqs.txq[txq_id]) {
1295 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1296 if (!queue) {
1297 IWL_ERR(trans, "Not enough memory for tx queue\n");
1298 return -ENOMEM;
1300 trans->txqs.txq[txq_id] = queue;
1301 ret = iwl_txq_alloc(trans, queue, queue_size, true);
1302 if (ret) {
1303 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1304 goto error;
1306 } else {
1307 queue = trans->txqs.txq[txq_id];
1310 ret = iwl_txq_init(trans, queue, queue_size,
1311 (txq_id == trans->txqs.cmd.q_id));
1312 if (ret) {
1313 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1314 goto error;
1316 trans->txqs.txq[txq_id]->id = txq_id;
1317 set_bit(txq_id, trans->txqs.queue_used);
1319 return 0;
1321 error:
1322 iwl_txq_gen2_tx_free(trans);
1323 return ret;
1326 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
1327 void *_tfd, u8 idx)
1329 struct iwl_tfd *tfd;
1330 struct iwl_tfd_tb *tb;
1331 dma_addr_t addr;
1332 dma_addr_t hi_len;
1334 if (trans->trans_cfg->use_tfh) {
1335 struct iwl_tfh_tfd *tfd = _tfd;
1336 struct iwl_tfh_tb *tb = &tfd->tbs[idx];
1338 return (dma_addr_t)(le64_to_cpu(tb->addr));
1341 tfd = _tfd;
1342 tb = &tfd->tbs[idx];
1343 addr = get_unaligned_le32(&tb->lo);
1345 if (sizeof(dma_addr_t) <= sizeof(u32))
1346 return addr;
1348 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
1351 * shift by 16 twice to avoid warnings on 32-bit
1352 * (where this code never runs anyway due to the
1353 * if statement above)
1355 return addr | ((hi_len << 16) << 16);
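/*
 * Pre-TFH TFDs store a 32-bit low address in tb->lo plus four extra
 * high bits packed into hi_n_len, i.e. a 36-bit DMA address on 64-bit
 * systems.
 */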
1358 void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
1359 struct iwl_cmd_meta *meta,
1360 struct iwl_txq *txq, int index)
1362 int i, num_tbs;
1363 void *tfd = iwl_txq_get_tfd(trans, txq, index);
1365 /* Sanity check on number of chunks */
1366 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
1368 if (num_tbs > trans->txqs.tfd.max_tbs) {
1369 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
1370 /* @todo issue fatal error, it is quite a serious situation */
1371 return;
1374 /* first TB is never freed - it's the bidirectional DMA data */
1376 for (i = 1; i < num_tbs; i++) {
1377 if (meta->tbs & BIT(i))
1378 dma_unmap_page(trans->dev,
1379 iwl_txq_gen1_tfd_tb_get_addr(trans,
1380 tfd, i),
1381 iwl_txq_gen1_tfd_tb_get_len(trans,
1382 tfd, i),
1383 DMA_TO_DEVICE);
1384 else
1385 dma_unmap_single(trans->dev,
1386 iwl_txq_gen1_tfd_tb_get_addr(trans,
1387 tfd, i),
1388 iwl_txq_gen1_tfd_tb_get_len(trans,
1389 tfd, i),
1390 DMA_TO_DEVICE);
1393 meta->tbs = 0;
1395 if (trans->trans_cfg->use_tfh) {
1396 struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
1398 tfd_fh->num_tbs = 0;
1399 } else {
1400 struct iwl_tfd *tfd_fh = (void *)tfd;
1402 tfd_fh->num_tbs = 0;
1406 #define IWL_TX_CRC_SIZE 4
1407 #define IWL_TX_DELIMITER_SIZE 4
1410 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1412 void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
1413 struct iwl_txq *txq, u16 byte_cnt,
1414 int num_tbs)
1416 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
1417 int write_ptr = txq->write_ptr;
1418 int txq_id = txq->id;
1419 u8 sec_ctl = 0;
1420 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1421 __le16 bc_ent;
1422 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
1423 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1424 u8 sta_id = tx_cmd->sta_id;
1426 scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1428 sec_ctl = tx_cmd->sec_ctl;
1430 switch (sec_ctl & TX_CMD_SEC_MSK) {
1431 case TX_CMD_SEC_CCM:
1432 len += IEEE80211_CCMP_MIC_LEN;
1433 break;
1434 case TX_CMD_SEC_TKIP:
1435 len += IEEE80211_TKIP_ICV_LEN;
1436 break;
1437 case TX_CMD_SEC_WEP:
1438 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
1439 break;
1441 if (trans->txqs.bc_table_dword)
1442 len = DIV_ROUND_UP(len, 4);
1444 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
1445 return;
1447 bc_ent = cpu_to_le16(len | (sta_id << 12));
1449 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1451 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1452 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
1453 bc_ent;
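/*
 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored past the end of
 * the table, apparently so the scheduler can read a little beyond the
 * end of the ring without having to handle the wrap-around itself.
 */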
1456 void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
1457 struct iwl_txq *txq)
1459 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
1460 int txq_id = txq->id;
1461 int read_ptr = txq->read_ptr;
1462 u8 sta_id = 0;
1463 __le16 bc_ent;
1464 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
1465 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1467 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
1469 if (txq_id != trans->txqs.cmd.q_id)
1470 sta_id = tx_cmd->sta_id;
1472 bc_ent = cpu_to_le16(1 | (sta_id << 12));
1474 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
1476 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
1477 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
1478 bc_ent;
1482 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
1483 * @trans: transport private data
1484 * @txq: tx queue
1487 * Does NOT advance any TFD circular buffer read/write indexes
1488 * Does NOT free the TFD itself (which is within circular buffer)
1490 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
1492 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
1493 * idx is bounded by n_window
1495 int rd_ptr = txq->read_ptr;
1496 int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
1498 lockdep_assert_held(&txq->lock);
1500 /* We have only q->n_window txq->entries, but we use
1501 * TFD_QUEUE_SIZE_MAX tfds
1503 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
1505 /* free SKB */
1506 if (txq->entries) {
1507 struct sk_buff *skb;
1509 skb = txq->entries[idx].skb;
1511 /* Can be called from irqs-disabled context
1512 * If skb is not NULL, it means that the whole queue is being
1513 * freed and that the queue is not empty - free the skb
1515 if (skb) {
1516 iwl_op_mode_free_skb(trans->op_mode, skb);
1517 txq->entries[idx].skb = NULL;
1522 void iwl_txq_progress(struct iwl_txq *txq)
1524 lockdep_assert_held(&txq->lock);
1526 if (!txq->wd_timeout)
1527 return;
1530 * station is asleep and we send data - that must
1531 * be uAPSD or PS-Poll. Don't rearm the timer.
1533 if (txq->frozen)
1534 return;
1537 * if empty delete timer, otherwise move timer forward
1538 * since we're making progress on this queue
1540 if (txq->read_ptr == txq->write_ptr)
1541 del_timer(&txq->stuck_timer);
1542 else
1543 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1546 /* Frees buffers until index _not_ inclusive */
1547 void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1548 struct sk_buff_head *skbs)
1550 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1551 int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
1552 int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1553 int last_to_free;
1555 /* This function is not meant to release cmd queue*/
1556 if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1557 return;
1559 spin_lock_bh(&txq->lock);
1561 if (!test_bit(txq_id, trans->txqs.queue_used)) {
1562 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1563 txq_id, ssn);
1564 goto out;
1567 if (read_ptr == tfd_num)
1568 goto out;
1570 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1571 txq_id, txq->read_ptr, tfd_num, ssn);
1573 /* Since we free until index _not_ inclusive, the one before index is
1574 * the last we will free. This one must be used */
1575 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1577 if (!iwl_txq_used(txq, last_to_free)) {
1578 IWL_ERR(trans,
1579 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1580 __func__, txq_id, last_to_free,
1581 trans->trans_cfg->base_params->max_tfd_queue_size,
1582 txq->write_ptr, txq->read_ptr);
1583 goto out;
1586 if (WARN_ON(!skb_queue_empty(skbs)))
1587 goto out;
1589 for (;
1590 read_ptr != tfd_num;
1591 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
1592 read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1593 struct sk_buff *skb = txq->entries[read_ptr].skb;
1595 if (WARN_ON_ONCE(!skb))
1596 continue;
1598 iwl_txq_free_tso_page(trans, skb);
1600 __skb_queue_tail(skbs, skb);
1602 txq->entries[read_ptr].skb = NULL;
1604 if (!trans->trans_cfg->use_tfh)
1605 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1607 iwl_txq_free_tfd(trans, txq);
1610 iwl_txq_progress(txq);
1612 if (iwl_txq_space(trans, txq) > txq->low_mark &&
1613 test_bit(txq_id, trans->txqs.queue_stopped)) {
1614 struct sk_buff_head overflow_skbs;
1616 __skb_queue_head_init(&overflow_skbs);
1617 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1620 * We are going to transmit from the overflow queue.
1621 * Remember this state so that wait_for_txq_empty will know we
1622 * are adding more packets to the TFD queue. It cannot rely on
1623 * the state of &txq->overflow_q, as we just emptied it, but
1624 * haven't TXed the content yet.
1626 txq->overflow_tx = true;
1629 * This is tricky: we are in the reclaim path, which is not
1630 * re-entrant, so no one will try to access the txq data
1631 * from that path. We stopped tx, so the tx path can't touch
1632 * it either. Bottom line: we can unlock and re-lock
1633 * later.
1635 spin_unlock_bh(&txq->lock);
1637 while (!skb_queue_empty(&overflow_skbs)) {
1638 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1639 struct iwl_device_tx_cmd *dev_cmd_ptr;
1641 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1642 trans->txqs.dev_cmd_offs);
1645 * Note that we can very well be overflowing again.
1646 * In that case, iwl_txq_space will be small again
1647 * and we won't wake mac80211's queue.
1649 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1652 if (iwl_txq_space(trans, txq) > txq->low_mark)
1653 iwl_wake_queue(trans, txq);
1655 spin_lock_bh(&txq->lock);
1656 txq->overflow_tx = false;
1659 out:
1660 spin_unlock_bh(&txq->lock);
1663 /* Set wr_ptr of specific device and txq */
1664 void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
1666 struct iwl_txq *txq = trans->txqs.txq[txq_id];
1668 spin_lock_bh(&txq->lock);
1670 txq->write_ptr = ptr;
1671 txq->read_ptr = txq->write_ptr;
1673 spin_unlock_bh(&txq->lock);
1676 void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
1677 bool freeze)
1679 int queue;
1681 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
1682 struct iwl_txq *txq = trans->txqs.txq[queue];
1683 unsigned long now;
1685 spin_lock_bh(&txq->lock);
1687 now = jiffies;
1689 if (txq->frozen == freeze)
1690 goto next_queue;
1692 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
1693 freeze ? "Freezing" : "Waking", queue);
1695 txq->frozen = freeze;
1697 if (txq->read_ptr == txq->write_ptr)
1698 goto next_queue;
1700 if (freeze) {
1701 if (unlikely(time_after(now,
1702 txq->stuck_timer.expires))) {
1704 * The timer should have fired, maybe it is
1705 * spinning right now on the lock.
1707 goto next_queue;
1709 /* remember how long until the timer fires */
1710 txq->frozen_expiry_remainder =
1711 txq->stuck_timer.expires - now;
1712 del_timer(&txq->stuck_timer);
1713 goto next_queue;
1717 * Wake a non-empty queue -> arm timer with the
1718 * remainder before it froze
1720 mod_timer(&txq->stuck_timer,
1721 now + txq->frozen_expiry_remainder);
1723 next_queue:
1724 spin_unlock_bh(&txq->lock);