1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 - 2019 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * The full GNU General Public License is included in this distribution in the
23 * file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64 #include <linux/sched.h>
65 #include <linux/wait.h>
66 #include <linux/gfp.h>
71 #include "iwl-op-mode.h"
72 #include "iwl-context-info-gen3.h"
74 /******************************************************************************
78 ******************************************************************************/
81 * Rx theory of operation
83 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
84 * each of which points to a Receive Buffer to be filled by the NIC. These get
85 * used not only for Rx frames, but for any command response or notification
86 * from the NIC. The driver and NIC manage the Rx buffers by means
87 * of indexes into the circular buffer.
90 * The host/firmware share two index registers for managing the Rx buffers.
92 * The READ index maps to the first position that the firmware may be writing
93 * to -- the driver can read up to (but not including) this position and get good data.
95 * The READ index is managed by the firmware once the card is enabled.
97 * The WRITE index maps to the last position the driver has read from -- the
98 * position preceding WRITE is the last slot the firmware can place a packet.
100 * The queue is empty (no good data) if WRITE = READ - 1, and is full if WRITE = READ.
103 * During initialization, the host sets up the READ queue position to the first
104 * INDEX position, and WRITE to the last (READ - 1 wrapped)
106 * When the firmware places a packet in a buffer, it will advance the READ index
107 * and fire the RX interrupt. The driver can then query the READ index and
108 * process as many packets as possible, moving the WRITE index forward as it
109 * resets the Rx queue buffers with new memory.
111 * The management in the driver is as follows:
112 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
113 * When the interrupt handler is called, the request is processed.
114 * The page is either stolen - transferred to the upper layer
115 * or reused - added immediately to the iwl->rxq->rx_free list.
116 * + When the page is stolen - the driver updates the matching queue's used
117 * count, detaches the RBD and transfers it to the queue used list.
118 * When there are two used RBDs - they are transferred to the allocator empty
119 * list. Work is then scheduled for the allocator to start allocating 8 pages.
121 * When there are another 6 used RBDs - they are transferred to the allocator
122 * empty list and the driver tries to claim the pre-allocated buffers and
123 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
125 * When there are 8+ buffers in the free list - either from allocation or from
126 * 8 reused unstolen pages - restock is called to update the FW and indexes.
127 * + In order to make sure the allocator always has RBDs to use for allocation
128 * the allocator has an initial pool of size num_queues * (8 - 2) - the
129 * maximum missing RBDs per allocation request (a request is posted with 2
130 * empty RBDs, and there is no guarantee when the other 6 RBDs are supplied).
131 * The queues supply the recycling of the rest of the RBDs.
132 * + A received packet is processed and handed to the kernel network stack,
133 * detached from the iwl->rxq. The driver 'processed' index is updated.
134 * + If there are no allocated buffers in iwl->rxq->rx_free,
135 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
136 * If there were enough free buffers and RX_STALLED is set it is cleared.
141 * iwl_rxq_alloc() Allocates rx_free
142 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
143 * iwl_pcie_rxq_restock.
144 * Used only during initialization.
145 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
146 * queue, updates firmware pointers, and updates
148 * iwl_pcie_rx_allocator() Background work for allocating pages.
150 * -- enable interrupts --
151 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
152 * READ INDEX, detaching the SKB from the pool.
153 * Moves the packet buffer from queue to rx_used.
154 * Posts and claims requests to the allocator.
155 * Calls iwl_pcie_rxq_restock to refill any empty
161 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
163 * Regular Receive interrupt:
165 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
166 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
168 * rxq.queue -> rxq.rx_free -> rxq.queue
174 * iwl_rxq_space - Return number of free slots available in queue.
176 static int iwl_rxq_space(const struct iwl_rxq *rxq)
178 /* Make sure rx queue size is a power of 2 */
179 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
182 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
183 * between empty and completely full queues.
184 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
185 * defined for negative dividends.
187 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
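/*
 * Illustrative worked example (not part of the original source): with
 * queue_size = 256, read = 5 and write = 250, the expression above yields
 * (5 - 250 - 1) & 255 = (-246) & 255 = 10 free slots.
 */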
191 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
193 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
195 return cpu_to_le32((u32)(dma_addr >> 8));
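/*
 * Illustrative example (an assumption for clarity, not from the original
 * source): a 256-byte-aligned DMA address such as 0x12345600 is stored in the
 * legacy RBD as 0x00123456, i.e. the byte address shifted right by 8.
 */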
199 * iwl_pcie_rx_stop - stops the Rx DMA
201 int iwl_pcie_rx_stop(struct iwl_trans *trans)
203 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
204 /* TODO: remove this once fw does it */
205 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
206 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
207 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
208 } else if (trans->trans_cfg->mq_rx_supported) {
209 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
210 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
211 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
213 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
214 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
215 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
221 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
223 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
228 lockdep_assert_held(&rxq->lock);
231 * explicitly wake up the NIC if:
232 * 1. shadow registers aren't enabled
233 * 2. there is a chance that the NIC is asleep
235 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
236 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
237 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
239 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
240 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
242 iwl_set_bit(trans, CSR_GP_CNTRL,
243 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
244 rxq->need_update = true;
249 rxq->write_actual = round_down(rxq->write, 8);
250 if (trans->trans_cfg->mq_rx_supported)
251 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
254 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
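/*
 * Illustrative example (not from the original source): with rxq->write = 29,
 * round_down(29, 8) = 24, so the device write pointer only advances in
 * multiples of 8 RBDs, matching the "multiples of 8" comments further below.
 */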
257 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
259 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
262 for (i = 0; i < trans->num_rx_queues; i++) {
263 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
265 if (!rxq->need_update)
267 spin_lock(&rxq->lock);
268 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
269 rxq->need_update = false;
270 spin_unlock(&rxq->lock);
274 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
276 struct iwl_rx_mem_buffer *rxb)
278 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
279 struct iwl_rx_transfer_desc *bd = rxq->bd;
281 BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
283 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
284 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
286 __le64 *bd = rxq->bd;
288 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
291 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
292 (u32)rxb->vid, rxq->id, rxq->write);
296 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
298 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
301 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
302 struct iwl_rx_mem_buffer *rxb;
305 * If the device isn't enabled - no need to try to add buffers...
306 * This can happen when we stop the device and still have an interrupt
307 * pending. We stop the APM before we sync the interrupts because we
308 * have to (see comment there). On the other hand, since the APM is
309 * stopped, we cannot access the HW (in particular not prph).
310 * So don't try to restock if the APM has been already stopped.
312 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
315 spin_lock(&rxq->lock);
316 while (rxq->free_count) {
317 /* Get next free Rx buffer, remove from free list */
318 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
320 list_del(&rxb->list);
321 rxb->invalid = false;
322 /* some low bits are expected to be unset (depending on hw) */
323 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
324 /* Point to Rx buffer via next RBD in circular buffer */
325 iwl_pcie_restock_bd(trans, rxq, rxb);
326 rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
329 spin_unlock(&rxq->lock);
332 * If we've added more space for the firmware to place data, tell it.
333 * Increment device's write pointer in multiples of 8.
335 if (rxq->write_actual != (rxq->write & ~0x7)) {
336 spin_lock(&rxq->lock);
337 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
338 spin_unlock(&rxq->lock);
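/*
 * Illustrative example (not from the original source): if write_actual is 8
 * and write has advanced to 14, then (14 & ~0x7) == 8 and no doorbell write
 * is needed yet; once write reaches 16, (16 & ~0x7) == 16 != 8 and the
 * device write pointer is updated.
 */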
343 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
345 static void iwl_pcie_rxsq_restock(struct iwl_trans
*trans
,
348 struct iwl_rx_mem_buffer
*rxb
;
351 * If the device isn't enabled - no need to try to add buffers...
352 * This can happen when we stop the device and still have an interrupt
353 * pending. We stop the APM before we sync the interrupts because we
354 * have to (see comment there). On the other hand, since the APM is
355 * stopped, we cannot access the HW (in particular not prph).
356 * So don't try to restock if the APM has been already stopped.
358 if (!test_bit(STATUS_DEVICE_ENABLED
, &trans
->status
))
361 spin_lock(&rxq
->lock
);
362 while ((iwl_rxq_space(rxq
) > 0) && (rxq
->free_count
)) {
363 __le32
*bd
= (__le32
*)rxq
->bd
;
364 /* The overwritten rxb must be a used one */
365 rxb
= rxq
->queue
[rxq
->write
];
366 BUG_ON(rxb
&& rxb
->page
);
368 /* Get next free Rx buffer, remove from free list */
369 rxb
= list_first_entry(&rxq
->rx_free
, struct iwl_rx_mem_buffer
,
371 list_del(&rxb
->list
);
372 rxb
->invalid
= false;
374 /* Point to Rx buffer via next RBD in circular buffer */
375 bd
[rxq
->write
] = iwl_pcie_dma_addr2rbd_ptr(rxb
->page_dma
);
376 rxq
->queue
[rxq
->write
] = rxb
;
377 rxq
->write
= (rxq
->write
+ 1) & RX_QUEUE_MASK
;
380 spin_unlock(&rxq
->lock
);
382 /* If we've added more space for the firmware to place data, tell it.
383 * Increment device's write pointer in multiples of 8. */
384 if (rxq
->write_actual
!= (rxq
->write
& ~0x7)) {
385 spin_lock(&rxq
->lock
);
386 iwl_pcie_rxq_inc_wr_ptr(trans
, rxq
);
387 spin_unlock(&rxq
->lock
);
392 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
394 * If there are slots in the RX queue that need to be restocked,
395 * and we have free pre-allocated buffers, fill the ranks as much
396 * as we can, pulling from rx_free.
398 * This moves the 'write' index forward to catch up with 'processed', and
399 * also updates the memory address in the firmware to reference the new
403 void iwl_pcie_rxq_restock(struct iwl_trans
*trans
, struct iwl_rxq
*rxq
)
405 if (trans
->trans_cfg
->mq_rx_supported
)
406 iwl_pcie_rxmq_restock(trans
, rxq
);
408 iwl_pcie_rxsq_restock(trans
, rxq
);
412 * iwl_pcie_rx_alloc_page - allocates and returns a page.
415 static struct page
*iwl_pcie_rx_alloc_page(struct iwl_trans
*trans
,
416 u32
*offset
, gfp_t priority
)
418 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
419 unsigned int rbsize
= iwl_trans_get_rb_size(trans_pcie
->rx_buf_size
);
420 unsigned int allocsize
= PAGE_SIZE
<< trans_pcie
->rx_page_order
;
422 gfp_t gfp_mask
= priority
;
424 if (trans_pcie
->rx_page_order
> 0)
425 gfp_mask
|= __GFP_COMP
;
427 if (trans_pcie
->alloc_page
) {
428 spin_lock_bh(&trans_pcie
->alloc_page_lock
);
430 if (trans_pcie
->alloc_page
) {
431 *offset
= trans_pcie
->alloc_page_used
;
432 page
= trans_pcie
->alloc_page
;
433 trans_pcie
->alloc_page_used
+= rbsize
;
434 if (trans_pcie
->alloc_page_used
>= allocsize
)
435 trans_pcie
->alloc_page
= NULL
;
438 spin_unlock_bh(&trans_pcie
->alloc_page_lock
);
441 spin_unlock_bh(&trans_pcie
->alloc_page_lock
);
444 /* Alloc a new receive buffer */
445 page
= alloc_pages(gfp_mask
, trans_pcie
->rx_page_order
);
448 IWL_DEBUG_INFO(trans
, "alloc_pages failed, order: %d\n",
449 trans_pcie
->rx_page_order
);
451 * Issue an error if we don't have enough pre-allocated
454 if (!(gfp_mask
& __GFP_NOWARN
) && net_ratelimit())
456 "Failed to alloc_pages\n");
460 if (2 * rbsize <= allocsize) {
461 spin_lock_bh(&trans_pcie->alloc_page_lock);
462 if (!trans_pcie->alloc_page) {
464 trans_pcie->alloc_page = page;
465 trans_pcie->alloc_page_used = rbsize;
467 spin_unlock_bh(&trans_pcie->alloc_page_lock);
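/*
 * Illustrative example (an assumption for clarity): with 2K receive buffers
 * and order-0 (4K) pages, rbsize = 2048 and allocsize = 4096, so the second
 * half of the page is kept in trans_pcie->alloc_page and handed out at
 * offset 2048 on the next call instead of allocating a fresh page.
 */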
475 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
477 * A used RBD is an Rx buffer that has been given to the stack. To use it again
478 * a page must be allocated and the RBD must point to the page. This function
479 * doesn't change the HW pointer but handles the list of pages that is used by
480 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
483 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans
*trans
, gfp_t priority
,
486 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
487 struct iwl_rx_mem_buffer
*rxb
;
493 spin_lock(&rxq
->lock
);
494 if (list_empty(&rxq
->rx_used
)) {
495 spin_unlock(&rxq
->lock
);
498 spin_unlock(&rxq
->lock
);
500 page
= iwl_pcie_rx_alloc_page(trans
, &offset
, priority
);
504 spin_lock(&rxq
->lock
);
506 if (list_empty(&rxq
->rx_used
)) {
507 spin_unlock(&rxq
->lock
);
508 __free_pages(page
, trans_pcie
->rx_page_order
);
511 rxb
= list_first_entry(&rxq
->rx_used
, struct iwl_rx_mem_buffer
,
513 list_del(&rxb
->list
);
514 spin_unlock(&rxq
->lock
);
518 rxb
->offset
= offset
;
519 /* Get physical address of the RB */
521 dma_map_page(trans
->dev
, page
, rxb
->offset
,
522 trans_pcie
->rx_buf_bytes
,
524 if (dma_mapping_error(trans
->dev
, rxb
->page_dma
)) {
526 spin_lock(&rxq
->lock
);
527 list_add(&rxb
->list
, &rxq
->rx_used
);
528 spin_unlock(&rxq
->lock
);
529 __free_pages(page
, trans_pcie
->rx_page_order
);
533 spin_lock(&rxq
->lock
);
535 list_add_tail(&rxb
->list
, &rxq
->rx_free
);
538 spin_unlock(&rxq
->lock
);
542 void iwl_pcie_free_rbs_pool(struct iwl_trans
*trans
)
544 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
547 for (i
= 0; i
< RX_POOL_SIZE(trans_pcie
->num_rx_bufs
); i
++) {
548 if (!trans_pcie
->rx_pool
[i
].page
)
550 dma_unmap_page(trans
->dev
, trans_pcie
->rx_pool
[i
].page_dma
,
551 trans_pcie
->rx_buf_bytes
, DMA_FROM_DEVICE
);
552 __free_pages(trans_pcie
->rx_pool
[i
].page
,
553 trans_pcie
->rx_page_order
);
554 trans_pcie
->rx_pool
[i
].page
= NULL
;
559 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
561 * Allocates 8 pages for each received request.
562 * Called as a scheduled work item.
564 static void iwl_pcie_rx_allocator(struct iwl_trans
*trans
)
566 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
567 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
568 struct list_head local_empty
;
569 int pending
= atomic_read(&rba
->req_pending
);
571 IWL_DEBUG_TPT(trans
, "Pending allocation requests = %d\n", pending
);
573 /* If we were scheduled - there is at least one request */
574 spin_lock(&rba
->lock
);
575 /* swap out the rba->rbd_empty to a local list */
576 list_replace_init(&rba
->rbd_empty
, &local_empty
);
577 spin_unlock(&rba
->lock
);
581 LIST_HEAD(local_allocated
);
582 gfp_t gfp_mask
= GFP_KERNEL
;
584 /* Do not post a warning if there are only a few requests */
585 if (pending
< RX_PENDING_WATERMARK
)
586 gfp_mask
|= __GFP_NOWARN
;
588 for (i
= 0; i
< RX_CLAIM_REQ_ALLOC
;) {
589 struct iwl_rx_mem_buffer
*rxb
;
592 /* List should never be empty - each reused RBD is
593 * returned to the list, and initial pool covers any
594 * possible gap between the time the page is allocated
595 * to the time the RBD is added.
597 BUG_ON(list_empty(&local_empty
));
598 /* Get the first rxb from the rbd list */
599 rxb
= list_first_entry(&local_empty
,
600 struct iwl_rx_mem_buffer
, list
);
603 /* Alloc a new receive buffer */
604 page
= iwl_pcie_rx_alloc_page(trans
, &rxb
->offset
,
610 /* Get physical address of the RB */
611 rxb
->page_dma
= dma_map_page(trans
->dev
, page
,
613 trans_pcie
->rx_buf_bytes
,
615 if (dma_mapping_error(trans
->dev
, rxb
->page_dma
)) {
617 __free_pages(page
, trans_pcie
->rx_page_order
);
621 /* move the allocated entry to the out list */
622 list_move(&rxb
->list
, &local_allocated
);
626 atomic_dec(&rba
->req_pending
);
630 pending
= atomic_read(&rba
->req_pending
);
633 "Got more pending allocation requests = %d\n",
637 spin_lock(&rba
->lock
);
638 /* add the allocated rbds to the allocator allocated list */
639 list_splice_tail(&local_allocated
, &rba
->rbd_allocated
);
640 /* get more empty RBDs for current pending requests */
641 list_splice_tail_init(&rba
->rbd_empty
, &local_empty
);
642 spin_unlock(&rba
->lock
);
644 atomic_inc(&rba
->req_ready
);
648 spin_lock(&rba
->lock
);
649 /* return unused rbds to the allocator empty list */
650 list_splice_tail(&local_empty
, &rba
->rbd_empty
);
651 spin_unlock(&rba
->lock
);
653 IWL_DEBUG_TPT(trans
, "%s, exit.\n", __func__
);
657 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
659 * Called by the queue when it has posted an allocation request and
660 * has freed 8 RBDs in order to restock itself.
661 * This function directly moves the allocated RBs to the queue's ownership
662 * and updates the relevant counters.
664 static void iwl_pcie_rx_allocator_get(struct iwl_trans
*trans
,
667 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
668 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
671 lockdep_assert_held(&rxq->lock);
674 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
675 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
676 * function will return early, as there are no ready requests.
677 * atomic_dec_if_positive will perform the *actual* decrement only if
678 * req_ready > 0, i.e. - there are ready requests and the function
679 * hands one request to the caller.
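* Illustrative example (not from the original source): if req_ready == 2,
* the call returns 1 and stores 1, so one ready request is consumed here;
* if req_ready == 0, it returns -1 and nothing is decremented.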
681 if (atomic_dec_if_positive(&rba
->req_ready
) < 0)
684 spin_lock(&rba
->lock
);
685 for (i
= 0; i
< RX_CLAIM_REQ_ALLOC
; i
++) {
686 /* Get next free Rx buffer, remove it from free list */
687 struct iwl_rx_mem_buffer
*rxb
=
688 list_first_entry(&rba
->rbd_allocated
,
689 struct iwl_rx_mem_buffer
, list
);
691 list_move(&rxb
->list
, &rxq
->rx_free
);
693 spin_unlock(&rba
->lock
);
695 rxq
->used_count
-= RX_CLAIM_REQ_ALLOC
;
696 rxq
->free_count
+= RX_CLAIM_REQ_ALLOC
;
699 void iwl_pcie_rx_allocator_work(struct work_struct
*data
)
701 struct iwl_rb_allocator
*rba_p
=
702 container_of(data
, struct iwl_rb_allocator
, rx_alloc
);
703 struct iwl_trans_pcie
*trans_pcie
=
704 container_of(rba_p
, struct iwl_trans_pcie
, rba
);
706 iwl_pcie_rx_allocator(trans_pcie
->trans
);
709 static int iwl_pcie_free_bd_size(struct iwl_trans
*trans
, bool use_rx_td
)
711 struct iwl_rx_transfer_desc
*rx_td
;
714 return sizeof(*rx_td
);
716 return trans
->trans_cfg
->mq_rx_supported
? sizeof(__le64
) :
720 static void iwl_pcie_free_rxq_dma(struct iwl_trans
*trans
,
723 struct device
*dev
= trans
->dev
;
724 bool use_rx_td
= (trans
->trans_cfg
->device_family
>=
725 IWL_DEVICE_FAMILY_AX210
);
726 int free_size
= iwl_pcie_free_bd_size(trans
, use_rx_td
);
729 dma_free_coherent(trans
->dev
,
730 free_size
* rxq
->queue_size
,
731 rxq
->bd
, rxq
->bd_dma
);
735 rxq
->rb_stts_dma
= 0;
739 dma_free_coherent(trans
->dev
,
740 (use_rx_td
? sizeof(*rxq
->cd
) :
741 sizeof(__le32
)) * rxq
->queue_size
,
742 rxq
->used_bd
, rxq
->used_bd_dma
);
743 rxq
->used_bd_dma
= 0;
746 if (trans
->trans_cfg
->device_family
< IWL_DEVICE_FAMILY_AX210
)
750 dma_free_coherent(dev
, sizeof(__le16
),
751 rxq
->tr_tail
, rxq
->tr_tail_dma
);
752 rxq
->tr_tail_dma
= 0;
756 dma_free_coherent(dev
, sizeof(__le16
),
757 rxq
->cr_tail
, rxq
->cr_tail_dma
);
758 rxq
->cr_tail_dma
= 0;
762 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans
*trans
,
765 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
766 struct device
*dev
= trans
->dev
;
769 bool use_rx_td
= (trans
->trans_cfg
->device_family
>=
770 IWL_DEVICE_FAMILY_AX210
);
771 size_t rb_stts_size
= use_rx_td
? sizeof(__le16
) :
772 sizeof(struct iwl_rb_status
);
774 spin_lock_init(&rxq
->lock
);
775 if (trans
->trans_cfg
->mq_rx_supported
)
776 rxq
->queue_size
= trans
->cfg
->num_rbds
;
778 rxq
->queue_size
= RX_QUEUE_SIZE
;
780 free_size
= iwl_pcie_free_bd_size(trans
, use_rx_td
);
783 * Allocate the circular buffer of Read Buffer Descriptors
786 rxq
->bd
= dma_alloc_coherent(dev
, free_size
* rxq
->queue_size
,
787 &rxq
->bd_dma
, GFP_KERNEL
);
791 if (trans
->trans_cfg
->mq_rx_supported
) {
792 rxq
->used_bd
= dma_alloc_coherent(dev
,
793 (use_rx_td
? sizeof(*rxq
->cd
) : sizeof(__le32
)) * rxq
->queue_size
,
800 rxq
->rb_stts
= trans_pcie
->base_rb_stts
+ rxq
->id
* rb_stts_size
;
802 trans_pcie
->base_rb_stts_dma
+ rxq
->id
* rb_stts_size
;
807 /* Allocate the driver's pointer to TR tail */
808 rxq
->tr_tail
= dma_alloc_coherent(dev
, sizeof(__le16
),
809 &rxq
->tr_tail_dma
, GFP_KERNEL
);
813 /* Allocate the driver's pointer to CR tail */
814 rxq
->cr_tail
= dma_alloc_coherent(dev
, sizeof(__le16
),
815 &rxq
->cr_tail_dma
, GFP_KERNEL
);
822 for (i
= 0; i
< trans
->num_rx_queues
; i
++) {
823 struct iwl_rxq
*rxq
= &trans_pcie
->rxq
[i
];
825 iwl_pcie_free_rxq_dma(trans
, rxq
);
831 static int iwl_pcie_rx_alloc(struct iwl_trans
*trans
)
833 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
834 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
836 size_t rb_stts_size
= trans
->trans_cfg
->device_family
>=
837 IWL_DEVICE_FAMILY_AX210
?
838 sizeof(__le16
) : sizeof(struct iwl_rb_status
);
840 if (WARN_ON(trans_pcie
->rxq
))
843 trans_pcie
->rxq
= kcalloc(trans
->num_rx_queues
, sizeof(struct iwl_rxq
),
845 trans_pcie
->rx_pool
= kcalloc(RX_POOL_SIZE(trans_pcie
->num_rx_bufs
),
846 sizeof(trans_pcie
->rx_pool
[0]),
848 trans_pcie
->global_table
=
849 kcalloc(RX_POOL_SIZE(trans_pcie
->num_rx_bufs
),
850 sizeof(trans_pcie
->global_table
[0]),
852 if (!trans_pcie
->rxq
|| !trans_pcie
->rx_pool
||
853 !trans_pcie
->global_table
) {
858 spin_lock_init(&rba
->lock
);
861 * Allocate the driver's pointer to receive buffer status.
862 * Allocate for all queues contiguously (HW requirement).
864 trans_pcie
->base_rb_stts
=
865 dma_alloc_coherent(trans
->dev
,
866 rb_stts_size
* trans
->num_rx_queues
,
867 &trans_pcie
->base_rb_stts_dma
,
869 if (!trans_pcie
->base_rb_stts
) {
874 for (i
= 0; i
< trans
->num_rx_queues
; i
++) {
875 struct iwl_rxq
*rxq
= &trans_pcie
->rxq
[i
];
878 ret
= iwl_pcie_alloc_rxq_dma(trans
, rxq
);
885 if (trans_pcie
->base_rb_stts
) {
886 dma_free_coherent(trans
->dev
,
887 rb_stts_size
* trans
->num_rx_queues
,
888 trans_pcie
->base_rb_stts
,
889 trans_pcie
->base_rb_stts_dma
);
890 trans_pcie
->base_rb_stts
= NULL
;
891 trans_pcie
->base_rb_stts_dma
= 0;
893 kfree(trans_pcie
->rx_pool
);
894 kfree(trans_pcie
->global_table
);
895 kfree(trans_pcie
->rxq
);
900 static void iwl_pcie_rx_hw_init(struct iwl_trans
*trans
, struct iwl_rxq
*rxq
)
902 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
905 const u32 rfdnlog
= RX_QUEUE_SIZE_LOG
; /* 256 RBDs */
907 switch (trans_pcie
->rx_buf_size
) {
909 rb_size
= FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K
;
912 rb_size
= FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K
;
915 rb_size
= FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K
;
919 rb_size
= FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K
;
922 if (!iwl_trans_grab_nic_access(trans
, &flags
))
926 iwl_write32(trans
, FH_MEM_RCSR_CHNL0_CONFIG_REG
, 0);
927 /* reset and flush pointers */
928 iwl_write32(trans
, FH_MEM_RCSR_CHNL0_RBDCB_WPTR
, 0);
929 iwl_write32(trans
, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ
, 0);
930 iwl_write32(trans
, FH_RSCSR_CHNL0_RDPTR
, 0);
932 /* Reset driver's Rx queue write index */
933 iwl_write32(trans
, FH_RSCSR_CHNL0_RBDCB_WPTR_REG
, 0);
935 /* Tell device where to find RBD circular buffer in DRAM */
936 iwl_write32(trans
, FH_RSCSR_CHNL0_RBDCB_BASE_REG
,
937 (u32
)(rxq
->bd_dma
>> 8));
939 /* Tell device where in DRAM to update its Rx status */
940 iwl_write32(trans
, FH_RSCSR_CHNL0_STTS_WPTR_REG
,
941 rxq
->rb_stts_dma
>> 4);
944 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
945 * the credit mechanism in 5000 HW RX FIFO
946 * Direct rx interrupts to hosts
947 * Rx buffer size 4k, 8k or 12k
951 iwl_write32(trans
, FH_MEM_RCSR_CHNL0_CONFIG_REG
,
952 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL
|
953 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY
|
954 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL
|
956 (RX_RB_TIMEOUT
<< FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS
) |
957 (rfdnlog
<< FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS
));
959 iwl_trans_release_nic_access(trans
, &flags
);
961 /* Set interrupt coalescing timer to default (2048 usecs) */
962 iwl_write8(trans
, CSR_INT_COALESCING
, IWL_HOST_INT_TIMEOUT_DEF
);
964 /* W/A for interrupt coalescing bug in 7260 and 3160 */
965 if (trans
->cfg
->host_interrupt_operation_mode
)
966 iwl_set_bit(trans
, CSR_INT_COALESCING
, IWL_HOST_INT_OPER_MODE
);
969 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans
*trans
)
971 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
972 u32 rb_size
, enabled
= 0;
976 switch (trans_pcie
->rx_buf_size
) {
978 rb_size
= RFH_RXF_DMA_RB_SIZE_2K
;
981 rb_size
= RFH_RXF_DMA_RB_SIZE_4K
;
984 rb_size
= RFH_RXF_DMA_RB_SIZE_8K
;
987 rb_size
= RFH_RXF_DMA_RB_SIZE_12K
;
991 rb_size
= RFH_RXF_DMA_RB_SIZE_4K
;
994 if (!iwl_trans_grab_nic_access(trans
, &flags
))
998 iwl_write_prph_no_grab(trans
, RFH_RXF_DMA_CFG
, 0);
999 /* disable free and used rx queue operation */
1000 iwl_write_prph_no_grab(trans
, RFH_RXF_RXQ_ACTIVE
, 0);
1002 for (i
= 0; i
< trans
->num_rx_queues
; i
++) {
1003 /* Tell device where to find RBD free table in DRAM */
1004 iwl_write_prph64_no_grab(trans
,
1005 RFH_Q_FRBDCB_BA_LSB(i
),
1006 trans_pcie
->rxq
[i
].bd_dma
);
1007 /* Tell device where to find RBD used table in DRAM */
1008 iwl_write_prph64_no_grab(trans
,
1009 RFH_Q_URBDCB_BA_LSB(i
),
1010 trans_pcie
->rxq
[i
].used_bd_dma
);
1011 /* Tell device where in DRAM to update its Rx status */
1012 iwl_write_prph64_no_grab(trans
,
1013 RFH_Q_URBD_STTS_WPTR_LSB(i
),
1014 trans_pcie
->rxq
[i
].rb_stts_dma
);
1015 /* Reset device index tables */
1016 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
1017 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
1018 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
1020 enabled |= BIT(i) | BIT(i + 16);
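/*
 * Illustrative example (not from the original source): with two RX queues,
 * enabled ends up as BIT(0) | BIT(1) | BIT(16) | BIT(17) == 0x00030003
 * before being written to RFH_RXF_RXQ_ACTIVE below.
 */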
1025 * Rx buffer size 4k, 8k or 12k
1026 * Min RB size 4 or 8
1027 * Drop frames that exceed RB size
1030 iwl_write_prph_no_grab(trans
, RFH_RXF_DMA_CFG
,
1031 RFH_DMA_EN_ENABLE_VAL
| rb_size
|
1032 RFH_RXF_DMA_MIN_RB_4_8
|
1033 RFH_RXF_DMA_DROP_TOO_LARGE_MASK
|
1034 RFH_RXF_DMA_RBDCB_SIZE_512
);
1037 * Activate DMA snooping.
1038 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
1039 * Default queue is 0
1041 iwl_write_prph_no_grab(trans
, RFH_GEN_CFG
,
1042 RFH_GEN_CFG_RFH_DMA_SNOOP
|
1043 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM
, 0) |
1044 RFH_GEN_CFG_SERVICE_DMA_SNOOP
|
1045 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE
,
1046 trans
->cfg
->integrated
?
1047 RFH_GEN_CFG_RB_CHUNK_SIZE_64
:
1048 RFH_GEN_CFG_RB_CHUNK_SIZE_128
));
1049 /* Enable the relevant rx queues */
1050 iwl_write_prph_no_grab(trans
, RFH_RXF_RXQ_ACTIVE
, enabled
);
1052 iwl_trans_release_nic_access(trans
, &flags
);
1054 /* Set interrupt coalescing timer to default (2048 usecs) */
1055 iwl_write8(trans
, CSR_INT_COALESCING
, IWL_HOST_INT_TIMEOUT_DEF
);
1058 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq
*rxq
)
1060 lockdep_assert_held(&rxq
->lock
);
1062 INIT_LIST_HEAD(&rxq
->rx_free
);
1063 INIT_LIST_HEAD(&rxq
->rx_used
);
1064 rxq
->free_count
= 0;
1065 rxq
->used_count
= 0;
1068 int iwl_pcie_dummy_napi_poll(struct napi_struct
*napi
, int budget
)
1074 static int _iwl_pcie_rx_init(struct iwl_trans
*trans
)
1076 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1077 struct iwl_rxq
*def_rxq
;
1078 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
1079 int i
, err
, queue_size
, allocator_pool_size
, num_alloc
;
1081 if (!trans_pcie
->rxq
) {
1082 err
= iwl_pcie_rx_alloc(trans
);
1086 def_rxq
= trans_pcie
->rxq
;
1088 cancel_work_sync(&rba
->rx_alloc
);
1090 spin_lock(&rba
->lock
);
1091 atomic_set(&rba
->req_pending
, 0);
1092 atomic_set(&rba
->req_ready
, 0);
1093 INIT_LIST_HEAD(&rba
->rbd_allocated
);
1094 INIT_LIST_HEAD(&rba
->rbd_empty
);
1095 spin_unlock(&rba
->lock
);
1097 /* free all first - we might be reconfigured for a different size */
1098 iwl_pcie_free_rbs_pool(trans
);
1100 for (i
= 0; i
< RX_QUEUE_SIZE
; i
++)
1101 def_rxq
->queue
[i
] = NULL
;
1103 for (i
= 0; i
< trans
->num_rx_queues
; i
++) {
1104 struct iwl_rxq
*rxq
= &trans_pcie
->rxq
[i
];
1106 spin_lock(&rxq
->lock
);
1108 * Set read write pointer to reflect that we have processed
1109 * and used all buffers, but have not restocked the Rx queue
1110 * with fresh buffers
1114 rxq
->write_actual
= 0;
1115 memset(rxq
->rb_stts
, 0,
1116 (trans
->trans_cfg
->device_family
>=
1117 IWL_DEVICE_FAMILY_AX210
) ?
1118 sizeof(__le16
) : sizeof(struct iwl_rb_status
));
1120 iwl_pcie_rx_init_rxb_lists(rxq
);
1122 if (!rxq
->napi
.poll
)
1123 netif_napi_add(&trans_pcie
->napi_dev
, &rxq
->napi
,
1124 iwl_pcie_dummy_napi_poll
, 64);
1126 spin_unlock(&rxq
->lock
);
1129 /* move the pool to the default queue and allocator ownerships */
1130 queue_size
= trans
->trans_cfg
->mq_rx_supported
?
1131 trans_pcie
->num_rx_bufs
- 1 : RX_QUEUE_SIZE
;
1132 allocator_pool_size
= trans
->num_rx_queues
*
1133 (RX_CLAIM_REQ_ALLOC
- RX_POST_REQ_ALLOC
);
1134 num_alloc
= queue_size
+ allocator_pool_size
;
1136 for (i = 0; i < num_alloc; i++) {
1137 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1139 if (i < allocator_pool_size)
1140 list_add(&rxb->list, &rba->rbd_empty);
1142 list_add(&rxb->list, &def_rxq->rx_used);
1143 trans_pcie->global_table[i] = rxb;
1144 rxb->vid = (u16)(i + 1);
1145 rxb->invalid = true;
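/*
 * Illustrative note (not from the original source): pool entry 0 gets vid 1,
 * entry 1 gets vid 2, and so on, so vid 0 never appears in a descriptor;
 * this is what the "!vid" sanity check in iwl_pcie_get_rxb() relies on.
 */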
1148 iwl_pcie_rxq_alloc_rbs(trans
, GFP_KERNEL
, def_rxq
);
1153 int iwl_pcie_rx_init(struct iwl_trans
*trans
)
1155 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1156 int ret
= _iwl_pcie_rx_init(trans
);
1161 if (trans
->trans_cfg
->mq_rx_supported
)
1162 iwl_pcie_rx_mq_hw_init(trans
);
1164 iwl_pcie_rx_hw_init(trans
, trans_pcie
->rxq
);
1166 iwl_pcie_rxq_restock(trans
, trans_pcie
->rxq
);
1168 spin_lock(&trans_pcie
->rxq
->lock
);
1169 iwl_pcie_rxq_inc_wr_ptr(trans
, trans_pcie
->rxq
);
1170 spin_unlock(&trans_pcie
->rxq
->lock
);
1175 int iwl_pcie_gen2_rx_init(struct iwl_trans
*trans
)
1177 /* Set interrupt coalescing timer to default (2048 usecs) */
1178 iwl_write8(trans
, CSR_INT_COALESCING
, IWL_HOST_INT_TIMEOUT_DEF
);
1181 * We don't configure the RFH.
1182 * Restock will be done at alive, after firmware configured the RFH.
1184 return _iwl_pcie_rx_init(trans
);
1187 void iwl_pcie_rx_free(struct iwl_trans
*trans
)
1189 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1190 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
1192 size_t rb_stts_size
= trans
->trans_cfg
->device_family
>=
1193 IWL_DEVICE_FAMILY_AX210
?
1194 sizeof(__le16
) : sizeof(struct iwl_rb_status
);
1197 * if rxq is NULL, it means that nothing has been allocated,
1200 if (!trans_pcie
->rxq
) {
1201 IWL_DEBUG_INFO(trans
, "Free NULL rx context\n");
1205 cancel_work_sync(&rba
->rx_alloc
);
1207 iwl_pcie_free_rbs_pool(trans
);
1209 if (trans_pcie
->base_rb_stts
) {
1210 dma_free_coherent(trans
->dev
,
1211 rb_stts_size
* trans
->num_rx_queues
,
1212 trans_pcie
->base_rb_stts
,
1213 trans_pcie
->base_rb_stts_dma
);
1214 trans_pcie
->base_rb_stts
= NULL
;
1215 trans_pcie
->base_rb_stts_dma
= 0;
1218 for (i
= 0; i
< trans
->num_rx_queues
; i
++) {
1219 struct iwl_rxq
*rxq
= &trans_pcie
->rxq
[i
];
1221 iwl_pcie_free_rxq_dma(trans
, rxq
);
1224 netif_napi_del(&rxq
->napi
);
1226 kfree(trans_pcie
->rx_pool
);
1227 kfree(trans_pcie
->global_table
);
1228 kfree(trans_pcie
->rxq
);
1230 if (trans_pcie
->alloc_page
)
1231 __free_pages(trans_pcie
->alloc_page
, trans_pcie
->rx_page_order
);
1234 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq
*rxq
,
1235 struct iwl_rb_allocator
*rba
)
1237 spin_lock(&rba
->lock
);
1238 list_splice_tail_init(&rxq
->rx_used
, &rba
->rbd_empty
);
1239 spin_unlock(&rba
->lock
);
1243 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1245 * Called when a RBD can be reused. The RBD is transferred to the allocator.
1246 * When there are 2 empty RBDs - a request for allocation is posted
1248 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans
*trans
,
1249 struct iwl_rx_mem_buffer
*rxb
,
1250 struct iwl_rxq
*rxq
, bool emergency
)
1252 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1253 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
1255 /* Move the RBD to the used list, will be moved to allocator in batches
1256 * before claiming or posting a request*/
1257 list_add_tail(&rxb
->list
, &rxq
->rx_used
);
1259 if (unlikely(emergency
))
1262 /* Count the allocator owned RBDs */
1265 /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
1266 * issue a request to the allocator. Modulo RX_CLAIM_REQ_ALLOC is
1267 * used to handle the case where we failed to claim RX_CLAIM_REQ_ALLOC
1268 * buffers but still need to post another request afterwards.
1270 if ((rxq
->used_count
% RX_CLAIM_REQ_ALLOC
) == RX_POST_REQ_ALLOC
) {
1271 /* Move the 2 RBDs to the allocator ownership.
1272 Allocator has another 6 from pool for the request completion*/
1273 iwl_pcie_rx_move_to_allocator(rxq
, rba
);
1275 atomic_inc(&rba
->req_pending
);
1276 queue_work(rba
->alloc_wq
, &rba
->rx_alloc
);
1280 static void iwl_pcie_rx_handle_rb(struct iwl_trans
*trans
,
1281 struct iwl_rxq
*rxq
,
1282 struct iwl_rx_mem_buffer
*rxb
,
1286 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1287 struct iwl_txq
*txq
= trans_pcie
->txq
[trans_pcie
->cmd_queue
];
1288 bool page_stolen
= false;
1289 int max_len
= trans_pcie
->rx_buf_bytes
;
1295 dma_unmap_page(trans
->dev
, rxb
->page_dma
, max_len
, DMA_FROM_DEVICE
);
1297 while (offset
+ sizeof(u32
) + sizeof(struct iwl_cmd_header
) < max_len
) {
1298 struct iwl_rx_packet
*pkt
;
1301 int index
, cmd_index
, len
;
1302 struct iwl_rx_cmd_buffer rxcb
= {
1303 ._offset
= rxb
->offset
+ offset
,
1304 ._rx_page_order
= trans_pcie
->rx_page_order
,
1306 ._page_stolen
= false,
1307 .truesize
= max_len
,
1310 pkt
= rxb_addr(&rxcb
);
1312 if (pkt
->len_n_flags
== cpu_to_le32(FH_RSCSR_FRAME_INVALID
)) {
1314 "Q %d: RB end marker at offset %d\n",
1319 WARN((le32_to_cpu(pkt
->len_n_flags
) & FH_RSCSR_RXQ_MASK
) >>
1320 FH_RSCSR_RXQ_POS
!= rxq
->id
,
1321 "frame on invalid queue - is on %d and indicates %d\n",
1323 (le32_to_cpu(pkt
->len_n_flags
) & FH_RSCSR_RXQ_MASK
) >>
1327 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1329 iwl_get_cmd_string(trans
,
1330 iwl_cmd_id(pkt
->hdr
.cmd
,
1333 pkt
->hdr
.group_id
, pkt
->hdr
.cmd
,
1334 le16_to_cpu(pkt
->hdr
.sequence
));
1336 len
= iwl_rx_packet_len(pkt
);
1337 len
+= sizeof(u32
); /* account for status word */
1338 trace_iwlwifi_dev_rx(trans
->dev
, trans
, pkt
, len
);
1339 trace_iwlwifi_dev_rx_data(trans
->dev
, trans
, pkt
, len
);
1341 /* Reclaim a command buffer only if this packet is a response
1342 * to a (driver-originated) command.
1343 * If the packet (e.g. Rx frame) originated from uCode,
1344 * there is no command buffer to reclaim.
1345 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1346 * but apparently a few don't get set; catch them here. */
1347 reclaim
= !(pkt
->hdr
.sequence
& SEQ_RX_FRAME
);
1348 if (reclaim
&& !pkt
->hdr
.group_id
) {
1351 for (i
= 0; i
< trans_pcie
->n_no_reclaim_cmds
; i
++) {
1352 if (trans_pcie
->no_reclaim_cmds
[i
] ==
1360 sequence
= le16_to_cpu(pkt
->hdr
.sequence
);
1361 index
= SEQ_TO_INDEX(sequence
);
1362 cmd_index
= iwl_pcie_get_cmd_index(txq
, index
);
1364 if (rxq
->id
== trans_pcie
->def_rx_queue
)
1365 iwl_op_mode_rx(trans
->op_mode
, &rxq
->napi
,
1368 iwl_op_mode_rx_rss(trans
->op_mode
, &rxq
->napi
,
1372 kzfree(txq
->entries
[cmd_index
].free_buf
);
1373 txq
->entries
[cmd_index
].free_buf
= NULL
;
1377 * After here, we should always check rxcb._page_stolen,
1378 * if it is true then one of the handlers took the page.
1382 /* Invoke any callbacks, transfer the buffer to caller,
1383 * and fire off the (possibly) blocking
1384 * iwl_trans_send_cmd()
1385 * as we reclaim the driver command queue */
1386 if (!rxcb
._page_stolen
)
1387 iwl_pcie_hcmd_complete(trans
, &rxcb
);
1389 IWL_WARN(trans
, "Claim null rxb?\n");
1392 page_stolen
|= rxcb
._page_stolen
;
1393 if (trans
->trans_cfg
->device_family
>= IWL_DEVICE_FAMILY_AX210
)
1395 offset
+= ALIGN(len
, FH_RSCSR_FRAME_ALIGN
);
1398 /* page was stolen from us -- free our reference */
1400 __free_pages(rxb
->page
, trans_pcie
->rx_page_order
);
1404 /* Reuse the page if possible. For notification packets and
1405 * SKBs that fail to Rx correctly, add them back into the
1406 * rx_free list for reuse later. */
1407 if (rxb
->page
!= NULL
) {
1409 dma_map_page(trans
->dev
, rxb
->page
, rxb
->offset
,
1410 trans_pcie
->rx_buf_bytes
,
1412 if (dma_mapping_error(trans
->dev
, rxb
->page_dma
)) {
1414 * free the page(s) as well to not break
1415 * the invariant that the items on the used
1416 * list have no page(s)
1418 __free_pages(rxb
->page
, trans_pcie
->rx_page_order
);
1420 iwl_pcie_rx_reuse_rbd(trans
, rxb
, rxq
, emergency
);
1422 list_add_tail(&rxb
->list
, &rxq
->rx_free
);
1426 iwl_pcie_rx_reuse_rbd(trans
, rxb
, rxq
, emergency
);
1429 static struct iwl_rx_mem_buffer
*iwl_pcie_get_rxb(struct iwl_trans
*trans
,
1430 struct iwl_rxq
*rxq
, int i
)
1432 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1433 struct iwl_rx_mem_buffer
*rxb
;
1436 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc
) != 32);
1438 if (!trans
->trans_cfg
->mq_rx_supported
) {
1439 rxb
= rxq
->queue
[i
];
1440 rxq
->queue
[i
] = NULL
;
1444 if (trans
->trans_cfg
->device_family
>= IWL_DEVICE_FAMILY_AX210
)
1445 vid
= le16_to_cpu(rxq
->cd
[i
].rbid
);
1447 vid
= le32_to_cpu(rxq
->bd_32
[i
]) & 0x0FFF; /* 12-bit VID */
1449 if (!vid
|| vid
> RX_POOL_SIZE(trans_pcie
->num_rx_bufs
))
1452 rxb
= trans_pcie
->global_table
[vid
- 1];
1456 IWL_DEBUG_RX(trans
, "Got virtual RB ID %u\n", (u32
)rxb
->vid
);
1458 rxb
->invalid
= true;
1463 WARN(1, "Invalid rxb from HW %u\n", (u32
)vid
);
1464 iwl_force_nmi(trans
);
1469 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1471 static void iwl_pcie_rx_handle(struct iwl_trans
*trans
, int queue
)
1473 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1474 struct napi_struct
*napi
;
1475 struct iwl_rxq
*rxq
;
1476 u32 r
, i
, count
= 0;
1477 bool emergency
= false;
1479 if (WARN_ON_ONCE(!trans_pcie
->rxq
|| !trans_pcie
->rxq
[queue
].bd
))
1482 rxq
= &trans_pcie
->rxq
[queue
];
1485 spin_lock(&rxq
->lock
);
1486 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1487 * buffer that the driver may process (last buffer filled by ucode). */
1488 r
= le16_to_cpu(iwl_get_closed_rb_stts(trans
, rxq
)) & 0x0FFF;
1491 /* W/A 9000 device step A0 wrap-around bug */
1492 r
&= (rxq
->queue_size
- 1);
1494 /* Rx interrupt, but nothing sent from uCode */
1496 IWL_DEBUG_RX(trans
, "Q %d: HW = SW = %d\n", rxq
->id
, r
);
1499 struct iwl_rb_allocator
*rba
= &trans_pcie
->rba
;
1500 struct iwl_rx_mem_buffer
*rxb
;
1501 /* number of RBDs still waiting for page allocation */
1502 u32 rb_pending_alloc
=
1503 atomic_read(&trans_pcie
->rba
.req_pending
) *
1506 if (unlikely(rb_pending_alloc
>= rxq
->queue_size
/ 2 &&
1508 iwl_pcie_rx_move_to_allocator(rxq
, rba
);
1510 IWL_DEBUG_TPT(trans
,
1511 "RX path is in emergency. Pending allocations %d\n",
1515 IWL_DEBUG_RX(trans
, "Q %d: HW = %d, SW = %d\n", rxq
->id
, r
, i
);
1517 rxb
= iwl_pcie_get_rxb(trans
, rxq
, i
);
1521 iwl_pcie_rx_handle_rb(trans
, rxq
, rxb
, emergency
, i
);
1523 i
= (i
+ 1) & (rxq
->queue_size
- 1);
1526 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1527 * try to claim the pre-allocated buffers from the allocator.
1528 * If not ready - will try to reclaim next time.
1529 * There is no need to reschedule work - allocator exits only
1532 if (rxq
->used_count
>= RX_CLAIM_REQ_ALLOC
)
1533 iwl_pcie_rx_allocator_get(trans
, rxq
);
1535 if (rxq
->used_count
% RX_CLAIM_REQ_ALLOC
== 0 && !emergency
) {
1536 /* Add the remaining empty RBDs for allocator use */
1537 iwl_pcie_rx_move_to_allocator(rxq
, rba
);
1538 } else if (emergency
) {
1542 if (rb_pending_alloc
< rxq
->queue_size
/ 3) {
1543 IWL_DEBUG_TPT(trans
,
1544 "RX path exited emergency. Pending allocations %d\n",
1550 spin_unlock(&rxq
->lock
);
1551 iwl_pcie_rxq_alloc_rbs(trans
, GFP_ATOMIC
, rxq
);
1552 iwl_pcie_rxq_restock(trans
, rxq
);
1558 /* Backtrack one entry */
1560 /* update cr tail with the rxq read pointer */
1561 if (trans
->trans_cfg
->device_family
>= IWL_DEVICE_FAMILY_AX210
)
1562 *rxq
->cr_tail
= cpu_to_le16(r
);
1563 spin_unlock(&rxq
->lock
);
1566 * handle a case where in emergency there are some unallocated RBDs.
1567 * those RBDs are in the used list, but are not tracked by the queue's
1568 * used_count which counts allocator owned RBDs.
1569 * unallocated emergency RBDs must be allocated on exit, otherwise
1570 * when called again the function may not be in emergency mode and
1571 * they will be handed to the allocator with no tracking in the RBD
1572 * allocator counters, which will lead to them never being claimed back
1574 * by allocating them here, they are now in the queue free list, and
1575 * will be restocked by the next call of iwl_pcie_rxq_restock.
1577 if (unlikely(emergency
&& count
))
1578 iwl_pcie_rxq_alloc_rbs(trans
, GFP_ATOMIC
, rxq
);
1582 napi_gro_flush(napi
, false);
1584 if (napi
->rx_count
) {
1585 netif_receive_skb_list(&napi
->rx_list
);
1586 INIT_LIST_HEAD(&napi
->rx_list
);
1591 iwl_pcie_rxq_restock(trans
, rxq
);
1594 static struct iwl_trans_pcie
*iwl_pcie_get_trans_pcie(struct msix_entry
*entry
)
1596 u8 queue
= entry
->entry
;
1597 struct msix_entry
*entries
= entry
- queue
;
1599 return container_of(entries
, struct iwl_trans_pcie
, msix_entries
[0]);
1603 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1604 * This interrupt handler should be used with RSS queue only.
1606 irqreturn_t
iwl_pcie_irq_rx_msix_handler(int irq
, void *dev_id
)
1608 struct msix_entry
*entry
= dev_id
;
1609 struct iwl_trans_pcie
*trans_pcie
= iwl_pcie_get_trans_pcie(entry
);
1610 struct iwl_trans
*trans
= trans_pcie
->trans
;
1612 trace_iwlwifi_dev_irq_msix(trans
->dev
, entry
, false, 0, 0);
1614 if (WARN_ON(entry
->entry
>= trans
->num_rx_queues
))
1617 lock_map_acquire(&trans
->sync_cmd_lockdep_map
);
1620 iwl_pcie_rx_handle(trans
, entry
->entry
);
1623 iwl_pcie_clear_irq(trans
, entry
);
1625 lock_map_release(&trans
->sync_cmd_lockdep_map
);
1631 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1633 static void iwl_pcie_irq_handle_error(struct iwl_trans
*trans
)
1635 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1638 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1639 if (trans
->cfg
->internal_wimax_coex
&&
1640 !trans
->cfg
->apmg_not_supported
&&
1641 (!(iwl_read_prph(trans
, APMG_CLK_CTRL_REG
) &
1642 APMS_CLK_VAL_MRB_FUNC_MODE
) ||
1643 (iwl_read_prph(trans
, APMG_PS_CTRL_REG
) &
1644 APMG_PS_CTRL_VAL_RESET_REQ
))) {
1645 clear_bit(STATUS_SYNC_HCMD_ACTIVE
, &trans
->status
);
1646 iwl_op_mode_wimax_active(trans
->op_mode
);
1647 wake_up(&trans_pcie
->wait_command_queue
);
1651 for (i
= 0; i
< trans
->trans_cfg
->base_params
->num_of_queues
; i
++) {
1652 if (!trans_pcie
->txq
[i
])
1654 del_timer(&trans_pcie
->txq
[i
]->stuck_timer
);
1657 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1658 * before we wake up the command caller, to ensure a proper cleanup. */
1659 iwl_trans_fw_error(trans
);
1661 clear_bit(STATUS_SYNC_HCMD_ACTIVE
, &trans
->status
);
1662 wake_up(&trans_pcie
->wait_command_queue
);
1665 static u32
iwl_pcie_int_cause_non_ict(struct iwl_trans
*trans
)
1669 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans
)->irq_lock
);
1671 trace_iwlwifi_dev_irq(trans
->dev
);
1673 /* Discover which interrupts are active/pending */
1674 inta
= iwl_read32(trans
, CSR_INT
);
1676 /* the thread will service interrupts and re-enable them */
1680 /* a device (PCI-E) page is 4096 bytes long */
1681 #define ICT_SHIFT 12
1682 #define ICT_SIZE (1 << ICT_SHIFT)
1683 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
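/*
 * Illustrative arithmetic (not from the original source): ICT_SIZE is
 * 1 << 12 = 4096 bytes (one device page), so ICT_COUNT is 4096 / 4 = 1024
 * 32-bit table entries.
 */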
1685 /* Interrupt handler using the ICT table. With this handler the driver
1686 * stops using the INTA register to get the device's interrupts; reading
1687 * that register is expensive. Instead, the device writes its interrupts
1688 * into the ICT DRAM table, increments its index and then fires an
1689 * interrupt to the driver. The driver ORs all ICT table entries from the
1690 * current index up to the first entry with value 0; the result is the
* interrupt we need to service. The driver then sets the entries back to 0
* and re-enables the interrupt.
1693 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1695 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1700 trace_iwlwifi_dev_irq(trans
->dev
);
1702 /* Ignore interrupt if there's nothing in NIC to service.
1703 * This may be due to IRQ shared with another device,
1704 * or due to sporadic interrupts thrown from our NIC. */
1705 read
= le32_to_cpu(trans_pcie
->ict_tbl
[trans_pcie
->ict_index
]);
1706 trace_iwlwifi_dev_ict_read(trans
->dev
, trans_pcie
->ict_index
, read
);
1711 * Collect all entries up to the first 0, starting from ict_index;
1712 * note we already read at ict_index.
1716 IWL_DEBUG_ISR(trans
, "ICT index %d value 0x%08X\n",
1717 trans_pcie
->ict_index
, read
);
1718 trans_pcie
->ict_tbl
[trans_pcie
->ict_index
] = 0;
1719 trans_pcie
->ict_index
=
1720 ((trans_pcie
->ict_index
+ 1) & (ICT_COUNT
- 1));
1722 read
= le32_to_cpu(trans_pcie
->ict_tbl
[trans_pcie
->ict_index
]);
1723 trace_iwlwifi_dev_ict_read(trans
->dev
, trans_pcie
->ict_index
,
1727 /* We should not get this value, just ignore it. */
1728 if (val
== 0xffffffff)
1732 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1733 * (bit 15 before shifting it to 31) to clear when using interrupt
1734 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1735 * so we use them to decide on the real state of the Rx bit.
1736 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1741 inta = (0xff & val) | ((0xff00 & val) << 16);
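/*
 * Illustrative example (not from the original source): val = 0x8042 becomes
 * inta = 0x42 | (0x8000 << 16) = 0x80000042, i.e. bit 15 of the ICT value is
 * remapped to bit 31 (the Rx bit), as described in the workaround above.
 */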
1745 void iwl_pcie_handle_rfkill_irq(struct iwl_trans
*trans
)
1747 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1748 struct isr_statistics
*isr_stats
= &trans_pcie
->isr_stats
;
1749 bool hw_rfkill
, prev
, report
;
1751 mutex_lock(&trans_pcie
->mutex
);
1752 prev
= test_bit(STATUS_RFKILL_OPMODE
, &trans
->status
);
1753 hw_rfkill
= iwl_is_rfkill_set(trans
);
1755 set_bit(STATUS_RFKILL_OPMODE
, &trans
->status
);
1756 set_bit(STATUS_RFKILL_HW
, &trans
->status
);
1758 if (trans_pcie
->opmode_down
)
1761 report
= test_bit(STATUS_RFKILL_OPMODE
, &trans
->status
);
1763 IWL_WARN(trans
, "RF_KILL bit toggled to %s.\n",
1764 hw_rfkill
? "disable radio" : "enable radio");
1766 isr_stats
->rfkill
++;
1769 iwl_trans_pcie_rf_kill(trans
, report
);
1770 mutex_unlock(&trans_pcie
->mutex
);
1773 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE
,
1775 IWL_DEBUG_RF_KILL(trans
,
1776 "Rfkill while SYNC HCMD in flight\n");
1777 wake_up(&trans_pcie
->wait_command_queue
);
1779 clear_bit(STATUS_RFKILL_HW
, &trans
->status
);
1780 if (trans_pcie
->opmode_down
)
1781 clear_bit(STATUS_RFKILL_OPMODE
, &trans
->status
);
1785 irqreturn_t
iwl_pcie_irq_handler(int irq
, void *dev_id
)
1787 struct iwl_trans
*trans
= dev_id
;
1788 struct iwl_trans_pcie
*trans_pcie
= IWL_TRANS_GET_PCIE_TRANS(trans
);
1789 struct isr_statistics
*isr_stats
= &trans_pcie
->isr_stats
;
1793 lock_map_acquire(&trans
->sync_cmd_lockdep_map
);
1795 spin_lock(&trans_pcie
->irq_lock
);
1797 /* dram interrupt table not set yet,
1798 * use legacy interrupt.
1800 if (likely(trans_pcie
->use_ict
))
1801 inta
= iwl_pcie_int_cause_ict(trans
);
1803 inta
= iwl_pcie_int_cause_non_ict(trans
);
1805 if (iwl_have_debug_level(IWL_DL_ISR
)) {
1806 IWL_DEBUG_ISR(trans
,
1807 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1808 inta
, trans_pcie
->inta_mask
,
1809 iwl_read32(trans
, CSR_INT_MASK
),
1810 iwl_read32(trans
, CSR_FH_INT_STATUS
));
1811 if (inta
& (~trans_pcie
->inta_mask
))
1812 IWL_DEBUG_ISR(trans
,
1813 "We got a masked interrupt (0x%08x)\n",
1814 inta
& (~trans_pcie
->inta_mask
));
1817 inta
&= trans_pcie
->inta_mask
;
1820 * Ignore interrupt if there's nothing in NIC to service.
1821 * This may be due to IRQ shared with another device,
1822 * or due to sporadic interrupts thrown from our NIC.
1824 if (unlikely(!inta
)) {
1825 IWL_DEBUG_ISR(trans
, "Ignore interrupt, inta == 0\n");
1827 * Re-enable interrupts here since we don't
1828 * have anything to service
1830 if (test_bit(STATUS_INT_ENABLED
, &trans
->status
))
1831 _iwl_enable_interrupts(trans
);
1832 spin_unlock(&trans_pcie
->irq_lock
);
1833 lock_map_release(&trans
->sync_cmd_lockdep_map
);
1837 if (unlikely(inta
== 0xFFFFFFFF || (inta
& 0xFFFFFFF0) == 0xa5a5a5a0)) {
1839 * Hardware disappeared. It might have
1840 * already raised an interrupt.
1842 IWL_WARN(trans
, "HARDWARE GONE?? INTA == 0x%08x\n", inta
);
1843 spin_unlock(&trans_pcie
->irq_lock
);
1847 /* Ack/clear/reset pending uCode interrupts.
1848 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1850 /* There is a hardware bug in the interrupt mask function that some
1851 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1852 * they are disabled in the CSR_INT_MASK register. Furthermore the
1853 * ICT interrupt handling mechanism has another bug that might cause
1854 * these unmasked interrupts to fail to be detected. We work around the
1855 * hardware bugs here by ACKing all the possible interrupts so that
1856 * interrupt coalescing can still be achieved.
1858 iwl_write32(trans
, CSR_INT
, inta
| ~trans_pcie
->inta_mask
);
1860 if (iwl_have_debug_level(IWL_DL_ISR
))
1861 IWL_DEBUG_ISR(trans
, "inta 0x%08x, enabled 0x%08x\n",
1862 inta
, iwl_read32(trans
, CSR_INT_MASK
));
1864 spin_unlock(&trans_pcie
->irq_lock
);
1866 /* Now service all interrupt bits discovered above. */
1867 if (inta
& CSR_INT_BIT_HW_ERR
) {
1868 IWL_ERR(trans
, "Hardware error detected. Restarting.\n");
1870 /* Tell the device to stop sending interrupts */
1871 iwl_disable_interrupts(trans
);
1874 iwl_pcie_irq_handle_error(trans
);
1876 handled
|= CSR_INT_BIT_HW_ERR
;
1881 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1882 if (inta
& CSR_INT_BIT_SCD
) {
1883 IWL_DEBUG_ISR(trans
,
1884 "Scheduler finished to transmit the frame/frames.\n");
1888 /* Alive notification via Rx interrupt will do the real work */
1889 if (inta
& CSR_INT_BIT_ALIVE
) {
1890 IWL_DEBUG_ISR(trans
, "Alive interrupt\n");
1892 if (trans
->trans_cfg
->gen2
) {
1894 * We can restock, since firmware configured
1897 iwl_pcie_rxmq_restock(trans
, trans_pcie
->rxq
);
1900 handled
|= CSR_INT_BIT_ALIVE
;
1903 /* Safely ignore these bits for debug checks below */
1904 inta
&= ~(CSR_INT_BIT_SCD
| CSR_INT_BIT_ALIVE
);
1906 /* HW RF KILL switch toggled */
1907 if (inta
& CSR_INT_BIT_RF_KILL
) {
1908 iwl_pcie_handle_rfkill_irq(trans
);
1909 handled
|= CSR_INT_BIT_RF_KILL
;
1912 /* Chip got too hot and stopped itself */
1913 if (inta
& CSR_INT_BIT_CT_KILL
) {
1914 IWL_ERR(trans
, "Microcode CT kill error detected.\n");
1915 isr_stats
->ctkill
++;
1916 handled
|= CSR_INT_BIT_CT_KILL
;
1919 /* Error detected by uCode */
1920 if (inta
& CSR_INT_BIT_SW_ERR
) {
1921 IWL_ERR(trans
, "Microcode SW error detected. "
1922 " Restarting 0x%X.\n", inta
);
1924 iwl_pcie_irq_handle_error(trans
);
1925 handled
|= CSR_INT_BIT_SW_ERR
;
1928 /* uCode wakes up after power-down sleep */
1929 if (inta
& CSR_INT_BIT_WAKEUP
) {
1930 IWL_DEBUG_ISR(trans
, "Wakeup interrupt\n");
1931 iwl_pcie_rxq_check_wrptr(trans
);
1932 iwl_pcie_txq_check_wrptrs(trans
);
1934 isr_stats
->wakeup
++;
1936 handled
|= CSR_INT_BIT_WAKEUP
;
1939 /* All uCode command responses, including Tx command responses,
1940 * Rx "responses" (frame-received notification), and other
1941 * notifications from uCode come through here*/
1942 if (inta
& (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
|
1943 CSR_INT_BIT_RX_PERIODIC
)) {
1944 IWL_DEBUG_ISR(trans
, "Rx interrupt\n");
1945 if (inta
& (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
)) {
1946 handled
|= (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
);
1947 iwl_write32(trans
, CSR_FH_INT_STATUS
,
1948 CSR_FH_INT_RX_MASK
);
1950 if (inta
& CSR_INT_BIT_RX_PERIODIC
) {
1951 handled
|= CSR_INT_BIT_RX_PERIODIC
;
1953 CSR_INT
, CSR_INT_BIT_RX_PERIODIC
);
1955 /* Sending RX interrupt requires many steps to be done in the
1957 * 1- write interrupt to current index in ICT table.
1959 * 3- update RX shared data to indicate last write index.
1960 * 4- send interrupt.
1961 * This could lead to RX race, driver could receive RX interrupt
1962 * but the shared data changes does not reflect this;
1963 * periodic interrupt will detect any dangling Rx activity.
1966 /* Disable periodic interrupt; we use it as just a one-shot. */
1967 iwl_write8(trans
, CSR_INT_PERIODIC_REG
,
1968 CSR_INT_PERIODIC_DIS
);
1971 * Enable periodic interrupt in 8 msec only if we received
1972 * real RX interrupt (instead of just periodic int), to catch
1973 * any dangling Rx interrupt. If it was just the periodic
1974 * interrupt, there was no dangling Rx activity, and no need
1975 * to extend the periodic interrupt; one-shot is enough.
1977 if (inta
& (CSR_INT_BIT_FH_RX
| CSR_INT_BIT_SW_RX
))
1978 iwl_write8(trans
, CSR_INT_PERIODIC_REG
,
1979 CSR_INT_PERIODIC_ENA
);
1984 iwl_pcie_rx_handle(trans
, 0);
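	/*
	 * Legacy (non-MSI-X) operation has a single RB queue, so all
	 * received buffers are processed on queue index 0 above.
	 */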
	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}
	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}
	spin_lock(&trans_pcie->irq_lock);
	/* Only re-enable all interrupts if they were disabled by the irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	/* we are loading the firmware, enable FH_TX interrupt only */
	else if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
	/* Re-enable the ALIVE / Rx interrupt if it occurred */
	else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
		iwl_enable_fw_load_int_ctx_info(trans);
	spin_unlock(&trans_pcie->irq_lock);
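	/*
	 * The else-if ladder above deliberately re-enables the narrowest set
	 * of interrupts that still makes forward progress: everything when
	 * STATUS_INT_ENABLED is set, otherwise only the cause that was
	 * actually handled (firmware load, rfkill, or alive/Rx).
	 */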
out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
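/*
 * The ICT (Interrupt Cause Table) is a DMA-coherent table in host memory
 * into which the device writes interrupt cause words. When it is enabled,
 * the threaded interrupt handler can read pending causes from this table
 * instead of issuing an MMIO read of CSR_INT in the hot path.
 */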
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * Allocate the DRAM shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to ICT table interrupts.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_alloc_coherent(trans->dev, ICT_SIZE,
				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}
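/*
 * Note on iwl_pcie_alloc_ict(): the table must be naturally aligned to
 * ICT_SIZE because the device is only given the DMA address shifted right
 * by ICT_SHIFT (see iwl_pcie_reset_ict() below). dma_alloc_coherent()
 * provides that alignment; the WARN_ON merely documents the assumption.
 */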
/*
 * Device is going up: inform it that it will be using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
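/*
 * Note on iwl_pcie_reset_ict(): the whole setup runs with interrupts
 * disabled and under irq_lock; the table is zeroed and ict_index reset to
 * slot 0, and any causes that were pending while interrupts were off are
 * acked by writing inta_mask back to CSR_INT before re-enabling.
 */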
/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
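/*
 * use_ict only selects how the threaded handler discovers interrupt causes
 * elsewhere in this file (from the ICT table when set, directly from
 * CSR_INT otherwise); clearing it does not mask anything at the device,
 * which is why no register write is needed here.
 */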
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
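/*
 * The hard ISR above only masks the device and returns IRQ_WAKE_THREAD;
 * all actual cause handling happens in the threaded handler, which
 * re-enables the appropriate interrupts once it is done.
 */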
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
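/*
 * In MSI-X mode there is nothing to mask in the hard ISR: each vector's
 * causes are read and acked in iwl_pcie_irq_msix_handler() below, so the
 * ISR can immediately defer to the thread.
 */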
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause again.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);
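	/*
	 * FH causes cover the flow-handler (DMA/Rx-queue) sources, while HW
	 * causes cover device-level events (alive, wakeup, rfkill, errors);
	 * both are snapshotted and acked under irq_lock before handling.
	 */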
	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_fh, trans_pcie->fh_mask,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
		if (inta_fh & ~trans_pcie->fh_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta_fh & ~trans_pcie->fh_mask);
	}

	inta_fh &= trans_pcie->fh_mask;
	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		iwl_pcie_rx_handle(trans, 0);
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		iwl_pcie_rx_handle(trans, 1);
	}
	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}
	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		iwl_pcie_irq_handle_error(trans);
	}
	/* After checking FH register check HW register */
	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
			      inta_hw, trans_pcie->hw_mask,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
		if (inta_hw & ~trans_pcie->hw_mask)
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt 0x%08x\n",
				      inta_hw & ~trans_pcie->hw_mask);
	}

	inta_hw &= trans_pcie->hw_mask;
	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		if (trans->trans_cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		u32 sleep_notif =
			le32_to_cpu(trans_pcie->prph_info->sleep_notif);

		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
			IWL_DEBUG_ISR(trans,
				      "Sx interrupt: sleep notification = 0x%x\n",
				      sleep_notif);
			trans_pcie->sx_complete = true;
			wake_up(&trans_pcie->sx_waitq);
		} else {
			/* uCode wakes up after power-down sleep */
			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
			iwl_pcie_rxq_check_wrptr(trans);
			iwl_pcie_txq_check_wrptrs(trans);

			isr_stats->wakeup++;
		}
	}
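	/*
	 * sleep_notif is written by the firmware into the prph_info page
	 * shared with the driver (hence the le32_to_cpu()); it distinguishes
	 * a D3/Sx suspend-resume handshake from an ordinary wakeup after
	 * power-down sleep.
	 */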
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL)
			iwl_pcie_irq_handle_error(trans);
	}
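	/*
	 * IML here refers to the image loader used on newer devices; a
	 * failed transfer is treated like any other fatal firmware error.
	 */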
	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}
	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		trans->dbg.hw_error = true;
		iwl_pcie_irq_handle_error(trans);
	}
	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}