/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
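
/*
 * Illustration of the macro above (assumed value, for this example only):
 * a device whose TFDs hold 25 TBs (x->max_tbs == 25) would get
 * IWL_PCIE_MAX_FRAGS(x) == 22 TBs usable for SKB frags, since 2 TBs go
 * to the TX command and header and 1 more may be needed for the SKB's
 * head.
 */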

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
        u16 vid;
        bool invalid;
        struct list_head list;
        u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
        u32 hw;
        u32 sw;
        u32 err_code;
        u32 sch;
        u32 alive;
        u32 rfkill;
        u32 ctkill;
        u32 wakeup;
        u32 rx;
        u32 tx;
        u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
        __le16 rbid;
        __le16 reserved[3];
        __le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
        __le32 reserved1;
        __le16 rbid;
        u8 flags;
        u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @write_actual: last write index actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's data
 * @queue: actual rx queue. Not used for multi-rx queue.
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
        int id;
        void *bd;
        dma_addr_t bd_dma;
        union {
                void *used_bd;
                __le32 *bd_32;
                struct iwl_rx_completion_desc *cd;
        };
        dma_addr_t used_bd_dma;
        __le16 *tr_tail;
        dma_addr_t tr_tail_dma;
        __le16 *cr_tail;
        dma_addr_t cr_tail_dma;
        u32 read;
        u32 write;
        u32 free_count;
        u32 used_count;
        u32 write_actual;
        u32 queue_size;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
        void *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
        struct napi_struct napi;
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator had not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
        atomic_t req_pending;
        atomic_t req_ready;
        struct list_head rbd_allocated;
        struct list_head rbd_empty;
        spinlock_t lock;
        struct workqueue_struct *alloc_wq;
        struct work_struct rx_alloc;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
        return ++index &
                (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
                __le16 *rb_stts = rxq->rb_stts;

                return READ_ONCE(*rb_stts);
        } else {
                struct iwl_rb_status *rb_stts = rxq->rb_stts;

                return READ_ONCE(rb_stts->closed_rb_num);
        }
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @trans: the transport (for configuration data)
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
        return --index &
                (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
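
/*
 * Illustration of the wrap arithmetic above (a sketch, assuming a
 * power-of-two queue size of 256): incrementing index 255 yields
 * 256 & 255 == 0, and decrementing index 0 yields -1 & 255 == 255,
 * so both helpers stay inside [0, max_tfd_queue_size - 1] without
 * any branches.
 */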

struct iwl_cmd_meta {
        /* only for SYNC commands, iff the reply skb is wanted */
        struct iwl_host_cmd *source;
        u32 flags;
        u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes at offset 12, the size is 20 now.
 * If we make it bigger, then allocations will be bigger and copies slower,
 * so that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
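
/*
 * Worked example of the sizes above: the PN occupies bytes 12..19, so
 * IWL_FIRST_TB_SIZE is 20, and ALIGN(20, 64) rounds the per-entry size
 * up to 64 bytes, so each buffer in a first-TB array starts 64-byte
 * aligned (assuming the array base itself is 64-byte aligned).
 */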

struct iwl_pcie_txq_entry {
        void *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
        struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
        u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
 * (barring future HW changes). For the normal TX queues, n_window, which is
 * the size of the software queue data, is also 256; however, for the
 * command queue, n_window is only 32 since we don't need so many commands
 * pending. Since the HW still uses 256 BDs for DMA though,
 * TFD_QUEUE_SIZE_MAX stays 256. This means that we end up with
 * the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0      | ...     | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
        void *tfds;
        struct iwl_pcie_first_tb_buf *first_tb_bufs;
        dma_addr_t first_tb_dma;
        struct iwl_pcie_txq_entry *entries;
        spinlock_t lock;
        unsigned long frozen_expiry_remainder;
        struct timer_list stuck_timer;
        struct iwl_trans_pcie *trans_pcie;
        bool need_update;
        bool frozen;
        bool ampdu;
        int block;
        unsigned long wd_timeout;
        struct sk_buff_head overflow_q;
        struct iwl_dma_ptr bc_tbl;

        int write_ptr;
        int read_ptr;
        dma_addr_t dma_addr;
        int n_window;
        u32 id;
        int low_mark;
        int high_mark;

        bool overflow_tx;
};
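
/*
 * A minimal sketch of the HW/SW window mapping described above (assuming
 * the usual command-queue window of 32): a HW index is reduced to its SW
 * slot by masking with (n_window - 1), which is exactly what
 * iwl_pcie_get_cmd_index() below does.  E.g. HW index 130 & 31 == 2, so
 * HW entries 2, 34, 66, ..., 226 all share SW slot 2.
 */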

static inline dma_addr_t
iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
        return txq->first_tb_dma +
               sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

struct iwl_tso_hdr_page {
        struct page *page;
        u8 *pos;
};

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 *	debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
        IWL_FW_MON_DBGFS_STATE_CLOSED,
        IWL_FW_MON_DBGFS_STATE_OPEN,
        IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
        IWL_SHARED_IRQ_NON_RX		= BIT(0),
        IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
        IWL_IMAGE_RESP_DEF		= 0,
        IWL_IMAGE_RESP_SUCCESS		= 1,
        IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
        u32 prev_wr_ptr;
        u32 prev_wrap_cnt;
        u8  state;
        /* Used to sync monitor_data debugfs file with driver unload flow */
        struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of the image loader
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frames
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_data: fw continuous recording data
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
        struct iwl_rxq *rxq;
        struct iwl_rx_mem_buffer *rx_pool;
        struct iwl_rx_mem_buffer **global_table;
        struct iwl_rb_allocator rba;
        union {
                struct iwl_context_info *ctxt_info;
                struct iwl_context_info_gen3 *ctxt_info_gen3;
        };
        struct iwl_prph_info *prph_info;
        struct iwl_prph_scratch *prph_scratch;
        dma_addr_t ctxt_info_dma_addr;
        dma_addr_t prph_info_dma_addr;
        dma_addr_t prph_scratch_dma_addr;
        dma_addr_t iml_dma_addr;
        struct iwl_trans *trans;

        struct net_device napi_dev;

        struct __percpu iwl_tso_hdr_page *tso_hdr_page;

        /* INT ICT Table */
        __le32 *ict_tbl;
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
        bool is_down, opmode_down;
        s8 debug_rfkill;
        struct isr_statistics isr_stats;

        spinlock_t irq_lock;
        struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        struct iwl_dma_ptr kw;

        struct iwl_txq *txq_memory;
        struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
        unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
        unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];

        /* PCI bus related data */
        struct pci_dev *pci_dev;
        void __iomem *hw_base;

        bool ucode_write_complete;
        bool sx_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;
        wait_queue_head_t sx_waitq;

        u8 page_offs, dev_cmd_offs;

        u8 cmd_queue;
        u8 def_rx_queue;
        u8 cmd_fifo;
        unsigned int cmd_q_wdg_timeout;
        u8 n_no_reclaim_cmds;
        u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
        u8 max_tbs;
        u16 tfd_size;
        u16 num_rx_bufs;

        enum iwl_amsdu_size rx_buf_size;
        bool bc_table_dword;
        bool scd_set_active;
        bool sw_csum_tx;
        bool pcie_dbg_dumped_once;
        u32 rx_page_order;
        u32 rx_buf_bytes;
        u32 supported_dma_mask;

        /* allocator lock for the two values below */
        spinlock_t alloc_page_lock;
        struct page *alloc_page;
        u32 alloc_page_used;

        /* protect hw register */
        spinlock_t reg_lock;
        bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
        struct cont_rec fw_mon_data;
#endif

        struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
        bool msix_enabled;
        u8 shared_vec_mask;
        u32 alloc_vecs;
        u32 def_irq;
        u32 fh_init_mask;
        u32 hw_init_mask;
        u32 fh_mask;
        u32 hw_mask;
        cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
        u16 tx_cmd_queue_size;
        bool in_rescan;

        void *base_rb_stts;
        dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
        return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
        /*
         * Before sending the interrupt the HW disables it to prevent
         * a nested interrupt. This is done by writing 1 to the corresponding
         * bit in the mask register. After handling the interrupt, it should be
         * re-enabled by clearing this bit. This register is defined as
         * write 1 clear (W1C), meaning that the bit is cleared by writing
         * 1 to it.
         */
        iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
        return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
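
/*
 * A sketch of the round trip between the two helpers above (illustrative
 * only): since the iwl_trans_pcie data is embedded at trans_specific
 * inside struct iwl_trans, converting back and forth is the identity:
 *
 *	struct iwl_trans_pcie *p = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	WARN_ON(iwl_trans_pcie_get_trans(p) != trans);
 */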

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
{
        return upper_32_bits(phys) != upper_32_bits(phys + len);
}
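
/*
 * Worked example for the check above (values chosen for illustration):
 * a buffer at phys 0xFFFFF000 with len 0x2000 reaches 0x100001000, so
 * upper_32_bits() goes from 0 to 1 and the helper returns true; the
 * same buffer at 0xFFFFD000 stays below 4 GiB and returns false.
 */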

int iwl_pcie_tx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
			  int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
				  struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
				  struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
{
        if (trans->trans_cfg->use_tfh) {
                struct iwl_tfh_tfd *tfd = _tfd;
                struct iwl_tfh_tb *tb = &tfd->tbs[idx];

                return le16_to_cpu(tb->tb_len);
        } else {
                struct iwl_tfd *tfd = _tfd;
                struct iwl_tfd_tb *tb = &tfd->tbs[idx];

                return le16_to_cpu(tb->hi_n_len) >> 4;
        }
}
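
/*
 * Note on the legacy (non-TFH) branch above: hi_n_len packs the highest
 * 4 bits of the TB address in its low nibble and the 12-bit length in
 * the upper 12 bits, hence the ">> 4".  For example (illustrative
 * values), hi_n_len == 0x0145 means len == 0x14 (20 bytes) and address
 * bits 32..35 == 0x5.
 */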

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        clear_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                /* disable interrupts from uCode/NIC to host */
                iwl_write32(trans, CSR_INT_MASK, 0x00000000);

                /* acknowledge/clear/reset any interrupts still pending
                 * from uCode or flow handler (Rx/Tx DMA) */
                iwl_write32(trans, CSR_INT, 0xffffffff);
                iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
        } else {
                /* disable all the interrupts we might use */
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
        }
        IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
        int i = 0;

        while (start < fw->num_sec &&
               fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
               fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
                start++;
                i++;
        }

        return i;
}
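
/*
 * Usage sketch (hypothetical image layout): for a fw->sec[] array of
 * { code, data, CPU1_CPU2_SEPARATOR_SECTION, code },
 * iwl_pcie_get_num_sections(fw, 0) returns 2 -- it counts sections
 * until it hits a separator (or the end of the image), so the caller
 * can load each CPU's sections separately.
 */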

static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					       const struct fw_desc *sec,
					       struct iwl_dram_data *dram)
{
        dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
        if (!dram->block)
                return -ENOMEM;

        dram->size = sec->len;
        memcpy(dram->block, sec->data, sec->len);

        return 0;
}
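
/*
 * Pairing sketch (illustrative, not a driver path): a caller that loads
 * one section with the helper above frees it with dma_free_coherent(),
 * using the size and addresses the helper filled in:
 *
 *	struct iwl_dram_data dram = {};
 *
 *	if (iwl_pcie_ctxt_info_alloc_dma(trans, sec, &dram))
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(trans->dev, dram.size, dram.block, dram.physical);
 */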

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
        struct iwl_self_init_dram *dram = &trans->init_dram;
        int i;

        if (!dram->fw) {
                WARN_ON(dram->fw_cnt);
                return;
        }

        for (i = 0; i < dram->fw_cnt; i++)
                dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

        kfree(dram->fw);
        dram->fw_cnt = 0;
        dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
        set_bit(STATUS_INT_ENABLED, &trans->status);
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                /*
                 * fh/hw_mask keep all the unmasked causes.
                 * Unlike MSI, with MSI-X a cause is enabled when its bit
                 * is unset.
                 */
                trans_pcie->hw_mask = trans_pcie->hw_init_mask;
                trans_pcie->fh_mask = trans_pcie->fh_init_mask;
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
        }
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock(&trans_pcie->irq_lock);
        _iwl_enable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
        trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
        trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
                iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
        }
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

        if (!trans_pcie->msix_enabled) {
                /*
                 * When we receive the ALIVE interrupt, the ISR will call
                 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
                 * interrupt (which is not really needed anymore) but also the
                 * RX interrupt which will allow us to receive the ALIVE
                 * notification (which is Rx) and continue the flow.
                 */
                trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
                /*
                 * Leave all the FH causes enabled to get the ALIVE
                 * notification.
                 */
                iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
        }
}

static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
        return index & (q->n_window - 1);
}

static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq, int idx)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans->trans_cfg->use_tfh)
                idx = iwl_pcie_get_cmd_index(txq, idx);

        return txq->tfds + trans_pcie->tfd_size * idx;
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
        if (trans_p->shared_vec_mask) {
                int vec = trans_p->shared_vec_mask &
                          IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

                if (i == 0)
                        return DRV_NAME ": shared IRQ";

                return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
        }
        if (i == 0)
                return DRV_NAME ": default queue";

        if (i == trans_p->alloc_vecs - 1)
                return DRV_NAME ": exception";

        return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
        if (!trans_pcie->msix_enabled) {
                trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
                iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
        } else {
                iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
                iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
        }

        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
                /*
                 * On 9000-series devices this bit isn't enabled by default,
                 * so when we power down the device we need to set the bit to
                 * allow it to wake up the PCI-E bus for RF-kill interrupts.
                 */
                iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
        }
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
                IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
                iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
        }
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
                iwl_op_mode_queue_full(trans->op_mode, txq->id);
                IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
        } else {
                IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
        }
}

static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
        int index = iwl_pcie_get_cmd_index(q, i);
        int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
        int w = iwl_pcie_get_cmd_index(q, q->write_ptr);

        return w >= r ?
                (index >= r && index < w) :
                !(index < r && index >= w);
}
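
/*
 * Worked example for iwl_queue_used() (illustrative indices): with a
 * wrapped queue where r == 250 and w == 4, entries 250..255 and 0..3
 * are in use.  Index 252 gives !(252 < 250 && 252 >= 4) == true, while
 * index 10 gives !(10 < 250 && 10 >= 4) == false, as expected.
 */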

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->debug_rfkill == 1)
                return true;

        return !(iwl_read32(trans, CSR_GP_CNTRL) &
                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
        u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
        WARN_ON_ONCE(value & ~mask);
#endif

        v = iwl_read32(trans, reg);
        v &= ~mask;
        v |= value;
        iwl_write32(trans, reg, v);
}
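
/*
 * Read-modify-write example for the helper above (illustrative bit
 * patterns): with reg == 0b1010, mask == 0b0110 and value == 0b0100,
 * the masked bits are cleared first (giving 0b1000) and then value is
 * OR'd in, leaving 0b1100 -- bits outside the mask are preserved.
 */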

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
        return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue);
int iwl_pcie_txq_alloc(struct iwl_trans *trans,
		       struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb);
#ifdef CONFIG_INET
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
				      struct sk_buff *skb);
#endif

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
				   struct iwl_txq *txq);
int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
				     struct iwl_txq **intxq, int size,
				     unsigned int timeout);
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd);
int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
				 __le16 flags, u8 sta_id, u8 tid,
				 int cmd_id, int size,
				 unsigned int timeout);
void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);

#endif /* __iwl_trans_int_pcie_h__ */