/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"

static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
					   u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
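
/*
 * Illustration (added commentary, not from the original file): with
 * mask 0x3 and value 0x1, __iwl_trans_pcie_set_bits_mask() clears bit 1
 * and sets bit 0 in a single read-modify-write, leaving every other bit
 * of the register untouched.  The set/clear helpers above are just the
 * two degenerate cases value == mask and value == 0.
 */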

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
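
/*
 * Note (added commentary): the APMG power-source selection above only
 * switches to auxiliary power (VAUX) when the caller asks for it *and*
 * the PCI device can signal PME from D3cold; otherwise the NIC stays on
 * main power (VMAIN).  The D3 suspend path below calls this with
 * vaux = true, the resume and nic_init paths with vaux = false.
 */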

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}
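
/*
 * Summary of the ASPM work-around above (added commentary):
 *
 *	BIOS/OS state	driver action	link transitions used
 *	L1 enabled	disable L0S	L0 -> L1 only
 *	L1 disabled	enable L0S	L0 -> L0S only
 *
 * pm_support ends up set only when the platform did not enable L0S,
 * i.e. when the link is allowed to be power-managed via L1 alone.
 */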
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		ret = iwl_pcie_set_hw_ready(trans);
		if (ret >= 0)
			return 0;

		usleep_range(200, 1000);
		t += 200;
	} while (t < 150000);

	return ret;
}
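
/*
 * Note (added commentary): the do/while loop above re-checks the
 * PREPARE handshake in 200 usec accounted steps for up to 150000 usec
 * (~150 ms).  Since usleep_range(200, 1000) may sleep longer than the
 * 200 usec that is counted, the real wall-clock bound is loose.
 */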

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = section->len;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size;

		copy_size = min_t(u32, chunk_sz, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
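
/*
 * Note (added commentary): a section is normally loaded through one
 * coherent bounce buffer of section->len bytes; only if that allocation
 * fails does the code above fall back to PAGE_SIZE chunks, trading a
 * single DMA handshake for one per page.  copy_size handles the short
 * final chunk in the chunked case.
 */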

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	iwl_pcie_set_pwr(trans, false);

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
		*status = IWL_D3_STATUS_RESET;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this resume.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	*status = IWL_D3_STATUS_ALIVE;
	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_disable_ict(trans);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		if (hw_rfkill)
			set_bit(STATUS_RFKILL, &trans_pcie->status);
		else
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
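
/*
 * Periphery (PRPH) registers are not memory-mapped directly (added
 * commentary): they are reached indirectly by writing the target
 * address to HBUS_TARG_PRPH_{R,W}ADDR and then transferring the data
 * through HBUS_TARG_PRPH_{R,W}DAT.  The address is limited to 20 bits;
 * the (3 << 24) bits appear to select a full 4-byte access.
 */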

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile
	 * SRAM, and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquiring the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
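
/*
 * Typical caller pattern for the pair above (illustrative sketch, not
 * part of the original file; iwl_trans_pcie_read_mem() below is a real
 * user):
 *
 *	unsigned long flags;
 *
 *	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
 *		u32 v = iwl_read32(trans, CSR_GP_CNTRL);
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 *
 * The grab keeps MAC_ACCESS_REQ set (and reg_lock held) for the whole
 * sequence, so the device cannot go back to sleep between accesses.
 */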

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
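
/*
 * Note (added commentary): both helpers above program the start address
 * once and then simply stream dwords through HBUS_TARG_MEM_{RDAT,WDAT};
 * this only works because the target-memory window auto-increments the
 * address after every dword access.
 */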

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FUNC(name)						\
static ssize_t iwl_dbgfs_##name##_read(struct file *file,		\
				       char __user *user_buf,		\
				       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)					\
static ssize_t iwl_dbgfs_##name##_write(struct file *file,		\
					const char __user *user_buf,	\
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
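
/*
 * Example expansion (added commentary): DEBUGFS_READ_FILE_OPS(rx_queue)
 * forward-declares iwl_dbgfs_rx_queue_read() and emits a read-only
 * struct file_operations named iwl_dbgfs_rx_queue_ops, which is exactly
 * the identifier that DEBUGFS_ADD_FILE(rx_queue, ...) pastes together
 * in iwl_trans_pcie_dbgfs_register() below.
 */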

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code:  0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 *
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
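
/*
 * Note (added commentary): trans_ops_pcie is the PCIe implementation of
 * the iwl_trans vtable.  The opmode never calls these functions
 * directly; it goes through the iwl_trans_*() wrappers, which dispatch
 * via trans->ops as set up in iwl_trans_pcie_alloc() below.
 */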

struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans)
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (iwl_pcie_alloc_ict(trans))
		goto out_free_cmd_pool;

	if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
				 iwl_pcie_irq_handler,
				 IRQF_SHARED, DRV_NAME, trans)) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}