/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"
static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
					   u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}
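/*
 * Illustrative note (not part of the original sources): the helper above
 * is a plain read-modify-write. For example, with mask = 0x0F and
 * value = 0x05, a register that reads back as 0xAB is first cleared to
 * 0xA0 (v &= ~mask) and then becomes 0xA5 (v |= value) -- bits outside
 * the mask are preserved.
 */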
static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}
static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
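/*
 * Illustrative note: iwl_set_bits_mask_prph() computes (old & mask) | bits,
 * so passing ~APMG_PS_CTRL_MSK_PWR_SRC as the mask clears the power-source
 * field before OR-ing in the VAUX/VMAIN selector, leaving the rest of
 * APMG_PS_CTRL_REG untouched. VAUX is only chosen when the device can
 * signal PME from D3cold, i.e. when auxiliary power is actually available.
 */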
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}
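/*
 * Illustrative note: iwl_poll_bit(trans, reg, bits, mask, timeout) polls
 * until (read32(reg) & mask) == (bits & mask) and, as used throughout this
 * file, returns a non-negative value (roughly the elapsed microseconds) on
 * success and a negative error on timeout -- hence the "ret < 0" checks.
 */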
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)
/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		t = 0;
	}

	IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter);

	return ret;
}
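/*
 * Illustrative note: the inner do/while above polls for up to ~150 ms in
 * 200 usec steps (t += 200 per pass), and the outer loop re-asserts the
 * PREPARE bit up to 10 times, so in the worst case the driver waits on
 * the order of 1.5 seconds for ME/AMT to release the NIC.
 */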
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = section->len;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size;

		copy_size = min_t(u32, chunk_sz, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
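/*
 * Illustrative note: sections are loaded in order until the first one
 * with no data, and only then is CSR_RESET cleared -- releasing the
 * embedded processor to start executing the freshly loaded image.
 */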
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	iwl_pcie_set_pwr(trans, false);

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
		*status = IWL_D3_STATUS_RESET;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this resume.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	*status = IWL_D3_STATUS_ALIVE;
	return 0;
}
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_disable_ict(trans);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		if (hw_rfkill)
			set_bit(STATUS_RFKILL, &trans_pcie->status);
		else
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
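/*
 * Illustrative note (an assumption, not stated in this file): periphery
 * registers are reached through an indirect address/data window; the low
 * 20 bits select the PRPH address and the (3 << 24) field appears to
 * request a full 4-byte access. Callers are expected to hold NIC access
 * (see iwl_trans_pcie_grab_nic_access below) around these operations.
 */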
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}
static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);

	return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
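/*
 * Illustrative note: the HBUS target-memory address registers auto-advance
 * after each access to the matching data register, which is why the loops
 * above write the target address once and then simply stream dwords.
 * Typical callers go through the iwl_trans_read_mem()/iwl_trans_write_mem()
 * wrappers rather than invoking these ops directly.
 */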
#define IWL_FLUSH_WAIT_MS	2000
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
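/*
 * Illustrative note: DEBUGFS_READ_WRITE_FILE_OPS(interrupt), used further
 * down, expands to roughly:
 *
 *	static const struct file_operations iwl_dbgfs_interrupt_ops = {
 *		.write = iwl_dbgfs_interrupt_write,
 *		.read = iwl_dbgfs_interrupt_read,
 *		.open = simple_open,
 *		.llseek = generic_file_llseek,
 *	};
 *
 * and DEBUGFS_ADD_FILE(interrupt, dir, ...) then registers that ops struct
 * under the given dentry with the trans pointer as private data.
 */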
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code:  0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
/*
 * Create the debugfs files and directories
 *
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
};
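/*
 * Illustrative note: this ops table is the only coupling between the
 * opmode and the PCIe transport; the iwl_trans_*() inline wrappers (e.g.
 * iwl_trans_start_hw(trans)) simply dispatch through trans->ops, so the
 * same opmode code can drive other transport back-ends.
 */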
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool) {
		err = -ENOMEM;
		goto out_pci_disable_msi;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (iwl_pcie_alloc_ict(trans))
		goto out_free_cmd_pool;

	err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return ERR_PTR(err);
}