/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <chipcommon.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <brcm_hw_ids.h>

#include "debug.h"
#include "bus.h"
#include "commonring.h"
#include "msgbuf.h"
#include "pcie.h"
#include "firmware.h"
#include "chip.h"
#include "core.h"
#include "common.h"
enum brcmf_pcie_state {
	BRCMFMAC_PCIE_STATE_DOWN,
	BRCMFMAC_PCIE_STATE_UP
};
BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
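
/* Each BRCMF_FW_DEF() above declares a firmware basename; the table below
 * pairs a chip id with a 32-bit revision bitmask, where bit N set means chip
 * revision N uses that image. The same chip id can therefore map to different
 * images per revision (e.g. 4350 revisions 0-7 use the "c2" build, later
 * revisions the plain 4350 build).
 */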
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
	BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
	BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
	BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
	BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
	BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
	BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
	BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
	BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
	BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
	BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
	BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */

#define BRCMF_PCIE_REG_MAP_SIZE			(32 * 1024)

/* backplane address space accessed by BAR0 */
#define BRCMF_PCIE_BAR0_WINDOW			0x80
#define BRCMF_PCIE_BAR0_REG_SIZE		0x1000
#define BRCMF_PCIE_BAR0_WRAPPERBASE		0x70

#define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET	0x1000
#define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET	0x2000

#define BRCMF_PCIE_ARMCR4REG_BANKIDX		0x40
#define BRCMF_PCIE_ARMCR4REG_BANKPDA		0x4C

#define BRCMF_PCIE_REG_INTSTATUS		0x90
#define BRCMF_PCIE_REG_INTMASK			0x94
#define BRCMF_PCIE_REG_SBMBX			0x98

#define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC

#define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
#define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
#define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
#define BRCMF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define BRCMF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0	0x140
#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1	0x144

#define BRCMF_PCIE2_INTA			0x01
#define BRCMF_PCIE2_INTB			0x02

#define BRCMF_PCIE_INT_0			0x01
#define BRCMF_PCIE_INT_1			0x02
#define BRCMF_PCIE_INT_DEF			(BRCMF_PCIE_INT_0 | \
						 BRCMF_PCIE_INT_1)

#define BRCMF_PCIE_MB_INT_FN0_0			0x0100
#define BRCMF_PCIE_MB_INT_FN0_1			0x0200
#define BRCMF_PCIE_MB_INT_D2H0_DB0		0x10000
#define BRCMF_PCIE_MB_INT_D2H0_DB1		0x20000
#define BRCMF_PCIE_MB_INT_D2H1_DB0		0x40000
#define BRCMF_PCIE_MB_INT_D2H1_DB1		0x80000
#define BRCMF_PCIE_MB_INT_D2H2_DB0		0x100000
#define BRCMF_PCIE_MB_INT_D2H2_DB1		0x200000
#define BRCMF_PCIE_MB_INT_D2H3_DB0		0x400000
#define BRCMF_PCIE_MB_INT_D2H3_DB1		0x800000

#define BRCMF_PCIE_MB_INT_D2H_DB		(BRCMF_PCIE_MB_INT_D2H0_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
						 BRCMF_PCIE_MB_INT_D2H3_DB1)

#define BRCMF_PCIE_SHARED_VERSION_7		7
#define BRCMF_PCIE_MIN_SHARED_VERSION		5
#define BRCMF_PCIE_MAX_SHARED_VERSION		BRCMF_PCIE_SHARED_VERSION_7
#define BRCMF_PCIE_SHARED_VERSION_MASK		0x00FF
#define BRCMF_PCIE_SHARED_DMA_INDEX		0x10000
#define BRCMF_PCIE_SHARED_DMA_2B_IDX		0x100000
#define BRCMF_PCIE_SHARED_HOSTRDY_DB1		0x10000000

#define BRCMF_PCIE_FLAGS_HTOD_SPLIT		0x4000
#define BRCMF_PCIE_FLAGS_DTOH_SPLIT		0x8000
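
/* The SHARED_* and FLAGS_* bits above are advertised by the firmware in the
 * flags word of the shared area (see brcmf_pcie_init_share_ram_info()); they
 * tell the host which msgbuf protocol version the firmware speaks and whether
 * the ring read/write indices live in host memory (DMA index feature, 2 or 4
 * bytes per index) or in device TCM.
 */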
#define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define BRCMF_SHARED_RING_BASE_OFFSET		52
#define BRCMF_SHARED_RX_DATAOFFSET_OFFSET	36
#define BRCMF_SHARED_CONSOLE_ADDR_OFFSET	20
#define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define BRCMF_SHARED_RING_INFO_ADDR_OFFSET	48
#define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68

#define BRCMF_RING_H2D_RING_COUNT_OFFSET	0
#define BRCMF_RING_D2H_RING_COUNT_OFFSET	1
#define BRCMF_RING_H2D_RING_MEM_OFFSET		4
#define BRCMF_RING_H2D_RING_STATE_OFFSET	8

#define BRCMF_RING_MEM_BASE_ADDR_OFFSET		8
#define BRCMF_RING_MAX_ITEM_OFFSET		4
#define BRCMF_RING_LEN_ITEMS_OFFSET		6
#define BRCMF_RING_MEM_SZ			16
#define BRCMF_RING_STATE_SZ			8

#define BRCMF_DEF_MAX_RXBUFPOST			255

#define BRCMF_CONSOLE_BUFADDR_OFFSET		8
#define BRCMF_CONSOLE_BUFSIZE_OFFSET		12
#define BRCMF_CONSOLE_WRITEIDX_OFFSET		16

#define BRCMF_DMA_D2H_SCRATCH_BUF_LEN		8
#define BRCMF_DMA_D2H_RINGUPD_BUF_LEN		1024

#define BRCMF_D2H_DEV_D3_ACK			0x00000001
#define BRCMF_D2H_DEV_DS_ENTER_REQ		0x00000002
#define BRCMF_D2H_DEV_DS_EXIT_NOTE		0x00000004
#define BRCMF_D2H_DEV_FWHALT			0x10000000

#define BRCMF_H2D_HOST_D3_INFORM		0x00000001
#define BRCMF_H2D_HOST_DS_ACK			0x00000002
#define BRCMF_H2D_HOST_D0_INFORM_IN_USE		0x00000008
#define BRCMF_H2D_HOST_D0_INFORM		0x00000010

#define BRCMF_PCIE_MBDATA_TIMEOUT		msecs_to_jiffies(2000)

#define BRCMF_PCIE_CFGREG_STATUS_CMD		0x4
#define BRCMF_PCIE_CFGREG_PM_CSR		0x4C
#define BRCMF_PCIE_CFGREG_MSI_CAP		0x58
#define BRCMF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define BRCMF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define BRCMF_PCIE_CFGREG_MSI_DATA		0x64
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define BRCMF_PCIE_CFGREG_RBAR_CTRL		0x228
#define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1	0x248
#define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3

/* Magic number at a magic location to find RAM size */
#define BRCMF_RAMSIZE_MAGIC			0x534d4152	/* SMAR */
#define BRCMF_RAMSIZE_OFFSET			0x6c
struct brcmf_pcie_console {
	u32 base_addr;
	u32 buf_addr;
	u32 bufsize;
	u32 read_idx;
	u8 log_str[256];
	u8 log_idx;
};

struct brcmf_pcie_shared_info {
	u32 tcm_base_address;
	u32 flags;
	struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
	struct brcmf_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;
	u32 dtoh_mb_data_addr;
	u32 ring_info_addr;
	struct brcmf_pcie_console console;
	void *scratch;
	dma_addr_t scratch_dmahandle;
	void *ringupd;
	dma_addr_t ringupd_dmahandle;
	u8 version;
};
struct brcmf_pcie_core_info {
	u32 base;
	u32 wrapbase;
};

struct brcmf_pciedev_info {
	enum brcmf_pcie_state state;
	bool in_irq;
	struct pci_dev *pdev;
	char fw_name[BRCMF_FW_NAME_LEN];
	char nvram_name[BRCMF_FW_NAME_LEN];
	void __iomem *regs;
	void __iomem *tcm;
	u32 ram_base;
	u32 ram_size;
	struct brcmf_chip *ci;
	u32 coreid;
	struct brcmf_pcie_shared_info shared;
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
	u8 dma_idx_sz;
	void *idxbuf;
	u32 idxbuf_sz;
	dma_addr_t idxbuf_dmahandle;
	u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
	void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
			  u16 value);
	struct brcmf_mp_device *settings;
};
struct brcmf_pcie_ringbuf {
	struct brcmf_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;
	u32 r_idx_addr;
	struct brcmf_pciedev_info *devinfo;
	u8 id;
};
/**
 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
 *
 * @ringmem: dongle memory pointer to ring memory location
 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
 * @max_flowrings: maximum number of tx flow rings supported.
 * @max_submissionrings: maximum number of submission rings (h2d) supported.
 * @max_completionrings: maximum number of completion rings (d2h) supported.
 */
struct brcmf_pcie_dhi_ringinfo {
	__le32			ringmem;
	__le32			h2d_w_idx_ptr;
	__le32			h2d_r_idx_ptr;
	__le32			d2h_w_idx_ptr;
	__le32			d2h_r_idx_ptr;
	struct msgbuf_buf_addr	h2d_w_idx_hostaddr;
	struct msgbuf_buf_addr	h2d_r_idx_hostaddr;
	struct msgbuf_buf_addr	d2h_w_idx_hostaddr;
	struct msgbuf_buf_addr	d2h_r_idx_hostaddr;
	__le16			max_flowrings;
	__le16			max_submissionrings;
	__le16			max_completionrings;
};
static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
	BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
};

static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
};

static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
	BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
	BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
	BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
	BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
{
	void __iomem *address = devinfo->regs + reg_offset;

	return (ioread32(address));
}


static void
brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
		       u32 value)
{
	void __iomem *address = devinfo->regs + reg_offset;

	iowrite32(value, address);
}


static u8
brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	return (ioread8(address));
}


static u16
brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	return (ioread16(address));
}


static void
brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		       u16 value)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	iowrite16(value, address);
}


static u16
brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	u16 *address = devinfo->idxbuf + mem_offset;

	return (*(address));
}


static void
brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		     u16 value)
{
	u16 *address = devinfo->idxbuf + mem_offset;

	*(address) = value;
}


static u32
brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	return (ioread32(address));
}


static void
brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		       u32 value)
{
	void __iomem *address = devinfo->tcm + mem_offset;

	iowrite32(value, address);
}


static u32
brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;

	return (ioread32(addr));
}


static void
brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
		       u32 value)
{
	void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;

	iowrite32(value, addr);
}
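
/* Bulk TCM copy helpers: the two functions below pick the widest MMIO access
 * (32-bit, 16-bit or byte) that the alignment of both buffers and the
 * transfer length allow, since the device memory window must be accessed
 * with properly aligned reads and writes.
 */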
static void
brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
			  void *srcaddr, u32 len)
{
	void __iomem *address = devinfo->tcm + mem_offset;
	__le32 *src32;
	__le16 *src16;
	u8 *src8;

	if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
		if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
			src8 = (u8 *)srcaddr;
			while (len) {
				iowrite8(*src8, address);
				address++;
				src8++;
				len--;
			}
		} else {
			len = len / 2;
			src16 = (__le16 *)srcaddr;
			while (len) {
				iowrite16(le16_to_cpu(*src16), address);
				address += 2;
				src16++;
				len--;
			}
		}
	} else {
		len = len / 4;
		src32 = (__le32 *)srcaddr;
		while (len) {
			iowrite32(le32_to_cpu(*src32), address);
			address += 4;
			src32++;
			len--;
		}
	}
}
static void
brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
			  void *dstaddr, u32 len)
{
	void __iomem *address = devinfo->tcm + mem_offset;
	__le32 *dst32;
	__le16 *dst16;
	u8 *dst8;

	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
			dst8 = (u8 *)dstaddr;
			while (len) {
				*dst8 = ioread8(address);
				address++;
				dst8++;
				len--;
			}
		} else {
			len = len / 2;
			dst16 = (__le16 *)dstaddr;
			while (len) {
				*dst16 = cpu_to_le16(ioread16(address));
				address += 2;
				dst16++;
				len--;
			}
		}
	} else {
		len = len / 4;
		dst32 = (__le32 *)dstaddr;
		while (len) {
			*dst32 = cpu_to_le32(ioread32(address));
			address += 4;
			dst32++;
			len--;
		}
	}
}
#define WRITECC32(devinfo, reg, value)	brcmf_pcie_write_reg32(devinfo, \
						CHIPCREGOFFS(reg), value)


static void
brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
{
	const struct pci_dev *pdev = devinfo->pdev;
	struct brcmf_core *core;
	u32 bar0_win;

	core = brcmf_chip_get_core(devinfo->ci, coreid);
	if (core) {
		bar0_win = core->base;
		pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
		if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
					  &bar0_win) == 0) {
			if (bar0_win != core->base) {
				bar0_win = core->base;
				pci_write_config_dword(pdev,
						       BRCMF_PCIE_BAR0_WINDOW,
						       bar0_win);
			}
		}
	} else {
		brcmf_err("Unsupported core selected %x\n", coreid);
	}
}
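
/* Device reset: disable ASPM, kick the chipcommon watchdog to reset the
 * chip, then restore ASPM and, for older PCIe core revisions, rewrite the
 * config space registers that the watchdog reset clobbers.
 */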
static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_core *core;
	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
			     BRCMF_PCIE_CFGREG_PM_CSR,
			     BRCMF_PCIE_CFGREG_MSI_CAP,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_L,
			     BRCMF_PCIE_CFGREG_MSI_ADDR_H,
			     BRCMF_PCIE_CFGREG_MSI_DATA,
			     BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
			     BRCMF_PCIE_CFGREG_RBAR_CTRL,
			     BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
			     BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
			     BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
	u32 i;
	u32 val;
	u32 lsc;

	if (!devinfo->ci)
		return;

	/* Disable ASPM */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
			      &lsc);
	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
			       val);

	/* Watchdog reset */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);

	/* Restore ASPM */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
			       lsc);

	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
	if (core->rev <= 13) {
		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
			brcmf_pcie_write_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
					       cfg_offset[i]);
			val = brcmf_pcie_read_reg32(devinfo,
						    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
				  cfg_offset[i], val);
			brcmf_pcie_write_reg32(devinfo,
					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
					       val);
		}
	}
}
static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
{
	u32 config;

	/* BAR1 window may not be sized properly */
	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
	config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);

	device_wakeup_enable(&devinfo->pdev->dev);
}
static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       5);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
				       7);
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
				       0);
	}
	return 0;
}
static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
					  u32 resetintr)
{
	struct brcmf_core *core;

	if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
		core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
		brcmf_chip_resetcore(core, 0, 0, 0);
	}

	if (!brcmf_chip_set_active(devinfo->ci, resetintr))
		return -EINVAL;
	return 0;
}
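
/* Host/device mailbox data is exchanged through two word-sized slots in TCM
 * (htod_mb_data_addr / dtoh_mb_data_addr from the shared area): the writer
 * fills its slot and rings the SBMBX doorbell, and the reader clears the
 * slot to acknowledge.
 */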
static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 cur_htod_mb_data;
	u32 i;

	shared = &devinfo->shared;
	addr = shared->htod_mb_data_addr;
	cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (cur_htod_mb_data != 0)
		brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
			  cur_htod_mb_data);

	i = 0;
	while (cur_htod_mb_data != 0) {
		msleep(10);
		i++;
		if (i > 100)
			return -EIO;
		cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
	}

	brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);

	return 0;
}
static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;
	u32 dtoh_mb_data;

	shared = &devinfo->shared;
	addr = shared->dtoh_mb_data_addr;
	dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);

	if (!dtoh_mb_data)
		return;

	brcmf_pcie_write_tcm32(devinfo, addr, 0);

	brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
		brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
		brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
	}
	if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
		brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
	if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
		devinfo->mbdata_completed = true;
		wake_up(&devinfo->mbdata_resp_wait);
	}
	if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
		brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
		brcmf_dev_coredump(&devinfo->pdev->dev);
	}
}
static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_shared_info *shared;
	struct brcmf_pcie_console *console;
	u32 addr;

	shared = &devinfo->shared;
	console = &shared->console;
	addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
	console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
	console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
		  console->base_addr, console->buf_addr, console->bufsize);
}
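
/* The firmware log console is a ring buffer in device memory: the host
 * chases the firmware's write index, copying one character at a time and
 * flushing a line to pr_debug() whenever a newline is seen or the local
 * line buffer is nearly full.
 */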
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	if (!BRCMF_FWCON_ON())
		return;

	console = &devinfo->shared.console;
	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = brcmf_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		if ((ch == '\n') ||
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			if (ch != '\n') {
				console->log_str[console->log_idx] = ch;
				console->log_idx++;
			}
			console->log_str[console->log_idx] = 0;
			pr_debug("CONSOLE: %s", console->log_str);
			console->log_idx = 0;
		}
	}
}
static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
{
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
}


static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
{
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
			       BRCMF_PCIE_MB_INT_D2H_DB |
			       BRCMF_PCIE_MB_INT_FN0_0 |
			       BRCMF_PCIE_MB_INT_FN0_1);
}

static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
		brcmf_pcie_write_reg32(devinfo,
				       BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
}
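
/* Interrupt handling is split in two stages: the hard IRQ handler below only
 * checks MAILBOXINT and masks further interrupts, deferring the real work
 * (mailbox data, D2H doorbells, console draining) to the threaded handler,
 * which re-enables interrupts as long as the bus is still up.
 */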
static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;

	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
		brcmf_pcie_intr_disable(devinfo);
		brcmf_dbg(PCIE, "Enter\n");
		return IRQ_WAKE_THREAD;
	}
	return IRQ_NONE;
}


static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
	u32 status;

	devinfo->in_irq = true;
	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       status);
		if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
			      BRCMF_PCIE_MB_INT_FN0_1))
			brcmf_pcie_handle_mb_data(devinfo);
		if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
			if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
				brcmf_proto_msgbuf_rx_trigger(
							&devinfo->pdev->dev);
		}
	}
	brcmf_pcie_bus_console_read(devinfo);
	if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
		brcmf_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}
static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);

	brcmf_dbg(PCIE, "Enter\n");

	pci_enable_msi(pdev);
	if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
				 brcmf_pcie_isr_thread, IRQF_SHARED,
				 "brcmf_pcie_intr", devinfo)) {
		pci_disable_msi(pdev);
		brcmf_err("Failed to request IRQ %d\n", pdev->irq);
		return -EIO;
	}
	devinfo->irq_allocated = true;
	return 0;
}


static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	pdev = devinfo->pdev;

	brcmf_pcie_intr_disable(devinfo);
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	msleep(50);
	count = 0;
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		brcmf_err("Still in IRQ (processing) !!!\n");

	status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);

	devinfo->irq_allocated = false;
}
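
/* The brcmf_pcie_ring_mb_* callbacks below are registered with each
 * commonring: they publish the host's read/write pointers to the device
 * (either into TCM or into the host-memory index buffer, through
 * devinfo->write_ptr/read_ptr) and ring the H2D mailbox doorbell to wake
 * the firmware.
 */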
static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
{
	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
	struct brcmf_pciedev_info *devinfo = ring->devinfo;
	struct brcmf_commonring *commonring = &ring->commonring;

	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
		return -EIO;

	brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
		  commonring->w_ptr, ring->id);

	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);

	return 0;
}


static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
{
	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
	struct brcmf_pciedev_info *devinfo = ring->devinfo;
	struct brcmf_commonring *commonring = &ring->commonring;

	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
		return -EIO;

	brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
		  commonring->r_ptr, ring->id);

	devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);

	return 0;
}


static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
{
	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
	struct brcmf_pciedev_info *devinfo = ring->devinfo;

	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
		return -EIO;

	brcmf_dbg(PCIE, "RING !\n");
	/* Any arbitrary value will do, let's use 1 */
	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);

	return 0;
}


static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
{
	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
	struct brcmf_pciedev_info *devinfo = ring->devinfo;
	struct brcmf_commonring *commonring = &ring->commonring;

	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
		return -EIO;

	commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);

	brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
		  commonring->w_ptr, ring->id);

	return 0;
}


static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
{
	struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
	struct brcmf_pciedev_info *devinfo = ring->devinfo;
	struct brcmf_commonring *commonring = &ring->commonring;

	if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
		return -EIO;

	commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);

	brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
		  commonring->r_ptr, ring->id);

	return 0;
}
static void *
brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
				     u32 size, u32 tcm_dma_phys_addr,
				     dma_addr_t *dma_handle)
{
	void *ring;
	u64 address;

	ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
				  GFP_KERNEL);
	if (!ring)
		return NULL;

	address = (u64)*dma_handle;
	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
			       address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);

	memset(ring, 0, size);

	return (ring);
}
static struct brcmf_pcie_ringbuf *
brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
			      u32 tcm_ring_phys_addr)
{
	void *dma_buf;
	dma_addr_t dma_handle;
	struct brcmf_pcie_ringbuf *ring;
	u32 size;
	u32 addr;
	const u32 *ring_itemsize_array;

	if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
		ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
	else
		ring_itemsize_array = brcmf_ring_itemsize;

	size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
	dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
			tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
			&dma_handle);
	if (!dma_buf)
		return NULL;

	addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
	addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
	brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
				  dma_handle);
		return NULL;
	}
	brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
				ring_itemsize_array[ring_id], dma_buf);
	ring->dma_handle = dma_handle;
	ring->devinfo = devinfo;
	brcmf_commonring_register_cb(&ring->commonring,
				     brcmf_pcie_ring_mb_ring_bell,
				     brcmf_pcie_ring_mb_update_rptr,
				     brcmf_pcie_ring_mb_update_wptr,
				     brcmf_pcie_ring_mb_write_rptr,
				     brcmf_pcie_ring_mb_write_wptr, ring);

	return (ring);
}
static void brcmf_pcie_release_ringbuffer(struct device *dev,
					  struct brcmf_pcie_ringbuf *ring)
{
	void *dma_buf;
	u32 size;

	if (!ring)
		return;

	dma_buf = ring->commonring.buf_addr;
	if (dma_buf) {
		size = ring->commonring.depth * ring->commonring.item_len;
		dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
	}
	kfree(ring);
}


static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	u32 i;

	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
					      devinfo->shared.commonrings[i]);
		devinfo->shared.commonrings[i] = NULL;
	}
	kfree(devinfo->shared.flowrings);
	devinfo->shared.flowrings = NULL;
	if (devinfo->idxbuf) {
		dma_free_coherent(&devinfo->pdev->dev,
				  devinfo->idxbuf_sz, devinfo->idxbuf,
				  devinfo->idxbuf_dmahandle);
		devinfo->idxbuf = NULL;
	}
}
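
/* Ring setup: the ring_info structure is read from (and written back to)
 * device memory. When the firmware supports the DMA index feature, the
 * read/write indices of all rings are packed into one host-coherent buffer
 * (idxbuf) and its addresses are patched into ring_info; otherwise the
 * indices stay in TCM and are accessed with the tcm16 helpers.
 */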
static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_pcie_ringbuf *ring;
	struct brcmf_pcie_ringbuf *rings;
	u32 d2h_w_idx_ptr;
	u32 d2h_r_idx_ptr;
	u32 h2d_w_idx_ptr;
	u32 h2d_r_idx_ptr;
	u32 ring_mem_ptr;
	u32 i;
	u64 address;
	u32 bufsz;
	u8 idx_offset;
	struct brcmf_pcie_dhi_ringinfo ringinfo;
	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;

	memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
		      sizeof(ringinfo));
	if (devinfo->shared.version >= 6) {
		max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
		max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
		max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
	} else {
		max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
		max_flowrings = max_submissionrings -
				BRCMF_NROF_H2D_COMMON_MSGRINGS;
		max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
	}

	if (devinfo->dma_idx_sz != 0) {
		bufsz = (max_submissionrings + max_completionrings) *
			devinfo->dma_idx_sz * 2;
		devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
						     &devinfo->idxbuf_dmahandle,
						     GFP_KERNEL);
		if (!devinfo->idxbuf)
			devinfo->dma_idx_sz = 0;
	}

	if (devinfo->dma_idx_sz == 0) {
		d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(u32);
		devinfo->write_ptr = brcmf_pcie_write_tcm16;
		devinfo->read_ptr = brcmf_pcie_read_tcm16;
		brcmf_dbg(PCIE, "Using TCM indices\n");
	} else {
		memset(devinfo->idxbuf, 0, bufsz);
		devinfo->idxbuf_sz = bufsz;
		idx_offset = devinfo->dma_idx_sz;
		devinfo->write_ptr = brcmf_pcie_write_idx;
		devinfo->read_ptr = brcmf_pcie_read_idx;

		h2d_w_idx_ptr = 0;
		address = (u64)devinfo->idxbuf_dmahandle;
		ringinfo.h2d_w_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
				max_submissionrings * idx_offset;
		address += max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
				max_submissionrings * idx_offset;
		address += max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
				max_completionrings * idx_offset;
		address += max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr.high_addr =
			cpu_to_le32(address >> 32);

		memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
			    &ringinfo, sizeof(ringinfo));
		brcmf_dbg(PCIE, "Using host memory indices\n");
	}

	ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);

	for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
	     i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
		ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
		if (!ring)
			goto fail;
		ring->w_idx_addr = d2h_w_idx_ptr;
		ring->r_idx_addr = d2h_r_idx_ptr;
		ring->id = i;
		devinfo->shared.commonrings[i] = ring;

		d2h_w_idx_ptr += idx_offset;
		d2h_r_idx_ptr += idx_offset;
		ring_mem_ptr += BRCMF_RING_MEM_SZ;
	}

	devinfo->shared.max_flowrings = max_flowrings;
	devinfo->shared.max_submissionrings = max_submissionrings;
	devinfo->shared.max_completionrings = max_completionrings;
	rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
	if (!rings)
		goto fail;

	brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);

	for (i = 0; i < max_flowrings; i++) {
		ring = &rings[i];
		ring->devinfo = devinfo;
		ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
		brcmf_commonring_register_cb(&ring->commonring,
					     brcmf_pcie_ring_mb_ring_bell,
					     brcmf_pcie_ring_mb_update_rptr,
					     brcmf_pcie_ring_mb_update_wptr,
					     brcmf_pcie_ring_mb_write_rptr,
					     brcmf_pcie_ring_mb_write_wptr,
					     ring);
		ring->w_idx_addr = h2d_w_idx_ptr;
		ring->r_idx_addr = h2d_r_idx_ptr;
		h2d_w_idx_ptr += idx_offset;
		h2d_r_idx_ptr += idx_offset;
	}
	devinfo->shared.flowrings = rings;

	return 0;

fail:
	brcmf_err("Allocating ring buffers failed\n");
	brcmf_pcie_release_ringbuffers(devinfo);
	return -ENOMEM;
}
static void
brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->shared.scratch)
		dma_free_coherent(&devinfo->pdev->dev,
				  BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
				  devinfo->shared.scratch,
				  devinfo->shared.scratch_dmahandle);
	if (devinfo->shared.ringupd)
		dma_free_coherent(&devinfo->pdev->dev,
				  BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
				  devinfo->shared.ringupd,
				  devinfo->shared.ringupd_dmahandle);
}
static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
{
	u64 address;
	u32 addr;

	devinfo->shared.scratch =
		dma_zalloc_coherent(&devinfo->pdev->dev,
				    BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
				    &devinfo->shared.scratch_dmahandle,
				    GFP_KERNEL);
	if (!devinfo->shared.scratch)
		goto fail;

	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
	address = (u64)devinfo->shared.scratch_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);

	devinfo->shared.ringupd =
		dma_zalloc_coherent(&devinfo->pdev->dev,
				    BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
				    &devinfo->shared.ringupd_dmahandle,
				    GFP_KERNEL);
	if (!devinfo->shared.ringupd)
		goto fail;

	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
	address = (u64)devinfo->shared.ringupd_dmahandle;
	brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
	brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
	addr = devinfo->shared.tcm_base_address +
	       BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
	brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
	return 0;

fail:
	brcmf_err("Allocating scratch buffers failed\n");
	brcmf_pcie_release_scratchbuffers(devinfo);
	return -ENOMEM;
}
static void brcmf_pcie_down(struct device *dev)
{
}


static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
	return 0;
}


static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}


static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
				uint len)
{
	return 0;
}


static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
	struct brcmf_pciedev_info *devinfo = buspub->devinfo;

	brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
	devinfo->wowl_enabled = enabled;
}
static size_t brcmf_pcie_get_ramsize(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
	struct brcmf_pciedev_info *devinfo = buspub->devinfo;

	return devinfo->ci->ramsize - devinfo->ci->srsize;
}


static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
	struct brcmf_pciedev_info *devinfo = buspub->devinfo;

	brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
	return 0;
}
static
int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_fw_request *fwreq;
	struct brcmf_fw_name fwnames[] = {
		{ ext, fw_name },
	};

	fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
				       brcmf_pcie_fwnames,
				       ARRAY_SIZE(brcmf_pcie_fwnames),
				       fwnames, ARRAY_SIZE(fwnames));
	if (!fwreq)
		return -ENOMEM;

	kfree(fwreq);
	return 0;
}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
	.txdata = brcmf_pcie_tx,
	.stop = brcmf_pcie_down,
	.txctl = brcmf_pcie_tx_ctlpkt,
	.rxctl = brcmf_pcie_rx_ctlpkt,
	.wowl_config = brcmf_pcie_wowl_config,
	.get_ramsize = brcmf_pcie_get_ramsize,
	.get_memdump = brcmf_pcie_get_memdump,
	.get_fwname = brcmf_pcie_get_fwname,
};
static void
brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
			  u32 data_len)
{
	__le32 *field;
	u32 newsize;

	if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
		return;

	field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
	if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
		return;
	field++;
	newsize = le32_to_cpup(field);

	brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
		  newsize);
	devinfo->ci->ramsize = newsize;
}
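
/* The firmware publishes the address of the shared area in the last 32-bit
 * word of device RAM; brcmf_pcie_init_share_ram_info() reads the flags,
 * protocol version, mailbox data addresses and ring info offset from it.
 */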
static int
brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
			       u32 sharedram_addr)
{
	struct brcmf_pcie_shared_info *shared;
	u32 addr;

	shared = &devinfo->shared;
	shared->tcm_base_address = sharedram_addr;

	shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
	shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
	brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
	if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
	    (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
		brcmf_err("Unsupported PCIE version %d\n", shared->version);
		return -EINVAL;
	}

	/* check whether firmware supports dma indices */
	if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
		if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
			devinfo->dma_idx_sz = sizeof(u16);
		else
			devinfo->dma_idx_sz = sizeof(u32);
	}

	addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
	shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
	if (shared->max_rxbufpost == 0)
		shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;

	addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
	shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
	shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
	shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
	shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);

	brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
		  shared->max_rxbufpost, shared->rx_dataoffset);

	brcmf_pcie_bus_console_init(devinfo);

	return 0;
}
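
/* Firmware download sequence: halt the ARM, copy the image to device RAM
 * followed by NVRAM at the top of RAM, clear the last word of RAM, restart
 * the ARM, then poll that word until the firmware overwrites it with the
 * shared area address (or BRCMF_PCIE_FW_UP_TIMEOUT expires).
 */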
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
					const struct firmware *fw, void *nvram,
					u32 nvram_len)
{
	u32 sharedram_addr;
	u32 sharedram_addr_written;
	u32 loop_counter;
	int err;
	u32 address;
	u32 resetintr;

	brcmf_dbg(PCIE, "Halt ARM.\n");
	err = brcmf_pcie_enter_download_state(devinfo);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
	brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
				  (void *)fw->data, fw->size);

	resetintr = get_unaligned_le32(fw->data);
	release_firmware(fw);

	/* Clear the last 4 bytes of RAM; the firmware writes the shared
	 * area address there once it is running.
	 */
	brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);

	if (nvram) {
		brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
		address = devinfo->ci->rambase + devinfo->ci->ramsize -
			  nvram_len;
		brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
		brcmf_fw_nvram_free(nvram);
	} else {
		brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
			  devinfo->nvram_name);
	}

	sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
	brcmf_dbg(PCIE, "Bring ARM in running state\n");
	err = brcmf_pcie_exit_download_state(devinfo, resetintr);
	if (err)
		return err;

	brcmf_dbg(PCIE, "Wait for FW init\n");
	sharedram_addr = sharedram_addr_written;
	loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
	while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
		msleep(50);
		sharedram_addr = brcmf_pcie_read_ram32(devinfo,
						       devinfo->ci->ramsize -
						       4);
		loop_counter--;
	}
	if (sharedram_addr == sharedram_addr_written) {
		brcmf_err("FW failed to initialize\n");
		return -ENODEV;
	}
	brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);

	return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
}
static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
{
	struct pci_dev *pdev;
	int err;
	phys_addr_t bar0_addr, bar1_addr;
	ulong bar1_size;

	pdev = devinfo->pdev;

	err = pci_enable_device(pdev);
	if (err) {
		brcmf_err("pci_enable_device failed err=%d\n", err);
		return err;
	}

	pci_set_master(pdev);

	/* Bar-0 mapped address */
	bar0_addr = pci_resource_start(pdev, 0);
	/* Bar-1 mapped address */
	bar1_addr = pci_resource_start(pdev, 2);
	/* read Bar-1 mapped memory range */
	bar1_size = pci_resource_len(pdev, 2);
	if ((bar1_size == 0) || (bar1_addr == 0)) {
		brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
			  bar1_size, (unsigned long long)bar1_addr);
		return -EINVAL;
	}

	devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
	devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);

	if (!devinfo->regs || !devinfo->tcm) {
		brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs,
			  devinfo->tcm);
		return -EINVAL;
	}

	brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
		  devinfo->regs, (unsigned long long)bar0_addr);
	brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
		  devinfo->tcm, (unsigned long long)bar1_addr,
		  (unsigned int)bar1_size);

	return 0;
}


static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
{
	if (devinfo->tcm)
		iounmap(devinfo->tcm);
	if (devinfo->regs)
		iounmap(devinfo->regs);

	pci_disable_device(devinfo->pdev);
}
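
/* Backplane register access through BAR0: the chip exposes a sliding window,
 * so brcmf_pcie_buscore_prep_addr() programs the window base via PCI config
 * space and returns the offset within the window to use for the actual
 * read or write.
 */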
static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
{
	u32 ret_addr;

	ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
	addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
	pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);

	return ret_addr;
}


static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
	return brcmf_pcie_read_reg32(devinfo, addr);
}


static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
	brcmf_pcie_write_reg32(devinfo, addr, value);
}


static int brcmf_pcie_buscoreprep(void *ctx)
{
	return brcmf_pcie_get_resource(ctx);
}


static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
	u32 val;

	devinfo->ci = chip;
	brcmf_pcie_reset_device(devinfo);

	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
	if (val != 0xffffffff)
		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
				       val);

	return 0;
}


static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
					u32 rstvec)
{
	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;

	brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
}


static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};
#define BRCMF_PCIE_FW_CODE	0
#define BRCMF_PCIE_FW_NVRAM	1
static void brcmf_pcie_setup(struct device *dev, int ret,
			     struct brcmf_fw_request *fwreq)
{
	const struct firmware *fw;
	void *nvram;
	struct brcmf_bus *bus;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_commonring **flowrings;
	u32 i, nvram_len;

	/* check firmware loading result */
	if (ret)
		goto fail;

	bus = dev_get_drvdata(dev);
	pcie_bus_dev = bus->bus_priv.pcie;
	devinfo = pcie_bus_dev->devinfo;
	brcmf_pcie_attach(devinfo);

	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
	kfree(fwreq);

	/* Some of the firmwares have the size of the memory of the device
	 * defined inside the firmware. This is because part of the memory in
	 * the device is shared and the division is determined by FW. Parse
	 * the firmware and adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.max_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	brcmf_pcie_intr_enable(devinfo);
	brcmf_pcie_hostready(devinfo);
	if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
		return;

	brcmf_pcie_bus_console_read(devinfo);

fail:
	device_release_driver(dev);
}
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_fw_request *fwreq;
	struct brcmf_fw_name fwnames[] = {
		{ ".bin", devinfo->fw_name },
		{ ".txt", devinfo->nvram_name },
	};

	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
				       brcmf_pcie_fwnames,
				       ARRAY_SIZE(brcmf_pcie_fwnames),
				       fwnames, ARRAY_SIZE(fwnames));
	if (!fwreq)
		return NULL;

	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
	/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
	fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
	fwreq->bus_nr = devinfo->pdev->bus->number;

	return fwreq;
}
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_fw_request *fwreq;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
						   BRCMF_BUSTYPE_PCIE,
						   devinfo->ci->chip,
						   devinfo->ci->chiprev);
	if (!devinfo->settings) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
	if (!fwreq) {
		ret = -ENOMEM;
		goto fail_bus;
	}

	ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
	if (ret < 0) {
		kfree(fwreq);
		goto fail_bus;
	}
	return 0;

fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}
#ifdef CONFIG_PM


static int brcmf_pcie_pm_enter_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
			   BRCMF_PCIE_MBDATA_TIMEOUT);
	if (!devinfo->mbdata_completed) {
		brcmf_err("Timeout on response for entering D3 substate\n");
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		return -EIO;
	}

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;

	return 0;
}
*dev
)
1962 struct brcmf_pciedev_info
*devinfo
;
1963 struct brcmf_bus
*bus
;
1964 struct pci_dev
*pdev
;
1967 brcmf_dbg(PCIE
, "Enter\n");
1969 bus
= dev_get_drvdata(dev
);
1970 devinfo
= bus
->bus_priv
.pcie
->devinfo
;
1971 brcmf_dbg(PCIE
, "Enter, dev=%p, bus=%p\n", dev
, bus
);
1973 /* Check if device is still up and running, if so we are ready */
1974 if (brcmf_pcie_read_reg32(devinfo
, BRCMF_PCIE_PCIE2REG_INTMASK
) != 0) {
1975 brcmf_dbg(PCIE
, "Try to wakeup device....\n");
1976 if (brcmf_pcie_send_mb_data(devinfo
, BRCMF_H2D_HOST_D0_INFORM
))
1978 brcmf_dbg(PCIE
, "Hot resume, continue....\n");
1979 devinfo
->state
= BRCMFMAC_PCIE_STATE_UP
;
1980 brcmf_pcie_select_core(devinfo
, BCMA_CORE_PCIE2
);
1981 brcmf_bus_change_state(bus
, BRCMF_BUS_UP
);
1982 brcmf_pcie_intr_enable(devinfo
);
1983 brcmf_pcie_hostready(devinfo
);
1988 brcmf_chip_detach(devinfo
->ci
);
1990 pdev
= devinfo
->pdev
;
1991 brcmf_pcie_remove(pdev
);
1993 err
= brcmf_pcie_probe(pdev
, NULL
);
1995 brcmf_err("probe after resume failed, err=%d\n", err
);
static const struct dev_pm_ops brcmf_pciedrvr_pm = {
	.suspend = brcmf_pcie_pm_enter_D3,
	.resume = brcmf_pcie_pm_leave_D3,
	.freeze = brcmf_pcie_pm_enter_D3,
	.restore = brcmf_pcie_pm_leave_D3,
};


#endif /* CONFIG_PM */


#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
static const struct pci_device_id brcmf_pcie_devid_table[] = {
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
	{ /* end: all zeroes */ }
};


MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
static struct pci_driver brcmf_pciedrvr = {
	.node = {},
	.name = KBUILD_MODNAME,
	.id_table = brcmf_pcie_devid_table,
	.probe = brcmf_pcie_probe,
	.remove = brcmf_pcie_remove,
#ifdef CONFIG_PM
	.driver.pm = &brcmf_pciedrvr_pm,
#endif
	.driver.coredump = brcmf_dev_coredump,
};
void brcmf_pcie_register(void)
{
	int err;

	brcmf_dbg(PCIE, "Enter\n");
	err = pci_register_driver(&brcmf_pciedrvr);
	if (err)
		brcmf_err("PCIE driver registration failed, err=%d\n", err);
}


void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}