brcmfmac: disable PCIe interrupts before bus reset
[linux/fpc-iii.git] / drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
blob 3be60aef54650d1c139cca8fc87fb5eadc9bf119
1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2014 Broadcom Corporation
4 */
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/firmware.h>
9 #include <linux/pci.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
13 #include <linux/bcma/bcma.h>
14 #include <linux/sched.h>
15 #include <asm/unaligned.h>
17 #include <soc.h>
18 #include <chipcommon.h>
19 #include <brcmu_utils.h>
20 #include <brcmu_wifi.h>
21 #include <brcm_hw_ids.h>
23 /* Custom brcmf_err() that takes bus arg and passes it further */
24 #define brcmf_err(bus, fmt, ...) \
25 do { \
26 if (IS_ENABLED(CONFIG_BRCMDBG) || \
27 IS_ENABLED(CONFIG_BRCM_TRACING) || \
28 net_ratelimit()) \
29 __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \
30 } while (0)
32 #include "debug.h"
33 #include "bus.h"
34 #include "commonring.h"
35 #include "msgbuf.h"
36 #include "pcie.h"
37 #include "firmware.h"
38 #include "chip.h"
39 #include "core.h"
40 #include "common.h"
43 enum brcmf_pcie_state {
44 BRCMFMAC_PCIE_STATE_DOWN,
45 BRCMFMAC_PCIE_STATE_UP
48 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
49 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
50 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
51 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
52 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
53 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
54 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
55 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
56 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
57 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
58 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
59 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
61 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
62 BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
63 BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
64 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
65 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
66 BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
67 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
68 BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
69 BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
70 BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
71 BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
72 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
73 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
74 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
75 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
76 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
77 BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
78 BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
81 #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */
83 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
85 /* backplane address space accessed by BAR0 */
86 #define BRCMF_PCIE_BAR0_WINDOW 0x80
87 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
88 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
90 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
91 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
93 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
94 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
96 #define BRCMF_PCIE_REG_INTSTATUS 0x90
97 #define BRCMF_PCIE_REG_INTMASK 0x94
98 #define BRCMF_PCIE_REG_SBMBX 0x98
100 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
102 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
103 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
104 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
105 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
106 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
107 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
108 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
110 #define BRCMF_PCIE2_INTA 0x01
111 #define BRCMF_PCIE2_INTB 0x02
113 #define BRCMF_PCIE_INT_0 0x01
114 #define BRCMF_PCIE_INT_1 0x02
115 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
116 BRCMF_PCIE_INT_1)
118 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
119 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
120 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
121 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
122 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
123 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
124 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
125 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
126 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
127 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
129 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
130 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
131 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
132 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
133 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
134 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
135 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
136 BRCMF_PCIE_MB_INT_D2H3_DB1)
138 #define BRCMF_PCIE_SHARED_VERSION_7 7
139 #define BRCMF_PCIE_MIN_SHARED_VERSION 5
140 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
141 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
142 #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
143 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
144 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
146 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
147 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
149 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
150 #define BRCMF_SHARED_RING_BASE_OFFSET 52
151 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
152 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
153 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
154 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
155 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
156 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
157 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
158 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
159 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
161 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
162 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
163 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
164 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
166 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
167 #define BRCMF_RING_MAX_ITEM_OFFSET 4
168 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
169 #define BRCMF_RING_MEM_SZ 16
170 #define BRCMF_RING_STATE_SZ 8
172 #define BRCMF_DEF_MAX_RXBUFPOST 255
174 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
175 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
176 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
178 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
179 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
181 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
182 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
183 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
184 #define BRCMF_D2H_DEV_FWHALT 0x10000000
186 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
187 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
188 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
189 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
191 #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
193 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
194 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
195 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
196 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
197 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
198 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
199 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
200 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
201 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
202 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
203 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
204 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
205 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
207 /* Magic number at a magic location to find RAM size */
208 #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
209 #define BRCMF_RAMSIZE_OFFSET 0x6c
212 struct brcmf_pcie_console {
213 u32 base_addr;
214 u32 buf_addr;
215 u32 bufsize;
216 u32 read_idx;
217 u8 log_str[256];
218 u8 log_idx;
221 struct brcmf_pcie_shared_info {
222 u32 tcm_base_address;
223 u32 flags;
224 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
225 struct brcmf_pcie_ringbuf *flowrings;
226 u16 max_rxbufpost;
227 u16 max_flowrings;
228 u16 max_submissionrings;
229 u16 max_completionrings;
230 u32 rx_dataoffset;
231 u32 htod_mb_data_addr;
232 u32 dtoh_mb_data_addr;
233 u32 ring_info_addr;
234 struct brcmf_pcie_console console;
235 void *scratch;
236 dma_addr_t scratch_dmahandle;
237 void *ringupd;
238 dma_addr_t ringupd_dmahandle;
239 u8 version;
242 struct brcmf_pcie_core_info {
243 u32 base;
244 u32 wrapbase;
247 struct brcmf_pciedev_info {
248 enum brcmf_pcie_state state;
249 bool in_irq;
250 struct pci_dev *pdev;
251 char fw_name[BRCMF_FW_NAME_LEN];
252 char nvram_name[BRCMF_FW_NAME_LEN];
253 void __iomem *regs;
254 void __iomem *tcm;
255 u32 ram_base;
256 u32 ram_size;
257 struct brcmf_chip *ci;
258 u32 coreid;
259 struct brcmf_pcie_shared_info shared;
260 wait_queue_head_t mbdata_resp_wait;
261 bool mbdata_completed;
262 bool irq_allocated;
263 bool wowl_enabled;
264 u8 dma_idx_sz;
265 void *idxbuf;
266 u32 idxbuf_sz;
267 dma_addr_t idxbuf_dmahandle;
268 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
269 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
270 u16 value);
271 struct brcmf_mp_device *settings;
274 struct brcmf_pcie_ringbuf {
275 struct brcmf_commonring commonring;
276 dma_addr_t dma_handle;
277 u32 w_idx_addr;
278 u32 r_idx_addr;
279 struct brcmf_pciedev_info *devinfo;
280 u8 id;
283 /**
284  * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
285  *
286  * @ringmem: dongle memory pointer to ring memory location
287  * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
288  * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
289  * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
290  * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
291  * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
292  * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
293  * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
294  * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
295  * @max_flowrings: maximum number of tx flow rings supported.
296  * @max_submissionrings: maximum number of submission rings (h2d) supported.
297  * @max_completionrings: maximum number of completion rings (d2h) supported.
298  */
299 struct brcmf_pcie_dhi_ringinfo {
300 __le32 ringmem;
301 __le32 h2d_w_idx_ptr;
302 __le32 h2d_r_idx_ptr;
303 __le32 d2h_w_idx_ptr;
304 __le32 d2h_r_idx_ptr;
305 struct msgbuf_buf_addr h2d_w_idx_hostaddr;
306 struct msgbuf_buf_addr h2d_r_idx_hostaddr;
307 struct msgbuf_buf_addr d2h_w_idx_hostaddr;
308 struct msgbuf_buf_addr d2h_r_idx_hostaddr;
309 __le16 max_flowrings;
310 __le16 max_submissionrings;
311 __le16 max_completionrings;
314 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
315 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
316 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
317 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
318 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
319 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
322 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
323 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
324 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
325 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
326 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
327 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
330 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
331 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
332 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
333 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
334 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
335 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
338 static void brcmf_pcie_setup(struct device *dev, int ret,
339 struct brcmf_fw_request *fwreq);
340 static struct brcmf_fw_request *
341 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
343 static u32
344 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
346 void __iomem *address = devinfo->regs + reg_offset;
348 return (ioread32(address));
352 static void
353 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
354 u32 value)
356 void __iomem *address = devinfo->regs + reg_offset;
358 iowrite32(value, address);
362 static u8
363 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
365 void __iomem *address = devinfo->tcm + mem_offset;
367 return (ioread8(address));
371 static u16
372 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
374 void __iomem *address = devinfo->tcm + mem_offset;
376 return (ioread16(address));
380 static void
381 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
382 u16 value)
384 void __iomem *address = devinfo->tcm + mem_offset;
386 iowrite16(value, address);
390 static u16
391 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
393 u16 *address = devinfo->idxbuf + mem_offset;
395 return (*(address));
399 static void
400 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
401 u16 value)
403 u16 *address = devinfo->idxbuf + mem_offset;
405 *(address) = value;
409 static u32
410 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
412 void __iomem *address = devinfo->tcm + mem_offset;
414 return (ioread32(address));
418 static void
419 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
420 u32 value)
422 void __iomem *address = devinfo->tcm + mem_offset;
424 iowrite32(value, address);
428 static u32
429 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
431 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
433 return (ioread32(addr));
437 static void
438 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
439 u32 value)
441 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
443 iowrite32(value, addr);
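/* Copy a host buffer into device TCM, picking the widest MMIO access width
 * the simple address/length bit checks allow: 32-bit writes when possible,
 * otherwise 16-bit writes, otherwise plain byte copies.
 */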
447 static void
448 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
449 void *srcaddr, u32 len)
451 void __iomem *address = devinfo->tcm + mem_offset;
452 __le32 *src32;
453 __le16 *src16;
454 u8 *src8;
456 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
457 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
458 src8 = (u8 *)srcaddr;
459 while (len) {
460 iowrite8(*src8, address);
461 address++;
462 src8++;
463 len--;
465 } else {
466 len = len / 2;
467 src16 = (__le16 *)srcaddr;
468 while (len) {
469 iowrite16(le16_to_cpu(*src16), address);
470 address += 2;
471 src16++;
472 len--;
475 } else {
476 len = len / 4;
477 src32 = (__le32 *)srcaddr;
478 while (len) {
479 iowrite32(le32_to_cpu(*src32), address);
480 address += 4;
481 src32++;
482 len--;
488 static void
489 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
490 void *dstaddr, u32 len)
492 void __iomem *address = devinfo->tcm + mem_offset;
493 __le32 *dst32;
494 __le16 *dst16;
495 u8 *dst8;
497 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
498 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
499 dst8 = (u8 *)dstaddr;
500 while (len) {
501 *dst8 = ioread8(address);
502 address++;
503 dst8++;
504 len--;
506 } else {
507 len = len / 2;
508 dst16 = (__le16 *)dstaddr;
509 while (len) {
510 *dst16 = cpu_to_le16(ioread16(address));
511 address += 2;
512 dst16++;
513 len--;
516 } else {
517 len = len / 4;
518 dst32 = (__le32 *)dstaddr;
519 while (len) {
520 *dst32 = cpu_to_le32(ioread32(address));
521 address += 4;
522 dst32++;
523 len--;
529 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
530 CHIPCREGOFFS(reg), value)
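/* Select a backplane core by programming the BAR0 window register with the
 * core's base address. The value is read back and rewritten once if it did
 * not stick; an unknown core id is reported as an error.
 */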
533 static void
534 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
536 const struct pci_dev *pdev = devinfo->pdev;
537 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
538 struct brcmf_core *core;
539 u32 bar0_win;
541 core = brcmf_chip_get_core(devinfo->ci, coreid);
542 if (core) {
543 bar0_win = core->base;
544 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
545 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
546 &bar0_win) == 0) {
547 if (bar0_win != core->base) {
548 bar0_win = core->base;
549 pci_write_config_dword(pdev,
550 BRCMF_PCIE_BAR0_WINDOW,
551 bar0_win);
554 } else {
555 brcmf_err(bus, "Unsupported core selected %x\n", coreid);
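/* Reset the device through the chipcommon watchdog. ASPM is disabled around
 * the reset and restored afterwards; on PCIe core revisions <= 13 a list of
 * PCIe config registers is additionally read back and rewritten to restore
 * their state after the reset.
 */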
560 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
562 struct brcmf_core *core;
563 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
564 BRCMF_PCIE_CFGREG_PM_CSR,
565 BRCMF_PCIE_CFGREG_MSI_CAP,
566 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
567 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
568 BRCMF_PCIE_CFGREG_MSI_DATA,
569 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
570 BRCMF_PCIE_CFGREG_RBAR_CTRL,
571 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
572 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
573 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
574 u32 i;
575 u32 val;
576 u32 lsc;
578 if (!devinfo->ci)
579 return;
581 /* Disable ASPM */
582 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
583 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
584 &lsc);
585 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
586 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
587 val);
589 /* Watchdog reset */
590 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
591 WRITECC32(devinfo, watchdog, 4);
592 msleep(100);
594 /* Restore ASPM */
595 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
596 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
597 lsc);
599 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
600 if (core->rev <= 13) {
601 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
602 brcmf_pcie_write_reg32(devinfo,
603 BRCMF_PCIE_PCIE2REG_CONFIGADDR,
604 cfg_offset[i]);
605 val = brcmf_pcie_read_reg32(devinfo,
606 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
607 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
608 cfg_offset[i], val);
609 brcmf_pcie_write_reg32(devinfo,
610 BRCMF_PCIE_PCIE2REG_CONFIGDATA,
611 val);
617 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
619 u32 config;
621 /* BAR1 window may not be sized properly */
622 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
623 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
624 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
625 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
627 device_wakeup_enable(&devinfo->pdev->dev);
631 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
633 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
634 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
635 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
637 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
639 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
641 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
644 return 0;
648 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
649 u32 resetintr)
651 struct brcmf_core *core;
653 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
654 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
655 brcmf_chip_resetcore(core, 0, 0, 0);
658 if (!brcmf_chip_set_active(devinfo->ci, resetintr))
659 return -EINVAL;
660 return 0;
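/* Write host-to-dongle mailbox data into TCM and ring the SB mailbox through
 * PCI config space. Any previously pending mailbox value is given up to
 * roughly a second to be consumed first; on PCIe core rev <= 13 the doorbell
 * is written twice as a hardware workaround.
 */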
664 static int
665 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
667 struct brcmf_pcie_shared_info *shared;
668 struct brcmf_core *core;
669 u32 addr;
670 u32 cur_htod_mb_data;
671 u32 i;
673 shared = &devinfo->shared;
674 addr = shared->htod_mb_data_addr;
675 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
677 if (cur_htod_mb_data != 0)
678 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
679 cur_htod_mb_data);
681 i = 0;
682 while (cur_htod_mb_data != 0) {
683 msleep(10);
684 i++;
685 if (i > 100)
686 return -EIO;
687 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
690 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
691 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
693 /* Send mailbox interrupt twice as a hardware workaround */
694 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
695 if (core->rev <= 13)
696 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
698 return 0;
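/* Process dongle-to-host mailbox data: acknowledge deep sleep enter requests,
 * log deep sleep exit, complete a pending D3 ACK wait and report a firmware
 * halt to the core driver.
 */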
702 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
704 struct brcmf_pcie_shared_info *shared;
705 u32 addr;
706 u32 dtoh_mb_data;
708 shared = &devinfo->shared;
709 addr = shared->dtoh_mb_data_addr;
710 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
712 if (!dtoh_mb_data)
713 return;
715 brcmf_pcie_write_tcm32(devinfo, addr, 0);
717 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
718 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
719 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
720 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
721 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
723 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
724 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
725 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
726 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
727 devinfo->mbdata_completed = true;
728 wake_up(&devinfo->mbdata_resp_wait);
730 if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
731 brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
732 brcmf_fw_crashed(&devinfo->pdev->dev);
737 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
739 struct brcmf_pcie_shared_info *shared;
740 struct brcmf_pcie_console *console;
741 u32 addr;
743 shared = &devinfo->shared;
744 console = &shared->console;
745 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
746 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
748 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
749 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
750 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
751 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
753 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
754 console->base_addr, console->buf_addr, console->bufsize);
757 /**
758  * brcmf_pcie_bus_console_read - reads firmware messages
759  *
760  * @error: specifies if error has occurred (prints messages unconditionally)
761  */
762 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
763 bool error)
765 struct pci_dev *pdev = devinfo->pdev;
766 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
767 struct brcmf_pcie_console *console;
768 u32 addr;
769 u8 ch;
770 u32 newidx;
772 if (!error && !BRCMF_FWCON_ON())
773 return;
775 console = &devinfo->shared.console;
776 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
777 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
778 while (newidx != console->read_idx) {
779 addr = console->buf_addr + console->read_idx;
780 ch = brcmf_pcie_read_tcm8(devinfo, addr);
781 console->read_idx++;
782 if (console->read_idx == console->bufsize)
783 console->read_idx = 0;
784 if (ch == '\r')
785 continue;
786 console->log_str[console->log_idx] = ch;
787 console->log_idx++;
788 if ((ch != '\n') &&
789 (console->log_idx == (sizeof(console->log_str) - 2))) {
790 ch = '\n';
791 console->log_str[console->log_idx] = ch;
792 console->log_idx++;
794 if (ch == '\n') {
795 console->log_str[console->log_idx] = 0;
796 if (error)
797 __brcmf_err(bus, __func__, "CONSOLE: %s",
798 console->log_str);
799 else
800 pr_debug("CONSOLE: %s", console->log_str);
801 console->log_idx = 0;
807 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
809 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
813 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
815 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
816 BRCMF_PCIE_MB_INT_D2H_DB |
817 BRCMF_PCIE_MB_INT_FN0_0 |
818 BRCMF_PCIE_MB_INT_FN0_1);
821 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
823 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
824 brcmf_pcie_write_reg32(devinfo,
825 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
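/* Hard IRQ handler: if the mailbox interrupt status is non-zero, mask the
 * interrupts and defer to the threaded handler below, which acknowledges the
 * status, handles mailbox data and D2H doorbells, drains the firmware console
 * and re-enables interrupts while the bus is up.
 */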
828 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
830 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
832 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
833 brcmf_pcie_intr_disable(devinfo);
834 brcmf_dbg(PCIE, "Enter\n");
835 return IRQ_WAKE_THREAD;
837 return IRQ_NONE;
841 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
843 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
844 u32 status;
846 devinfo->in_irq = true;
847 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
848 brcmf_dbg(PCIE, "Enter %x\n", status);
849 if (status) {
850 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
851 status);
852 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
853 BRCMF_PCIE_MB_INT_FN0_1))
854 brcmf_pcie_handle_mb_data(devinfo);
855 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
856 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
857 brcmf_proto_msgbuf_rx_trigger(
858 &devinfo->pdev->dev);
861 brcmf_pcie_bus_console_read(devinfo, false);
862 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
863 brcmf_pcie_intr_enable(devinfo);
864 devinfo->in_irq = false;
865 return IRQ_HANDLED;
869 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
871 struct pci_dev *pdev = devinfo->pdev;
872 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
874 brcmf_pcie_intr_disable(devinfo);
876 brcmf_dbg(PCIE, "Enter\n");
878 pci_enable_msi(pdev);
879 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
880 brcmf_pcie_isr_thread, IRQF_SHARED,
881 "brcmf_pcie_intr", devinfo)) {
882 pci_disable_msi(pdev);
883 brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
884 return -EIO;
886 devinfo->irq_allocated = true;
887 return 0;
891 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
893 struct pci_dev *pdev = devinfo->pdev;
894 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
895 u32 status;
896 u32 count;
898 if (!devinfo->irq_allocated)
899 return;
901 brcmf_pcie_intr_disable(devinfo);
902 free_irq(pdev->irq, devinfo);
903 pci_disable_msi(pdev);
905 msleep(50);
906 count = 0;
907 while ((devinfo->in_irq) && (count < 20)) {
908 msleep(50);
909 count++;
911 if (devinfo->in_irq)
912 brcmf_err(bus, "Still in IRQ (processing) !!!\n");
914 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
915 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
917 devinfo->irq_allocated = false;
921 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
923 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
924 struct brcmf_pciedev_info *devinfo = ring->devinfo;
925 struct brcmf_commonring *commonring = &ring->commonring;
927 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
928 return -EIO;
930 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
931 commonring->w_ptr, ring->id);
933 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
935 return 0;
939 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
941 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
942 struct brcmf_pciedev_info *devinfo = ring->devinfo;
943 struct brcmf_commonring *commonring = &ring->commonring;
945 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
946 return -EIO;
948 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
949 commonring->r_ptr, ring->id);
951 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
953 return 0;
957 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
959 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
960 struct brcmf_pciedev_info *devinfo = ring->devinfo;
962 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
963 return -EIO;
965 brcmf_dbg(PCIE, "RING !\n");
966 	/* Any arbitrary value will do, let's use 1 */
967 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
969 return 0;
973 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
975 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
976 struct brcmf_pciedev_info *devinfo = ring->devinfo;
977 struct brcmf_commonring *commonring = &ring->commonring;
979 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
980 return -EIO;
982 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
984 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
985 commonring->w_ptr, ring->id);
987 return 0;
991 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
993 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
994 struct brcmf_pciedev_info *devinfo = ring->devinfo;
995 struct brcmf_commonring *commonring = &ring->commonring;
997 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
998 return -EIO;
1000 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
1002 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
1003 commonring->r_ptr, ring->id);
1005 return 0;
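/* Allocate a coherent DMA buffer, publish its 64-bit bus address to the
 * dongle by writing the low and high words into TCM at the given offset,
 * and return the zeroed buffer.
 */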
1009 static void *
1010 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
1011 u32 size, u32 tcm_dma_phys_addr,
1012 dma_addr_t *dma_handle)
1014 void *ring;
1015 u64 address;
1017 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
1018 GFP_KERNEL);
1019 if (!ring)
1020 return NULL;
1022 address = (u64)*dma_handle;
1023 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
1024 address & 0xffffffff);
1025 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
1027 memset(ring, 0, size);
1029 return (ring);
1033 static struct brcmf_pcie_ringbuf *
1034 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1035 u32 tcm_ring_phys_addr)
1037 void *dma_buf;
1038 dma_addr_t dma_handle;
1039 struct brcmf_pcie_ringbuf *ring;
1040 u32 size;
1041 u32 addr;
1042 const u32 *ring_itemsize_array;
1044 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1045 ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1046 else
1047 ring_itemsize_array = brcmf_ring_itemsize;
1049 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1050 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1051 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1052 &dma_handle);
1053 if (!dma_buf)
1054 return NULL;
1056 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1057 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1058 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1059 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1061 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1062 if (!ring) {
1063 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1064 dma_handle);
1065 return NULL;
1067 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1068 ring_itemsize_array[ring_id], dma_buf);
1069 ring->dma_handle = dma_handle;
1070 ring->devinfo = devinfo;
1071 brcmf_commonring_register_cb(&ring->commonring,
1072 brcmf_pcie_ring_mb_ring_bell,
1073 brcmf_pcie_ring_mb_update_rptr,
1074 brcmf_pcie_ring_mb_update_wptr,
1075 brcmf_pcie_ring_mb_write_rptr,
1076 brcmf_pcie_ring_mb_write_wptr, ring);
1078 return (ring);
1082 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1083 struct brcmf_pcie_ringbuf *ring)
1085 void *dma_buf;
1086 u32 size;
1088 if (!ring)
1089 return;
1091 dma_buf = ring->commonring.buf_addr;
1092 if (dma_buf) {
1093 size = ring->commonring.depth * ring->commonring.item_len;
1094 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1096 kfree(ring);
1100 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1102 u32 i;
1104 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1105 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1106 devinfo->shared.commonrings[i]);
1107 devinfo->shared.commonrings[i] = NULL;
1109 kfree(devinfo->shared.flowrings);
1110 devinfo->shared.flowrings = NULL;
1111 if (devinfo->idxbuf) {
1112 dma_free_coherent(&devinfo->pdev->dev,
1113 devinfo->idxbuf_sz,
1114 devinfo->idxbuf,
1115 devinfo->idxbuf_dmahandle);
1116 devinfo->idxbuf = NULL;
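/* Set up the message rings described by the ring info block in TCM. When the
 * firmware supports DMA indices, a host memory index buffer is allocated and
 * its bus addresses are written back into the ring info; otherwise the read
 * and write indices live in TCM. Common rings get their DMA ring memory here,
 * while flow ring descriptors are only prepared for later use.
 */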
1121 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1123 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1124 struct brcmf_pcie_ringbuf *ring;
1125 struct brcmf_pcie_ringbuf *rings;
1126 u32 d2h_w_idx_ptr;
1127 u32 d2h_r_idx_ptr;
1128 u32 h2d_w_idx_ptr;
1129 u32 h2d_r_idx_ptr;
1130 u32 ring_mem_ptr;
1131 u32 i;
1132 u64 address;
1133 u32 bufsz;
1134 u8 idx_offset;
1135 struct brcmf_pcie_dhi_ringinfo ringinfo;
1136 u16 max_flowrings;
1137 u16 max_submissionrings;
1138 u16 max_completionrings;
1140 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1141 sizeof(ringinfo));
1142 if (devinfo->shared.version >= 6) {
1143 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1144 max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1145 max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1146 } else {
1147 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1148 max_flowrings = max_submissionrings -
1149 BRCMF_NROF_H2D_COMMON_MSGRINGS;
1150 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1153 if (devinfo->dma_idx_sz != 0) {
1154 bufsz = (max_submissionrings + max_completionrings) *
1155 devinfo->dma_idx_sz * 2;
1156 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1157 &devinfo->idxbuf_dmahandle,
1158 GFP_KERNEL);
1159 if (!devinfo->idxbuf)
1160 devinfo->dma_idx_sz = 0;
1163 if (devinfo->dma_idx_sz == 0) {
1164 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1165 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1166 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1167 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1168 idx_offset = sizeof(u32);
1169 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1170 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1171 brcmf_dbg(PCIE, "Using TCM indices\n");
1172 } else {
1173 memset(devinfo->idxbuf, 0, bufsz);
1174 devinfo->idxbuf_sz = bufsz;
1175 idx_offset = devinfo->dma_idx_sz;
1176 devinfo->write_ptr = brcmf_pcie_write_idx;
1177 devinfo->read_ptr = brcmf_pcie_read_idx;
1179 h2d_w_idx_ptr = 0;
1180 address = (u64)devinfo->idxbuf_dmahandle;
1181 ringinfo.h2d_w_idx_hostaddr.low_addr =
1182 cpu_to_le32(address & 0xffffffff);
1183 ringinfo.h2d_w_idx_hostaddr.high_addr =
1184 cpu_to_le32(address >> 32);
1186 h2d_r_idx_ptr = h2d_w_idx_ptr +
1187 max_submissionrings * idx_offset;
1188 address += max_submissionrings * idx_offset;
1189 ringinfo.h2d_r_idx_hostaddr.low_addr =
1190 cpu_to_le32(address & 0xffffffff);
1191 ringinfo.h2d_r_idx_hostaddr.high_addr =
1192 cpu_to_le32(address >> 32);
1194 d2h_w_idx_ptr = h2d_r_idx_ptr +
1195 max_submissionrings * idx_offset;
1196 address += max_submissionrings * idx_offset;
1197 ringinfo.d2h_w_idx_hostaddr.low_addr =
1198 cpu_to_le32(address & 0xffffffff);
1199 ringinfo.d2h_w_idx_hostaddr.high_addr =
1200 cpu_to_le32(address >> 32);
1202 d2h_r_idx_ptr = d2h_w_idx_ptr +
1203 max_completionrings * idx_offset;
1204 address += max_completionrings * idx_offset;
1205 ringinfo.d2h_r_idx_hostaddr.low_addr =
1206 cpu_to_le32(address & 0xffffffff);
1207 ringinfo.d2h_r_idx_hostaddr.high_addr =
1208 cpu_to_le32(address >> 32);
1210 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1211 &ringinfo, sizeof(ringinfo));
1212 brcmf_dbg(PCIE, "Using host memory indices\n");
1215 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1217 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1218 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1219 if (!ring)
1220 goto fail;
1221 ring->w_idx_addr = h2d_w_idx_ptr;
1222 ring->r_idx_addr = h2d_r_idx_ptr;
1223 ring->id = i;
1224 devinfo->shared.commonrings[i] = ring;
1226 h2d_w_idx_ptr += idx_offset;
1227 h2d_r_idx_ptr += idx_offset;
1228 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1231 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1232 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1233 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1234 if (!ring)
1235 goto fail;
1236 ring->w_idx_addr = d2h_w_idx_ptr;
1237 ring->r_idx_addr = d2h_r_idx_ptr;
1238 ring->id = i;
1239 devinfo->shared.commonrings[i] = ring;
1241 d2h_w_idx_ptr += idx_offset;
1242 d2h_r_idx_ptr += idx_offset;
1243 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1246 devinfo->shared.max_flowrings = max_flowrings;
1247 devinfo->shared.max_submissionrings = max_submissionrings;
1248 devinfo->shared.max_completionrings = max_completionrings;
1249 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1250 if (!rings)
1251 goto fail;
1253 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1255 for (i = 0; i < max_flowrings; i++) {
1256 ring = &rings[i];
1257 ring->devinfo = devinfo;
1258 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1259 brcmf_commonring_register_cb(&ring->commonring,
1260 brcmf_pcie_ring_mb_ring_bell,
1261 brcmf_pcie_ring_mb_update_rptr,
1262 brcmf_pcie_ring_mb_update_wptr,
1263 brcmf_pcie_ring_mb_write_rptr,
1264 brcmf_pcie_ring_mb_write_wptr,
1265 ring);
1266 ring->w_idx_addr = h2d_w_idx_ptr;
1267 ring->r_idx_addr = h2d_r_idx_ptr;
1268 h2d_w_idx_ptr += idx_offset;
1269 h2d_r_idx_ptr += idx_offset;
1271 devinfo->shared.flowrings = rings;
1273 return 0;
1275 fail:
1276 brcmf_err(bus, "Allocating ring buffers failed\n");
1277 brcmf_pcie_release_ringbuffers(devinfo);
1278 return -ENOMEM;
1282 static void
1283 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1285 if (devinfo->shared.scratch)
1286 dma_free_coherent(&devinfo->pdev->dev,
1287 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1288 devinfo->shared.scratch,
1289 devinfo->shared.scratch_dmahandle);
1290 if (devinfo->shared.ringupd)
1291 dma_free_coherent(&devinfo->pdev->dev,
1292 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1293 devinfo->shared.ringupd,
1294 devinfo->shared.ringupd_dmahandle);
1297 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1299 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1300 u64 address;
1301 u32 addr;
1303 devinfo->shared.scratch =
1304 dma_alloc_coherent(&devinfo->pdev->dev,
1305 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1306 &devinfo->shared.scratch_dmahandle,
1307 GFP_KERNEL);
1308 if (!devinfo->shared.scratch)
1309 goto fail;
1311 addr = devinfo->shared.tcm_base_address +
1312 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1313 address = (u64)devinfo->shared.scratch_dmahandle;
1314 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1315 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1316 addr = devinfo->shared.tcm_base_address +
1317 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1318 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1320 devinfo->shared.ringupd =
1321 dma_alloc_coherent(&devinfo->pdev->dev,
1322 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1323 &devinfo->shared.ringupd_dmahandle,
1324 GFP_KERNEL);
1325 if (!devinfo->shared.ringupd)
1326 goto fail;
1328 addr = devinfo->shared.tcm_base_address +
1329 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1330 address = (u64)devinfo->shared.ringupd_dmahandle;
1331 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1332 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1333 addr = devinfo->shared.tcm_base_address +
1334 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1335 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1336 return 0;
1338 fail:
1339 brcmf_err(bus, "Allocating scratch buffers failed\n");
1340 brcmf_pcie_release_scratchbuffers(devinfo);
1341 return -ENOMEM;
1345 static void brcmf_pcie_down(struct device *dev)
1350 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1352 return 0;
1356 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1357 uint len)
1359 return 0;
1363 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1364 uint len)
1366 return 0;
1370 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1372 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1373 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1374 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1376 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1377 devinfo->wowl_enabled = enabled;
1381 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1383 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1384 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1385 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1387 return devinfo->ci->ramsize - devinfo->ci->srsize;
1391 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1393 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1394 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1395 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1397 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1398 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1399 return 0;
1402 static
1403 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1405 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1406 struct brcmf_fw_request *fwreq;
1407 struct brcmf_fw_name fwnames[] = {
1408 { ext, fw_name },
1411 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1412 brcmf_pcie_fwnames,
1413 ARRAY_SIZE(brcmf_pcie_fwnames),
1414 fwnames, ARRAY_SIZE(fwnames));
1415 if (!fwreq)
1416 return -ENOMEM;
1418 kfree(fwreq);
1419 return 0;
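/* Bus reset handler: disable PCIe interrupts before tearing the bus down,
 * dump the firmware console, detach the core driver, release IRQ and DMA
 * resources, reset the device and request the firmware again.
 */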
1422 static int brcmf_pcie_reset(struct device *dev)
1424 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1425 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1426 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1427 struct brcmf_fw_request *fwreq;
1428 int err;
1430 brcmf_pcie_intr_disable(devinfo);
1432 brcmf_pcie_bus_console_read(devinfo, true);
1434 brcmf_detach(dev);
1436 brcmf_pcie_release_irq(devinfo);
1437 brcmf_pcie_release_scratchbuffers(devinfo);
1438 brcmf_pcie_release_ringbuffers(devinfo);
1439 brcmf_pcie_reset_device(devinfo);
1441 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1442 if (!fwreq) {
1443 dev_err(dev, "Failed to prepare FW request\n");
1444 return -ENOMEM;
1447 err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
1448 if (err) {
1449 dev_err(dev, "Failed to prepare FW request\n");
1450 kfree(fwreq);
1453 return err;
1456 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1457 .txdata = brcmf_pcie_tx,
1458 .stop = brcmf_pcie_down,
1459 .txctl = brcmf_pcie_tx_ctlpkt,
1460 .rxctl = brcmf_pcie_rx_ctlpkt,
1461 .wowl_config = brcmf_pcie_wowl_config,
1462 .get_ramsize = brcmf_pcie_get_ramsize,
1463 .get_memdump = brcmf_pcie_get_memdump,
1464 .get_fwname = brcmf_pcie_get_fwname,
1465 .reset = brcmf_pcie_reset,
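/* Some firmware images carry the device RAM size at BRCMF_RAMSIZE_OFFSET,
 * tagged with the "SMAR" magic word; if present, the chip's RAM size is
 * overridden with the value from the image.
 */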
1469 static void
1470 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1471 u32 data_len)
1473 __le32 *field;
1474 u32 newsize;
1476 if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1477 return;
1479 field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1480 if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1481 return;
1482 field++;
1483 newsize = le32_to_cpup(field);
1485 brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1486 newsize);
1487 devinfo->ci->ramsize = newsize;
1491 static int
1492 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1493 u32 sharedram_addr)
1495 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1496 struct brcmf_pcie_shared_info *shared;
1497 u32 addr;
1499 shared = &devinfo->shared;
1500 shared->tcm_base_address = sharedram_addr;
1502 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1503 shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1504 brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1505 if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1506 (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1507 brcmf_err(bus, "Unsupported PCIE version %d\n",
1508 shared->version);
1509 return -EINVAL;
1512 	/* check whether the firmware supports DMA indices */
1513 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1514 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1515 devinfo->dma_idx_sz = sizeof(u16);
1516 else
1517 devinfo->dma_idx_sz = sizeof(u32);
1520 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1521 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1522 if (shared->max_rxbufpost == 0)
1523 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1525 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1526 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1528 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1529 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1531 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1532 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1534 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1535 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1537 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1538 shared->max_rxbufpost, shared->rx_dataoffset);
1540 brcmf_pcie_bus_console_init(devinfo);
1542 return 0;
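/* Download firmware and optional NVRAM into device RAM: halt the ARM core,
 * copy the image, clear the last word of RAM, copy NVRAM to the end of RAM,
 * restart the ARM and then poll that last word until the firmware writes the
 * shared RAM address into it (or the timeout expires).
 */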
1546 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1547 const struct firmware *fw, void *nvram,
1548 u32 nvram_len)
1550 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1551 u32 sharedram_addr;
1552 u32 sharedram_addr_written;
1553 u32 loop_counter;
1554 int err;
1555 u32 address;
1556 u32 resetintr;
1558 brcmf_dbg(PCIE, "Halt ARM.\n");
1559 err = brcmf_pcie_enter_download_state(devinfo);
1560 if (err)
1561 return err;
1563 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1564 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1565 (void *)fw->data, fw->size);
1567 resetintr = get_unaligned_le32(fw->data);
1568 release_firmware(fw);
1570 	/* Reset the last 4 bytes of RAM address, to be used for the shared
1571 	 * area. This identifies when the FW is running.
1572 	 */
1573 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1575 if (nvram) {
1576 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1577 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1578 nvram_len;
1579 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1580 brcmf_fw_nvram_free(nvram);
1581 } else {
1582 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1583 devinfo->nvram_name);
1586 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1587 devinfo->ci->ramsize -
1589 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1590 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1591 if (err)
1592 return err;
1594 brcmf_dbg(PCIE, "Wait for FW init\n");
1595 sharedram_addr = sharedram_addr_written;
1596 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1597 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1598 msleep(50);
1599 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1600 devinfo->ci->ramsize -
1602 loop_counter--;
1604 if (sharedram_addr == sharedram_addr_written) {
1605 brcmf_err(bus, "FW failed to initialize\n");
1606 return -ENODEV;
1608 if (sharedram_addr < devinfo->ci->rambase ||
1609 sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1610 brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1611 sharedram_addr);
1612 return -ENODEV;
1614 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1616 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1620 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1622 struct pci_dev *pdev = devinfo->pdev;
1623 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1624 int err;
1625 phys_addr_t bar0_addr, bar1_addr;
1626 ulong bar1_size;
1628 err = pci_enable_device(pdev);
1629 if (err) {
1630 brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1631 return err;
1634 pci_set_master(pdev);
1636 /* Bar-0 mapped address */
1637 bar0_addr = pci_resource_start(pdev, 0);
1638 /* Bar-1 mapped address */
1639 bar1_addr = pci_resource_start(pdev, 2);
1640 /* read Bar-1 mapped memory range */
1641 bar1_size = pci_resource_len(pdev, 2);
1642 if ((bar1_size == 0) || (bar1_addr == 0)) {
1643 brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1644 bar1_size, (unsigned long long)bar1_addr);
1645 return -EINVAL;
1648 devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1649 devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size);
1651 if (!devinfo->regs || !devinfo->tcm) {
1652 brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1653 devinfo->tcm);
1654 return -EINVAL;
1656 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1657 devinfo->regs, (unsigned long long)bar0_addr);
1658 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1659 devinfo->tcm, (unsigned long long)bar1_addr,
1660 (unsigned int)bar1_size);
1662 return 0;
1666 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1668 if (devinfo->tcm)
1669 iounmap(devinfo->tcm);
1670 if (devinfo->regs)
1671 iounmap(devinfo->regs);
1673 pci_disable_device(devinfo->pdev);
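/* Map a full backplane address onto the fixed BAR0 register window: program
 * the window base through PCI config space and return the offset within the
 * window.
 */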
1677 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1679 u32 ret_addr;
1681 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1682 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1683 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1685 return ret_addr;
1689 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1691 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1693 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1694 return brcmf_pcie_read_reg32(devinfo, addr);
1698 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1700 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1702 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1703 brcmf_pcie_write_reg32(devinfo, addr, value);
1707 static int brcmf_pcie_buscoreprep(void *ctx)
1709 return brcmf_pcie_get_resource(ctx);
1713 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1715 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1716 u32 val;
1718 devinfo->ci = chip;
1719 brcmf_pcie_reset_device(devinfo);
1721 val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1722 if (val != 0xffffffff)
1723 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1724 val);
1726 return 0;
1730 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1731 u32 rstvec)
1733 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1735 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1739 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1740 .prepare = brcmf_pcie_buscoreprep,
1741 .reset = brcmf_pcie_buscore_reset,
1742 .activate = brcmf_pcie_buscore_activate,
1743 .read32 = brcmf_pcie_buscore_read32,
1744 .write32 = brcmf_pcie_buscore_write32,
1747 #define BRCMF_PCIE_FW_CODE 0
1748 #define BRCMF_PCIE_FW_NVRAM 1
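/* Firmware request completion callback: attach the PCIe core, download the
 * firmware and NVRAM, set up rings, scratch buffers and the IRQ, enable
 * interrupts, signal host-ready and attach the common driver. On any failure
 * the driver is released from the device.
 */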
1750 static void brcmf_pcie_setup(struct device *dev, int ret,
1751 struct brcmf_fw_request *fwreq)
1753 const struct firmware *fw;
1754 void *nvram;
1755 struct brcmf_bus *bus;
1756 struct brcmf_pciedev *pcie_bus_dev;
1757 struct brcmf_pciedev_info *devinfo;
1758 struct brcmf_commonring **flowrings;
1759 u32 i, nvram_len;
1761 /* check firmware loading result */
1762 if (ret)
1763 goto fail;
1765 bus = dev_get_drvdata(dev);
1766 pcie_bus_dev = bus->bus_priv.pcie;
1767 devinfo = pcie_bus_dev->devinfo;
1768 brcmf_pcie_attach(devinfo);
1770 fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
1771 nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
1772 nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
1773 kfree(fwreq);
1775 ret = brcmf_chip_get_raminfo(devinfo->ci);
1776 if (ret) {
1777 brcmf_err(bus, "Failed to get RAM info\n");
1778 goto fail;
1781 	/* Some firmware images define the size of the device memory inside
1782 	 * the firmware itself. This is because part of the device memory is
1783 	 * shared and the division is determined by the FW. Parse the firmware
1784 	 * and adjust the chip memory size now.
1785 	 */
1786 brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
1788 ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1789 if (ret)
1790 goto fail;
1792 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1794 ret = brcmf_pcie_init_ringbuffers(devinfo);
1795 if (ret)
1796 goto fail;
1798 ret = brcmf_pcie_init_scratchbuffers(devinfo);
1799 if (ret)
1800 goto fail;
1802 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1803 ret = brcmf_pcie_request_irq(devinfo);
1804 if (ret)
1805 goto fail;
1807 /* hook the commonrings in the bus structure. */
1808 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1809 bus->msgbuf->commonrings[i] =
1810 &devinfo->shared.commonrings[i]->commonring;
1812 flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
1813 GFP_KERNEL);
1814 if (!flowrings)
1815 goto fail;
1817 for (i = 0; i < devinfo->shared.max_flowrings; i++)
1818 flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1819 bus->msgbuf->flowrings = flowrings;
1821 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1822 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1823 bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
1825 init_waitqueue_head(&devinfo->mbdata_resp_wait);
1827 brcmf_pcie_intr_enable(devinfo);
1828 brcmf_pcie_hostready(devinfo);
1830 ret = brcmf_attach(&devinfo->pdev->dev);
1831 if (ret)
1832 goto fail;
1834 brcmf_pcie_bus_console_read(devinfo, false);
1836 return;
1838 fail:
1839 device_release_driver(dev);
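/* Build the firmware request: a mandatory .bin image and an optional .txt
 * NVRAM file, selected by chip id/revision and board type. domain_nr is
 * offset by one because NVRAM reserves PCI domain 0 for Broadcom's SDK.
 */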
1842 static struct brcmf_fw_request *
1843 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1845 struct brcmf_fw_request *fwreq;
1846 struct brcmf_fw_name fwnames[] = {
1847 { ".bin", devinfo->fw_name },
1848 { ".txt", devinfo->nvram_name },
1851 fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
1852 brcmf_pcie_fwnames,
1853 ARRAY_SIZE(brcmf_pcie_fwnames),
1854 fwnames, ARRAY_SIZE(fwnames));
1855 if (!fwreq)
1856 return NULL;
1858 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1859 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1860 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1861 fwreq->board_type = devinfo->settings->board_type;
1862 /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1863 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1864 fwreq->bus_nr = devinfo->pdev->bus->number;
1866 return fwreq;
1869 static int
1870 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1872 int ret;
1873 struct brcmf_fw_request *fwreq;
1874 struct brcmf_pciedev_info *devinfo;
1875 struct brcmf_pciedev *pcie_bus_dev;
1876 struct brcmf_bus *bus;
1878 brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
1880 ret = -ENOMEM;
1881 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1882 if (devinfo == NULL)
1883 return ret;
1885 devinfo->pdev = pdev;
1886 pcie_bus_dev = NULL;
1887 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1888 if (IS_ERR(devinfo->ci)) {
1889 ret = PTR_ERR(devinfo->ci);
1890 devinfo->ci = NULL;
1891 goto fail;
1894 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1895 if (pcie_bus_dev == NULL) {
1896 ret = -ENOMEM;
1897 goto fail;
1900 devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
1901 BRCMF_BUSTYPE_PCIE,
1902 devinfo->ci->chip,
1903 devinfo->ci->chiprev);
1904 if (!devinfo->settings) {
1905 ret = -ENOMEM;
1906 goto fail;
1909 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1910 if (!bus) {
1911 ret = -ENOMEM;
1912 goto fail;
1914 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1915 if (!bus->msgbuf) {
1916 ret = -ENOMEM;
1917 kfree(bus);
1918 goto fail;
1921 /* hook it all together. */
1922 pcie_bus_dev->devinfo = devinfo;
1923 pcie_bus_dev->bus = bus;
1924 bus->dev = &pdev->dev;
1925 bus->bus_priv.pcie = pcie_bus_dev;
1926 bus->ops = &brcmf_pcie_bus_ops;
1927 bus->proto_type = BRCMF_PROTO_MSGBUF;
1928 bus->chip = devinfo->coreid;
1929 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1930 dev_set_drvdata(&pdev->dev, bus);
1932 ret = brcmf_alloc(&devinfo->pdev->dev, devinfo->settings);
1933 if (ret)
1934 goto fail_bus;
1936 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1937 if (!fwreq) {
1938 ret = -ENOMEM;
1939 goto fail_bus;
1942 ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
1943 if (ret < 0) {
1944 kfree(fwreq);
1945 goto fail_bus;
1947 return 0;
1949 fail_bus:
1950 kfree(bus->msgbuf);
1951 kfree(bus);
1952 fail:
1953 brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
1954 brcmf_pcie_release_resource(devinfo);
1955 if (devinfo->ci)
1956 brcmf_chip_detach(devinfo->ci);
1957 if (devinfo->settings)
1958 brcmf_release_module_param(devinfo->settings);
1959 kfree(pcie_bus_dev);
1960 kfree(devinfo);
1961 return ret;
1965 static void
1966 brcmf_pcie_remove(struct pci_dev *pdev)
1968 struct brcmf_pciedev_info *devinfo;
1969 struct brcmf_bus *bus;
1971 brcmf_dbg(PCIE, "Enter\n");
1973 bus = dev_get_drvdata(&pdev->dev);
1974 if (bus == NULL)
1975 return;
1977 devinfo = bus->bus_priv.pcie->devinfo;
1979 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1980 if (devinfo->ci)
1981 brcmf_pcie_intr_disable(devinfo);
1983 brcmf_detach(&pdev->dev);
1984 brcmf_free(&pdev->dev);
1986 kfree(bus->bus_priv.pcie);
1987 kfree(bus->msgbuf->flowrings);
1988 kfree(bus->msgbuf);
1989 kfree(bus);
1991 brcmf_pcie_release_irq(devinfo);
1992 brcmf_pcie_release_scratchbuffers(devinfo);
1993 brcmf_pcie_release_ringbuffers(devinfo);
1994 brcmf_pcie_reset_device(devinfo);
1995 brcmf_pcie_release_resource(devinfo);
1997 if (devinfo->ci)
1998 brcmf_chip_detach(devinfo->ci);
1999 if (devinfo->settings)
2000 brcmf_release_module_param(devinfo->settings);
2002 kfree(devinfo);
2003 dev_set_drvdata(&pdev->dev, NULL);
2007 #ifdef CONFIG_PM
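/* Suspend: bring the bus down, send D3_INFORM to the firmware and wait for
 * the D3 ACK mailbox response; if it never arrives, restore the bus state
 * and fail with -EIO.
 */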
2010 static int brcmf_pcie_pm_enter_D3(struct device *dev)
2012 struct brcmf_pciedev_info *devinfo;
2013 struct brcmf_bus *bus;
2015 brcmf_dbg(PCIE, "Enter\n");
2017 bus = dev_get_drvdata(dev);
2018 devinfo = bus->bus_priv.pcie->devinfo;
2020 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
2022 devinfo->mbdata_completed = false;
2023 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
2025 wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
2026 BRCMF_PCIE_MBDATA_TIMEOUT);
2027 if (!devinfo->mbdata_completed) {
2028 brcmf_err(bus, "Timeout on response for entering D3 substate\n");
2029 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2030 return -EIO;
2033 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
2035 return 0;
2039 static int brcmf_pcie_pm_leave_D3(struct device *dev)
2041 struct brcmf_pciedev_info *devinfo;
2042 struct brcmf_bus *bus;
2043 struct pci_dev *pdev;
2044 int err;
2046 brcmf_dbg(PCIE, "Enter\n");
2048 bus = dev_get_drvdata(dev);
2049 devinfo = bus->bus_priv.pcie->devinfo;
2050 brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
2052 /* Check if device is still up and running, if so we are ready */
2053 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
2054 brcmf_dbg(PCIE, "Try to wakeup device....\n");
2055 if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
2056 goto cleanup;
2057 brcmf_dbg(PCIE, "Hot resume, continue....\n");
2058 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
2059 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
2060 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2061 brcmf_pcie_intr_enable(devinfo);
2062 brcmf_pcie_hostready(devinfo);
2063 return 0;
2066 cleanup:
2067 brcmf_chip_detach(devinfo->ci);
2068 devinfo->ci = NULL;
2069 pdev = devinfo->pdev;
2070 brcmf_pcie_remove(pdev);
2072 err = brcmf_pcie_probe(pdev, NULL);
2073 if (err)
2074 brcmf_err(bus, "probe after resume failed, err=%d\n", err);
2076 return err;
2080 static const struct dev_pm_ops brcmf_pciedrvr_pm = {
2081 .suspend = brcmf_pcie_pm_enter_D3,
2082 .resume = brcmf_pcie_pm_leave_D3,
2083 .freeze = brcmf_pcie_pm_enter_D3,
2084 .restore = brcmf_pcie_pm_leave_D3,
2088 #endif /* CONFIG_PM */
2091 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2092 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2093 #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
2094 BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2095 subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2097 static const struct pci_device_id brcmf_pcie_devid_table[] = {
2098 BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
2099 BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
2100 BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
2101 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
2102 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
2103 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
2104 BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
2105 BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
2106 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
2107 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
2108 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
2109 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
2110 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
2111 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
2112 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
2113 BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
2114 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
2115 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
2116 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
2117 BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
2118 { /* end: all zeroes */ }
2122 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
2125 static struct pci_driver brcmf_pciedrvr = {
2126 .node = {},
2127 .name = KBUILD_MODNAME,
2128 .id_table = brcmf_pcie_devid_table,
2129 .probe = brcmf_pcie_probe,
2130 .remove = brcmf_pcie_remove,
2131 #ifdef CONFIG_PM
2132 .driver.pm = &brcmf_pciedrvr_pm,
2133 #endif
2134 .driver.coredump = brcmf_dev_coredump,
2138 void brcmf_pcie_register(void)
2140 int err;
2142 brcmf_dbg(PCIE, "Enter\n");
2143 err = pci_register_driver(&brcmf_pciedrvr);
2144 if (err)
2145 brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
2146 err);
2150 void brcmf_pcie_exit(void)
2152 brcmf_dbg(PCIE, "Enter\n");
2153 pci_unregister_driver(&brcmf_pciedrvr);