drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2014 Broadcom Corporation
4 */
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/firmware.h>
9 #include <linux/pci.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
13 #include <linux/bcma/bcma.h>
14 #include <linux/sched.h>
15 #include <asm/unaligned.h>
17 #include <soc.h>
18 #include <chipcommon.h>
19 #include <brcmu_utils.h>
20 #include <brcmu_wifi.h>
21 #include <brcm_hw_ids.h>
23 /* Custom brcmf_err() that takes bus arg and passes it further */
24 #define brcmf_err(bus, fmt, ...) \
25 do { \
26 if (IS_ENABLED(CONFIG_BRCMDBG) || \
27 IS_ENABLED(CONFIG_BRCM_TRACING) || \
28 net_ratelimit()) \
29 __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \
30 } while (0)
32 #include "debug.h"
33 #include "bus.h"
34 #include "commonring.h"
35 #include "msgbuf.h"
36 #include "pcie.h"
37 #include "firmware.h"
38 #include "chip.h"
39 #include "core.h"
40 #include "common.h"
43 enum brcmf_pcie_state {
44 BRCMFMAC_PCIE_STATE_DOWN,
45 BRCMFMAC_PCIE_STATE_UP
48 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
49 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
50 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
51 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
52 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
53 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
54 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
55 BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
56 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
57 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
58 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
59 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
60 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
62 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
63 BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
64 BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
65 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
66 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
67 BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
68 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
69 BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
70 BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
71 BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
72 BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
73 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
74 BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
75 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
76 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
77 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
78 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
79 BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
80 BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
83 #define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
85 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
87 /* backplane address space accessed by BAR0 */
88 #define BRCMF_PCIE_BAR0_WINDOW 0x80
89 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
90 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
92 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
93 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
95 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
96 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
98 #define BRCMF_PCIE_REG_INTSTATUS 0x90
99 #define BRCMF_PCIE_REG_INTMASK 0x94
100 #define BRCMF_PCIE_REG_SBMBX 0x98
102 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
104 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
105 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
106 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
107 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
108 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
109 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
110 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
112 #define BRCMF_PCIE2_INTA 0x01
113 #define BRCMF_PCIE2_INTB 0x02
115 #define BRCMF_PCIE_INT_0 0x01
116 #define BRCMF_PCIE_INT_1 0x02
117 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
118 BRCMF_PCIE_INT_1)
120 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
121 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
122 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
123 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
124 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
125 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
126 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
127 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
128 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
129 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
131 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
132 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
133 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
134 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
135 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
136 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
137 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
138 BRCMF_PCIE_MB_INT_D2H3_DB1)
140 #define BRCMF_PCIE_SHARED_VERSION_7 7
141 #define BRCMF_PCIE_MIN_SHARED_VERSION 5
142 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
143 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
144 #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
145 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
146 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
148 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
149 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
151 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
152 #define BRCMF_SHARED_RING_BASE_OFFSET 52
153 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
154 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
155 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
156 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
157 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
158 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
159 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
160 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
161 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
163 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
164 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
165 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
166 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
168 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
169 #define BRCMF_RING_MAX_ITEM_OFFSET 4
170 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
171 #define BRCMF_RING_MEM_SZ 16
172 #define BRCMF_RING_STATE_SZ 8
174 #define BRCMF_DEF_MAX_RXBUFPOST 255
176 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
177 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
178 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
180 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
181 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
183 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
184 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
185 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
186 #define BRCMF_D2H_DEV_FWHALT 0x10000000
188 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
189 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
190 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
191 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
193 #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
195 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
196 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
197 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
198 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
199 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
200 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
201 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
202 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
203 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
204 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
205 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
206 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
207 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
209 /* Magic number at a magic location to find RAM size */
210 #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
211 #define BRCMF_RAMSIZE_OFFSET 0x6c
214 struct brcmf_pcie_console {
215 u32 base_addr;
216 u32 buf_addr;
217 u32 bufsize;
218 u32 read_idx;
219 u8 log_str[256];
220 u8 log_idx;
223 struct brcmf_pcie_shared_info {
224 u32 tcm_base_address;
225 u32 flags;
226 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
227 struct brcmf_pcie_ringbuf *flowrings;
228 u16 max_rxbufpost;
229 u16 max_flowrings;
230 u16 max_submissionrings;
231 u16 max_completionrings;
232 u32 rx_dataoffset;
233 u32 htod_mb_data_addr;
234 u32 dtoh_mb_data_addr;
235 u32 ring_info_addr;
236 struct brcmf_pcie_console console;
237 void *scratch;
238 dma_addr_t scratch_dmahandle;
239 void *ringupd;
240 dma_addr_t ringupd_dmahandle;
241 u8 version;
244 struct brcmf_pcie_core_info {
245 u32 base;
246 u32 wrapbase;
249 struct brcmf_pciedev_info {
250 enum brcmf_pcie_state state;
251 bool in_irq;
252 struct pci_dev *pdev;
253 char fw_name[BRCMF_FW_NAME_LEN];
254 char nvram_name[BRCMF_FW_NAME_LEN];
255 void __iomem *regs;
256 void __iomem *tcm;
257 u32 ram_base;
258 u32 ram_size;
259 struct brcmf_chip *ci;
260 u32 coreid;
261 struct brcmf_pcie_shared_info shared;
262 wait_queue_head_t mbdata_resp_wait;
263 bool mbdata_completed;
264 bool irq_allocated;
265 bool wowl_enabled;
266 u8 dma_idx_sz;
267 void *idxbuf;
268 u32 idxbuf_sz;
269 dma_addr_t idxbuf_dmahandle;
270 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
271 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
272 u16 value);
273 struct brcmf_mp_device *settings;
276 struct brcmf_pcie_ringbuf {
277 struct brcmf_commonring commonring;
278 dma_addr_t dma_handle;
279 u32 w_idx_addr;
280 u32 r_idx_addr;
281 struct brcmf_pciedev_info *devinfo;
282 u8 id;
286 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
288 * @ringmem: dongle memory pointer to ring memory location
289 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
290 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
291 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
292 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
293 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
294 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
295 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
296 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
297 * @max_flowrings: maximum number of tx flow rings supported.
298 * @max_submissionrings: maximum number of submission rings (h2d) supported.
299 * @max_completionrings: maximum number of completion rings (d2h) supported.
301 struct brcmf_pcie_dhi_ringinfo {
302 __le32 ringmem;
303 __le32 h2d_w_idx_ptr;
304 __le32 h2d_r_idx_ptr;
305 __le32 d2h_w_idx_ptr;
306 __le32 d2h_r_idx_ptr;
307 struct msgbuf_buf_addr h2d_w_idx_hostaddr;
308 struct msgbuf_buf_addr h2d_r_idx_hostaddr;
309 struct msgbuf_buf_addr d2h_w_idx_hostaddr;
310 struct msgbuf_buf_addr d2h_r_idx_hostaddr;
311 __le16 max_flowrings;
312 __le16 max_submissionrings;
313 __le16 max_completionrings;
316 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
317 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
318 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
319 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
320 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
321 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
324 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
325 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
326 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
327 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
328 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
329 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
332 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
333 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
334 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
335 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
336 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
337 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
340 static void brcmf_pcie_setup(struct device *dev, int ret,
341 struct brcmf_fw_request *fwreq);
342 static struct brcmf_fw_request *
343 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
345 static u32
346 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
348 void __iomem *address = devinfo->regs + reg_offset;
350 return (ioread32(address));
354 static void
355 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
356 u32 value)
358 void __iomem *address = devinfo->regs + reg_offset;
360 iowrite32(value, address);
364 static u8
365 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
367 void __iomem *address = devinfo->tcm + mem_offset;
369 return (ioread8(address));
373 static u16
374 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
376 void __iomem *address = devinfo->tcm + mem_offset;
378 return (ioread16(address));
382 static void
383 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
384 u16 value)
386 void __iomem *address = devinfo->tcm + mem_offset;
388 iowrite16(value, address);
392 static u16
393 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
395 u16 *address = devinfo->idxbuf + mem_offset;
397 return (*(address));
401 static void
402 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
403 u16 value)
405 u16 *address = devinfo->idxbuf + mem_offset;
407 *(address) = value;
411 static u32
412 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
414 void __iomem *address = devinfo->tcm + mem_offset;
416 return (ioread32(address));
420 static void
421 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
422 u32 value)
424 void __iomem *address = devinfo->tcm + mem_offset;
426 iowrite32(value, address);
430 static u32
431 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
433 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
435 return (ioread32(addr));
439 static void
440 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
441 u32 value)
443 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
445 iowrite32(value, addr);
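/* Copy a buffer from host memory into device TCM, using 32-bit, 16-bit
 * or byte-wide MMIO writes as selected by the alignment checks below.
 */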
449 static void
450 brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
451 void *srcaddr, u32 len)
453 void __iomem *address = devinfo->tcm + mem_offset;
454 __le32 *src32;
455 __le16 *src16;
456 u8 *src8;
458 if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
459 if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
460 src8 = (u8 *)srcaddr;
461 while (len) {
462 iowrite8(*src8, address);
463 address++;
464 src8++;
465 len--;
467 } else {
468 len = len / 2;
469 src16 = (__le16 *)srcaddr;
470 while (len) {
471 iowrite16(le16_to_cpu(*src16), address);
472 address += 2;
473 src16++;
474 len--;
477 } else {
478 len = len / 4;
479 src32 = (__le32 *)srcaddr;
480 while (len) {
481 iowrite32(le32_to_cpu(*src32), address);
482 address += 4;
483 src32++;
484 len--;
490 static void
491 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
492 void *dstaddr, u32 len)
494 void __iomem *address = devinfo->tcm + mem_offset;
495 __le32 *dst32;
496 __le16 *dst16;
497 u8 *dst8;
499 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
500 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
501 dst8 = (u8 *)dstaddr;
502 while (len) {
503 *dst8 = ioread8(address);
504 address++;
505 dst8++;
506 len--;
508 } else {
509 len = len / 2;
510 dst16 = (__le16 *)dstaddr;
511 while (len) {
512 *dst16 = cpu_to_le16(ioread16(address));
513 address += 2;
514 dst16++;
515 len--;
518 } else {
519 len = len / 4;
520 dst32 = (__le32 *)dstaddr;
521 while (len) {
522 *dst32 = cpu_to_le32(ioread32(address));
523 address += 4;
524 dst32++;
525 len--;
531 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
532 CHIPCREGOFFS(reg), value)
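/* Point the BAR0 backplane window at the requested core by writing its
 * base address to the BRCMF_PCIE_BAR0_WINDOW config register; the value
 * is read back and rewritten once if it did not stick.
 */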
535 static void
536 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
538 const struct pci_dev *pdev = devinfo->pdev;
539 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
540 struct brcmf_core *core;
541 u32 bar0_win;
543 core = brcmf_chip_get_core(devinfo->ci, coreid);
544 if (core) {
545 bar0_win = core->base;
546 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
547 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
548 &bar0_win) == 0) {
549 if (bar0_win != core->base) {
550 bar0_win = core->base;
551 pci_write_config_dword(pdev,
552 BRCMF_PCIE_BAR0_WINDOW,
553 bar0_win);
556 } else {
557 brcmf_err(bus, "Unsupported core selected %x\n", coreid);
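/* Reset the device via the chipcommon watchdog with ASPM temporarily
 * disabled. For PCIe core revisions <= 13 the config registers listed in
 * cfg_offset are read and written back afterwards through the indirect
 * CONFIGADDR/CONFIGDATA interface.
 */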
562 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
564 struct brcmf_core *core;
565 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
566 BRCMF_PCIE_CFGREG_PM_CSR,
567 BRCMF_PCIE_CFGREG_MSI_CAP,
568 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
569 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
570 BRCMF_PCIE_CFGREG_MSI_DATA,
571 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
572 BRCMF_PCIE_CFGREG_RBAR_CTRL,
573 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
574 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
575 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
576 u32 i;
577 u32 val;
578 u32 lsc;
580 if (!devinfo->ci)
581 return;
583 /* Disable ASPM */
584 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
585 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
586 &lsc);
587 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
588 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
589 val);
591 /* Watchdog reset */
592 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
593 WRITECC32(devinfo, watchdog, 4);
594 msleep(100);
596 /* Restore ASPM */
597 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
598 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
599 lsc);
601 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
602 if (core->rev <= 13) {
603 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
604 brcmf_pcie_write_reg32(devinfo,
605 BRCMF_PCIE_PCIE2REG_CONFIGADDR,
606 cfg_offset[i]);
607 val = brcmf_pcie_read_reg32(devinfo,
608 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
609 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
610 cfg_offset[i], val);
611 brcmf_pcie_write_reg32(devinfo,
612 BRCMF_PCIE_PCIE2REG_CONFIGDATA,
613 val);
619 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
621 u32 config;
623 /* BAR1 window may not be sized properly */
624 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
625 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
626 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
627 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
629 device_wakeup_enable(&devinfo->pdev->dev);
633 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
635 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
636 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
637 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
639 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
641 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
643 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
646 return 0;
650 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
651 u32 resetintr)
653 struct brcmf_core *core;
655 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
656 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
657 brcmf_chip_resetcore(core, 0, 0, 0);
660 if (!brcmf_chip_set_active(devinfo->ci, resetintr))
661 return -EINVAL;
662 return 0;
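/* Write host-to-device mailbox data into TCM and ring the SB mailbox.
 * If a previous value is still pending, poll for up to a second for the
 * firmware to consume it before writing the new one.
 */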
666 static int
667 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
669 struct brcmf_pcie_shared_info *shared;
670 struct brcmf_core *core;
671 u32 addr;
672 u32 cur_htod_mb_data;
673 u32 i;
675 shared = &devinfo->shared;
676 addr = shared->htod_mb_data_addr;
677 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
679 if (cur_htod_mb_data != 0)
680 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
681 cur_htod_mb_data);
683 i = 0;
684 while (cur_htod_mb_data != 0) {
685 msleep(10);
686 i++;
687 if (i > 100)
688 return -EIO;
689 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
692 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
693 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
695 /* Send mailbox interrupt twice as a hardware workaround */
696 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
697 if (core->rev <= 13)
698 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
700 return 0;
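/* Read and clear device-to-host mailbox data, then handle the events it
 * signals: deep sleep enter/exit, D3 ack and firmware halt.
 */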
704 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
706 struct brcmf_pcie_shared_info *shared;
707 u32 addr;
708 u32 dtoh_mb_data;
710 shared = &devinfo->shared;
711 addr = shared->dtoh_mb_data_addr;
712 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
714 if (!dtoh_mb_data)
715 return;
717 brcmf_pcie_write_tcm32(devinfo, addr, 0);
719 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
720 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
721 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
722 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
723 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
725 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
726 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
727 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
728 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
729 devinfo->mbdata_completed = true;
730 wake_up(&devinfo->mbdata_resp_wait);
732 if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
733 brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
734 brcmf_fw_crashed(&devinfo->pdev->dev);
739 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
741 struct brcmf_pcie_shared_info *shared;
742 struct brcmf_pcie_console *console;
743 u32 addr;
745 shared = &devinfo->shared;
746 console = &shared->console;
747 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
748 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
750 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
751 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
752 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
753 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
755 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
756 console->base_addr, console->buf_addr, console->bufsize);
760 * brcmf_pcie_bus_console_read - reads firmware messages
762 * @devinfo: pointer to the device data structure
763 * @error: specifies if error has occurred (prints messages unconditionally)
765 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
766 bool error)
768 struct pci_dev *pdev = devinfo->pdev;
769 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
770 struct brcmf_pcie_console *console;
771 u32 addr;
772 u8 ch;
773 u32 newidx;
775 if (!error && !BRCMF_FWCON_ON())
776 return;
778 console = &devinfo->shared.console;
779 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
780 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
781 while (newidx != console->read_idx) {
782 addr = console->buf_addr + console->read_idx;
783 ch = brcmf_pcie_read_tcm8(devinfo, addr);
784 console->read_idx++;
785 if (console->read_idx == console->bufsize)
786 console->read_idx = 0;
787 if (ch == '\r')
788 continue;
789 console->log_str[console->log_idx] = ch;
790 console->log_idx++;
791 if ((ch != '\n') &&
792 (console->log_idx == (sizeof(console->log_str) - 2))) {
793 ch = '\n';
794 console->log_str[console->log_idx] = ch;
795 console->log_idx++;
797 if (ch == '\n') {
798 console->log_str[console->log_idx] = 0;
799 if (error)
800 __brcmf_err(bus, __func__, "CONSOLE: %s",
801 console->log_str);
802 else
803 pr_debug("CONSOLE: %s", console->log_str);
804 console->log_idx = 0;
810 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
812 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
816 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
818 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
819 BRCMF_PCIE_MB_INT_D2H_DB |
820 BRCMF_PCIE_MB_INT_FN0_0 |
821 BRCMF_PCIE_MB_INT_FN0_1);
824 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
826 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
827 brcmf_pcie_write_reg32(devinfo,
828 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
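/* Interrupt handling is split in two: the quick check below runs in hard
 * IRQ context and masks mailbox interrupts when status is pending, while
 * the threaded handler processes mailbox data and doorbells and
 * re-enables interrupts when the bus is up.
 */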
831 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
833 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
835 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
836 brcmf_pcie_intr_disable(devinfo);
837 brcmf_dbg(PCIE, "Enter\n");
838 return IRQ_WAKE_THREAD;
840 return IRQ_NONE;
844 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
846 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
847 u32 status;
849 devinfo->in_irq = true;
850 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
851 brcmf_dbg(PCIE, "Enter %x\n", status);
852 if (status) {
853 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
854 status);
855 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
856 BRCMF_PCIE_MB_INT_FN0_1))
857 brcmf_pcie_handle_mb_data(devinfo);
858 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
859 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
860 brcmf_proto_msgbuf_rx_trigger(
861 &devinfo->pdev->dev);
864 brcmf_pcie_bus_console_read(devinfo, false);
865 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
866 brcmf_pcie_intr_enable(devinfo);
867 devinfo->in_irq = false;
868 return IRQ_HANDLED;
872 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
874 struct pci_dev *pdev = devinfo->pdev;
875 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
877 brcmf_pcie_intr_disable(devinfo);
879 brcmf_dbg(PCIE, "Enter\n");
881 pci_enable_msi(pdev);
882 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
883 brcmf_pcie_isr_thread, IRQF_SHARED,
884 "brcmf_pcie_intr", devinfo)) {
885 pci_disable_msi(pdev);
886 brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
887 return -EIO;
889 devinfo->irq_allocated = true;
890 return 0;
894 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
896 struct pci_dev *pdev = devinfo->pdev;
897 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
898 u32 status;
899 u32 count;
901 if (!devinfo->irq_allocated)
902 return;
904 brcmf_pcie_intr_disable(devinfo);
905 free_irq(pdev->irq, devinfo);
906 pci_disable_msi(pdev);
908 msleep(50);
909 count = 0;
910 while ((devinfo->in_irq) && (count < 20)) {
911 msleep(50);
912 count++;
914 if (devinfo->in_irq)
915 brcmf_err(bus, "Still in IRQ (processing) !!!\n");
917 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
918 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
920 devinfo->irq_allocated = false;
924 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
926 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
927 struct brcmf_pciedev_info *devinfo = ring->devinfo;
928 struct brcmf_commonring *commonring = &ring->commonring;
930 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
931 return -EIO;
933 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
934 commonring->w_ptr, ring->id);
936 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
938 return 0;
942 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
944 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
945 struct brcmf_pciedev_info *devinfo = ring->devinfo;
946 struct brcmf_commonring *commonring = &ring->commonring;
948 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
949 return -EIO;
951 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
952 commonring->r_ptr, ring->id);
954 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
956 return 0;
960 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
962 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
963 struct brcmf_pciedev_info *devinfo = ring->devinfo;
965 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
966 return -EIO;
968 brcmf_dbg(PCIE, "RING !\n");
969 /* Any arbitrary value will do, let's use 1 */
970 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
972 return 0;
976 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
978 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
979 struct brcmf_pciedev_info *devinfo = ring->devinfo;
980 struct brcmf_commonring *commonring = &ring->commonring;
982 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
983 return -EIO;
985 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
987 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
988 commonring->w_ptr, ring->id);
990 return 0;
994 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
996 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
997 struct brcmf_pciedev_info *devinfo = ring->devinfo;
998 struct brcmf_commonring *commonring = &ring->commonring;
1000 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
1001 return -EIO;
1003 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
1005 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
1006 commonring->r_ptr, ring->id);
1008 return 0;
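/* Allocate a coherent DMA buffer and publish its 64-bit bus address to
 * the device by writing the low and high words into TCM at the given
 * offset.
 */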
1012 static void *
1013 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
1014 u32 size, u32 tcm_dma_phys_addr,
1015 dma_addr_t *dma_handle)
1017 void *ring;
1018 u64 address;
1020 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
1021 GFP_KERNEL);
1022 if (!ring)
1023 return NULL;
1025 address = (u64)*dma_handle;
1026 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
1027 address & 0xffffffff);
1028 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
1030 return (ring);
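/* Allocate DMA backing for one common message ring, write its maximum
 * item count and item size into the ring memory in TCM, and register the
 * doorbell and read/write pointer callbacks.
 */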
1034 static struct brcmf_pcie_ringbuf *
1035 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
1036 u32 tcm_ring_phys_addr)
1038 void *dma_buf;
1039 dma_addr_t dma_handle;
1040 struct brcmf_pcie_ringbuf *ring;
1041 u32 size;
1042 u32 addr;
1043 const u32 *ring_itemsize_array;
1045 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1046 ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1047 else
1048 ring_itemsize_array = brcmf_ring_itemsize;
1050 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1051 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1052 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1053 &dma_handle);
1054 if (!dma_buf)
1055 return NULL;
1057 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1058 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1059 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1060 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1062 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1063 if (!ring) {
1064 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1065 dma_handle);
1066 return NULL;
1068 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1069 ring_itemsize_array[ring_id], dma_buf);
1070 ring->dma_handle = dma_handle;
1071 ring->devinfo = devinfo;
1072 brcmf_commonring_register_cb(&ring->commonring,
1073 brcmf_pcie_ring_mb_ring_bell,
1074 brcmf_pcie_ring_mb_update_rptr,
1075 brcmf_pcie_ring_mb_update_wptr,
1076 brcmf_pcie_ring_mb_write_rptr,
1077 brcmf_pcie_ring_mb_write_wptr, ring);
1079 return (ring);
1083 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1084 struct brcmf_pcie_ringbuf *ring)
1086 void *dma_buf;
1087 u32 size;
1089 if (!ring)
1090 return;
1092 dma_buf = ring->commonring.buf_addr;
1093 if (dma_buf) {
1094 size = ring->commonring.depth * ring->commonring.item_len;
1095 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1097 kfree(ring);
1101 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1103 u32 i;
1105 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1106 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1107 devinfo->shared.commonrings[i]);
1108 devinfo->shared.commonrings[i] = NULL;
1110 kfree(devinfo->shared.flowrings);
1111 devinfo->shared.flowrings = NULL;
1112 if (devinfo->idxbuf) {
1113 dma_free_coherent(&devinfo->pdev->dev,
1114 devinfo->idxbuf_sz,
1115 devinfo->idxbuf,
1116 devinfo->idxbuf_dmahandle);
1117 devinfo->idxbuf = NULL;
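/* Read the ring info published by the firmware, optionally allocate a
 * host memory buffer for DMA'd read/write indices, then create the
 * common rings and pre-allocate the flow ring bookkeeping.
 */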
1122 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1124 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1125 struct brcmf_pcie_ringbuf *ring;
1126 struct brcmf_pcie_ringbuf *rings;
1127 u32 d2h_w_idx_ptr;
1128 u32 d2h_r_idx_ptr;
1129 u32 h2d_w_idx_ptr;
1130 u32 h2d_r_idx_ptr;
1131 u32 ring_mem_ptr;
1132 u32 i;
1133 u64 address;
1134 u32 bufsz;
1135 u8 idx_offset;
1136 struct brcmf_pcie_dhi_ringinfo ringinfo;
1137 u16 max_flowrings;
1138 u16 max_submissionrings;
1139 u16 max_completionrings;
1141 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1142 sizeof(ringinfo));
1143 if (devinfo->shared.version >= 6) {
1144 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1145 max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1146 max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1147 } else {
1148 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1149 max_flowrings = max_submissionrings -
1150 BRCMF_NROF_H2D_COMMON_MSGRINGS;
1151 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1154 if (devinfo->dma_idx_sz != 0) {
1155 bufsz = (max_submissionrings + max_completionrings) *
1156 devinfo->dma_idx_sz * 2;
1157 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1158 &devinfo->idxbuf_dmahandle,
1159 GFP_KERNEL);
1160 if (!devinfo->idxbuf)
1161 devinfo->dma_idx_sz = 0;
1164 if (devinfo->dma_idx_sz == 0) {
1165 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1166 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1167 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1168 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1169 idx_offset = sizeof(u32);
1170 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1171 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1172 brcmf_dbg(PCIE, "Using TCM indices\n");
1173 } else {
1174 memset(devinfo->idxbuf, 0, bufsz);
1175 devinfo->idxbuf_sz = bufsz;
1176 idx_offset = devinfo->dma_idx_sz;
1177 devinfo->write_ptr = brcmf_pcie_write_idx;
1178 devinfo->read_ptr = brcmf_pcie_read_idx;
1180 h2d_w_idx_ptr = 0;
1181 address = (u64)devinfo->idxbuf_dmahandle;
1182 ringinfo.h2d_w_idx_hostaddr.low_addr =
1183 cpu_to_le32(address & 0xffffffff);
1184 ringinfo.h2d_w_idx_hostaddr.high_addr =
1185 cpu_to_le32(address >> 32);
1187 h2d_r_idx_ptr = h2d_w_idx_ptr +
1188 max_submissionrings * idx_offset;
1189 address += max_submissionrings * idx_offset;
1190 ringinfo.h2d_r_idx_hostaddr.low_addr =
1191 cpu_to_le32(address & 0xffffffff);
1192 ringinfo.h2d_r_idx_hostaddr.high_addr =
1193 cpu_to_le32(address >> 32);
1195 d2h_w_idx_ptr = h2d_r_idx_ptr +
1196 max_submissionrings * idx_offset;
1197 address += max_submissionrings * idx_offset;
1198 ringinfo.d2h_w_idx_hostaddr.low_addr =
1199 cpu_to_le32(address & 0xffffffff);
1200 ringinfo.d2h_w_idx_hostaddr.high_addr =
1201 cpu_to_le32(address >> 32);
1203 d2h_r_idx_ptr = d2h_w_idx_ptr +
1204 max_completionrings * idx_offset;
1205 address += max_completionrings * idx_offset;
1206 ringinfo.d2h_r_idx_hostaddr.low_addr =
1207 cpu_to_le32(address & 0xffffffff);
1208 ringinfo.d2h_r_idx_hostaddr.high_addr =
1209 cpu_to_le32(address >> 32);
1211 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1212 &ringinfo, sizeof(ringinfo));
1213 brcmf_dbg(PCIE, "Using host memory indices\n");
1216 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1218 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1219 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1220 if (!ring)
1221 goto fail;
1222 ring->w_idx_addr = h2d_w_idx_ptr;
1223 ring->r_idx_addr = h2d_r_idx_ptr;
1224 ring->id = i;
1225 devinfo->shared.commonrings[i] = ring;
1227 h2d_w_idx_ptr += idx_offset;
1228 h2d_r_idx_ptr += idx_offset;
1229 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1232 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1233 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1234 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1235 if (!ring)
1236 goto fail;
1237 ring->w_idx_addr = d2h_w_idx_ptr;
1238 ring->r_idx_addr = d2h_r_idx_ptr;
1239 ring->id = i;
1240 devinfo->shared.commonrings[i] = ring;
1242 d2h_w_idx_ptr += idx_offset;
1243 d2h_r_idx_ptr += idx_offset;
1244 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1247 devinfo->shared.max_flowrings = max_flowrings;
1248 devinfo->shared.max_submissionrings = max_submissionrings;
1249 devinfo->shared.max_completionrings = max_completionrings;
1250 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1251 if (!rings)
1252 goto fail;
1254 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1256 for (i = 0; i < max_flowrings; i++) {
1257 ring = &rings[i];
1258 ring->devinfo = devinfo;
1259 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1260 brcmf_commonring_register_cb(&ring->commonring,
1261 brcmf_pcie_ring_mb_ring_bell,
1262 brcmf_pcie_ring_mb_update_rptr,
1263 brcmf_pcie_ring_mb_update_wptr,
1264 brcmf_pcie_ring_mb_write_rptr,
1265 brcmf_pcie_ring_mb_write_wptr,
1266 ring);
1267 ring->w_idx_addr = h2d_w_idx_ptr;
1268 ring->r_idx_addr = h2d_r_idx_ptr;
1269 h2d_w_idx_ptr += idx_offset;
1270 h2d_r_idx_ptr += idx_offset;
1272 devinfo->shared.flowrings = rings;
1274 return 0;
1276 fail:
1277 brcmf_err(bus, "Allocating ring buffers failed\n");
1278 brcmf_pcie_release_ringbuffers(devinfo);
1279 return -ENOMEM;
1283 static void
1284 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1286 if (devinfo->shared.scratch)
1287 dma_free_coherent(&devinfo->pdev->dev,
1288 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1289 devinfo->shared.scratch,
1290 devinfo->shared.scratch_dmahandle);
1291 if (devinfo->shared.ringupd)
1292 dma_free_coherent(&devinfo->pdev->dev,
1293 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1294 devinfo->shared.ringupd,
1295 devinfo->shared.ringupd_dmahandle);
1298 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1300 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1301 u64 address;
1302 u32 addr;
1304 devinfo->shared.scratch =
1305 dma_alloc_coherent(&devinfo->pdev->dev,
1306 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1307 &devinfo->shared.scratch_dmahandle,
1308 GFP_KERNEL);
1309 if (!devinfo->shared.scratch)
1310 goto fail;
1312 addr = devinfo->shared.tcm_base_address +
1313 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1314 address = (u64)devinfo->shared.scratch_dmahandle;
1315 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1316 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1317 addr = devinfo->shared.tcm_base_address +
1318 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1319 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1321 devinfo->shared.ringupd =
1322 dma_alloc_coherent(&devinfo->pdev->dev,
1323 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1324 &devinfo->shared.ringupd_dmahandle,
1325 GFP_KERNEL);
1326 if (!devinfo->shared.ringupd)
1327 goto fail;
1329 addr = devinfo->shared.tcm_base_address +
1330 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1331 address = (u64)devinfo->shared.ringupd_dmahandle;
1332 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1333 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1334 addr = devinfo->shared.tcm_base_address +
1335 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1336 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1337 return 0;
1339 fail:
1340 brcmf_err(bus, "Allocating scratch buffers failed\n");
1341 brcmf_pcie_release_scratchbuffers(devinfo);
1342 return -ENOMEM;
1346 static void brcmf_pcie_down(struct device *dev)
1351 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1353 return 0;
1357 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1358 uint len)
1360 return 0;
1364 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1365 uint len)
1367 return 0;
1371 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1373 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1374 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1375 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1377 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1378 devinfo->wowl_enabled = enabled;
1382 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1384 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1385 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1386 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1388 return devinfo->ci->ramsize - devinfo->ci->srsize;
1392 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1394 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1395 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1396 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1398 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1399 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1400 return 0;
1403 static
1404 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1406 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1407 struct brcmf_fw_request *fwreq;
1408 struct brcmf_fw_name fwnames[] = {
1409 { ext, fw_name },
1412 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1413 brcmf_pcie_fwnames,
1414 ARRAY_SIZE(brcmf_pcie_fwnames),
1415 fwnames, ARRAY_SIZE(fwnames));
1416 if (!fwreq)
1417 return -ENOMEM;
1419 kfree(fwreq);
1420 return 0;
1423 static int brcmf_pcie_reset(struct device *dev)
1425 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1426 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1427 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1428 struct brcmf_fw_request *fwreq;
1429 int err;
1431 brcmf_pcie_intr_disable(devinfo);
1433 brcmf_pcie_bus_console_read(devinfo, true);
1435 brcmf_detach(dev);
1437 brcmf_pcie_release_irq(devinfo);
1438 brcmf_pcie_release_scratchbuffers(devinfo);
1439 brcmf_pcie_release_ringbuffers(devinfo);
1440 brcmf_pcie_reset_device(devinfo);
1442 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1443 if (!fwreq) {
1444 dev_err(dev, "Failed to prepare FW request\n");
1445 return -ENOMEM;
1448 err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
1449 if (err) {
1450 dev_err(dev, "Failed to prepare FW request\n");
1451 kfree(fwreq);
1454 return err;
1457 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1458 .txdata = brcmf_pcie_tx,
1459 .stop = brcmf_pcie_down,
1460 .txctl = brcmf_pcie_tx_ctlpkt,
1461 .rxctl = brcmf_pcie_rx_ctlpkt,
1462 .wowl_config = brcmf_pcie_wowl_config,
1463 .get_ramsize = brcmf_pcie_get_ramsize,
1464 .get_memdump = brcmf_pcie_get_memdump,
1465 .get_fwname = brcmf_pcie_get_fwname,
1466 .reset = brcmf_pcie_reset,
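/* Some firmware images carry the usable RAM size at BRCMF_RAMSIZE_OFFSET,
 * tagged with BRCMF_RAMSIZE_MAGIC; when found, it overrides the size
 * reported by the chip.
 */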
1470 static void
1471 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1472 u32 data_len)
1474 __le32 *field;
1475 u32 newsize;
1477 if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1478 return;
1480 field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1481 if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1482 return;
1483 field++;
1484 newsize = le32_to_cpup(field);
1486 brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1487 newsize);
1488 devinfo->ci->ramsize = newsize;
1492 static int
1493 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1494 u32 sharedram_addr)
1496 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1497 struct brcmf_pcie_shared_info *shared;
1498 u32 addr;
1500 shared = &devinfo->shared;
1501 shared->tcm_base_address = sharedram_addr;
1503 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1504 shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1505 brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1506 if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1507 (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1508 brcmf_err(bus, "Unsupported PCIE version %d\n",
1509 shared->version);
1510 return -EINVAL;
1513 /* check if the firmware supports DMA indices */
1514 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1515 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1516 devinfo->dma_idx_sz = sizeof(u16);
1517 else
1518 devinfo->dma_idx_sz = sizeof(u32);
1521 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1522 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1523 if (shared->max_rxbufpost == 0)
1524 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1526 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1527 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1529 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1530 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1532 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1533 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1535 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1536 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1538 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1539 shared->max_rxbufpost, shared->rx_dataoffset);
1541 brcmf_pcie_bus_console_init(devinfo);
1543 return 0;
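/* Halt the ARM core, copy the firmware (and NVRAM, if present) into
 * device RAM, restart the core and poll the last word of RAM until the
 * firmware replaces it with the shared area address, or time out.
 */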
1547 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1548 const struct firmware *fw, void *nvram,
1549 u32 nvram_len)
1551 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1552 u32 sharedram_addr;
1553 u32 sharedram_addr_written;
1554 u32 loop_counter;
1555 int err;
1556 u32 address;
1557 u32 resetintr;
1559 brcmf_dbg(PCIE, "Halt ARM.\n");
1560 err = brcmf_pcie_enter_download_state(devinfo);
1561 if (err)
1562 return err;
1564 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1565 brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
1566 (void *)fw->data, fw->size);
1568 resetintr = get_unaligned_le32(fw->data);
1569 release_firmware(fw);
1571 /* Reset the last 4 bytes of RAM; the firmware writes the shared
1572 * area address there, which identifies when FW is running
1574 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1576 if (nvram) {
1577 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1578 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1579 nvram_len;
1580 brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
1581 brcmf_fw_nvram_free(nvram);
1582 } else {
1583 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1584 devinfo->nvram_name);
1587 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1588 devinfo->ci->ramsize -
1590 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1591 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1592 if (err)
1593 return err;
1595 brcmf_dbg(PCIE, "Wait for FW init\n");
1596 sharedram_addr = sharedram_addr_written;
1597 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1598 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1599 msleep(50);
1600 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1601 devinfo->ci->ramsize -
1603 loop_counter--;
1605 if (sharedram_addr == sharedram_addr_written) {
1606 brcmf_err(bus, "FW failed to initialize\n");
1607 return -ENODEV;
1609 if (sharedram_addr < devinfo->ci->rambase ||
1610 sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1611 brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1612 sharedram_addr);
1613 return -ENODEV;
1615 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1617 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1621 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1623 struct pci_dev *pdev = devinfo->pdev;
1624 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1625 int err;
1626 phys_addr_t bar0_addr, bar1_addr;
1627 ulong bar1_size;
1629 err = pci_enable_device(pdev);
1630 if (err) {
1631 brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1632 return err;
1635 pci_set_master(pdev);
1637 /* Bar-0 mapped address */
1638 bar0_addr = pci_resource_start(pdev, 0);
1639 /* Bar-1 mapped address */
1640 bar1_addr = pci_resource_start(pdev, 2);
1641 /* read Bar-1 mapped memory range */
1642 bar1_size = pci_resource_len(pdev, 2);
1643 if ((bar1_size == 0) || (bar1_addr == 0)) {
1644 brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1645 bar1_size, (unsigned long long)bar1_addr);
1646 return -EINVAL;
1649 devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1650 devinfo->tcm = ioremap(bar1_addr, bar1_size);
1652 if (!devinfo->regs || !devinfo->tcm) {
1653 brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1654 devinfo->tcm);
1655 return -EINVAL;
1657 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1658 devinfo->regs, (unsigned long long)bar0_addr);
1659 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1660 devinfo->tcm, (unsigned long long)bar1_addr,
1661 (unsigned int)bar1_size);
1663 return 0;
1667 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1669 if (devinfo->tcm)
1670 iounmap(devinfo->tcm);
1671 if (devinfo->regs)
1672 iounmap(devinfo->regs);
1674 pci_disable_device(devinfo->pdev);
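/* Split a backplane address into a BAR0 window base, programmed through
 * PCI config space, and an offset within the 4 KiB register window.
 */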
1678 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1680 u32 ret_addr;
1682 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1683 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1684 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1686 return ret_addr;
1690 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1692 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1694 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1695 return brcmf_pcie_read_reg32(devinfo, addr);
1699 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1701 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1703 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1704 brcmf_pcie_write_reg32(devinfo, addr, value);
1708 static int brcmf_pcie_buscoreprep(void *ctx)
1710 return brcmf_pcie_get_resource(ctx);
1714 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1716 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1717 u32 val;
1719 devinfo->ci = chip;
1720 brcmf_pcie_reset_device(devinfo);
1722 val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1723 if (val != 0xffffffff)
1724 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1725 val);
1727 return 0;
1731 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1732 u32 rstvec)
1734 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1736 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1740 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
1741 .prepare = brcmf_pcie_buscoreprep,
1742 .reset = brcmf_pcie_buscore_reset,
1743 .activate = brcmf_pcie_buscore_activate,
1744 .read32 = brcmf_pcie_buscore_read32,
1745 .write32 = brcmf_pcie_buscore_write32,
1748 #define BRCMF_PCIE_FW_CODE 0
1749 #define BRCMF_PCIE_FW_NVRAM 1
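/* Firmware load completion callback: attach the PCIe core, download
 * firmware and NVRAM, set up rings, scratch buffers and the IRQ, then
 * attach the upper bus layers.
 */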
1751 static void brcmf_pcie_setup(struct device *dev, int ret,
1752 struct brcmf_fw_request *fwreq)
1754 const struct firmware *fw;
1755 void *nvram;
1756 struct brcmf_bus *bus;
1757 struct brcmf_pciedev *pcie_bus_dev;
1758 struct brcmf_pciedev_info *devinfo;
1759 struct brcmf_commonring **flowrings;
1760 u32 i, nvram_len;
1762 /* check firmware loading result */
1763 if (ret)
1764 goto fail;
1766 bus = dev_get_drvdata(dev);
1767 pcie_bus_dev = bus->bus_priv.pcie;
1768 devinfo = pcie_bus_dev->devinfo;
1769 brcmf_pcie_attach(devinfo);
1771 fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
1772 nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
1773 nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
1774 kfree(fwreq);
1776 ret = brcmf_chip_get_raminfo(devinfo->ci);
1777 if (ret) {
1778 brcmf_err(bus, "Failed to get RAM info\n");
1779 goto fail;
1782 /* Some firmware images have the size of the device memory defined
1783 * inside the firmware. This is because part of the device memory is
1784 * shared and the division is determined by FW. Parse the firmware
1785 * and adjust the chip memory size now.
1787 brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);
1789 ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
1790 if (ret)
1791 goto fail;
1793 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
1795 ret = brcmf_pcie_init_ringbuffers(devinfo);
1796 if (ret)
1797 goto fail;
1799 ret = brcmf_pcie_init_scratchbuffers(devinfo);
1800 if (ret)
1801 goto fail;
1803 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
1804 ret = brcmf_pcie_request_irq(devinfo);
1805 if (ret)
1806 goto fail;
1808 /* hook the commonrings in the bus structure. */
1809 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
1810 bus->msgbuf->commonrings[i] =
1811 &devinfo->shared.commonrings[i]->commonring;
1813 flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
1814 GFP_KERNEL);
1815 if (!flowrings)
1816 goto fail;
1818 for (i = 0; i < devinfo->shared.max_flowrings; i++)
1819 flowrings[i] = &devinfo->shared.flowrings[i].commonring;
1820 bus->msgbuf->flowrings = flowrings;
1822 bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
1823 bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
1824 bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;
1826 init_waitqueue_head(&devinfo->mbdata_resp_wait);
1828 brcmf_pcie_intr_enable(devinfo);
1829 brcmf_pcie_hostready(devinfo);
1831 ret = brcmf_attach(&devinfo->pdev->dev);
1832 if (ret)
1833 goto fail;
1835 brcmf_pcie_bus_console_read(devinfo, false);
1837 return;
1839 fail:
1840 device_release_driver(dev);
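/* Build the firmware request for this chip: a mandatory firmware binary
 * and an optional NVRAM file, tagged with board type, PCI domain and bus
 * number (presumably so a board- or slot-specific NVRAM can be picked).
 */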
1843 static struct brcmf_fw_request *
1844 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
1846 struct brcmf_fw_request *fwreq;
1847 struct brcmf_fw_name fwnames[] = {
1848 { ".bin", devinfo->fw_name },
1849 { ".txt", devinfo->nvram_name },
1852 fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
1853 brcmf_pcie_fwnames,
1854 ARRAY_SIZE(brcmf_pcie_fwnames),
1855 fwnames, ARRAY_SIZE(fwnames));
1856 if (!fwreq)
1857 return NULL;
1859 fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
1860 fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
1861 fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
1862 fwreq->board_type = devinfo->settings->board_type;
1863 /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
1864 fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
1865 fwreq->bus_nr = devinfo->pdev->bus->number;
1867 return fwreq;
1870 static int
1871 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1873 int ret;
1874 struct brcmf_fw_request *fwreq;
1875 struct brcmf_pciedev_info *devinfo;
1876 struct brcmf_pciedev *pcie_bus_dev;
1877 struct brcmf_bus *bus;
1879 brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
1881 ret = -ENOMEM;
1882 devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
1883 if (devinfo == NULL)
1884 return ret;
1886 devinfo->pdev = pdev;
1887 pcie_bus_dev = NULL;
1888 devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
1889 if (IS_ERR(devinfo->ci)) {
1890 ret = PTR_ERR(devinfo->ci);
1891 devinfo->ci = NULL;
1892 goto fail;
1895 pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
1896 if (pcie_bus_dev == NULL) {
1897 ret = -ENOMEM;
1898 goto fail;
1901 devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
1902 BRCMF_BUSTYPE_PCIE,
1903 devinfo->ci->chip,
1904 devinfo->ci->chiprev);
1905 if (!devinfo->settings) {
1906 ret = -ENOMEM;
1907 goto fail;
1910 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
1911 if (!bus) {
1912 ret = -ENOMEM;
1913 goto fail;
1915 bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
1916 if (!bus->msgbuf) {
1917 ret = -ENOMEM;
1918 kfree(bus);
1919 goto fail;
1922 /* hook it all together. */
1923 pcie_bus_dev->devinfo = devinfo;
1924 pcie_bus_dev->bus = bus;
1925 bus->dev = &pdev->dev;
1926 bus->bus_priv.pcie = pcie_bus_dev;
1927 bus->ops = &brcmf_pcie_bus_ops;
1928 bus->proto_type = BRCMF_PROTO_MSGBUF;
1929 bus->chip = devinfo->coreid;
1930 bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
1931 dev_set_drvdata(&pdev->dev, bus);
1933 ret = brcmf_alloc(&devinfo->pdev->dev, devinfo->settings);
1934 if (ret)
1935 goto fail_bus;
1937 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1938 if (!fwreq) {
1939 ret = -ENOMEM;
1940 goto fail_brcmf;
1943 ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
1944 if (ret < 0) {
1945 kfree(fwreq);
1946 goto fail_brcmf;
1948 return 0;
1950 fail_brcmf:
1951 brcmf_free(&devinfo->pdev->dev);
1952 fail_bus:
1953 kfree(bus->msgbuf);
1954 kfree(bus);
1955 fail:
1956 brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
1957 brcmf_pcie_release_resource(devinfo);
1958 if (devinfo->ci)
1959 brcmf_chip_detach(devinfo->ci);
1960 if (devinfo->settings)
1961 brcmf_release_module_param(devinfo->settings);
1962 kfree(pcie_bus_dev);
1963 kfree(devinfo);
1964 return ret;
1968 static void
1969 brcmf_pcie_remove(struct pci_dev *pdev)
1971 struct brcmf_pciedev_info *devinfo;
1972 struct brcmf_bus *bus;
1974 brcmf_dbg(PCIE, "Enter\n");
1976 bus = dev_get_drvdata(&pdev->dev);
1977 if (bus == NULL)
1978 return;
1980 devinfo = bus->bus_priv.pcie->devinfo;
1982 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
1983 if (devinfo->ci)
1984 brcmf_pcie_intr_disable(devinfo);
1986 brcmf_detach(&pdev->dev);
1987 brcmf_free(&pdev->dev);
1989 kfree(bus->bus_priv.pcie);
1990 kfree(bus->msgbuf->flowrings);
1991 kfree(bus->msgbuf);
1992 kfree(bus);
1994 brcmf_pcie_release_irq(devinfo);
1995 brcmf_pcie_release_scratchbuffers(devinfo);
1996 brcmf_pcie_release_ringbuffers(devinfo);
1997 brcmf_pcie_reset_device(devinfo);
1998 brcmf_pcie_release_resource(devinfo);
2000 if (devinfo->ci)
2001 brcmf_chip_detach(devinfo->ci);
2002 if (devinfo->settings)
2003 brcmf_release_module_param(devinfo->settings);
2005 kfree(devinfo);
2006 dev_set_drvdata(&pdev->dev, NULL);
2010 #ifdef CONFIG_PM
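/* Suspend path: inform the firmware via BRCMF_H2D_HOST_D3_INFORM and
 * wait for the D3 ack to arrive through the device-to-host mailbox
 * before declaring the bus down.
 */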
2013 static int brcmf_pcie_pm_enter_D3(struct device *dev)
2015 struct brcmf_pciedev_info *devinfo;
2016 struct brcmf_bus *bus;
2018 brcmf_dbg(PCIE, "Enter\n");
2020 bus = dev_get_drvdata(dev);
2021 devinfo = bus->bus_priv.pcie->devinfo;
2023 brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);
2025 devinfo->mbdata_completed = false;
2026 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);
2028 wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
2029 BRCMF_PCIE_MBDATA_TIMEOUT);
2030 if (!devinfo->mbdata_completed) {
2031 brcmf_err(bus, "Timeout on response for entering D3 substate\n");
2032 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2033 return -EIO;
2036 devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
2038 return 0;
2042 static int brcmf_pcie_pm_leave_D3(struct device *dev)
2044 struct brcmf_pciedev_info *devinfo;
2045 struct brcmf_bus *bus;
2046 struct pci_dev *pdev;
2047 int err;
2049 brcmf_dbg(PCIE, "Enter\n");
2051 bus = dev_get_drvdata(dev);
2052 devinfo = bus->bus_priv.pcie->devinfo;
2053 brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);
2055 /* Check if device is still up and running, if so we are ready */
2056 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
2057 brcmf_dbg(PCIE, "Try to wakeup device....\n");
2058 if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
2059 goto cleanup;
2060 brcmf_dbg(PCIE, "Hot resume, continue....\n");
2061 devinfo->state = BRCMFMAC_PCIE_STATE_UP;
2062 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
2063 brcmf_bus_change_state(bus, BRCMF_BUS_UP);
2064 brcmf_pcie_intr_enable(devinfo);
2065 brcmf_pcie_hostready(devinfo);
2066 return 0;
2069 cleanup:
2070 brcmf_chip_detach(devinfo->ci);
2071 devinfo->ci = NULL;
2072 pdev = devinfo->pdev;
2073 brcmf_pcie_remove(pdev);
2075 err = brcmf_pcie_probe(pdev, NULL);
2076 if (err)
2077 brcmf_err(bus, "probe after resume failed, err=%d\n", err);
2079 return err;
2083 static const struct dev_pm_ops brcmf_pciedrvr_pm = {
2084 .suspend = brcmf_pcie_pm_enter_D3,
2085 .resume = brcmf_pcie_pm_leave_D3,
2086 .freeze = brcmf_pcie_pm_enter_D3,
2087 .restore = brcmf_pcie_pm_leave_D3,
2091 #endif /* CONFIG_PM */
2094 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2095 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2096 #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
2097 BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
2098 subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
2100 static const struct pci_device_id brcmf_pcie_devid_table[] = {
2101 BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
2102 BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
2103 BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
2104 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
2105 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
2106 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
2107 BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
2108 BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
2109 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
2110 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
2111 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
2112 BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
2113 BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
2114 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
2115 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
2116 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
2117 BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
2118 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
2119 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
2120 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
2121 BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
2122 { /* end: all zeroes */ }
2126 MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);
2129 static struct pci_driver brcmf_pciedrvr = {
2130 .node = {},
2131 .name = KBUILD_MODNAME,
2132 .id_table = brcmf_pcie_devid_table,
2133 .probe = brcmf_pcie_probe,
2134 .remove = brcmf_pcie_remove,
2135 #ifdef CONFIG_PM
2136 .driver.pm = &brcmf_pciedrvr_pm,
2137 #endif
2138 .driver.coredump = brcmf_dev_coredump,
2142 void brcmf_pcie_register(void)
2144 int err;
2146 brcmf_dbg(PCIE, "Enter\n");
2147 err = pci_register_driver(&brcmf_pciedrvr);
2148 if (err)
2149 brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
2150 err);
2154 void brcmf_pcie_exit(void)
2156 brcmf_dbg(PCIE, "Enter\n");
2157 pci_unregister_driver(&brcmf_pciedrvr);