/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/pci_ids.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/bcma/bcma.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <linux/moduleparam.h>
#include <asm/unaligned.h>

#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>

#include "sdio_host.h"
#include "sdio_chip.h"
#define DCMD_RESP_TIMEOUT	2000	/* in milliseconds */

#define BRCMF_TRAP_INFO_SIZE	80

#define CBUF_LEN		(128)

/* Device console log buffer state */
#define CONSOLE_BUFFER_MAX	2024
struct rte_log_le {
	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
	/* ... */
	char *_buf_compat;	/* Redundant pointer for backward compat. */
};

struct rte_console {
	/* Virtual UART
	 * When there is no UART (e.g. Quickturn), the host should write a
	 * complete input line directly into cbuf and then write the length
	 * into vcons_in. This may also be used when there is a real UART
	 * (at risk of conflicting with the real UART). vcons_out is
	 * currently unused.
	 */
	/* ... */

	/* Output (logging) buffer
	 * Console output is written to a ring buffer log_buf at index log_idx.
	 * The host may read the output when it sees log_idx advance.
	 * Output will be lost if the output wraps around faster than the host
	 * polls.
	 */
	struct rte_log_le log_le;

	/* Console input line buffer
	 * Characters are read one at a time into cbuf until <CR> is received,
	 * then the buffer is processed as a command line.
	 * Also used for virtual UART.
	 */
	/* ... */
};
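/* Console polling sketch (host side), assuming the layout described above:
 * the host remembers the last ring index it consumed (struct
 * brcmf_console::last further down); whenever log_idx has advanced past it,
 * the bytes between the two indices (modulo the buffer size) are read out of
 * log_buf and printed, and 'last' is updated. Output that wraps the ring
 * before the next poll is lost, as noted above.
 */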
#include <chipcommon.h>

#include "tracepoint.h"
#define TXQLEN		2048	/* bulk tx queue length */
#define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
#define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */

#define TXRETRIES	2	/* # of retries for tx frames */

#define BRCMF_RXBOUND	50	/* Default for max rx frames in
				   one scheduling */

#define BRCMF_TXBOUND	20	/* Default for max tx frames in
				   one scheduling */

#define BRCMF_DEFAULT_TXGLOM_SIZE	32  /* max tx frames in glom chain */

#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */

#define MEMBLOCK	2048	/* Block size used for downloading
				   of dongle image */
#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
					   biggest possible glom */

#define BRCMF_FIRSTREAD	(1 << 6)
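/* Note: BRCMF_FIRSTREAD is (1 << 6) = 64 bytes. The receive path below first
 * reads this much into bus->rxhdr to pick up the SDPCM header, then issues a
 * second transfer for the remainder of the frame (see
 * brcmf_sdio_read_control() and brcmf_sdio_readframes()).
 */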
/* SBSDIO_DEVICE_CTL */

/* 1: device will assert busy signal when receiving CMD53 */
#define SBSDIO_DEVCTL_SETBUSY		0x01
/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
/* 1: mask all interrupts to host except the chipActive (rev 8) */
#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
/* 1: isolate internal sdio signals, put external pads in tri-state; requires
 * sdio bus power cycle to clear (rev 9) */
#define SBSDIO_DEVCTL_PADS_ISO		0x08
/* Force SD->SB reset mapping (rev 11) */
#define SBSDIO_DEVCTL_SB_RST_CTL	0x30
/*   Determined by CoreControl bit */
#define SBSDIO_DEVCTL_RST_CORECTL	0x00
/*   Force backplane reset */
#define SBSDIO_DEVCTL_RST_BPRESET	0x10
/*   Force no backplane reset */
#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20

/* direct(mapped) cis space */

/* MAPPED common CIS address */
#define SBSDIO_CIS_BASE_COMMON		0x1000
/* maximum bytes in one CIS */
#define SBSDIO_CIS_SIZE_LIMIT		0x200
/* cis offset addr is < 17 bits */
#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF

/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN	6
#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
#define	I_PC		(1 << 10)	/* descriptor error */
#define	I_PD		(1 << 11)	/* data error */
#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
#define	I_RI		(1 << 16)	/* Receive Interrupt */
#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
#define I_XMTDATA_AVAIL	(1 << 23)	/* bits in fifo */
#define	I_XI		(1 << 24)	/* Transmit Interrupt */
#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
#define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
#define I_DMA		(I_RI | I_XI | I_ERRORS)
#define CC_CISRDY		(1 << 0)	/* CIS Ready */
#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
#define CC_XMTDATAAVAIL_MODE	(1 << 4)
#define CC_XMTDATAAVAIL_CTRL	(1 << 5)

#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
#define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
#define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
/*
 * Software allocation of To SB Mailbox resources
 */

/* tosbmailbox bits corresponding to intstatus bits */
#define SMB_NAK		(1 << 0)	/* Frame NAK */
#define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
#define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
#define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */

/* tosbmailboxdata */
#define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */

/*
 * Software allocation of To Host Mailbox resources
 */

#define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
#define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
#define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
#define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */

/* tohostmailboxdata */
#define HMB_DATA_NAKHANDLED	1	/* retransmit NAK'd frame */
#define HMB_DATA_DEVREADY	2	/* talk to host after enable */
#define HMB_DATA_FC		4	/* per prio flowcontrol update flag */
#define HMB_DATA_FWREADY	8	/* fw ready for protocol activity */

#define HMB_DATA_FCDATA_MASK	0xff000000
#define HMB_DATA_FCDATA_SHIFT	24

#define HMB_DATA_VERSION_MASK	0x00ff0000
#define HMB_DATA_VERSION_SHIFT	16
/*
 * Software-defined protocol header
 */

/* Current protocol version */
#define SDPCM_PROT_VERSION	4

/*
 * Shared structure between dongle and the host.
 * The structure contains pointers to trap or assert information.
 */
#define SDPCM_SHARED_VERSION		0x0003
#define SDPCM_SHARED_VERSION_MASK	0x00FF
#define SDPCM_SHARED_ASSERT_BUILT	0x0100
#define SDPCM_SHARED_ASSERT		0x0200
#define SDPCM_SHARED_TRAP		0x0400

/* Space for header read, limit for data packets */
#define MAX_HDR_READ	(1 << 6)
#define MAX_RX_DATASZ	2048

/* Bump up limit on waiting for HT to account for first startup;
 * if the image is doing a CRC calculation before programming the PMU
 * for HT availability, it could take a couple hundred ms more, so
 * max out at 1 second (1000000 us).
 */
#undef PMU_MAX_TRANSITION_DLY
#define PMU_MAX_TRANSITION_DLY	1000000
/* Value for ChipClockCSR during initial setup */
#define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
				 SBSDIO_ALP_AVAIL_REQ)

/* Flags for SDH calls */
#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)

#define BRCMF_IDLE_IMMEDIATE	(-1)	/* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
					 * when idle
					 */
#define BRCMF_IDLE_INTERVAL	1

#define KSO_WAIT_US		50
#define MAX_KSO_ATTEMPTS	(PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
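/* Worked out: with KSO_WAIT_US = 50 us and PMU_MAX_TRANSITION_DLY =
 * 1000000 us, MAX_KSO_ATTEMPTS evaluates to 1000000 / 50 = 20000 read-back
 * retries, i.e. brcmf_sdio_kso_control() below is willing to re-check
 * SBSDIO_FUNC1_SLEEPCSR for on the order of a second before giving up.
 */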
/*
 * Conversion of 802.1D priority to precedence level
 */
static uint prio2prec(u32 prio)
{
	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
	       (prio^2) : prio;
}
/* Device console log buffer state */
struct brcmf_console {
	uint count;		/* Poll interval msec counter */
	uint log_addr;		/* Log struct address (fixed) */
	struct rte_log_le log_le;	/* Log struct (host copy) */
	uint bufsize;		/* Size of log buffer */
	u8 *buf;		/* Log buffer (host copy) */
	uint last;		/* Last buffer read index */
};
struct brcmf_trap_info {
	/* ... */
	__le32 r9;	/* sb/v6 */
	__le32 r10;	/* sl/v7 */
	__le32 r11;	/* fp/v8 */
	/* ... */
};
struct sdpcm_shared {
	/* ... */
	u32 assert_file_addr;
	/* ... */
	u32 console_addr;	/* Address of struct rte_console */
	/* ... */
};
struct sdpcm_shared_le {
	/* ... */
	__le32 assert_exp_addr;
	__le32 assert_file_addr;
	/* ... */
	__le32 console_addr;	/* Address of struct rte_console */
	__le32 msgtrace_addr;
	/* ... */
};
/* dongle SDIO bus specific header info */
struct brcmf_sdio_hdrinfo {
	/* ... */
};
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
struct brcmf_sdio {
	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
	struct brcmf_chip *ci;	/* Chip info struct */

	u32 ramsize;		/* Size of RAM in SOCRAM (bytes) */

	u32 hostintmask;	/* Copy of Host Interrupt Mask */
	atomic_t intstatus;	/* Intstatus bits (events) pending */
	atomic_t fcstate;	/* State of dongle flow-control */

	uint blocksize;		/* Block size of SDIO transfers */
	uint roundup;		/* Max roundup limit */

	struct pktq txq;	/* Queue length used for flow-control */
	u8 flowcontrol;		/* per prio flow control bitmask */
	u8 tx_seq;		/* Transmit sequence number (next) */
	u8 tx_max;		/* Maximum transmit sequence allowed */

	u8 *hdrbuf;		/* buffer for handling rx frame */
	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
	u8 rx_seq;		/* Receive sequence number (expected) */
	struct brcmf_sdio_hdrinfo cur_read;
				/* info of current read frame */
	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
	bool rxpending;		/* Data frame pending in dongle */

	uint rxbound;		/* Rx frames to read before resched */
	uint txbound;		/* Tx frames to send before resched */

	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
	struct sk_buff_head glom; /* Packet list for glommed superframe */
	uint glomerr;		/* Glom packet read errors */

	u8 *rxbuf;		/* Buffer for receiving control packets */
	uint rxblen;		/* Allocated length of rxbuf */
	u8 *rxctl;		/* Aligned pointer into rxbuf */
	u8 *rxctl_orig;		/* pointer for freeing rxctl */
	uint rxlen;		/* Length of valid data in buffer */
	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */

	u8 sdpcm_ver;		/* Bus protocol reported by dongle */

	bool intr;		/* Use interrupts */
	bool poll;		/* Use polling */
	atomic_t ipend;		/* Device interrupt is pending */
	uint spurious;		/* Count of spurious interrupts */
	uint pollrate;		/* Ticks between device polls */
	uint polltick;		/* Tick counter */

	uint console_interval;
	struct brcmf_console console;	/* Console output polling support */
	uint console_addr;	/* Console address from shared struct */

	uint clkstate;		/* State of sd and backplane clock(s) */
	bool activity;		/* Activity flag for clock down */
	s32 idletime;		/* Control for activity timeout */
	s32 idlecount;		/* Activity timeout counter */
	s32 idleclock;		/* How to set bus driver when idle */
	bool rxflow_mode;	/* Rx flow control mode */
	bool rxflow;		/* Is rx flow control on */
	bool alp_only;		/* Don't use HT clock (ALP only) */

	bool ctrl_frame_stat;

	wait_queue_head_t ctrl_wait;
	wait_queue_head_t dcmd_resp_wait;

	struct timer_list timer;
	struct completion watchdog_wait;
	struct task_struct *watchdog_tsk;

	struct workqueue_struct *brcmf_wq;
	struct work_struct datawork;

	bool txoff;		/* Transmit flow-controlled */
	struct brcmf_sdio_count sdcnt;
	bool sr_enabled;	/* SaveRestore enabled */
	bool sleeping;		/* SDIO bus sleeping */

	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
	bool txglom;		/* host tx glomming enable flag */
	struct sk_buff *txglom_sgpad;	/* scatter-gather padding buffer */
	u16 head_align;		/* buffer pointer alignment */
	u16 sgentry_align;	/* scatter-gather buffer alignment */
};
#define CLK_PENDING	2

static int qcount[NUMPRIO];

#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */

#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)

/* Retry count for register access failures */
static const uint retry_limit = 2;

/* Limit on rounding up frames */
static const uint max_roundup = 512;

static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
enum brcmf_sdio_frmtype {
	BRCMF_SDIO_FT_NORMAL,
	BRCMF_SDIO_FT_SUPER,
	BRCMF_SDIO_FT_SUB,
};
#define BCM43143_FIRMWARE_NAME		"brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME		"brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME	"brcm/brcmfmac43241b0-sdio.bin"
#define BCM43241B0_NVRAM_NAME		"brcm/brcmfmac43241b0-sdio.txt"
#define BCM43241B4_FIRMWARE_NAME	"brcm/brcmfmac43241b4-sdio.bin"
#define BCM43241B4_NVRAM_NAME		"brcm/brcmfmac43241b4-sdio.txt"
#define BCM4329_FIRMWARE_NAME		"brcm/brcmfmac4329-sdio.bin"
#define BCM4329_NVRAM_NAME		"brcm/brcmfmac4329-sdio.txt"
#define BCM4330_FIRMWARE_NAME		"brcm/brcmfmac4330-sdio.bin"
#define BCM4330_NVRAM_NAME		"brcm/brcmfmac4330-sdio.txt"
#define BCM4334_FIRMWARE_NAME		"brcm/brcmfmac4334-sdio.bin"
#define BCM4334_NVRAM_NAME		"brcm/brcmfmac4334-sdio.txt"
#define BCM4335_FIRMWARE_NAME		"brcm/brcmfmac4335-sdio.bin"
#define BCM4335_NVRAM_NAME		"brcm/brcmfmac4335-sdio.txt"
#define BCM43362_FIRMWARE_NAME		"brcm/brcmfmac43362-sdio.bin"
#define BCM43362_NVRAM_NAME		"brcm/brcmfmac43362-sdio.txt"
#define BCM4339_FIRMWARE_NAME		"brcm/brcmfmac4339-sdio.bin"
#define BCM4339_NVRAM_NAME		"brcm/brcmfmac4339-sdio.txt"
MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
struct brcmf_firmware_names {
	u32 chipid;
	u32 revmsk;
	const char *bin;
	const char *nv;
};

enum brcmf_firmware_type {
	BRCMF_FIRMWARE_BIN,
	BRCMF_FIRMWARE_NVRAM,
};

#define BRCMF_FIRMWARE_NVRAM(name) \
	name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
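/* For illustration: BRCMF_FIRMWARE_NVRAM(BCM4329) pastes the chip name onto
 * the two suffixes above and expands to
 *	BCM4329_FIRMWARE_NAME, BCM4329_NVRAM_NAME
 * i.e. "brcm/brcmfmac4329-sdio.bin", "brcm/brcmfmac4329-sdio.txt",
 * which is exactly the pair of strings each brcmf_fwname_data[] entry needs.
 */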
static const struct brcmf_firmware_names brcmf_fwname_data[] = {
	{ BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
	{ BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
	{ BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
	{ BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
	{ BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
	{ BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
	{ BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
	{ BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
	{ BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
};
static const struct firmware *brcmf_sdio_get_fw(struct brcmf_sdio *bus,
						enum brcmf_firmware_type type)
{
	const struct firmware *fw;
	const char *name;
	int err, i;

	for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
		if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
		    brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
			switch (type) {
			case BRCMF_FIRMWARE_BIN:
				name = brcmf_fwname_data[i].bin;
				break;
			case BRCMF_FIRMWARE_NVRAM:
				name = brcmf_fwname_data[i].nv;
				break;
			default:
				brcmf_err("invalid firmware type (%d)\n",
					  type);
				return NULL;
			}
			goto found;
		}
	}
	brcmf_err("Unknown chipid %d [%d]\n",
		  bus->ci->chip, bus->ci->chiprev);
	return NULL;

found:
	err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
	if ((err) || (!fw)) {
		brcmf_err("fail to request firmware %s (%d)\n", name, err);
		return NULL;
	}

	return fw;
}
static void pkt_align(struct sk_buff *p, int len, int align)
{
	uint datalign;

	datalign = (unsigned long)(p->data);
	datalign = roundup(datalign, (align)) - datalign;
	if (datalign)
		skb_pull(p, datalign);
	__skb_trim(p, len);
}
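/* Alignment example: if p->data sits at an address ending in 0x6 and align
 * is 4, datalign works out to roundup(...0x6, 4) - ...0x6 = 2, so skb_pull()
 * advances the data pointer by two bytes before the buffer is trimmed to len.
 */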
/* To check if there's window offered */
static bool data_ok(struct brcmf_sdio *bus)
{
	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}
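/* Worked example of the modulo-256 window check: with tx_max = 0x05 and
 * tx_seq = 0xfe, (u8)(0x05 - 0xfe) = 0x07, so seven more frames may be sent.
 * When tx_seq catches up to tx_max the difference is 0 (window closed), and a
 * set bit 7 means tx_seq has run ahead of the advertised window, which is
 * also treated as "no credit".
 */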
/*
 * Reads a register in the SDIO hardware block. This block occupies a series
 * of addresses on the 32-bit backplane bus.
 */
static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	int ret;

	*regvar = brcmf_sdiod_regrl(bus->sdiodev,
				    bus->ci->c_inf[idx].base + offset, &ret);

	return ret;
}
static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	int ret;

	brcmf_sdiod_regwl(bus->sdiodev,
			  bus->ci->c_inf[idx].base + reg_offset,
			  regval, &ret);

	return ret;
}
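/* Typical usage of these accessors, as seen later in this file: the to-host
 * mailbox data is fetched and acknowledged with
 *
 *	r_sdreg32(bus, &hmb_data,
 *		  offsetof(struct sdpcmd_regs, tohostmailboxdata));
 *	w_sdreg32(bus, SMB_INT_ACK,
 *		  offsetof(struct sdpcmd_regs, tosbmailbox));
 *
 * i.e. the offset argument is always an offsetof() into struct sdpcmd_regs,
 * relocated onto the SDIO core's backplane base address.
 */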
648 brcmf_sdio_kso_control(struct brcmf_sdio
*bus
, bool on
)
650 u8 wr_val
= 0, rd_val
, cmp_val
, bmask
;
654 brcmf_dbg(TRACE
, "Enter\n");
656 wr_val
= (on
<< SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT
);
657 /* 1st KSO write goes to AOS wake up core if device is asleep */
658 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
,
661 brcmf_err("SDIO_AOS KSO write error: %d\n", err
);
666 /* device WAKEUP through KSO:
667 * write bit 0 & read back until
668 * both bits 0 (kso bit) & 1 (dev on status) are set
670 cmp_val
= SBSDIO_FUNC1_SLEEPCSR_KSO_MASK
|
671 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK
;
673 usleep_range(2000, 3000);
675 /* Put device to sleep, turn off KSO */
677 /* only check for bit0, bit1(dev on status) may not
678 * get cleared right away
680 bmask
= SBSDIO_FUNC1_SLEEPCSR_KSO_MASK
;
684 /* reliable KSO bit set/clr:
685 * the sdiod sleep write access is synced to PMU 32khz clk
686 * just one write attempt may fail,
687 * read it back until it matches written value
689 rd_val
= brcmf_sdiod_regrb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
,
691 if (((rd_val
& bmask
) == cmp_val
) && !err
)
693 brcmf_dbg(SDIO
, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
694 try_cnt
, MAX_KSO_ATTEMPTS
, err
);
696 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
,
698 } while (try_cnt
++ < MAX_KSO_ATTEMPTS
);
#define PKT_AVAILABLE()		(intstatus & I_HMB_FRAME_IND)

#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
707 /* Turn backplane clock on or off */
708 static int brcmf_sdio_htclk(struct brcmf_sdio
*bus
, bool on
, bool pendok
)
711 u8 clkctl
, clkreq
, devctl
;
712 unsigned long timeout
;
714 brcmf_dbg(SDIO
, "Enter\n");
718 if (bus
->sr_enabled
) {
719 bus
->clkstate
= (on
? CLK_AVAIL
: CLK_SDONLY
);
724 /* Request HT Avail */
726 bus
->alp_only
? SBSDIO_ALP_AVAIL_REQ
: SBSDIO_HT_AVAIL_REQ
;
728 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
731 brcmf_err("HT Avail request error: %d\n", err
);
735 /* Check current status */
736 clkctl
= brcmf_sdiod_regrb(bus
->sdiodev
,
737 SBSDIO_FUNC1_CHIPCLKCSR
, &err
);
739 brcmf_err("HT Avail read error: %d\n", err
);
743 /* Go to pending and await interrupt if appropriate */
744 if (!SBSDIO_CLKAV(clkctl
, bus
->alp_only
) && pendok
) {
745 /* Allow only clock-available interrupt */
746 devctl
= brcmf_sdiod_regrb(bus
->sdiodev
,
747 SBSDIO_DEVICE_CTL
, &err
);
749 brcmf_err("Devctl error setting CA: %d\n",
754 devctl
|= SBSDIO_DEVCTL_CA_INT_ONLY
;
755 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
757 brcmf_dbg(SDIO
, "CLKCTL: set PENDING\n");
758 bus
->clkstate
= CLK_PENDING
;
761 } else if (bus
->clkstate
== CLK_PENDING
) {
762 /* Cancel CA-only interrupt filter */
763 devctl
= brcmf_sdiod_regrb(bus
->sdiodev
,
764 SBSDIO_DEVICE_CTL
, &err
);
765 devctl
&= ~SBSDIO_DEVCTL_CA_INT_ONLY
;
766 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
770 /* Otherwise, wait here (polling) for HT Avail */
772 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY
/1000);
773 while (!SBSDIO_CLKAV(clkctl
, bus
->alp_only
)) {
774 clkctl
= brcmf_sdiod_regrb(bus
->sdiodev
,
775 SBSDIO_FUNC1_CHIPCLKCSR
,
777 if (time_after(jiffies
, timeout
))
780 usleep_range(5000, 10000);
783 brcmf_err("HT Avail request error: %d\n", err
);
786 if (!SBSDIO_CLKAV(clkctl
, bus
->alp_only
)) {
787 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
788 PMU_MAX_TRANSITION_DLY
, clkctl
);
792 /* Mark clock available */
793 bus
->clkstate
= CLK_AVAIL
;
794 brcmf_dbg(SDIO
, "CLKCTL: turned ON\n");
797 if (!bus
->alp_only
) {
798 if (SBSDIO_ALPONLY(clkctl
))
799 brcmf_err("HT Clock should be on\n");
801 #endif /* defined (DEBUG) */
803 bus
->activity
= true;
807 if (bus
->clkstate
== CLK_PENDING
) {
808 /* Cancel CA-only interrupt filter */
809 devctl
= brcmf_sdiod_regrb(bus
->sdiodev
,
810 SBSDIO_DEVICE_CTL
, &err
);
811 devctl
&= ~SBSDIO_DEVCTL_CA_INT_ONLY
;
812 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
816 bus
->clkstate
= CLK_SDONLY
;
817 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
819 brcmf_dbg(SDIO
, "CLKCTL: turned OFF\n");
821 brcmf_err("Failed access turning clock off: %d\n",
/* Change idle/active SD state */
static int brcmf_sdio_sdclk(struct brcmf_sdio *bus, bool on)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (on)
		bus->clkstate = CLK_SDONLY;
	else
		bus->clkstate = CLK_NONE;

	return 0;
}
842 /* Transition SD and backplane clock readiness */
843 static int brcmf_sdio_clkctl(struct brcmf_sdio
*bus
, uint target
, bool pendok
)
846 uint oldstate
= bus
->clkstate
;
849 brcmf_dbg(SDIO
, "Enter\n");
851 /* Early exit if we're already there */
852 if (bus
->clkstate
== target
) {
853 if (target
== CLK_AVAIL
) {
854 brcmf_sdio_wd_timer(bus
, BRCMF_WD_POLL_MS
);
855 bus
->activity
= true;
862 /* Make sure SD clock is available */
863 if (bus
->clkstate
== CLK_NONE
)
864 brcmf_sdio_sdclk(bus
, true);
865 /* Now request HT Avail on the backplane */
866 brcmf_sdio_htclk(bus
, true, pendok
);
867 brcmf_sdio_wd_timer(bus
, BRCMF_WD_POLL_MS
);
868 bus
->activity
= true;
872 /* Remove HT request, or bring up SD clock */
873 if (bus
->clkstate
== CLK_NONE
)
874 brcmf_sdio_sdclk(bus
, true);
875 else if (bus
->clkstate
== CLK_AVAIL
)
876 brcmf_sdio_htclk(bus
, false, false);
878 brcmf_err("request for %d -> %d\n",
879 bus
->clkstate
, target
);
880 brcmf_sdio_wd_timer(bus
, BRCMF_WD_POLL_MS
);
884 /* Make sure to remove HT request */
885 if (bus
->clkstate
== CLK_AVAIL
)
886 brcmf_sdio_htclk(bus
, false, false);
887 /* Now remove the SD clock */
888 brcmf_sdio_sdclk(bus
, false);
889 brcmf_sdio_wd_timer(bus
, 0);
893 brcmf_dbg(SDIO
, "%d -> %d\n", oldstate
, bus
->clkstate
);
900 brcmf_sdio_bus_sleep(struct brcmf_sdio
*bus
, bool sleep
, bool pendok
)
903 brcmf_dbg(TRACE
, "Enter\n");
904 brcmf_dbg(SDIO
, "request %s currently %s\n",
905 (sleep
? "SLEEP" : "WAKE"),
906 (bus
->sleeping
? "SLEEP" : "WAKE"));
908 /* If SR is enabled control bus state with KSO */
909 if (bus
->sr_enabled
) {
910 /* Done if we're already in the requested state */
911 if (sleep
== bus
->sleeping
)
916 /* Don't sleep if something is pending */
917 if (atomic_read(&bus
->intstatus
) ||
918 atomic_read(&bus
->ipend
) > 0 ||
919 (!atomic_read(&bus
->fcstate
) &&
920 brcmu_pktq_mlen(&bus
->txq
, ~bus
->flowcontrol
) &&
923 err
= brcmf_sdio_kso_control(bus
, false);
924 /* disable watchdog */
926 brcmf_sdio_wd_timer(bus
, 0);
929 err
= brcmf_sdio_kso_control(bus
, true);
933 bus
->sleeping
= sleep
;
934 brcmf_dbg(SDIO
, "new state %s\n",
935 (sleep
? "SLEEP" : "WAKE"));
937 brcmf_err("error while changing bus sleep state %d\n",
946 if (!bus
->sr_enabled
)
947 brcmf_sdio_clkctl(bus
, CLK_NONE
, pendok
);
949 brcmf_sdio_clkctl(bus
, CLK_AVAIL
, pendok
);
956 static u32
brcmf_sdio_hostmail(struct brcmf_sdio
*bus
)
963 brcmf_dbg(SDIO
, "Enter\n");
965 /* Read mailbox data and ack that we did so */
966 ret
= r_sdreg32(bus
, &hmb_data
,
967 offsetof(struct sdpcmd_regs
, tohostmailboxdata
));
970 w_sdreg32(bus
, SMB_INT_ACK
,
971 offsetof(struct sdpcmd_regs
, tosbmailbox
));
972 bus
->sdcnt
.f1regdata
+= 2;
974 /* Dongle recomposed rx frames, accept them again */
975 if (hmb_data
& HMB_DATA_NAKHANDLED
) {
976 brcmf_dbg(SDIO
, "Dongle reports NAK handled, expect rtx of %d\n",
979 brcmf_err("unexpected NAKHANDLED!\n");
982 intstatus
|= I_HMB_FRAME_IND
;
986 * DEVREADY does not occur with gSPI.
988 if (hmb_data
& (HMB_DATA_DEVREADY
| HMB_DATA_FWREADY
)) {
990 (hmb_data
& HMB_DATA_VERSION_MASK
) >>
991 HMB_DATA_VERSION_SHIFT
;
992 if (bus
->sdpcm_ver
!= SDPCM_PROT_VERSION
)
993 brcmf_err("Version mismatch, dongle reports %d, "
995 bus
->sdpcm_ver
, SDPCM_PROT_VERSION
);
997 brcmf_dbg(SDIO
, "Dongle ready, protocol version %d\n",
 * Flow Control has been moved into the RX headers and this out-of-band
 * method isn't used any more; it is handled here only to remain
 * backward compatible with older dongles.
1006 if (hmb_data
& HMB_DATA_FC
) {
1007 fcbits
= (hmb_data
& HMB_DATA_FCDATA_MASK
) >>
1008 HMB_DATA_FCDATA_SHIFT
;
1010 if (fcbits
& ~bus
->flowcontrol
)
1011 bus
->sdcnt
.fc_xoff
++;
1013 if (bus
->flowcontrol
& ~fcbits
)
1014 bus
->sdcnt
.fc_xon
++;
1016 bus
->sdcnt
.fc_rcvd
++;
1017 bus
->flowcontrol
= fcbits
;
1020 /* Shouldn't be any others */
1021 if (hmb_data
& ~(HMB_DATA_DEVREADY
|
1022 HMB_DATA_NAKHANDLED
|
1025 HMB_DATA_FCDATA_MASK
| HMB_DATA_VERSION_MASK
))
1026 brcmf_err("Unknown mailbox data content: 0x%02x\n",
1032 static void brcmf_sdio_rxfail(struct brcmf_sdio
*bus
, bool abort
, bool rtx
)
1039 brcmf_err("%sterminate frame%s\n",
1040 abort
? "abort command, " : "",
1041 rtx
? ", send NAK" : "");
1044 brcmf_sdiod_abort(bus
->sdiodev
, SDIO_FUNC_2
);
1046 brcmf_sdiod_regwb(bus
->sdiodev
, SBSDIO_FUNC1_FRAMECTRL
,
1048 bus
->sdcnt
.f1regdata
++;
1050 /* Wait until the packet has been flushed (device/FIFO stable) */
1051 for (lastrbc
= retries
= 0xffff; retries
> 0; retries
--) {
1052 hi
= brcmf_sdiod_regrb(bus
->sdiodev
,
1053 SBSDIO_FUNC1_RFRAMEBCHI
, &err
);
1054 lo
= brcmf_sdiod_regrb(bus
->sdiodev
,
1055 SBSDIO_FUNC1_RFRAMEBCLO
, &err
);
1056 bus
->sdcnt
.f1regdata
+= 2;
1058 if ((hi
== 0) && (lo
== 0))
1061 if ((hi
> (lastrbc
>> 8)) && (lo
> (lastrbc
& 0x00ff))) {
1062 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1063 lastrbc
, (hi
<< 8) + lo
);
1065 lastrbc
= (hi
<< 8) + lo
;
1069 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc
);
1071 brcmf_dbg(SDIO
, "flush took %d iterations\n", 0xffff - retries
);
1075 err
= w_sdreg32(bus
, SMB_NAK
,
1076 offsetof(struct sdpcmd_regs
, tosbmailbox
));
1078 bus
->sdcnt
.f1regdata
++;
1083 /* Clear partial in any case */
1084 bus
->cur_read
.len
= 0;
/* return total length of buffer chain */
static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
{
	struct sk_buff *p;
	uint total;

	total = 0;
	skb_queue_walk(&bus->glom, p)
		total += p->len;
	return total;
}
static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
{
	struct sk_buff *cur, *next;

	skb_queue_walk_safe(&bus->glom, cur, next) {
		skb_unlink(cur, &bus->glom);
		brcmu_pkt_buf_free_skb(cur);
	}
}
/**
 * brcmfmac sdio bus specific header
 * This is the lowest layer header wrapped on the packets transmitted between
 * host and WiFi dongle which contains information needed for SDIO core and
 * firmware
 *
 * It consists of 3 parts: hardware header, hardware extension header and
 * software header
 * hardware header (frame tag) - 4 bytes
 * Byte 0~1: Frame length
 * Byte 2~3: Checksum, bit-wise inverse of frame length
 * hardware extension header - 8 bytes
 * Tx glom mode only, N/A for Rx or normal Tx
 * Byte 0~1: Packet length excluding hw frame tag
 * Byte 2: Reserved
 * Byte 3: Frame flags, bit 0: last frame indication
 * Byte 4~5: Reserved
 * Byte 6~7: Tail padding length
 * software header - 8 bytes
 * Byte 0: Rx/Tx sequence number
 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
 * Byte 2: Length of next data frame, reserved for Tx
 * Byte 3: Data offset
 * Byte 4: Flow control bits, reserved for Tx
 * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet
 * Byte 6~7: Reserved
 */
#define SDPCM_HWHDR_LEN			4
#define SDPCM_HWEXT_LEN			8
#define SDPCM_SWHDR_LEN			8
#define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
/* software header */
#define SDPCM_SEQ_MASK			0x000000ff
#define SDPCM_SEQ_WRAP			256
#define SDPCM_CHANNEL_MASK		0x00000f00
#define SDPCM_CHANNEL_SHIFT		8
#define SDPCM_CONTROL_CHANNEL		0	/* Control */
#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
#define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
#define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
#define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
#define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
#define SDPCM_NEXTLEN_MASK		0x00ff0000
#define SDPCM_NEXTLEN_SHIFT		16
#define SDPCM_DOFFSET_MASK		0xff000000
#define SDPCM_DOFFSET_SHIFT		24
#define SDPCM_FCMASK_MASK		0x000000ff
#define SDPCM_WINDOW_MASK		0x0000ff00
#define SDPCM_WINDOW_SHIFT		8
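/* Decoding example (illustrative values): if the first software-header word
 * read from the wire is 0x0c020137, then
 *	sequence number = 0x0c020137 & SDPCM_SEQ_MASK            = 0x37
 *	channel         = (0x0c020137 & SDPCM_CHANNEL_MASK) >> 8 = 1 (event)
 *	next frame len  = (0x0c020137 & SDPCM_NEXTLEN_MASK) >> 16 = 0x02
 *			  (in units of 16 bytes, cf. len_nxtfrm << 4 below)
 *	data offset     = (0x0c020137 & SDPCM_DOFFSET_MASK) >> 24 = 0x0c bytes
 * which is exactly what brcmf_sdio_hdparse() and brcmf_sdio_getdatoffset()
 * below compute.
 */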
static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
{
	u32 hdrvalue;
	hdrvalue = *(u32 *)swheader;
	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}
1166 static int brcmf_sdio_hdparse(struct brcmf_sdio
*bus
, u8
*header
,
1167 struct brcmf_sdio_hdrinfo
*rd
,
1168 enum brcmf_sdio_frmtype type
)
1171 u8 rx_seq
, fc
, tx_seq_max
;
1174 trace_brcmf_sdpcm_hdr(SDPCM_RX
, header
);
1177 len
= get_unaligned_le16(header
);
1178 checksum
= get_unaligned_le16(header
+ sizeof(u16
));
1179 /* All zero means no more to read */
1180 if (!(len
| checksum
)) {
1181 bus
->rxpending
= false;
1184 if ((u16
)(~(len
^ checksum
))) {
1185 brcmf_err("HW header checksum error\n");
1186 bus
->sdcnt
.rx_badhdr
++;
1187 brcmf_sdio_rxfail(bus
, false, false);
1190 if (len
< SDPCM_HDRLEN
) {
1191 brcmf_err("HW header length error\n");
1194 if (type
== BRCMF_SDIO_FT_SUPER
&&
1195 (roundup(len
, bus
->blocksize
) != rd
->len
)) {
1196 brcmf_err("HW superframe header length error\n");
1199 if (type
== BRCMF_SDIO_FT_SUB
&& len
> rd
->len
) {
1200 brcmf_err("HW subframe header length error\n");
1205 /* software header */
1206 header
+= SDPCM_HWHDR_LEN
;
1207 swheader
= le32_to_cpu(*(__le32
*)header
);
1208 if (type
== BRCMF_SDIO_FT_SUPER
&& SDPCM_GLOMDESC(header
)) {
1209 brcmf_err("Glom descriptor found in superframe head\n");
1213 rx_seq
= (u8
)(swheader
& SDPCM_SEQ_MASK
);
1214 rd
->channel
= (swheader
& SDPCM_CHANNEL_MASK
) >> SDPCM_CHANNEL_SHIFT
;
1215 if (len
> MAX_RX_DATASZ
&& rd
->channel
!= SDPCM_CONTROL_CHANNEL
&&
1216 type
!= BRCMF_SDIO_FT_SUPER
) {
1217 brcmf_err("HW header length too long\n");
1218 bus
->sdcnt
.rx_toolong
++;
1219 brcmf_sdio_rxfail(bus
, false, false);
1223 if (type
== BRCMF_SDIO_FT_SUPER
&& rd
->channel
!= SDPCM_GLOM_CHANNEL
) {
1224 brcmf_err("Wrong channel for superframe\n");
1228 if (type
== BRCMF_SDIO_FT_SUB
&& rd
->channel
!= SDPCM_DATA_CHANNEL
&&
1229 rd
->channel
!= SDPCM_EVENT_CHANNEL
) {
1230 brcmf_err("Wrong channel for subframe\n");
1234 rd
->dat_offset
= brcmf_sdio_getdatoffset(header
);
1235 if (rd
->dat_offset
< SDPCM_HDRLEN
|| rd
->dat_offset
> rd
->len
) {
1236 brcmf_err("seq %d: bad data offset\n", rx_seq
);
1237 bus
->sdcnt
.rx_badhdr
++;
1238 brcmf_sdio_rxfail(bus
, false, false);
1242 if (rd
->seq_num
!= rx_seq
) {
1243 brcmf_err("seq %d: sequence number error, expect %d\n",
1244 rx_seq
, rd
->seq_num
);
1245 bus
->sdcnt
.rx_badseq
++;
1246 rd
->seq_num
= rx_seq
;
1248 /* no need to check the reset for subframe */
1249 if (type
== BRCMF_SDIO_FT_SUB
)
1251 rd
->len_nxtfrm
= (swheader
& SDPCM_NEXTLEN_MASK
) >> SDPCM_NEXTLEN_SHIFT
;
1252 if (rd
->len_nxtfrm
<< 4 > MAX_RX_DATASZ
) {
/* only warn for non-glom packets */
1254 if (rd
->channel
!= SDPCM_GLOM_CHANNEL
)
1255 brcmf_err("seq %d: next length error\n", rx_seq
);
1258 swheader
= le32_to_cpu(*(__le32
*)(header
+ 4));
1259 fc
= swheader
& SDPCM_FCMASK_MASK
;
1260 if (bus
->flowcontrol
!= fc
) {
1261 if (~bus
->flowcontrol
& fc
)
1262 bus
->sdcnt
.fc_xoff
++;
1263 if (bus
->flowcontrol
& ~fc
)
1264 bus
->sdcnt
.fc_xon
++;
1265 bus
->sdcnt
.fc_rcvd
++;
1266 bus
->flowcontrol
= fc
;
1268 tx_seq_max
= (swheader
& SDPCM_WINDOW_MASK
) >> SDPCM_WINDOW_SHIFT
;
1269 if ((u8
)(tx_seq_max
- bus
->tx_seq
) > 0x40) {
1270 brcmf_err("seq %d: max tx seq number error\n", rx_seq
);
1271 tx_seq_max
= bus
->tx_seq
+ 2;
1273 bus
->tx_max
= tx_seq_max
;
static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
{
	*(__le16 *)header = cpu_to_le16(frm_length);
	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
}
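/* Frame-tag example: for frm_length = 0x0040 the hardware header becomes
 * 40 00 bf ff on the wire (little-endian length followed by its bit-wise
 * inverse 0xffbf). The receive-side check in hdparse, (u16)(~(len ^ checksum)),
 * is zero exactly when the two halves are inverses of each other.
 */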
1284 static void brcmf_sdio_hdpack(struct brcmf_sdio
*bus
, u8
*header
,
1285 struct brcmf_sdio_hdrinfo
*hd_info
)
1290 brcmf_sdio_update_hwhdr(header
, hd_info
->len
);
1291 hdr_offset
= SDPCM_HWHDR_LEN
;
1294 hdrval
= (hd_info
->len
- hdr_offset
) | (hd_info
->lastfrm
<< 24);
1295 *((__le32
*)(header
+ hdr_offset
)) = cpu_to_le32(hdrval
);
1296 hdrval
= (u16
)hd_info
->tail_pad
<< 16;
1297 *(((__le32
*)(header
+ hdr_offset
)) + 1) = cpu_to_le32(hdrval
);
1298 hdr_offset
+= SDPCM_HWEXT_LEN
;
1301 hdrval
= hd_info
->seq_num
;
1302 hdrval
|= (hd_info
->channel
<< SDPCM_CHANNEL_SHIFT
) &
1304 hdrval
|= (hd_info
->dat_offset
<< SDPCM_DOFFSET_SHIFT
) &
1306 *((__le32
*)(header
+ hdr_offset
)) = cpu_to_le32(hdrval
);
1307 *(((__le32
*)(header
+ hdr_offset
)) + 1) = 0;
1308 trace_brcmf_sdpcm_hdr(SDPCM_TX
+ !!(bus
->txglom
), header
);
1311 static u8
brcmf_sdio_rxglom(struct brcmf_sdio
*bus
, u8 rxseq
)
1316 struct sk_buff
*pfirst
, *pnext
;
1321 struct brcmf_sdio_hdrinfo rd_new
;
1323 /* If packets, issue read(s) and send up packet chain */
1324 /* Return sequence numbers consumed? */
1326 brcmf_dbg(SDIO
, "start: glomd %p glom %p\n",
1327 bus
->glomd
, skb_peek(&bus
->glom
));
1329 /* If there's a descriptor, generate the packet chain */
1331 pfirst
= pnext
= NULL
;
1332 dlen
= (u16
) (bus
->glomd
->len
);
1333 dptr
= bus
->glomd
->data
;
1334 if (!dlen
|| (dlen
& 1)) {
1335 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1340 for (totlen
= num
= 0; dlen
; num
++) {
1341 /* Get (and move past) next length */
1342 sublen
= get_unaligned_le16(dptr
);
1343 dlen
-= sizeof(u16
);
1344 dptr
+= sizeof(u16
);
1345 if ((sublen
< SDPCM_HDRLEN
) ||
1346 ((num
== 0) && (sublen
< (2 * SDPCM_HDRLEN
)))) {
1347 brcmf_err("descriptor len %d bad: %d\n",
1352 if (sublen
% bus
->sgentry_align
) {
1353 brcmf_err("sublen %d not multiple of %d\n",
1354 sublen
, bus
->sgentry_align
);
1358 /* For last frame, adjust read len so total
1359 is a block multiple */
1362 (roundup(totlen
, bus
->blocksize
) - totlen
);
1363 totlen
= roundup(totlen
, bus
->blocksize
);
1366 /* Allocate/chain packet for next subframe */
1367 pnext
= brcmu_pkt_buf_get_skb(sublen
+ bus
->sgentry_align
);
1368 if (pnext
== NULL
) {
1369 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1373 skb_queue_tail(&bus
->glom
, pnext
);
1375 /* Adhere to start alignment requirements */
1376 pkt_align(pnext
, sublen
, bus
->sgentry_align
);
1379 /* If all allocations succeeded, save packet chain
1382 brcmf_dbg(GLOM
, "allocated %d-byte packet chain for %d subframes\n",
1384 if (BRCMF_GLOM_ON() && bus
->cur_read
.len
&&
1385 totlen
!= bus
->cur_read
.len
) {
1386 brcmf_dbg(GLOM
, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1387 bus
->cur_read
.len
, totlen
, rxseq
);
1389 pfirst
= pnext
= NULL
;
1391 brcmf_sdio_free_glom(bus
);
1395 /* Done with descriptor packet */
1396 brcmu_pkt_buf_free_skb(bus
->glomd
);
1398 bus
->cur_read
.len
= 0;
1401 /* Ok -- either we just generated a packet chain,
1402 or had one from before */
1403 if (!skb_queue_empty(&bus
->glom
)) {
1404 if (BRCMF_GLOM_ON()) {
1405 brcmf_dbg(GLOM
, "try superframe read, packet chain:\n");
1406 skb_queue_walk(&bus
->glom
, pnext
) {
1407 brcmf_dbg(GLOM
, " %p: %p len 0x%04x (%d)\n",
1408 pnext
, (u8
*) (pnext
->data
),
1409 pnext
->len
, pnext
->len
);
1413 pfirst
= skb_peek(&bus
->glom
);
1414 dlen
= (u16
) brcmf_sdio_glom_len(bus
);
/* Do an SDIO read for the superframe. Configurable iovar to
 * read directly into the chained packet, or allocate a large
 * packet and copy into the chain.
 */
1420 sdio_claim_host(bus
->sdiodev
->func
[1]);
1421 errcode
= brcmf_sdiod_recv_chain(bus
->sdiodev
,
1423 sdio_release_host(bus
->sdiodev
->func
[1]);
1424 bus
->sdcnt
.f2rxdata
++;
1426 /* On failure, kill the superframe, allow a couple retries */
1428 brcmf_err("glom read of %d bytes failed: %d\n",
1431 sdio_claim_host(bus
->sdiodev
->func
[1]);
1432 if (bus
->glomerr
++ < 3) {
1433 brcmf_sdio_rxfail(bus
, true, true);
1436 brcmf_sdio_rxfail(bus
, true, false);
1437 bus
->sdcnt
.rxglomfail
++;
1438 brcmf_sdio_free_glom(bus
);
1440 sdio_release_host(bus
->sdiodev
->func
[1]);
1444 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1445 pfirst
->data
, min_t(int, pfirst
->len
, 48),
1448 rd_new
.seq_num
= rxseq
;
1450 sdio_claim_host(bus
->sdiodev
->func
[1]);
1451 errcode
= brcmf_sdio_hdparse(bus
, pfirst
->data
, &rd_new
,
1452 BRCMF_SDIO_FT_SUPER
);
1453 sdio_release_host(bus
->sdiodev
->func
[1]);
1454 bus
->cur_read
.len
= rd_new
.len_nxtfrm
<< 4;
1456 /* Remove superframe header, remember offset */
1457 skb_pull(pfirst
, rd_new
.dat_offset
);
1458 sfdoff
= rd_new
.dat_offset
;
1461 /* Validate all the subframe headers */
1462 skb_queue_walk(&bus
->glom
, pnext
) {
1463 /* leave when invalid subframe is found */
1467 rd_new
.len
= pnext
->len
;
1468 rd_new
.seq_num
= rxseq
++;
1469 sdio_claim_host(bus
->sdiodev
->func
[1]);
1470 errcode
= brcmf_sdio_hdparse(bus
, pnext
->data
, &rd_new
,
1472 sdio_release_host(bus
->sdiodev
->func
[1]);
1473 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1474 pnext
->data
, 32, "subframe:\n");
1480 /* Terminate frame on error, request
1482 sdio_claim_host(bus
->sdiodev
->func
[1]);
1483 if (bus
->glomerr
++ < 3) {
1484 /* Restore superframe header space */
1485 skb_push(pfirst
, sfdoff
);
1486 brcmf_sdio_rxfail(bus
, true, true);
1489 brcmf_sdio_rxfail(bus
, true, false);
1490 bus
->sdcnt
.rxglomfail
++;
1491 brcmf_sdio_free_glom(bus
);
1493 sdio_release_host(bus
->sdiodev
->func
[1]);
1494 bus
->cur_read
.len
= 0;
1498 /* Basic SD framing looks ok - process each packet (header) */
1500 skb_queue_walk_safe(&bus
->glom
, pfirst
, pnext
) {
1501 dptr
= (u8
*) (pfirst
->data
);
1502 sublen
= get_unaligned_le16(dptr
);
1503 doff
= brcmf_sdio_getdatoffset(&dptr
[SDPCM_HWHDR_LEN
]);
1505 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1507 "Rx Subframe Data:\n");
1509 __skb_trim(pfirst
, sublen
);
1510 skb_pull(pfirst
, doff
);
1512 if (pfirst
->len
== 0) {
1513 skb_unlink(pfirst
, &bus
->glom
);
1514 brcmu_pkt_buf_free_skb(pfirst
);
1518 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1520 min_t(int, pfirst
->len
, 32),
1521 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1522 bus
->glom
.qlen
, pfirst
, pfirst
->data
,
1523 pfirst
->len
, pfirst
->next
,
1525 skb_unlink(pfirst
, &bus
->glom
);
1526 brcmf_rx_frame(bus
->sdiodev
->dev
, pfirst
);
1527 bus
->sdcnt
.rxglompkts
++;
1530 bus
->sdcnt
.rxglomframes
++;
static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
				     bool *pending)
{
	DECLARE_WAITQUEUE(wait, current);
	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);

	/* Wait until control frame is available */
	add_wait_queue(&bus->dcmd_resp_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!(*condition) && (!signal_pending(current) && timeout))
		timeout = schedule_timeout(timeout);

	if (signal_pending(current))
		*pending = true;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bus->dcmd_resp_wait, &wait);

	return timeout;
}

static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
{
	if (waitqueue_active(&bus->dcmd_resp_wait))
		wake_up_interruptible(&bus->dcmd_resp_wait);

	return 0;
}
1565 brcmf_sdio_read_control(struct brcmf_sdio
*bus
, u8
*hdr
, uint len
, uint doff
)
1568 u8
*buf
= NULL
, *rbuf
;
1571 brcmf_dbg(TRACE
, "Enter\n");
1574 buf
= vzalloc(bus
->rxblen
);
1579 pad
= ((unsigned long)rbuf
% bus
->head_align
);
1581 rbuf
+= (bus
->head_align
- pad
);
1583 /* Copy the already-read portion over */
1584 memcpy(buf
, hdr
, BRCMF_FIRSTREAD
);
1585 if (len
<= BRCMF_FIRSTREAD
)
1588 /* Raise rdlen to next SDIO block to avoid tail command */
1589 rdlen
= len
- BRCMF_FIRSTREAD
;
1590 if (bus
->roundup
&& bus
->blocksize
&& (rdlen
> bus
->blocksize
)) {
1591 pad
= bus
->blocksize
- (rdlen
% bus
->blocksize
);
1592 if ((pad
<= bus
->roundup
) && (pad
< bus
->blocksize
) &&
1593 ((len
+ pad
) < bus
->sdiodev
->bus_if
->maxctl
))
1595 } else if (rdlen
% bus
->head_align
) {
1596 rdlen
+= bus
->head_align
- (rdlen
% bus
->head_align
);
1599 /* Drop if the read is too big or it exceeds our maximum */
1600 if ((rdlen
+ BRCMF_FIRSTREAD
) > bus
->sdiodev
->bus_if
->maxctl
) {
1601 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1602 rdlen
, bus
->sdiodev
->bus_if
->maxctl
);
1603 brcmf_sdio_rxfail(bus
, false, false);
1607 if ((len
- doff
) > bus
->sdiodev
->bus_if
->maxctl
) {
1608 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1609 len
, len
- doff
, bus
->sdiodev
->bus_if
->maxctl
);
1610 bus
->sdcnt
.rx_toolong
++;
1611 brcmf_sdio_rxfail(bus
, false, false);
/* Read the remainder of the frame body */
1616 sdret
= brcmf_sdiod_recv_buf(bus
->sdiodev
, rbuf
, rdlen
);
1617 bus
->sdcnt
.f2rxdata
++;
1619 /* Control frame failures need retransmission */
1621 brcmf_err("read %d control bytes failed: %d\n",
1623 bus
->sdcnt
.rxc_errors
++;
1624 brcmf_sdio_rxfail(bus
, true, true);
1627 memcpy(buf
+ BRCMF_FIRSTREAD
, rbuf
, rdlen
);
1631 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1632 buf
, len
, "RxCtrl:\n");
1634 /* Point to valid data and indicate its length */
1635 spin_lock_bh(&bus
->rxctl_lock
);
1637 brcmf_err("last control frame is being processed.\n");
1638 spin_unlock_bh(&bus
->rxctl_lock
);
1642 bus
->rxctl
= buf
+ doff
;
1643 bus
->rxctl_orig
= buf
;
1644 bus
->rxlen
= len
- doff
;
1645 spin_unlock_bh(&bus
->rxctl_lock
);
1648 /* Awake any waiters */
1649 brcmf_sdio_dcmd_resp_wake(bus
);
/* Pad read to blocksize for efficiency */
static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
		*pad = bus->blocksize - (*rdlen % bus->blocksize);
		if (*pad <= bus->roundup && *pad < bus->blocksize &&
		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
			*rdlen += *pad;
	} else if (*rdlen % bus->head_align) {
		*rdlen += bus->head_align - (*rdlen % bus->head_align);
	}
}
1665 static uint
brcmf_sdio_readframes(struct brcmf_sdio
*bus
, uint maxframes
)
1667 struct sk_buff
*pkt
; /* Packet for event or data frames */
1668 u16 pad
; /* Number of pad bytes to read */
1669 uint rxleft
= 0; /* Remaining number of frames allowed */
1670 int ret
; /* Return code from calls */
1671 uint rxcount
= 0; /* Total frames read */
1672 struct brcmf_sdio_hdrinfo
*rd
= &bus
->cur_read
, rd_new
;
1675 brcmf_dbg(TRACE
, "Enter\n");
1677 /* Not finished unless we encounter no more frames indication */
1678 bus
->rxpending
= true;
1680 for (rd
->seq_num
= bus
->rx_seq
, rxleft
= maxframes
;
1681 !bus
->rxskip
&& rxleft
&& brcmf_bus_ready(bus
->sdiodev
->bus_if
);
1682 rd
->seq_num
++, rxleft
--) {
1684 /* Handle glomming separately */
1685 if (bus
->glomd
|| !skb_queue_empty(&bus
->glom
)) {
1687 brcmf_dbg(GLOM
, "calling rxglom: glomd %p, glom %p\n",
1688 bus
->glomd
, skb_peek(&bus
->glom
));
1689 cnt
= brcmf_sdio_rxglom(bus
, rd
->seq_num
);
1690 brcmf_dbg(GLOM
, "rxglom returned %d\n", cnt
);
1691 rd
->seq_num
+= cnt
- 1;
1692 rxleft
= (rxleft
> cnt
) ? (rxleft
- cnt
) : 1;
1696 rd
->len_left
= rd
->len
;
/* read header first for unknown frame length */
1698 sdio_claim_host(bus
->sdiodev
->func
[1]);
1700 ret
= brcmf_sdiod_recv_buf(bus
->sdiodev
,
1701 bus
->rxhdr
, BRCMF_FIRSTREAD
);
1702 bus
->sdcnt
.f2rxhdrs
++;
1704 brcmf_err("RXHEADER FAILED: %d\n",
1706 bus
->sdcnt
.rx_hdrfail
++;
1707 brcmf_sdio_rxfail(bus
, true, true);
1708 sdio_release_host(bus
->sdiodev
->func
[1]);
1712 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1713 bus
->rxhdr
, SDPCM_HDRLEN
,
1716 if (brcmf_sdio_hdparse(bus
, bus
->rxhdr
, rd
,
1717 BRCMF_SDIO_FT_NORMAL
)) {
1718 sdio_release_host(bus
->sdiodev
->func
[1]);
1719 if (!bus
->rxpending
)
1725 if (rd
->channel
== SDPCM_CONTROL_CHANNEL
) {
1726 brcmf_sdio_read_control(bus
, bus
->rxhdr
,
1729 /* prepare the descriptor for the next read */
1730 rd
->len
= rd
->len_nxtfrm
<< 4;
1732 /* treat all packet as event if we don't know */
1733 rd
->channel
= SDPCM_EVENT_CHANNEL
;
1734 sdio_release_host(bus
->sdiodev
->func
[1]);
1737 rd
->len_left
= rd
->len
> BRCMF_FIRSTREAD
?
1738 rd
->len
- BRCMF_FIRSTREAD
: 0;
1739 head_read
= BRCMF_FIRSTREAD
;
1742 brcmf_sdio_pad(bus
, &pad
, &rd
->len_left
);
1744 pkt
= brcmu_pkt_buf_get_skb(rd
->len_left
+ head_read
+
1747 /* Give up on data, request rtx of events */
1748 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1749 brcmf_sdio_rxfail(bus
, false,
1750 RETRYCHAN(rd
->channel
));
1751 sdio_release_host(bus
->sdiodev
->func
[1]);
1754 skb_pull(pkt
, head_read
);
1755 pkt_align(pkt
, rd
->len_left
, bus
->head_align
);
1757 ret
= brcmf_sdiod_recv_pkt(bus
->sdiodev
, pkt
);
1758 bus
->sdcnt
.f2rxdata
++;
1759 sdio_release_host(bus
->sdiodev
->func
[1]);
1762 brcmf_err("read %d bytes from channel %d failed: %d\n",
1763 rd
->len
, rd
->channel
, ret
);
1764 brcmu_pkt_buf_free_skb(pkt
);
1765 sdio_claim_host(bus
->sdiodev
->func
[1]);
1766 brcmf_sdio_rxfail(bus
, true,
1767 RETRYCHAN(rd
->channel
));
1768 sdio_release_host(bus
->sdiodev
->func
[1]);
1773 skb_push(pkt
, head_read
);
1774 memcpy(pkt
->data
, bus
->rxhdr
, head_read
);
1777 memcpy(bus
->rxhdr
, pkt
->data
, SDPCM_HDRLEN
);
1778 rd_new
.seq_num
= rd
->seq_num
;
1779 sdio_claim_host(bus
->sdiodev
->func
[1]);
1780 if (brcmf_sdio_hdparse(bus
, bus
->rxhdr
, &rd_new
,
1781 BRCMF_SDIO_FT_NORMAL
)) {
1783 brcmu_pkt_buf_free_skb(pkt
);
1785 bus
->sdcnt
.rx_readahead_cnt
++;
1786 if (rd
->len
!= roundup(rd_new
.len
, 16)) {
1787 brcmf_err("frame length mismatch:read %d, should be %d\n",
1789 roundup(rd_new
.len
, 16) >> 4);
1791 brcmf_sdio_rxfail(bus
, true, true);
1792 sdio_release_host(bus
->sdiodev
->func
[1]);
1793 brcmu_pkt_buf_free_skb(pkt
);
1796 sdio_release_host(bus
->sdiodev
->func
[1]);
1797 rd
->len_nxtfrm
= rd_new
.len_nxtfrm
;
1798 rd
->channel
= rd_new
.channel
;
1799 rd
->dat_offset
= rd_new
.dat_offset
;
1801 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1804 bus
->rxhdr
, SDPCM_HDRLEN
,
1807 if (rd_new
.channel
== SDPCM_CONTROL_CHANNEL
) {
1808 brcmf_err("readahead on control packet %d?\n",
1810 /* Force retry w/normal header read */
1812 sdio_claim_host(bus
->sdiodev
->func
[1]);
1813 brcmf_sdio_rxfail(bus
, false, true);
1814 sdio_release_host(bus
->sdiodev
->func
[1]);
1815 brcmu_pkt_buf_free_skb(pkt
);
1820 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1821 pkt
->data
, rd
->len
, "Rx Data:\n");
1823 /* Save superframe descriptor and allocate packet frame */
1824 if (rd
->channel
== SDPCM_GLOM_CHANNEL
) {
1825 if (SDPCM_GLOMDESC(&bus
->rxhdr
[SDPCM_HWHDR_LEN
])) {
1826 brcmf_dbg(GLOM
, "glom descriptor, %d bytes:\n",
1828 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1831 __skb_trim(pkt
, rd
->len
);
1832 skb_pull(pkt
, SDPCM_HDRLEN
);
1835 brcmf_err("%s: glom superframe w/o "
1836 "descriptor!\n", __func__
);
1837 sdio_claim_host(bus
->sdiodev
->func
[1]);
1838 brcmf_sdio_rxfail(bus
, false, false);
1839 sdio_release_host(bus
->sdiodev
->func
[1]);
1841 /* prepare the descriptor for the next read */
1842 rd
->len
= rd
->len_nxtfrm
<< 4;
1844 /* treat all packet as event if we don't know */
1845 rd
->channel
= SDPCM_EVENT_CHANNEL
;
1849 /* Fill in packet len and prio, deliver upward */
1850 __skb_trim(pkt
, rd
->len
);
1851 skb_pull(pkt
, rd
->dat_offset
);
1853 /* prepare the descriptor for the next read */
1854 rd
->len
= rd
->len_nxtfrm
<< 4;
1856 /* treat all packet as event if we don't know */
1857 rd
->channel
= SDPCM_EVENT_CHANNEL
;
1859 if (pkt
->len
== 0) {
1860 brcmu_pkt_buf_free_skb(pkt
);
1864 brcmf_rx_frame(bus
->sdiodev
->dev
, pkt
);
1867 rxcount
= maxframes
- rxleft
;
1868 /* Message if we hit the limit */
1870 brcmf_dbg(DATA
, "hit rx limit of %d frames\n", maxframes
);
1872 brcmf_dbg(DATA
, "processed %d frames\n", rxcount
);
1873 /* Back off rxseq if awaiting rtx, update rx_seq */
1876 bus
->rx_seq
= rd
->seq_num
;
static void brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
{
	if (waitqueue_active(&bus->ctrl_wait))
		wake_up_interruptible(&bus->ctrl_wait);
}
static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
	u16 head_pad;
	u8 *dat_buf;

	dat_buf = (u8 *)(pkt->data);

	/* Check head padding */
	head_pad = ((unsigned long)dat_buf % bus->head_align);
	if (head_pad) {
		if (skb_headroom(pkt) < head_pad) {
			bus->sdiodev->bus_if->tx_realloc++;
			if (skb_cow(pkt, head_pad))
				return -ENOMEM;
		}
		skb_push(pkt, head_pad);
		dat_buf = (u8 *)(pkt->data);
		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
	}
	return head_pad;
}
/*
 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
 * bus layer usage.
 */
/* flag marking a dummy skb added for DMA alignment requirement */
#define ALIGN_SKB_FLAG		0x8000
/* bit mask of data length chopped from the previous packet */
#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
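/* Encoding example: when brcmf_sdio_txpkt_prep_sg() below chops, say, 6 bytes
 * off the tail of a frame into a dummy alignment skb, it stores
 *	*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop = 0x8000 + 6 = 0x8006
 * and brcmf_sdio_txpkt_postp() later tests ALIGN_SKB_FLAG and recovers the
 * 6-byte chop length with ALIGN_SKB_CHOP_LEN_MASK before gluing the data back
 * onto the previous packet.
 */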
1921 static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio
*bus
,
1922 struct sk_buff_head
*pktq
,
1923 struct sk_buff
*pkt
, u16 total_len
)
1925 struct brcmf_sdio_dev
*sdiodev
;
1926 struct sk_buff
*pkt_pad
;
1927 u16 tail_pad
, tail_chop
, chain_pad
;
1928 unsigned int blksize
;
1932 sdiodev
= bus
->sdiodev
;
1933 blksize
= sdiodev
->func
[SDIO_FUNC_2
]->cur_blksize
;
1934 /* sg entry alignment should be a divisor of block size */
1935 WARN_ON(blksize
% bus
->sgentry_align
);
1937 /* Check tail padding */
1938 lastfrm
= skb_queue_is_last(pktq
, pkt
);
1940 tail_chop
= pkt
->len
% bus
->sgentry_align
;
1942 tail_pad
= bus
->sgentry_align
- tail_chop
;
1943 chain_pad
= (total_len
+ tail_pad
) % blksize
;
1944 if (lastfrm
&& chain_pad
)
1945 tail_pad
+= blksize
- chain_pad
;
1946 if (skb_tailroom(pkt
) < tail_pad
&& pkt
->len
> blksize
) {
1947 pkt_pad
= bus
->txglom_sgpad
;
1948 if (pkt_pad
== NULL
)
1949 brcmu_pkt_buf_get_skb(tail_pad
+ tail_chop
);
1950 if (pkt_pad
== NULL
)
1952 ret
= brcmf_sdio_txpkt_hdalign(bus
, pkt_pad
);
1953 if (unlikely(ret
< 0))
1955 memcpy(pkt_pad
->data
,
1956 pkt
->data
+ pkt
->len
- tail_chop
,
1958 *(u32
*)(pkt_pad
->cb
) = ALIGN_SKB_FLAG
+ tail_chop
;
1959 skb_trim(pkt
, pkt
->len
- tail_chop
);
1960 __skb_queue_after(pktq
, pkt
, pkt_pad
);
1962 ntail
= pkt
->data_len
+ tail_pad
-
1963 (pkt
->end
- pkt
->tail
);
1964 if (skb_cloned(pkt
) || ntail
> 0)
1965 if (pskb_expand_head(pkt
, 0, ntail
, GFP_ATOMIC
))
1967 if (skb_linearize(pkt
))
1969 __skb_put(pkt
, tail_pad
);
/**
 * brcmf_sdio_txpkt_prep - packet preparation for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 * @chan: virtual channel to transmit the packet
 *
 * Processes to be applied to the packet
 *	- Align data buffer pointer
 *	- Align data buffer length
 *
 * Return: negative value if there is error
 */
1988 brcmf_sdio_txpkt_prep(struct brcmf_sdio
*bus
, struct sk_buff_head
*pktq
,
1991 u16 head_pad
, total_len
;
1992 struct sk_buff
*pkt_next
;
1995 struct brcmf_sdio_hdrinfo hd_info
= {0};
1997 txseq
= bus
->tx_seq
;
1999 skb_queue_walk(pktq
, pkt_next
) {
2000 /* alignment packet inserted in previous
2001 * loop cycle can be skipped as it is
2002 * already properly aligned and does not
2003 * need an sdpcm header.
2005 if (*(u32
*)(pkt_next
->cb
) & ALIGN_SKB_FLAG
)
2008 /* align packet data pointer */
2009 ret
= brcmf_sdio_txpkt_hdalign(bus
, pkt_next
);
2012 head_pad
= (u16
)ret
;
2014 memset(pkt_next
->data
, 0, head_pad
+ bus
->tx_hdrlen
);
2016 total_len
+= pkt_next
->len
;
2018 hd_info
.len
= pkt_next
->len
;
2019 hd_info
.lastfrm
= skb_queue_is_last(pktq
, pkt_next
);
2020 if (bus
->txglom
&& pktq
->qlen
> 1) {
2021 ret
= brcmf_sdio_txpkt_prep_sg(bus
, pktq
,
2022 pkt_next
, total_len
);
2025 hd_info
.tail_pad
= (u16
)ret
;
2026 total_len
+= (u16
)ret
;
2029 hd_info
.channel
= chan
;
2030 hd_info
.dat_offset
= head_pad
+ bus
->tx_hdrlen
;
2031 hd_info
.seq_num
= txseq
++;
2033 /* Now fill the header */
2034 brcmf_sdio_hdpack(bus
, pkt_next
->data
, &hd_info
);
2036 if (BRCMF_BYTES_ON() &&
2037 ((BRCMF_CTL_ON() && chan
== SDPCM_CONTROL_CHANNEL
) ||
2038 (BRCMF_DATA_ON() && chan
!= SDPCM_CONTROL_CHANNEL
)))
2039 brcmf_dbg_hex_dump(true, pkt_next
, hd_info
.len
,
2041 else if (BRCMF_HDRS_ON())
2042 brcmf_dbg_hex_dump(true, pkt_next
,
2043 head_pad
+ bus
->tx_hdrlen
,
2046 /* Hardware length tag of the first packet should be total
2047 * length of the chain (including padding)
2050 brcmf_sdio_update_hwhdr(pktq
->next
->data
, total_len
);
/**
 * brcmf_sdio_txpkt_postp - packet post processing for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 *
 * Processes to be applied to the packet
 *	- Remove head padding
 *	- Remove tail padding
 */
2064 brcmf_sdio_txpkt_postp(struct brcmf_sdio
*bus
, struct sk_buff_head
*pktq
)
2069 u32 dummy_flags
, chop_len
;
2070 struct sk_buff
*pkt_next
, *tmp
, *pkt_prev
;
2072 skb_queue_walk_safe(pktq
, pkt_next
, tmp
) {
2073 dummy_flags
= *(u32
*)(pkt_next
->cb
);
2074 if (dummy_flags
& ALIGN_SKB_FLAG
) {
2075 chop_len
= dummy_flags
& ALIGN_SKB_CHOP_LEN_MASK
;
2077 pkt_prev
= pkt_next
->prev
;
2078 skb_put(pkt_prev
, chop_len
);
2080 __skb_unlink(pkt_next
, pktq
);
2081 brcmu_pkt_buf_free_skb(pkt_next
);
2083 hdr
= pkt_next
->data
+ bus
->tx_hdrlen
- SDPCM_SWHDR_LEN
;
2084 dat_offset
= le32_to_cpu(*(__le32
*)hdr
);
2085 dat_offset
= (dat_offset
& SDPCM_DOFFSET_MASK
) >>
2086 SDPCM_DOFFSET_SHIFT
;
2087 skb_pull(pkt_next
, dat_offset
);
2089 tail_pad
= le16_to_cpu(*(__le16
*)(hdr
- 2));
2090 skb_trim(pkt_next
, pkt_next
->len
- tail_pad
);
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
			    uint chan)
{
	int ret;
	int i;
	struct sk_buff *pkt_next, *tmp;

	brcmf_dbg(TRACE, "Enter\n");

	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
	if (ret)
		goto done;

	sdio_claim_host(bus->sdiodev->func[1]);
	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
	bus->sdcnt.f2txdata++;

	if (ret < 0) {
		/* On failure, abort the command and terminate the frame */
		brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
			  ret);
		bus->sdcnt.tx_sderrs++;

		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
				  SFC_WF_TERM, NULL);
		bus->sdcnt.f1regdata++;

		for (i = 0; i < 3; i++) {
			u8 hi, lo;

			hi = brcmf_sdiod_regrb(bus->sdiodev,
					       SBSDIO_FUNC1_WFRAMEBCHI, NULL);
			lo = brcmf_sdiod_regrb(bus->sdiodev,
					       SBSDIO_FUNC1_WFRAMEBCLO, NULL);
			bus->sdcnt.f1regdata += 2;
			if ((hi == 0) && (lo == 0))
				break;
		}
	}
	sdio_release_host(bus->sdiodev->func[1]);

done:
	brcmf_sdio_txpkt_postp(bus, pktq);
	if (ret == 0)
		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
	skb_queue_walk_safe(pktq, pkt_next, tmp) {
		__skb_unlink(pkt_next, pktq);
		brcmf_txcomplete(bus->sdiodev->dev, pkt_next, ret == 0);
	}
	return ret;
}
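
/* Illustrative sketch only, compiled out: the transmit sequence number
 * advances by the number of packets sent and wraps modulo SDPCM_SEQ_WRAP,
 * mirroring the update of bus->tx_seq above.
 */
#if 0
static unsigned char example_next_seq(unsigned char tx_seq,
				      unsigned int pkts_sent,
				      unsigned int seq_wrap)
{
	return (unsigned char)((tx_seq + pkts_sent) % seq_wrap);
}
#endif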
static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
	struct sk_buff *pkt;
	struct sk_buff_head pktq;
	u32 intstatus = 0;
	int ret = 0, prec_out, i;
	uint cnt = 0;
	u8 tx_prec_map, pkt_num;

	brcmf_dbg(TRACE, "Enter\n");

	tx_prec_map = ~bus->flowcontrol;

	/* Send frames until the limit or some other event */
	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
		__skb_queue_head_init(&pktq);

		pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
				brcmf_sdio_txglomsz);
		pkt_num = min_t(u32, pkt_num,
				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
		spin_lock_bh(&bus->txqlock);
		for (i = 0; i < pkt_num; i++) {
			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
					      &prec_out);
			if (pkt == NULL)
				break;
			__skb_queue_tail(&pktq, pkt);
		}
		spin_unlock_bh(&bus->txqlock);
		if (i == 0)
			break;

		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
		cnt += i;

		/* In poll mode, need to check for other events */
		if (!bus->intr && cnt) {
			/* Check device status, signal pending interrupt */
			sdio_claim_host(bus->sdiodev->func[1]);
			ret = r_sdreg32(bus, &intstatus,
					offsetof(struct sdpcmd_regs,
						 intstatus));
			sdio_release_host(bus->sdiodev->func[1]);
			bus->sdcnt.f2txdata++;
			if (ret != 0)
				break;
			if (intstatus & bus->hostintmask)
				atomic_set(&bus->ipend, 1);
		}
	}

	/* Deflow-control stack if needed */
	if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
		bus->txoff = false;
		brcmf_txflowblock(bus->sdiodev->dev, false);
	}

	return cnt;
}
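
/* Illustrative sketch only, compiled out: sendfromq dequeues packets whose
 * precedence bit is set in tx_prec_map, which is simply the complement of
 * the flow-control bitmap -- a precedence is eligible when it is not
 * currently flow-controlled.
 */
#if 0
#include <stdbool.h>

static bool example_prec_allowed(unsigned char flowcontrol_map,
				 unsigned int prec)
{
	unsigned char tx_prec_map = (unsigned char)~flowcontrol_map;

	return (tx_prec_map >> prec) & 1;
}
#endif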
static void brcmf_sdio_bus_stop(struct device *dev)
{
	u32 local_hostintmask;
	u8 saveclk;
	int err;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus->watchdog_tsk) {
		send_sig(SIGTERM, bus->watchdog_tsk, 1);
		kthread_stop(bus->watchdog_tsk);
		bus->watchdog_tsk = NULL;
	}

	if (bus_if->state == BRCMF_BUS_DOWN) {
		sdio_claim_host(sdiodev->func[1]);

		/* Enable clock for device interrupts */
		brcmf_sdio_bus_sleep(bus, false, false);

		/* Disable and clear interrupts at the chip level also */
		w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
		local_hostintmask = bus->hostintmask;
		bus->hostintmask = 0;

		/* Force backplane clocks to assure F2 interrupt propagates */
		saveclk = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
					    &err);
		if (!err)
			brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
					  (saveclk | SBSDIO_FORCE_HT), &err);
		if (err)
			brcmf_err("Failed to force clock for F2: err %d\n",
				  err);

		/* Turn off the bus (F2), free any pending packets */
		brcmf_dbg(INTR, "disable SDIO interrupts\n");
		sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);

		/* Clear any pending interrupts now that F2 is disabled */
		w_sdreg32(bus, local_hostintmask,
			  offsetof(struct sdpcmd_regs, intstatus));

		sdio_release_host(sdiodev->func[1]);
	}

	/* Clear the data packet queues */
	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);

	/* Clear any held glomming stuff */
	if (bus->glomd)
		brcmu_pkt_buf_free_skb(bus->glomd);
	brcmf_sdio_free_glom(bus);

	/* Clear rx control and wake any waiters */
	spin_lock_bh(&bus->rxctl_lock);
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	brcmf_sdio_dcmd_resp_wake(bus);

	/* Reset some F2 state stuff */
	bus->rxskip = false;
	bus->tx_seq = bus->rx_seq = 0;
}
static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
{
	unsigned long flags;

	if (bus->sdiodev->oob_irq_requested) {
		spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
		if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
			enable_irq(bus->sdiodev->pdata->oob_irq_nr);
			bus->sdiodev->irq_en = true;
		}
		spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
	}
}

static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
	u8 idx;
	u32 addr;
	unsigned long val;
	int n, ret;

	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	addr = bus->ci->c_inf[idx].base +
	       offsetof(struct sdpcmd_regs, intstatus);

	val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
	bus->sdcnt.f1regdata++;
	if (ret != 0)
		return ret;

	val &= bus->hostintmask;
	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));

	/* Clear interrupts */
	if (val) {
		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
		bus->sdcnt.f1regdata++;
	}

	if (ret) {
		atomic_set(&bus->intstatus, 0);
	} else if (val) {
		for_each_set_bit(n, &val, 32)
			set_bit(n, (unsigned long *)&bus->intstatus.counter);
	}

	return ret;
}
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
	u32 newstatus = 0;
	unsigned long intstatus;
	uint rxlimit = bus->rxbound; /* Rx frames to read before resched */
	uint txlimit = bus->txbound; /* Tx frames to send before resched */
	uint framecnt = 0;	/* Temporary counter of tx/rx frames */
	int err = 0, n;

	brcmf_dbg(TRACE, "Enter\n");

	sdio_claim_host(bus->sdiodev->func[1]);

	/* If waiting for HTAVAIL, check status */
	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
		u8 clkctl, devctl = 0;

		/* Check for inconsistent device control */
		devctl = brcmf_sdiod_regrb(bus->sdiodev,
					   SBSDIO_DEVICE_CTL, &err);

		/* Read CSR, if clock on switch to AVAIL, else ignore */
		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
					   SBSDIO_FUNC1_CHIPCLKCSR, &err);

		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
			  devctl, clkctl);

		if (SBSDIO_HTAV(clkctl)) {
			devctl = brcmf_sdiod_regrb(bus->sdiodev,
						   SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					  devctl, &err);
			bus->clkstate = CLK_AVAIL;
		}
	}

	/* Make sure backplane clock is on */
	brcmf_sdio_bus_sleep(bus, false, true);

	/* Pending interrupt indicates new device status */
	if (atomic_read(&bus->ipend) > 0) {
		atomic_set(&bus->ipend, 0);
		err = brcmf_sdio_intr_rstatus(bus);
	}

	/* Start with leftover status bits */
	intstatus = atomic_xchg(&bus->intstatus, 0);

	/* Handle flow-control change: read new state in case our ack
	 * crossed another change interrupt. If change still set, assume
	 * FC ON for safety, let next loop through do the debounce.
	 */
	if (intstatus & I_HMB_FC_CHANGE) {
		intstatus &= ~I_HMB_FC_CHANGE;
		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
				offsetof(struct sdpcmd_regs, intstatus));

		err = r_sdreg32(bus, &newstatus,
				offsetof(struct sdpcmd_regs, intstatus));
		bus->sdcnt.f1regdata += 2;
		atomic_set(&bus->fcstate,
			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
		intstatus |= (newstatus & bus->hostintmask);
	}

	/* Handle host mailbox indication */
	if (intstatus & I_HMB_HOST_INT) {
		intstatus &= ~I_HMB_HOST_INT;
		intstatus |= brcmf_sdio_hostmail(bus);
	}

	sdio_release_host(bus->sdiodev->func[1]);

	/* Generally don't ask for these, can get CRC errors... */
	if (intstatus & I_WR_OOSYNC) {
		brcmf_err("Dongle reports WR_OOSYNC\n");
		intstatus &= ~I_WR_OOSYNC;
	}

	if (intstatus & I_RD_OOSYNC) {
		brcmf_err("Dongle reports RD_OOSYNC\n");
		intstatus &= ~I_RD_OOSYNC;
	}

	if (intstatus & I_SBINT) {
		brcmf_err("Dongle reports SBINT\n");
		intstatus &= ~I_SBINT;
	}

	/* Would be active due to wake-wlan in gSPI */
	if (intstatus & I_CHIPACTIVE) {
		brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
		intstatus &= ~I_CHIPACTIVE;
	}

	/* Ignore frame indications if rxskip is set */
	if (bus->rxskip)
		intstatus &= ~I_HMB_FRAME_IND;

	/* On frame indication, read available frames */
	if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
		framecnt = brcmf_sdio_readframes(bus, rxlimit);
		if (!bus->rxpending)
			intstatus &= ~I_HMB_FRAME_IND;
		rxlimit -= min(framecnt, rxlimit);
	}

	/* Keep still-pending events for next scheduling */
	if (intstatus) {
		for_each_set_bit(n, &intstatus, 32)
			set_bit(n, (unsigned long *)&bus->intstatus.counter);
	}

	brcmf_sdio_clrintr(bus);

	if (data_ok(bus) && bus->ctrl_frame_stat &&
	    (bus->clkstate == CLK_AVAIL)) {
		int i;

		sdio_claim_host(bus->sdiodev->func[1]);
		err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
					   (u32)bus->ctrl_frame_len);

		if (err < 0) {
			/* On failure, abort the command and
			   terminate the frame */
			brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
				  err);
			bus->sdcnt.tx_sderrs++;

			brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);

			brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
					  SFC_WF_TERM, &err);
			bus->sdcnt.f1regdata++;

			for (i = 0; i < 3; i++) {
				u8 hi, lo;

				hi = brcmf_sdiod_regrb(bus->sdiodev,
						       SBSDIO_FUNC1_WFRAMEBCHI,
						       &err);
				lo = brcmf_sdiod_regrb(bus->sdiodev,
						       SBSDIO_FUNC1_WFRAMEBCLO,
						       &err);
				bus->sdcnt.f1regdata += 2;
				if ((hi == 0) && (lo == 0))
					break;
			}
		} else {
			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
		}
		sdio_release_host(bus->sdiodev->func[1]);
		bus->ctrl_frame_stat = false;
		brcmf_sdio_wait_event_wakeup(bus);
	}
	/* Send queued frames (limit 1 if rx may still be pending) */
	else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
		 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
		 data_ok(bus)) {
		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
					    txlimit;
		framecnt = brcmf_sdio_sendfromq(bus, framecnt);
		txlimit -= framecnt;
	}

	if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
		brcmf_err("failed backplane access over SDIO, halting operation\n");
		atomic_set(&bus->intstatus, 0);
	} else if (atomic_read(&bus->intstatus) ||
		   atomic_read(&bus->ipend) > 0 ||
		   (!atomic_read(&bus->fcstate) &&
		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
		    data_ok(bus)) || PKT_AVAILABLE()) {
		atomic_inc(&bus->dpc_tskcnt);
	}

	/* If we're done for now, turn off clock request. */
	if ((bus->clkstate != CLK_PENDING)
	    && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
		bus->activity = false;
		brcmf_dbg(SDIO, "idle state\n");
		sdio_claim_host(bus->sdiodev->func[1]);
		brcmf_sdio_bus_sleep(bus, true, false);
		sdio_release_host(bus->sdiodev->func[1]);
	}
}
static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	return &bus->txq;
}

static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
	int ret = -EBADE;
	uint prec;
	unsigned long flags;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter\n");

	/* Add space for the header */
	skb_push(pkt, bus->tx_hdrlen);
	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */

	prec = prio2prec((pkt->priority & PRIOMASK));

	/* Check for existing queue, current flow-control,
	   pending event, or pending clock */
	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
	bus->sdcnt.fcqueued++;

	/* Priority based enq */
	spin_lock_irqsave(&bus->txqlock, flags);
	if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
		skb_pull(pkt, bus->tx_hdrlen);
		brcmf_err("out of bus->txq !!!\n");
		ret = -ENOSR;
	} else {
		ret = 0;
	}

	if (pktq_len(&bus->txq) >= TXHI) {
		bus->txoff = true;
		brcmf_txflowblock(bus->sdiodev->dev, true);
	}
	spin_unlock_irqrestore(&bus->txqlock, flags);

	if (pktq_plen(&bus->txq, prec) > qcount[prec])
		qcount[prec] = pktq_plen(&bus->txq, prec);

	if (atomic_read(&bus->dpc_tskcnt) == 0) {
		atomic_inc(&bus->dpc_tskcnt);
		queue_work(bus->brcmf_wq, &bus->datawork);
	}

	return ret;
}
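
/* Illustrative sketch only, compiled out: host flow control uses the
 * TXHI/TXLOW watermarks -- block the network stack when the tx queue
 * reaches TXHI (see above) and unblock in sendfromq once it drains below
 * TXLOW, giving simple hysteresis.
 */
#if 0
#include <stdbool.h>

static bool example_update_txoff(bool txoff, unsigned int qlen,
				 unsigned int txhi, unsigned int txlow)
{
	if (!txoff && qlen >= txhi)
		return true;	/* assert flow control */
	if (txoff && qlen < txlow)
		return false;	/* release flow control */
	return txoff;
}
#endif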
#ifdef DEBUG
#define CONSOLE_LINE_MAX	192

static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
{
	struct brcmf_console *c = &bus->console;
	u8 line[CONSOLE_LINE_MAX], ch;
	u32 n, idx, addr;
	int rv;

	/* Don't do anything until FWREADY updates console address */
	if (bus->console_addr == 0)
		return 0;

	/* Read console log struct */
	addr = bus->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
			       sizeof(c->log_le));
	if (rv < 0)
		return rv;

	/* Allocate console buffer (one time only) */
	if (c->buf == NULL) {
		c->bufsize = le32_to_cpu(c->log_le.buf_size);
		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
		if (c->buf == NULL)
			return -ENOMEM;
	}

	idx = le32_to_cpu(c->log_le.idx);

	/* Protect against corrupt value */
	if (idx > c->bufsize)
		return -EBADE;

	/* Skip reading the console buffer if the index pointer
	   has not moved */
	if (idx == c->last)
		return 0;

	/* Read the console buffer */
	addr = le32_to_cpu(c->log_le.buf);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
	if (rv < 0)
		return rv;

	while (c->last != idx) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
			if (c->last == idx) {
				/* This would output a partial line.
				 * Instead, back up
				 * the buffer pointer and output this
				 * line next time around.
				 */
				if (c->last >= n)
					c->last -= n;
				else
					c->last = c->bufsize - n;
				goto break2;
			}
			ch = c->buf[c->last];
			c->last = (c->last + 1) % c->bufsize;
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			pr_debug("CONSOLE: %s\n", line);
		}
	}
break2:

	return 0;
}
#endif				/* DEBUG */
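
/* Illustrative sketch only, compiled out: a standalone userspace model of
 * the console ring-buffer walk above. Characters are consumed from a
 * circular buffer between 'last' and 'idx', split into lines at '\n', with
 * a trailing '\r' stripped. The partial-line backtracking of the driver is
 * omitted here for brevity; all names are hypothetical.
 */
#if 0
#include <stdio.h>

static void example_drain_ring(const char *buf, unsigned int bufsize,
			       unsigned int *last, unsigned int idx)
{
	char line[192];
	unsigned int n;

	while (*last != idx) {
		for (n = 0; n < sizeof(line) - 2 && *last != idx; n++) {
			char ch = buf[*last];

			*last = (*last + 1) % bufsize;
			if (ch == '\n')
				break;
			line[n] = ch;
		}
		if (n > 0 && line[n - 1] == '\r')
			n--;
		line[n] = 0;
		printf("CONSOLE: %s\n", line);
	}
}
#endif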
static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
	int ret;
	int i;

	bus->ctrl_frame_stat = false;
	ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);

	if (ret < 0) {
		/* On failure, abort the command and terminate the frame */
		brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
			  ret);
		bus->sdcnt.tx_sderrs++;

		brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);

		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
				  SFC_WF_TERM, NULL);
		bus->sdcnt.f1regdata++;

		for (i = 0; i < 3; i++) {
			u8 hi, lo;

			hi = brcmf_sdiod_regrb(bus->sdiodev,
					       SBSDIO_FUNC1_WFRAMEBCHI, NULL);
			lo = brcmf_sdiod_regrb(bus->sdiodev,
					       SBSDIO_FUNC1_WFRAMEBCLO, NULL);
			bus->sdcnt.f1regdata += 2;
			if (hi == 0 && lo == 0)
				break;
		}
		return ret;
	}

	bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;

	return ret;
}
static int
brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
	u8 *frame;
	u16 len, pad = 0;
	uint retries = 0;
	u8 doff = 0;
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	struct brcmf_sdio_hdrinfo hd_info = {0};

	brcmf_dbg(TRACE, "Enter\n");

	/* Back the pointer to make a room for bus header */
	frame = msg - bus->tx_hdrlen;
	len = (msglen += bus->tx_hdrlen);

	/* Add alignment padding (optional for ctl frames) */
	doff = ((unsigned long)frame % bus->head_align);
	if (doff) {
		frame -= doff;
		len += doff;
		msglen += doff;
		memset(frame, 0, doff + bus->tx_hdrlen);
	}
	/* precondition: doff < bus->head_align */
	doff += bus->tx_hdrlen;

	/* Round send length to next SDIO block */
	pad = 0;
	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
		pad = bus->blocksize - (len % bus->blocksize);
		if ((pad > bus->roundup) || (pad >= bus->blocksize))
			pad = 0;
	} else if (len % bus->head_align) {
		pad = bus->head_align - (len % bus->head_align);
	}
	len += pad;

	/* precondition: IS_ALIGNED((unsigned long)frame, 2) */

	/* Make sure backplane clock is on */
	sdio_claim_host(bus->sdiodev->func[1]);
	brcmf_sdio_bus_sleep(bus, false, false);
	sdio_release_host(bus->sdiodev->func[1]);

	hd_info.len = (u16)msglen;
	hd_info.channel = SDPCM_CONTROL_CHANNEL;
	hd_info.dat_offset = doff;
	hd_info.seq_num = bus->tx_seq;
	hd_info.lastfrm = true;
	hd_info.tail_pad = pad;
	brcmf_sdio_hdpack(bus, frame, &hd_info);

	if (bus->txglom)
		brcmf_sdio_update_hwhdr(frame, len);

	if (!data_ok(bus)) {
		brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
			  bus->tx_max, bus->tx_seq);
		bus->ctrl_frame_stat = true;
		/* Send from dpc */
		bus->ctrl_frame_buf = frame;
		bus->ctrl_frame_len = len;

		wait_event_interruptible_timeout(bus->ctrl_wait,
						 !bus->ctrl_frame_stat,
						 msecs_to_jiffies(2000));

		if (!bus->ctrl_frame_stat) {
			brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
			ret = 0;
		} else {
			brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
			ret = -1;
		}
	}

	if (ret == -1) {
		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
				   frame, len, "Tx Frame:\n");
		brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
				   BRCMF_HDRS_ON(),
				   frame, min_t(u16, len, 16), "TxHdr:\n");

		do {
			sdio_claim_host(bus->sdiodev->func[1]);
			ret = brcmf_sdio_tx_frame(bus, frame, len);
			sdio_release_host(bus->sdiodev->func[1]);
		} while (ret < 0 && retries++ < TXRETRIES);
	}

	if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
	    atomic_read(&bus->dpc_tskcnt) == 0) {
		bus->activity = false;
		sdio_claim_host(bus->sdiodev->func[1]);
		brcmf_dbg(INFO, "idle\n");
		brcmf_sdio_clkctl(bus, CLK_NONE, true);
		sdio_release_host(bus->sdiodev->func[1]);
	}

	if (ret)
		bus->sdcnt.tx_ctlerrs++;
	else
		bus->sdcnt.tx_ctlpkts++;

	return ret ? -EIO : 0;
}
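
/* Illustrative sketch only, compiled out: the pad computation used above
 * when rounding a control frame up to the SDIO block size -- pad to the
 * next blocksize multiple, but fall back to head_align padding for short
 * frames or when the pad would exceed the roundup limit. A non-zero
 * roundup limit is assumed.
 */
#if 0
static unsigned int example_tx_pad(unsigned int len, unsigned int blocksize,
				   unsigned int roundup_limit,
				   unsigned int head_align)
{
	unsigned int pad = 0;

	if (blocksize && len > blocksize) {
		pad = blocksize - (len % blocksize);
		if (pad > roundup_limit || pad >= blocksize)
			pad = 0;
	} else if (len % head_align) {
		pad = head_align - (len % head_align);
	}
	return pad;
}
#endif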
#ifdef DEBUG
static inline bool brcmf_sdio_valid_shared_address(u32 addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}
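
/* Illustrative sketch only, compiled out: the firmware leaves either the
 * sdpcm_shared address or an nvram-length token (length in the low
 * half-word, its complement in the high half-word) in the last word of
 * RAM; the check above rejects zero and any such token. The concrete
 * values below are made up for the test.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool example_valid_shared_address(uint32_t addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

static void example_valid_shared_address_test(void)
{
	assert(!example_valid_shared_address(0));          /* never valid */
	assert(!example_valid_shared_address(0x0001fffe)); /* len/~len token */
	assert(example_valid_shared_address(0x0007d2c0));  /* plausible addr */
}
#endif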
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
				 struct sdpcm_shared *sh)
{
	u32 addr;
	int rv;
	u32 shaddr = 0;
	struct sdpcm_shared_le sh_le;
	__le32 addr_le;

	shaddr = bus->ci->rambase + bus->ramsize - 4;

	/*
	 * Read last word in socram to determine
	 * address of sdpcm_shared structure
	 */
	sdio_claim_host(bus->sdiodev->func[1]);
	brcmf_sdio_bus_sleep(bus, false, false);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
	sdio_release_host(bus->sdiodev->func[1]);
	if (rv < 0)
		return rv;

	addr = le32_to_cpu(addr_le);

	brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);

	/*
	 * Check if addr is valid.
	 * NVRAM length at the end of memory should have been overwritten.
	 */
	if (!brcmf_sdio_valid_shared_address(addr)) {
		brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
		return -EINVAL;
	}

	/* Read hndrte_shared structure */
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
			       sizeof(struct sdpcm_shared_le));
	if (rv < 0)
		return rv;

	/* Endianness */
	sh->flags = le32_to_cpu(sh_le.flags);
	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
	sh->assert_line = le32_to_cpu(sh_le.assert_line);
	sh->console_addr = le32_to_cpu(sh_le.console_addr);
	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);

	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
			  SDPCM_SHARED_VERSION,
			  sh->flags & SDPCM_SHARED_VERSION_MASK);
		return -EPROTO;
	}

	return 0;
}
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
				   struct sdpcm_shared *sh, char __user *data,
				   size_t count)
{
	u32 addr, console_ptr, console_size, console_index;
	char *conbuf = NULL;
	__le32 sh_val;
	int rv;
	loff_t pos = 0;
	int nbytes = 0;

	/* obtain console information from device memory */
	addr = sh->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
			       (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_ptr = le32_to_cpu(sh_val);

	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
			       (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_size = le32_to_cpu(sh_val);

	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr,
			       (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_index = le32_to_cpu(sh_val);

	/* allocate buffer for console data */
	if (console_size <= CONSOLE_BUFFER_MAX)
		conbuf = vzalloc(console_size+1);

	if (!conbuf)
		return -ENOMEM;

	/* obtain the console data from device */
	conbuf[console_size] = '\0';
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
			       console_size);
	if (rv < 0)
		goto done;

	rv = simple_read_from_buffer(data, count, &pos,
				     conbuf + console_index,
				     console_size - console_index);
	if (rv < 0)
		goto done;

	nbytes = rv;
	if (console_index > 0) {
		pos = 0;
		rv = simple_read_from_buffer(data+nbytes, count, &pos,
					     conbuf, console_index - 1);
		if (rv < 0)
			goto done;
		rv += nbytes;
	}
done:
	vfree(conbuf);
	return rv;
}
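
/* Illustrative sketch only, compiled out: the console log is a ring buffer,
 * so the dump above copies it out in two chunks -- from the write index to
 * the end of the buffer, then from the start up to the index -- to present
 * the log in chronological order.
 */
#if 0
#include <stddef.h>
#include <string.h>

static size_t example_linearize_ring(char *dst, const char *ring,
				     size_t size, size_t idx)
{
	memcpy(dst, ring + idx, size - idx);
	memcpy(dst + (size - idx), ring, idx);
	return size;
}
#endif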
static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
				char __user *data, size_t count)
{
	int error, res;
	char buf[350];
	struct brcmf_trap_info tr;
	loff_t pos = 0;

	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
		brcmf_dbg(INFO, "no trap in firmware\n");
		return 0;
	}

	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
				  sizeof(struct brcmf_trap_info));
	if (error < 0)
		return error;

	res = scnprintf(buf, sizeof(buf),
			"dongle trap info: type 0x%x @ epc 0x%08x\n"
			"  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
			"  lr   0x%08x pc   0x%08x offset 0x%x\n"
			"  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
			"  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
			le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
			le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
			le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
			le32_to_cpu(tr.pc), sh->trap_addr,
			le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
			le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
			le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
			le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));

	return simple_read_from_buffer(data, count, &pos, buf, res);
}
static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
				  struct sdpcm_shared *sh, char __user *data,
				  size_t count)
{
	int error = 0;
	char buf[200];
	char file[80] = "?";
	char expr[80] = "<???>";
	int res;
	loff_t pos = 0;

	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
		brcmf_dbg(INFO, "firmware not built with -assert\n");
		return 0;
	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
		brcmf_dbg(INFO, "no assert in dongle\n");
		return 0;
	}

	sdio_claim_host(bus->sdiodev->func[1]);
	if (sh->assert_file_addr != 0) {
		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
					  sh->assert_file_addr, (u8 *)file, 80);
		if (error < 0)
			return error;
	}
	if (sh->assert_exp_addr != 0) {
		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
					  sh->assert_exp_addr, (u8 *)expr, 80);
		if (error < 0)
			return error;
	}
	sdio_release_host(bus->sdiodev->func[1]);

	res = scnprintf(buf, sizeof(buf),
			"dongle assert: %s:%d: assert(%s)\n",
			file, sh->assert_line, expr);
	return simple_read_from_buffer(data, count, &pos, buf, res);
}

static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
	int error;
	struct sdpcm_shared sh;

	error = brcmf_sdio_readshared(bus, &sh);
	if (error < 0)
		return error;

	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
		brcmf_dbg(INFO, "firmware not built with -assert\n");
	else if (sh.flags & SDPCM_SHARED_ASSERT)
		brcmf_err("assertion in dongle\n");

	if (sh.flags & SDPCM_SHARED_TRAP)
		brcmf_err("firmware trap in dongle\n");

	return 0;
}
static int brcmf_sdio_died_dump(struct brcmf_sdio *bus, char __user *data,
				size_t count, loff_t *ppos)
{
	int error = 0;
	int nbytes = 0;
	struct sdpcm_shared sh;

	error = brcmf_sdio_readshared(bus, &sh);
	if (error < 0)
		goto done;

	error = brcmf_sdio_assert_info(bus, &sh, data, count);
	if (error < 0)
		goto done;
	nbytes = error;

	error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
	if (error < 0)
		goto done;
	nbytes += error;

	error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
	if (error < 0)
		goto done;
	nbytes += error;

	error = nbytes;
done:
	return error;
}

static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
					size_t count, loff_t *ppos)
{
	struct brcmf_sdio *bus = f->private_data;
	int res;

	res = brcmf_sdio_died_dump(bus, data, count, ppos);
	return (ssize_t)res;
}

static const struct file_operations brcmf_sdio_forensic_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = brcmf_sdio_forensic_read
};

static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
{
	struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);

	if (IS_ERR_OR_NULL(dentry))
		return;

	debugfs_create_file("forensics", S_IRUGO, dentry, bus,
			    &brcmf_sdio_forensic_ops);
	brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
}
#else
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
	return 0;
}

static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
{
}
#endif /* DEBUG */

static int
brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
	int timeleft;
	uint rxlen = 0;
	bool pending;
	u8 *buf;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter\n");

	/* Wait until control frame is available */
	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);

	spin_lock_bh(&bus->rxctl_lock);
	rxlen = bus->rxlen;
	memcpy(msg, bus->rxctl, min(msglen, rxlen));
	bus->rxctl = NULL;
	buf = bus->rxctl_orig;
	bus->rxctl_orig = NULL;
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	vfree(buf);

	if (rxlen) {
		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
			  rxlen, msglen);
	} else if (timeleft == 0) {
		brcmf_err("resumed on timeout\n");
		brcmf_sdio_checkdied(bus);
	} else if (pending) {
		brcmf_dbg(CTL, "cancelled\n");
		return -ERESTARTSYS;
	} else {
		brcmf_dbg(CTL, "resumed for unknown reason?\n");
		brcmf_sdio_checkdied(bus);
	}

	if (rxlen)
		bus->sdcnt.rx_ctlpkts++;
	else
		bus->sdcnt.rx_ctlerrs++;

	return rxlen ? (int)rxlen : -ETIMEDOUT;
}
#ifdef DEBUG
static bool
brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
			u8 *ram_data, uint ram_sz)
{
	char *ram_cmp;
	int err;
	bool ret = true;
	int address;
	int offset;
	int len;

	/* read back and verify */
	brcmf_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr,
		  ram_sz);
	ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL);
	/* do not proceed while no memory but  */
	if (!ram_cmp)
		return true;

	address = ram_addr;
	offset = 0;
	while (offset < ram_sz) {
		len = ((offset + MEMBLOCK) < ram_sz) ? MEMBLOCK :
		      ram_sz - offset;
		err = brcmf_sdiod_ramrw(sdiodev, false, address, ram_cmp, len);
		if (err) {
			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
				  err, len, address);
			ret = false;
			break;
		} else if (memcmp(ram_cmp, &ram_data[offset], len)) {
			brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
				  offset, len);
			ret = false;
			break;
		}
		offset += len;
		address += len;
	}

	kfree(ram_cmp);

	return ret;
}
#else	/* DEBUG */
static bool
brcmf_sdio_verifymemory(struct brcmf_sdio_dev *sdiodev, u32 ram_addr,
			u8 *ram_data, uint ram_sz)
{
	return true;
}
#endif	/* DEBUG */

static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
					 const struct firmware *fw)
{
	int err;
	int offset;
	int address;
	int len;

	brcmf_dbg(TRACE, "Enter\n");

	err = 0;
	offset = 0;
	address = bus->ci->rambase;
	while (offset < fw->size) {
		len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
		      fw->size - offset;
		err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
					(u8 *)&fw->data[offset], len);
		if (err) {
			brcmf_err("error %d on writing %d membytes at 0x%08x\n",
				  err, len, address);
			return err;
		}
		offset += len;
		address += len;
	}
	if (!err)
		if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
					     (u8 *)fw->data, fw->size))
			err = -EIO;
	return err;
}
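
/* Illustrative sketch only, compiled out: the download and verify loops
 * above walk the image in MEMBLOCK-sized chunks; the final chunk is simply
 * the remainder of the image.
 */
#if 0
static unsigned int example_chunk_len(unsigned int offset,
				      unsigned int total,
				      unsigned int block)
{
	return (offset + block < total) ? block : total - offset;
}
#endif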
static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
				     const struct firmware *nv)
{
	void *vars;
	u32 varsz;
	int address;
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	vars = brcmf_nvram_strip(nv, &varsz);
	if (vars == NULL)
		return -EINVAL;

	address = bus->ci->ramsize - varsz + bus->ci->rambase;
	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
	if (err)
		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
			  err, varsz, address);
	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
		err = -EIO;

	brcmf_nvram_free(vars);

	return err;
}

static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
{
	int bcmerror = -EFAULT;
	const struct firmware *fw;
	u32 rstvec;

	sdio_claim_host(bus->sdiodev->func[1]);
	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);

	/* Keep arm in reset */
	brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);

	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
	if (fw == NULL) {
		bcmerror = -ENOENT;
		goto err;
	}

	rstvec = get_unaligned_le32(fw->data);
	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);

	bcmerror = brcmf_sdio_download_code_file(bus, fw);
	release_firmware(fw);
	if (bcmerror) {
		brcmf_err("dongle image file download failed\n");
		goto err;
	}

	fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
	if (fw == NULL) {
		bcmerror = -ENOENT;
		goto err;
	}

	bcmerror = brcmf_sdio_download_nvram(bus, fw);
	release_firmware(fw);
	if (bcmerror) {
		brcmf_err("dongle nvram file download failed\n");
		goto err;
	}

	/* Take arm out of reset */
	if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
		brcmf_err("error getting out of ARM core reset\n");
		goto err;
	}

	/* Allow HT Clock now that the ARM is running. */
	brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_LOAD);
	bcmerror = 0;

err:
	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
	sdio_release_host(bus->sdiodev->func[1]);
	return bcmerror;
}
static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
{
	u32 addr, reg, pmu_cc3_mask = ~0;
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	/* old chips with PMU version less than 17 don't support save restore */
	if (bus->ci->pmurev < 17)
		return false;

	switch (bus->ci->chip) {
	case BCM43241_CHIP_ID:
	case BCM4335_CHIP_ID:
	case BCM4339_CHIP_ID:
		/* read PMU chipcontrol register 3 */
		addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
		brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
		addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
		reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
		return (reg & pmu_cc3_mask) != 0;
	default:
		addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
		reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
		if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
			return false;

		addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
		reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
		return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
			       PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
	}
}

static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
	int err = 0;
	u8 val;

	brcmf_dbg(TRACE, "Enter\n");

	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
	if (err) {
		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
		return;
	}

	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
	if (err) {
		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
		return;
	}

	/* Add CMD14 Support */
	brcmf_sdiod_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
			  (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
			   SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
			  &err);
	if (err) {
		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
		return;
	}

	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			  SBSDIO_FORCE_HT, &err);
	if (err) {
		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
		return;
	}

	/* set flag */
	bus->sr_enabled = true;
	brcmf_dbg(INFO, "SR enabled\n");
}

/* enable KSO bit */
static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
{
	u8 val;
	int err = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* KSO bit added in SDIO core rev 12 */
	if (bus->ci->c_inf[1].rev < 12)
		return 0;

	val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
	if (err) {
		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
		return err;
	}

	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
				  val, &err);
		if (err) {
			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
			return err;
		}
	}

	return 0;
}
static int brcmf_sdio_bus_preinit(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	uint pad_size;
	u32 value;
	u8 idx;
	int err;

	/* the commands below use the terms tx and rx from
	 * a device perspective, ie. bus:txglom affects the
	 * bus transfers from device to host.
	 */
	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	if (bus->ci->c_inf[idx].rev < 12) {
		/* for sdio core rev < 12, disable txgloming */
		value = 0;
		err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
					   sizeof(u32));
	} else {
		/* otherwise, set txglomalign */
		value = 4;
		if (sdiodev->pdata)
			value = sdiodev->pdata->sd_sgentry_align;
		/* SDIO ADMA requires at least 32 bit alignment */
		value = max_t(u32, value, 4);
		err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
					   sizeof(u32));
	}

	if (err < 0)
		goto done;

	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
	if (sdiodev->sg_support) {
		bus->txglom = false;
		value = 1;
		pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
		bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
		if (!bus->txglom_sgpad)
			brcmf_err("allocating txglom padding skb failed, reduced performance\n");

		err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
					   &value, sizeof(u32));
		if (err < 0) {
			/* bus:rxglom is allowed to fail */
			err = 0;
		} else {
			bus->txglom = true;
			bus->tx_hdrlen += SDPCM_HWEXT_LEN;
		}
	}
	brcmf_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen);

done:
	return err;
}
static int brcmf_sdio_bus_init(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	int err, ret = 0;
	u8 saveclk;

	brcmf_dbg(TRACE, "Enter\n");

	/* try to download image and nvram to the dongle */
	if (bus_if->state == BRCMF_BUS_DOWN) {
		bus->alp_only = true;
		err = brcmf_sdio_download_firmware(bus);
		if (err)
			return err;
		bus->alp_only = false;
	}

	if (!bus->sdiodev->bus_if->drvr)
		return 0;

	/* Start the watchdog timer */
	bus->sdcnt.tickcnt = 0;
	brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Make sure backplane clock is on, needed to generate F2 interrupt */
	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
	if (bus->clkstate != CLK_AVAIL)
		goto exit;

	/* Force clocks on backplane to be sure F2 interrupt propagates */
	saveclk = brcmf_sdiod_regrb(bus->sdiodev,
				    SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err)
		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  (saveclk | SBSDIO_FORCE_HT), &err);
	if (err) {
		brcmf_err("Failed to force clock for F2: err %d\n", err);
		goto exit;
	}

	/* Enable function 2 (frame transfers) */
	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
	err = sdio_enable_func(bus->sdiodev->func[SDIO_FUNC_2]);

	brcmf_dbg(INFO, "enable F2: err=%d\n", err);

	/* If F2 successfully enabled, set core and enable interrupts */
	if (!err) {
		/* Set up the interrupt mask and enable interrupts */
		bus->hostintmask = HOSTINTMASK;
		w_sdreg32(bus, bus->hostintmask,
			  offsetof(struct sdpcmd_regs, hostintmask));

		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
	} else {
		/* Disable F2 again */
		sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
		ret = -ENODEV;
	}

	if (brcmf_sdio_sr_capable(bus)) {
		brcmf_sdio_sr_init(bus);
	} else {
		/* Restore previous clock setting */
		brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  saveclk, &err);
	}

	if (ret == 0) {
		ret = brcmf_sdiod_intr_register(bus->sdiodev);
		if (ret != 0)
			brcmf_err("intr register failed:%d\n", ret);
	}

	/* If we didn't come up, turn off backplane clock */
	if (ret != 0)
		brcmf_sdio_clkctl(bus, CLK_NONE, false);

exit:
	sdio_release_host(bus->sdiodev->func[1]);

	return ret;
}
void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (!bus) {
		brcmf_err("bus is null pointer, exiting\n");
		return;
	}

	if (!brcmf_bus_ready(bus->sdiodev->bus_if)) {
		brcmf_err("bus is down. we have nothing to do\n");
		return;
	}
	/* Count the interrupt call */
	bus->sdcnt.intrcount++;
	if (in_interrupt())
		atomic_set(&bus->ipend, 1);
	else
		if (brcmf_sdio_intr_rstatus(bus)) {
			brcmf_err("failed backplane access\n");
		}

	/* Disable additional interrupts (is this needed now)? */
	if (!bus->intr)
		brcmf_err("isr w/o interrupt configured!\n");

	atomic_inc(&bus->dpc_tskcnt);
	queue_work(bus->brcmf_wq, &bus->datawork);
}
static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);

	brcmf_dbg(TIMER, "Enter\n");

	/* Poll period: check device if appropriate. */
	if (!bus->sr_enabled &&
	    bus->poll && (++bus->polltick >= bus->pollrate)) {
		u32 intstatus = 0;

		/* Reset poll tick */
		bus->polltick = 0;

		/* Check device if no interrupts */
		if (!bus->intr ||
		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {

			if (atomic_read(&bus->dpc_tskcnt) == 0) {
				u8 devpend;

				sdio_claim_host(bus->sdiodev->func[1]);
				devpend = brcmf_sdiod_regrb(bus->sdiodev,
							    SDIO_CCCR_INTx,
							    NULL);
				sdio_release_host(bus->sdiodev->func[1]);
				intstatus =
				    devpend & (INTR_STATUS_FUNC1 |
					       INTR_STATUS_FUNC2);
			}

			/* If there is something, make like the ISR and
			   schedule the DPC */
			if (intstatus) {
				bus->sdcnt.pollcnt++;
				atomic_set(&bus->ipend, 1);

				atomic_inc(&bus->dpc_tskcnt);
				queue_work(bus->brcmf_wq, &bus->datawork);
			}
		}

		/* Update interrupt tracking */
		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
	}

	/* Poll for console output periodically */
	if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
	    bus->console_interval != 0) {
		bus->console.count += BRCMF_WD_POLL_MS;
		if (bus->console.count >= bus->console_interval) {
			bus->console.count -= bus->console_interval;
			sdio_claim_host(bus->sdiodev->func[1]);
			/* Make sure backplane clock is on */
			brcmf_sdio_bus_sleep(bus, false, false);
			if (brcmf_sdio_readconsole(bus) < 0)
				/* stop on error */
				bus->console_interval = 0;
			sdio_release_host(bus->sdiodev->func[1]);
		}
	}

	/* On idle timeout clear activity flag and/or turn off clock */
	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
		if (++bus->idlecount >= bus->idletime) {
			bus->idlecount = 0;
			if (bus->activity) {
				bus->activity = false;
				brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS);
			} else {
				brcmf_dbg(SDIO, "idle\n");
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdio_bus_sleep(bus, true, false);
				sdio_release_host(bus->sdiodev->func[1]);
			}
		}
	}

	return (atomic_read(&bus->ipend) > 0);
}

static void brcmf_sdio_dataworker(struct work_struct *work)
{
	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
					      datawork);

	while (atomic_read(&bus->dpc_tskcnt)) {
		brcmf_sdio_dpc(bus);
		atomic_dec(&bus->dpc_tskcnt);
	}
}
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
	u8 clkctl = 0;
	int err = 0;
	int reg_addr;
	u32 reg_val;
	u32 drivestrength;

	sdio_claim_host(bus->sdiodev->func[1]);

	pr_debug("F1 signature read @0x18000000=0x%4x\n",
		 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));

	/*
	 * Force PLL off until brcmf_sdio_chip_attach()
	 * programs PLL control regs
	 */
	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			  BRCMF_INIT_CLKCTL1, &err);
	if (!err)
		clkctl = brcmf_sdiod_regrb(bus->sdiodev,
					   SBSDIO_FUNC1_CHIPCLKCSR, &err);

	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
			  err, BRCMF_INIT_CLKCTL1, clkctl);
		goto fail;
	}

	/* SDIO register access works so moving
	 * state from UNKNOWN to DOWN.
	 */
	brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);

	if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
		brcmf_err("brcmf_sdio_chip_attach failed!\n");
		goto fail;
	}

	if (brcmf_sdio_kso_init(bus)) {
		brcmf_err("error enabling KSO\n");
		goto fail;
	}

	if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
		drivestrength = bus->sdiodev->pdata->drive_strength;
	else
		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
	brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);

	/* Get info on the SOCRAM cores... */
	bus->ramsize = bus->ci->ramsize;
	if (!(bus->ramsize)) {
		brcmf_err("failed to find SOCRAM memory!\n");
		goto fail;
	}

	/* Set card control so an SDIO card reset does a WLAN backplane reset */
	reg_val = brcmf_sdiod_regrb(bus->sdiodev,
				    SDIO_CCCR_BRCM_CARDCTRL, &err);
	if (err)
		goto fail;

	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;

	brcmf_sdiod_regwb(bus->sdiodev,
			  SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
	if (err)
		goto fail;

	/* set PMUControl so a backplane reset does PMU state reload */
	reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
			       pmucontrol);
	reg_val = brcmf_sdiod_regrl(bus->sdiodev,
				    reg_addr,
				    &err);
	if (err)
		goto fail;

	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);

	brcmf_sdiod_regwl(bus->sdiodev,
			  reg_addr,
			  reg_val,
			  &err);
	if (err)
		goto fail;

	sdio_release_host(bus->sdiodev->func[1]);

	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);

	/* allocate header buffer */
	bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL);
	if (!bus->hdrbuf)
		return false;
	/* Locate an appropriately-aligned portion of hdrbuf */
	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
				    bus->head_align);

	/* Set the poll and/or interrupt flags */
	bus->intr = true;
	bus->poll = false;
	if (bus->poll)
		bus->pollrate = 1;

	return true;

fail:
	sdio_release_host(bus->sdiodev->func[1]);
	return false;
}
static int
brcmf_sdio_watchdog_thread(void *data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	allow_signal(SIGTERM);
	/* Run until signal received */
	while (1) {
		if (kthread_should_stop())
			break;
		if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
			brcmf_sdio_bus_watchdog(bus);
			/* Count the tick for reference */
			bus->sdcnt.tickcnt++;
		} else
			break;
	}
	return 0;
}

static void
brcmf_sdio_watchdog(unsigned long data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	if (bus->watchdog_tsk) {
		complete(&bus->watchdog_wait);
		/* Reschedule the watchdog */
		if (bus->wd_timer_valid)
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
	}
}

static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
	.stop = brcmf_sdio_bus_stop,
	.preinit = brcmf_sdio_bus_preinit,
	.init = brcmf_sdio_bus_init,
	.txdata = brcmf_sdio_bus_txdata,
	.txctl = brcmf_sdio_bus_txctl,
	.rxctl = brcmf_sdio_bus_rxctl,
	.gettxq = brcmf_sdio_bus_gettxq,
};
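
/* Illustrative sketch only, compiled out: the watchdog rearms itself
 * BRCMF_WD_POLL_MS milliseconds into the future; the expiry arithmetic
 * above is the usual ms-to-jiffies conversion (ms * HZ / 1000) added to
 * the current jiffies value.
 */
#if 0
static unsigned long example_next_expiry(unsigned long now_jiffies,
					 unsigned int poll_ms,
					 unsigned int hz)
{
	return now_jiffies + (unsigned long)poll_ms * hz / 1000;
}
#endif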
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret;
	struct brcmf_sdio *bus;

	brcmf_dbg(TRACE, "Enter\n");

	/* Allocate private bus interface state */
	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
	if (!bus)
		goto fail;

	bus->sdiodev = sdiodev;
	sdiodev->bus = bus;
	skb_queue_head_init(&bus->glom);
	bus->txbound = BRCMF_TXBOUND;
	bus->rxbound = BRCMF_RXBOUND;
	bus->txminmax = BRCMF_TXMINMAX;
	bus->tx_seq = SDPCM_SEQ_WRAP - 1;

	/* platform specific configuration:
	 *   alignments must be at least 4 bytes for ADMA
	 */
	bus->head_align = ALIGNMENT;
	bus->sgentry_align = ALIGNMENT;
	if (sdiodev->pdata) {
		if (sdiodev->pdata->sd_head_align > ALIGNMENT)
			bus->head_align = sdiodev->pdata->sd_head_align;
		if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT)
			bus->sgentry_align = sdiodev->pdata->sd_sgentry_align;
	}

	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
	if (bus->brcmf_wq == NULL) {
		brcmf_err("insufficient memory to create txworkqueue\n");
		goto fail;
	}

	/* attempt to attach to the dongle */
	if (!(brcmf_sdio_probe_attach(bus))) {
		brcmf_err("brcmf_sdio_probe_attach failed\n");
		goto fail;
	}

	spin_lock_init(&bus->rxctl_lock);
	spin_lock_init(&bus->txqlock);
	init_waitqueue_head(&bus->ctrl_wait);
	init_waitqueue_head(&bus->dcmd_resp_wait);

	/* Set up the watchdog timer */
	init_timer(&bus->timer);
	bus->timer.data = (unsigned long)bus;
	bus->timer.function = brcmf_sdio_watchdog;

	/* Initialize watchdog thread */
	init_completion(&bus->watchdog_wait);
	bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
					bus, "brcmf_watchdog");
	if (IS_ERR(bus->watchdog_tsk)) {
		pr_warn("brcmf_watchdog thread failed to start\n");
		bus->watchdog_tsk = NULL;
	}
	/* Initialize DPC thread */
	atomic_set(&bus->dpc_tskcnt, 0);

	/* Assign bus interface call back */
	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
	bus->sdiodev->bus_if->chip = bus->ci->chip;
	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;

	/* default sdio bus header length for tx packet */
	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;

	/* Attach to the common layer, reserve hdr space */
	ret = brcmf_attach(bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("brcmf_attach failed\n");
		goto fail;
	}

	/* Allocate buffers */
	if (bus->sdiodev->bus_if->maxctl) {
		bus->rxblen =
		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
			    ALIGNMENT) + bus->head_align;
		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
		if (!(bus->rxbuf)) {
			brcmf_err("rxbuf allocation failed\n");
			goto fail;
		}
	}

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Disable F2 to clear any intermediate frame state on the dongle */
	sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);

	bus->rxflow = false;

	/* Done with backplane-dependent accesses, can drop clock... */
	brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);

	sdio_release_host(bus->sdiodev->func[1]);

	/* ...and initialize clock/power states */
	bus->clkstate = CLK_SDONLY;
	bus->idletime = BRCMF_IDLE_INTERVAL;
	bus->idleclock = BRCMF_IDLE_ACTIVE;

	/* Query the F2 block size, set roundup accordingly */
	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
	bus->roundup = min(max_roundup, bus->blocksize);

	/* SR state */
	bus->sleeping = false;
	bus->sr_enabled = false;

	brcmf_sdio_debugfs_create(bus);
	brcmf_dbg(INFO, "completed!!\n");

	/* if firmware path present try to download and bring up bus */
	ret = brcmf_bus_start(bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("dongle is not responding\n");
		goto fail;
	}

	return bus;

fail:
	brcmf_sdio_remove(bus);
	return NULL;
}
/* Detach and free everything */
void brcmf_sdio_remove(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus) {
		/* De-register interrupt handler */
		brcmf_sdiod_intr_unregister(bus->sdiodev);

		cancel_work_sync(&bus->datawork);
		if (bus->brcmf_wq)
			destroy_workqueue(bus->brcmf_wq);

		if (bus->sdiodev->bus_if->drvr) {
			brcmf_detach(bus->sdiodev->dev);
		}

		if (bus->ci) {
			if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
				/* Leave the device in state where it is
				 * 'quiet'. This is done by putting it in
				 * download_state which essentially resets
				 * all necessary cores.
				 */
				brcmf_sdio_chip_enter_download(bus->sdiodev,
							       bus->ci);
				brcmf_sdio_clkctl(bus, CLK_NONE, false);
				sdio_release_host(bus->sdiodev->func[1]);
			}
			brcmf_sdio_chip_detach(&bus->ci);
		}

		brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
		kfree(bus->rxbuf);
		kfree(bus->hdrbuf);
		kfree(bus);
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}

void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
	/* Totally stop the timer */
	if (!wdtick && bus->wd_timer_valid) {
		del_timer_sync(&bus->timer);
		bus->wd_timer_valid = false;
		bus->save_ms = wdtick;
		return;
	}

	/* don't start the wd until fw is loaded */
	if (bus->sdiodev->bus_if->state != BRCMF_BUS_DATA)
		return;

	if (wdtick) {
		if (bus->save_ms != BRCMF_WD_POLL_MS) {
			if (bus->wd_timer_valid)
				/* Stop timer and restart at new value */
				del_timer_sync(&bus->timer);

			/* Create timer again when watchdog period is
			   dynamically changed or in the first instance
			 */
			bus->timer.expires =
				jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
			add_timer(&bus->timer);
		} else {
			/* Re arm the timer, at last watchdog period */
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
		}

		bus->wd_timer_valid = true;
		bus->save_ms = wdtick;
	}
}