/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/pci_ids.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/bcma/bcma.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>

#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>

#define DCMD_RESP_TIMEOUT	msecs_to_jiffies(2500)
#define CTL_DONE_TIMEOUT	msecs_to_jiffies(2500)

#define BRCMF_TRAP_INFO_SIZE	80

#define CBUF_LEN	(128)

/* Device console log buffer state */
#define CONSOLE_BUFFER_MAX	2024
	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
	char *_buf_compat;	/* Redundant pointer for backward compat. */

	/*
	 * When there is no UART (e.g. Quickturn),
	 * the host should write a complete
	 * input line directly into cbuf and then write
	 * the length into vcons_in.
	 * This may also be used when there is a real UART
	 * (at risk of conflicting with
	 * the real UART). vcons_out is currently unused.
	 */

	/* Output (logging) buffer
	 * Console output is written to a ring buffer log_buf at index log_idx.
	 * The host may read the output when it sees log_idx advance.
	 * Output will be lost if the output wraps around faster than the host
	 * polls.
	 */
	struct rte_log_le log_le;

	/* Console input line buffer
	 * Characters are read one at a time into cbuf
	 * until <CR> is received, then
	 * the buffer is processed as a command line.
	 * Also used for virtual UART.
	 */
100 #include <chipcommon.h>
104 #include "tracepoint.h"
106 #define TXQLEN 2048 /* bulk tx queue length */
107 #define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
108 #define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */
111 #define TXRETRIES 2 /* # of retries for tx frames */
#define BRCMF_RXBOUND	50	/* Default for max rx frames in
				   one scheduling */

#define BRCMF_TXBOUND	20	/* Default for max tx frames in
				   one scheduling */

#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */

#define MEMBLOCK	2048	/* Block size used for downloading
				   of dongle image */
#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
					   biggest possible glom */
126 #define BRCMF_FIRSTREAD (1 << 6)
128 #define BRCMF_CONSOLE 10 /* watchdog interval to poll console */
130 /* SBSDIO_DEVICE_CTL */
132 /* 1: device will assert busy signal when receiving CMD53 */
133 #define SBSDIO_DEVCTL_SETBUSY 0x01
134 /* 1: assertion of sdio interrupt is synchronous to the sdio clock */
135 #define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02
136 /* 1: mask all interrupts to host except the chipActive (rev 8) */
137 #define SBSDIO_DEVCTL_CA_INT_ONLY 0x04
138 /* 1: isolate internal sdio signals, put external pads in tri-state; requires
139 * sdio bus power cycle to clear (rev 9) */
140 #define SBSDIO_DEVCTL_PADS_ISO 0x08
141 /* Force SD->SB reset mapping (rev 11) */
142 #define SBSDIO_DEVCTL_SB_RST_CTL 0x30
143 /* Determined by CoreControl bit */
144 #define SBSDIO_DEVCTL_RST_CORECTL 0x00
145 /* Force backplane reset */
146 #define SBSDIO_DEVCTL_RST_BPRESET 0x10
147 /* Force no backplane reset */
148 #define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
150 /* direct(mapped) cis space */
152 /* MAPPED common CIS address */
153 #define SBSDIO_CIS_BASE_COMMON 0x1000
154 /* maximum bytes in one CIS */
155 #define SBSDIO_CIS_SIZE_LIMIT 0x200
156 /* cis offset addr is < 17 bits */
157 #define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF
159 /* manfid tuple length, include tuple, link bytes */
160 #define SBSDIO_CIS_MANFID_TUPLE_LEN 6
162 #define SD_REG(field) \
163 (offsetof(struct sdpcmd_regs, field))
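/*
 * Hedged usage sketch (not part of the original driver): SD_REG() turns a
 * field of struct sdpcmd_regs into its byte offset, so a register address
 * is normally formed by adding it to the SDIO core base address, as the
 * call sites further down in this file do.
 */
static inline u32 sdpcmd_example_reg_addr(u32 core_base)
{
	/* e.g. the to-SB mailbox register used for NAK/interrupt-ack writes */
	return core_base + SD_REG(tosbmailbox);
}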
165 /* SDIO function 1 register CHIPCLKCSR */
166 /* Force ALP request to backplane */
167 #define SBSDIO_FORCE_ALP 0x01
168 /* Force HT request to backplane */
169 #define SBSDIO_FORCE_HT 0x02
170 /* Force ILP request to backplane */
171 #define SBSDIO_FORCE_ILP 0x04
172 /* Make ALP ready (power up xtal) */
173 #define SBSDIO_ALP_AVAIL_REQ 0x08
174 /* Make HT ready (power up PLL) */
175 #define SBSDIO_HT_AVAIL_REQ 0x10
176 /* Squelch clock requests from HW */
177 #define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
178 /* Status: ALP is ready */
179 #define SBSDIO_ALP_AVAIL 0x40
180 /* Status: HT is ready */
181 #define SBSDIO_HT_AVAIL 0x80
182 #define SBSDIO_CSR_MASK 0x1F
183 #define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
184 #define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
185 #define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
186 #define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
187 #define SBSDIO_CLKAV(regval, alponly) \
188 (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
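/*
 * Hedged sketch (not in the original source): how the helper macros above
 * classify a raw SBSDIO_FUNC1_CHIPCLKCSR value read from function 1.
 */
static inline bool brcmf_example_clk_ready(u8 clkctl, bool alp_only)
{
	/* ALP-only callers are satisfied by SBSDIO_ALP_AVAIL; everyone else
	 * also needs SBSDIO_HT_AVAIL to be set.
	 */
	return SBSDIO_CLKAV(clkctl, alp_only);
}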
191 #define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
192 #define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
193 #define I_SMB_SW2 (1 << 2) /* To SB Mail S/W interrupt 2 */
194 #define I_SMB_SW3 (1 << 3) /* To SB Mail S/W interrupt 3 */
195 #define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */
196 #define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */
197 #define I_HMB_SW0 (1 << 4) /* To Host Mail S/W interrupt 0 */
198 #define I_HMB_SW1 (1 << 5) /* To Host Mail S/W interrupt 1 */
199 #define I_HMB_SW2 (1 << 6) /* To Host Mail S/W interrupt 2 */
200 #define I_HMB_SW3 (1 << 7) /* To Host Mail S/W interrupt 3 */
201 #define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */
202 #define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */
203 #define I_WR_OOSYNC (1 << 8) /* Write Frame Out Of Sync */
204 #define I_RD_OOSYNC (1 << 9) /* Read Frame Out Of Sync */
205 #define I_PC (1 << 10) /* descriptor error */
206 #define I_PD (1 << 11) /* data error */
207 #define I_DE (1 << 12) /* Descriptor protocol Error */
208 #define I_RU (1 << 13) /* Receive descriptor Underflow */
209 #define I_RO (1 << 14) /* Receive fifo Overflow */
210 #define I_XU (1 << 15) /* Transmit fifo Underflow */
211 #define I_RI (1 << 16) /* Receive Interrupt */
212 #define I_BUSPWR (1 << 17) /* SDIO Bus Power Change (rev 9) */
213 #define I_XMTDATA_AVAIL (1 << 23) /* bits in fifo */
214 #define I_XI (1 << 24) /* Transmit Interrupt */
215 #define I_RF_TERM (1 << 25) /* Read Frame Terminate */
216 #define I_WF_TERM (1 << 26) /* Write Frame Terminate */
217 #define I_PCMCIA_XU (1 << 27) /* PCMCIA Transmit FIFO Underflow */
218 #define I_SBINT (1 << 28) /* sbintstatus Interrupt */
219 #define I_CHIPACTIVE (1 << 29) /* chip from doze to active state */
220 #define I_SRESET (1 << 30) /* CCCR RES interrupt */
221 #define I_IOE2 (1U << 31) /* CCCR IOE2 Bit Changed */
222 #define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
223 #define I_DMA (I_RI | I_XI | I_ERRORS)
226 #define CC_CISRDY (1 << 0) /* CIS Ready */
227 #define CC_BPRESEN (1 << 1) /* CCCR RES signal */
228 #define CC_F2RDY (1 << 2) /* set CCCR IOR2 bit */
229 #define CC_CLRPADSISO (1 << 3) /* clear SDIO pads isolation */
230 #define CC_XMTDATAAVAIL_MODE (1 << 4)
231 #define CC_XMTDATAAVAIL_CTRL (1 << 5)
234 #define SFC_RF_TERM (1 << 0) /* Read Frame Terminate */
235 #define SFC_WF_TERM (1 << 1) /* Write Frame Terminate */
236 #define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
237 #define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
/*
 * Software allocation of To SB Mailbox resources
 */
243 /* tosbmailbox bits corresponding to intstatus bits */
244 #define SMB_NAK (1 << 0) /* Frame NAK */
245 #define SMB_INT_ACK (1 << 1) /* Host Interrupt ACK */
246 #define SMB_USE_OOB (1 << 2) /* Use OOB Wakeup */
247 #define SMB_DEV_INT (1 << 3) /* Miscellaneous Interrupt */
249 /* tosbmailboxdata */
250 #define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */
/*
 * Software allocation of To Host Mailbox resources
 */
257 #define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */
258 #define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */
259 #define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */
260 #define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */
262 /* tohostmailboxdata */
263 #define HMB_DATA_NAKHANDLED 0x0001 /* retransmit NAK'd frame */
264 #define HMB_DATA_DEVREADY 0x0002 /* talk to host after enable */
265 #define HMB_DATA_FC 0x0004 /* per prio flowcontrol update flag */
266 #define HMB_DATA_FWREADY 0x0008 /* fw ready for protocol activity */
267 #define HMB_DATA_FWHALT 0x0010 /* firmware halted */
269 #define HMB_DATA_FCDATA_MASK 0xff000000
270 #define HMB_DATA_FCDATA_SHIFT 24
272 #define HMB_DATA_VERSION_MASK 0x00ff0000
273 #define HMB_DATA_VERSION_SHIFT 16
/*
 * Software-defined protocol header
 */
279 /* Current protocol version */
280 #define SDPCM_PROT_VERSION 4
/*
 * Shared structure between dongle and the host.
 * The structure contains pointers to trap or assert information.
 */
286 #define SDPCM_SHARED_VERSION 0x0003
287 #define SDPCM_SHARED_VERSION_MASK 0x00FF
288 #define SDPCM_SHARED_ASSERT_BUILT 0x0100
289 #define SDPCM_SHARED_ASSERT 0x0200
290 #define SDPCM_SHARED_TRAP 0x0400
292 /* Space for header read, limit for data packets */
293 #define MAX_HDR_READ (1 << 6)
294 #define MAX_RX_DATASZ 2048
/* Bump up limit on waiting for HT to account for first startup;
 * if the image is doing a CRC calculation before programming the PMU
 * for HT availability, it could take a couple hundred ms more, so
 * max out at 1 second (1000000 us).
 */
#undef PMU_MAX_TRANSITION_DLY
#define PMU_MAX_TRANSITION_DLY 1000000
304 /* Value for ChipClockCSR during initial setup */
305 #define BRCMF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \
306 SBSDIO_ALP_AVAIL_REQ)
308 /* Flags for SDH calls */
309 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
					 * when idle
					 */
314 #define BRCMF_IDLE_INTERVAL 1
316 #define KSO_WAIT_US 50
317 #define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
318 #define BRCMF_SDIO_MAX_ACCESS_ERRORS 5
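/*
 * Hedged worked example (not in the original file): with KSO_WAIT_US = 50
 * and PMU_MAX_TRANSITION_DLY = 1000000, MAX_KSO_ATTEMPTS evaluates to 20000,
 * i.e. the KSO handshake below is retried for up to roughly one second
 * before giving up.
 */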
/*
 * Conversion of 802.1D priority to precedence level
 */
static uint prio2prec(u32 prio)
{
	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
	       (prio ^ 2) : prio;
}
/* Device console log buffer state */
struct brcmf_console {
	uint count;			/* Poll interval msec counter */
	uint log_addr;			/* Log struct address (fixed) */
	struct rte_log_le log_le;	/* Log struct (host copy) */
	uint bufsize;			/* Size of log buffer */
	u8 *buf;			/* Log buffer (host copy) */
	uint last;			/* Last buffer read index */
};
struct brcmf_trap_info {
	__le32 r9;	/* sb/v6 */
	__le32 r10;	/* sl/v7 */
	__le32 r11;	/* fp/v8 */
struct sdpcm_shared {
	u32 assert_file_addr;
	u32 console_addr;	/* Address of struct rte_console */

struct sdpcm_shared_le {
	__le32 assert_exp_addr;
	__le32 assert_file_addr;
	__le32 console_addr;	/* Address of struct rte_console */
	__le32 msgtrace_addr;
/* dongle SDIO bus specific header info */
struct brcmf_sdio_hdrinfo {
	u8 seq_num;
	u8 channel;
	u16 len;
	u16 len_left;
	u16 len_nxtfrm;
	u8 dat_offset;
	bool lastfrm;
	u16 tail_pad;
};

/*
 * hold counter variables
 */
struct brcmf_sdio_count {
	uint intrcount;		/* Count of device interrupt callbacks */
	uint lastintrs;		/* Count as of last watchdog timer */
	uint pollcnt;		/* Count of active polls */
	uint regfails;		/* Count of R_REG failures */
	uint tx_sderrs;		/* Count of tx attempts with sd errors */
	uint fcqueued;		/* Tx packets that got queued */
	uint rxrtx;		/* Count of rtx requests (NAK to dongle) */
	uint rx_toolong;	/* Receive frames too long to receive */
	uint rxc_errors;	/* SDIO errors when reading control frames */
	uint rx_hdrfail;	/* SDIO errors on header reads */
	uint rx_badhdr;		/* Bad received headers (roosync?) */
	uint rx_badseq;		/* Mismatched rx sequence number */
	uint fc_rcvd;		/* Number of flow-control events received */
	uint fc_xoff;		/* Number which turned on flow-control */
	uint fc_xon;		/* Number which turned off flow-control */
	uint rxglomfail;	/* Failed deglom attempts */
	uint rxglomframes;	/* Number of glom frames (superframes) */
	uint rxglompkts;	/* Number of packets from glom frames */
	uint f2rxhdrs;		/* Number of header reads */
	uint f2rxdata;		/* Number of frame data reads */
	uint f2txdata;		/* Number of f2 frame writes */
	uint f1regdata;		/* Number of f1 register accesses */
	uint tickcnt;		/* Number of times the watchdog was scheduled */
	ulong tx_ctlerrs;	/* Err of sending ctrl frames */
	ulong tx_ctlpkts;	/* Ctrl frames sent to dongle */
	ulong rx_ctlerrs;	/* Err of processing rx ctrl frames */
	ulong rx_ctlpkts;	/* Ctrl frames processed from dongle */
	ulong rx_readahead_cnt;	/* packets where header read-ahead was used */
};
/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
	struct brcmf_chip *ci;		/* Chip info struct */
	struct brcmf_core *sdio_core;	/* sdio core info struct */

	u32 hostintmask;	/* Copy of Host Interrupt Mask */
	atomic_t intstatus;	/* Intstatus bits (events) pending */
	atomic_t fcstate;	/* State of dongle flow-control */

	uint blocksize;		/* Block size of SDIO transfers */
	uint roundup;		/* Max roundup limit */

	struct pktq txq;	/* Queue length used for flow-control */
	u8 flowcontrol;		/* per prio flow control bitmask */
	u8 tx_seq;		/* Transmit sequence number (next) */
	u8 tx_max;		/* Maximum transmit sequence allowed */

	u8 *hdrbuf;		/* buffer for handling rx frame */
	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
	u8 rx_seq;		/* Receive sequence number (expected) */
	struct brcmf_sdio_hdrinfo cur_read;	/* info of current read frame */
	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
	bool rxpending;		/* Data frame pending in dongle */

	uint rxbound;		/* Rx frames to read before resched */
	uint txbound;		/* Tx frames to send before resched */

	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
	struct sk_buff_head glom; /* Packet list for glommed superframe */

	u8 *rxbuf;		/* Buffer for receiving control packets */
	uint rxblen;		/* Allocated length of rxbuf */
	u8 *rxctl;		/* Aligned pointer into rxbuf */
	u8 *rxctl_orig;		/* pointer for freeing rxctl */
	uint rxlen;		/* Length of valid data in buffer */
	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */

	u8 sdpcm_ver;		/* Bus protocol reported by dongle */

	bool intr;		/* Use interrupts */
	bool poll;		/* Use polling */
	atomic_t ipend;		/* Device interrupt is pending */
	uint spurious;		/* Count of spurious interrupts */
	uint pollrate;		/* Ticks between device polls */
	uint polltick;		/* Tick counter */

	uint console_interval;
	struct brcmf_console console;	/* Console output polling support */
	uint console_addr;	/* Console address from shared struct */

	uint clkstate;		/* State of sd and backplane clock(s) */
	s32 idletime;		/* Control for activity timeout */
	s32 idlecount;		/* Activity timeout counter */
	s32 idleclock;		/* How to set bus driver when idle */
	bool rxflow_mode;	/* Rx flow control mode */
	bool rxflow;		/* Is rx flow control on */
	bool alp_only;		/* Don't use HT clock (ALP only) */

	bool ctrl_frame_stat;

	spinlock_t txq_lock;	/* protect bus->txq */
	wait_queue_head_t ctrl_wait;
	wait_queue_head_t dcmd_resp_wait;

	struct timer_list timer;
	struct completion watchdog_wait;
	struct task_struct *watchdog_tsk;

	struct workqueue_struct *brcmf_wq;
	struct work_struct datawork;

	bool txoff;		/* Transmit flow-controlled */
	struct brcmf_sdio_count sdcnt;
	bool sr_enabled;	/* SaveRestore enabled */

	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
	bool txglom;		/* host tx glomming enable flag */
	u16 head_align;		/* buffer pointer alignment */
	u16 sgentry_align;	/* scatter-gather buffer alignment */
#define CLK_PENDING	2

static int qcount[NUMPRIO];

#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */

#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)

/* Limit on rounding up frames */
static const uint max_roundup = 512;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT

enum brcmf_sdio_frmtype {
	BRCMF_SDIO_FT_NORMAL,
	BRCMF_SDIO_FT_SUPER,
	BRCMF_SDIO_FT_SUB,
};
557 #define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
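/*
 * Hedged sketch (not part of the original driver): the lookup key above
 * packs the chip id into the upper 16 bits and the PMU revision into the
 * lower 16, e.g. chip 0x4330 with PMU rev 12 yields 0x4330000c.
 */
static inline u32 brcmf_example_drvstr_key(u32 chip, u32 pmurev)
{
	return SDIOD_DRVSTR_KEY(chip, pmurev);
}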
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
	u8 strength;	/* Pad Drive Strength in mA */
	u8 sel;		/* Chip-specific select value */
};

/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {

/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {

/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {

/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
BRCMF_FW_NVRAM_DEF(43143, "brcmfmac43143-sdio.bin", "brcmfmac43143-sdio.txt");
BRCMF_FW_NVRAM_DEF(43241B0, "brcmfmac43241b0-sdio.bin",
		   "brcmfmac43241b0-sdio.txt");
BRCMF_FW_NVRAM_DEF(43241B4, "brcmfmac43241b4-sdio.bin",
		   "brcmfmac43241b4-sdio.txt");
BRCMF_FW_NVRAM_DEF(43241B5, "brcmfmac43241b5-sdio.bin",
		   "brcmfmac43241b5-sdio.txt");
BRCMF_FW_NVRAM_DEF(4329, "brcmfmac4329-sdio.bin", "brcmfmac4329-sdio.txt");
BRCMF_FW_NVRAM_DEF(4330, "brcmfmac4330-sdio.bin", "brcmfmac4330-sdio.txt");
BRCMF_FW_NVRAM_DEF(4334, "brcmfmac4334-sdio.bin", "brcmfmac4334-sdio.txt");
BRCMF_FW_NVRAM_DEF(43340, "brcmfmac43340-sdio.bin", "brcmfmac43340-sdio.txt");
BRCMF_FW_NVRAM_DEF(4335, "brcmfmac4335-sdio.bin", "brcmfmac4335-sdio.txt");
BRCMF_FW_NVRAM_DEF(43362, "brcmfmac43362-sdio.bin", "brcmfmac43362-sdio.txt");
BRCMF_FW_NVRAM_DEF(4339, "brcmfmac4339-sdio.bin", "brcmfmac4339-sdio.txt");
BRCMF_FW_NVRAM_DEF(43430A0, "brcmfmac43430a0-sdio.bin", "brcmfmac43430a0-sdio.txt");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_NVRAM_DEF(43430A1, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt");
BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt");
BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt");
BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt");
BRCMF_FW_NVRAM_DEF(4373, "brcmfmac4373-sdio.bin", "brcmfmac4373-sdio.txt");
static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, 43241B5),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, 4329),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, 4330),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, 4334),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43340_CHIP_ID, 0xFFFFFFFF, 43340),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43341_CHIP_ID, 0xFFFFFFFF, 43340),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
	BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
	BRCMF_FW_NVRAM_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373)
};
static void pkt_align(struct sk_buff *p, int len, int align)
{
	uint datalign;

	datalign = (unsigned long)(p->data);
	datalign = roundup(datalign, (align)) - datalign;
	if (datalign)
		skb_pull(p, datalign);
	__skb_trim(p, len);
}
/* To check if there's window offered */
static bool data_ok(struct brcmf_sdio *bus)
{
	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}
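/*
 * Hedged worked example (not in the original file): data_ok() relies on
 * mod-256 sequence arithmetic. With tx_max = 0x05 and tx_seq = 0xfa the
 * difference (u8)(0x05 - 0xfa) is 0x0b, i.e. 11 frames of credit remain;
 * a result with bit 7 set means tx_seq has overtaken tx_max and the window
 * is treated as closed.
 */
static inline u8 brcmf_example_tx_window(u8 tx_max, u8 tx_seq)
{
	return (u8)(tx_max - tx_seq);
}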
664 brcmf_sdio_kso_control(struct brcmf_sdio
*bus
, bool on
)
666 u8 wr_val
= 0, rd_val
, cmp_val
, bmask
;
671 brcmf_dbg(TRACE
, "Enter: on=%d\n", on
);
673 wr_val
= (on
<< SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT
);
674 /* 1st KSO write goes to AOS wake up core if device is asleep */
675 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
, wr_val
, &err
);
678 /* device WAKEUP through KSO:
679 * write bit 0 & read back until
680 * both bits 0 (kso bit) & 1 (dev on status) are set
682 cmp_val
= SBSDIO_FUNC1_SLEEPCSR_KSO_MASK
|
683 SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK
;
685 usleep_range(2000, 3000);
687 /* Put device to sleep, turn off KSO */
689 /* only check for bit0, bit1(dev on status) may not
690 * get cleared right away
692 bmask
= SBSDIO_FUNC1_SLEEPCSR_KSO_MASK
;
		/* Reliable KSO bit set/clr:
		 * the sdiod sleep write access is synced to the PMU 32 kHz
		 * clock, so a single write attempt may fail; read it back
		 * until it matches the written value.
		 */
701 rd_val
= brcmf_sdiod_readb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
,
704 if ((rd_val
& bmask
) == cmp_val
)
708 /* bail out upon subsequent access errors */
709 if (err
&& (err_cnt
++ > BRCMF_SDIO_MAX_ACCESS_ERRORS
))
713 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
, wr_val
,
716 } while (try_cnt
++ < MAX_KSO_ATTEMPTS
);
719 brcmf_dbg(SDIO
, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt
,
722 if (try_cnt
> MAX_KSO_ATTEMPTS
)
723 brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val
, err
);
728 #define HOSTINTMASK (I_HMB_SW_MASK | I_CHIPACTIVE)
730 /* Turn backplane clock on or off */
731 static int brcmf_sdio_htclk(struct brcmf_sdio
*bus
, bool on
, bool pendok
)
734 u8 clkctl
, clkreq
, devctl
;
735 unsigned long timeout
;
737 brcmf_dbg(SDIO
, "Enter\n");
741 if (bus
->sr_enabled
) {
742 bus
->clkstate
= (on
? CLK_AVAIL
: CLK_SDONLY
);
747 /* Request HT Avail */
749 bus
->alp_only
? SBSDIO_ALP_AVAIL_REQ
: SBSDIO_HT_AVAIL_REQ
;
751 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
754 brcmf_err("HT Avail request error: %d\n", err
);
758 /* Check current status */
759 clkctl
= brcmf_sdiod_readb(bus
->sdiodev
,
760 SBSDIO_FUNC1_CHIPCLKCSR
, &err
);
762 brcmf_err("HT Avail read error: %d\n", err
);
766 /* Go to pending and await interrupt if appropriate */
767 if (!SBSDIO_CLKAV(clkctl
, bus
->alp_only
) && pendok
) {
768 /* Allow only clock-available interrupt */
769 devctl
= brcmf_sdiod_readb(bus
->sdiodev
,
770 SBSDIO_DEVICE_CTL
, &err
);
772 brcmf_err("Devctl error setting CA: %d\n", err
);
776 devctl
|= SBSDIO_DEVCTL_CA_INT_ONLY
;
777 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
779 brcmf_dbg(SDIO
, "CLKCTL: set PENDING\n");
780 bus
->clkstate
= CLK_PENDING
;
783 } else if (bus
->clkstate
== CLK_PENDING
) {
784 /* Cancel CA-only interrupt filter */
785 devctl
= brcmf_sdiod_readb(bus
->sdiodev
,
786 SBSDIO_DEVICE_CTL
, &err
);
787 devctl
&= ~SBSDIO_DEVCTL_CA_INT_ONLY
;
788 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
792 /* Otherwise, wait here (polling) for HT Avail */
794 msecs_to_jiffies(PMU_MAX_TRANSITION_DLY
/1000);
795 while (!SBSDIO_CLKAV(clkctl
, bus
->alp_only
)) {
796 clkctl
= brcmf_sdiod_readb(bus
->sdiodev
,
797 SBSDIO_FUNC1_CHIPCLKCSR
,
799 if (time_after(jiffies
, timeout
))
802 usleep_range(5000, 10000);
805 brcmf_err("HT Avail request error: %d\n", err
);
808 if (!SBSDIO_CLKAV(clkctl
, bus
->alp_only
)) {
809 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
810 PMU_MAX_TRANSITION_DLY
, clkctl
);
814 /* Mark clock available */
815 bus
->clkstate
= CLK_AVAIL
;
816 brcmf_dbg(SDIO
, "CLKCTL: turned ON\n");
819 if (!bus
->alp_only
) {
820 if (SBSDIO_ALPONLY(clkctl
))
821 brcmf_err("HT Clock should be on\n");
823 #endif /* defined (DEBUG) */
828 if (bus
->clkstate
== CLK_PENDING
) {
829 /* Cancel CA-only interrupt filter */
830 devctl
= brcmf_sdiod_readb(bus
->sdiodev
,
831 SBSDIO_DEVICE_CTL
, &err
);
832 devctl
&= ~SBSDIO_DEVCTL_CA_INT_ONLY
;
833 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
837 bus
->clkstate
= CLK_SDONLY
;
838 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
840 brcmf_dbg(SDIO
, "CLKCTL: turned OFF\n");
842 brcmf_err("Failed access turning clock off: %d\n",
850 /* Change idle/active SD state */
851 static int brcmf_sdio_sdclk(struct brcmf_sdio
*bus
, bool on
)
853 brcmf_dbg(SDIO
, "Enter\n");
856 bus
->clkstate
= CLK_SDONLY
;
858 bus
->clkstate
= CLK_NONE
;
863 /* Transition SD and backplane clock readiness */
864 static int brcmf_sdio_clkctl(struct brcmf_sdio
*bus
, uint target
, bool pendok
)
867 uint oldstate
= bus
->clkstate
;
870 brcmf_dbg(SDIO
, "Enter\n");
872 /* Early exit if we're already there */
873 if (bus
->clkstate
== target
)
878 /* Make sure SD clock is available */
879 if (bus
->clkstate
== CLK_NONE
)
880 brcmf_sdio_sdclk(bus
, true);
881 /* Now request HT Avail on the backplane */
882 brcmf_sdio_htclk(bus
, true, pendok
);
886 /* Remove HT request, or bring up SD clock */
887 if (bus
->clkstate
== CLK_NONE
)
888 brcmf_sdio_sdclk(bus
, true);
889 else if (bus
->clkstate
== CLK_AVAIL
)
890 brcmf_sdio_htclk(bus
, false, false);
892 brcmf_err("request for %d -> %d\n",
893 bus
->clkstate
, target
);
897 /* Make sure to remove HT request */
898 if (bus
->clkstate
== CLK_AVAIL
)
899 brcmf_sdio_htclk(bus
, false, false);
900 /* Now remove the SD clock */
901 brcmf_sdio_sdclk(bus
, false);
905 brcmf_dbg(SDIO
, "%d -> %d\n", oldstate
, bus
->clkstate
);
912 brcmf_sdio_bus_sleep(struct brcmf_sdio
*bus
, bool sleep
, bool pendok
)
917 brcmf_dbg(SDIO
, "Enter: request %s currently %s\n",
918 (sleep
? "SLEEP" : "WAKE"),
919 (bus
->sleeping
? "SLEEP" : "WAKE"));
921 /* If SR is enabled control bus state with KSO */
922 if (bus
->sr_enabled
) {
923 /* Done if we're already in the requested state */
924 if (sleep
== bus
->sleeping
)
929 clkcsr
= brcmf_sdiod_readb(bus
->sdiodev
,
930 SBSDIO_FUNC1_CHIPCLKCSR
,
932 if ((clkcsr
& SBSDIO_CSR_MASK
) == 0) {
933 brcmf_dbg(SDIO
, "no clock, set ALP\n");
934 brcmf_sdiod_writeb(bus
->sdiodev
,
935 SBSDIO_FUNC1_CHIPCLKCSR
,
936 SBSDIO_ALP_AVAIL_REQ
, &err
);
938 err
= brcmf_sdio_kso_control(bus
, false);
940 err
= brcmf_sdio_kso_control(bus
, true);
943 brcmf_err("error while changing bus sleep state %d\n",
952 if (!bus
->sr_enabled
)
953 brcmf_sdio_clkctl(bus
, CLK_NONE
, pendok
);
955 brcmf_sdio_clkctl(bus
, CLK_AVAIL
, pendok
);
956 brcmf_sdio_wd_timer(bus
, true);
958 bus
->sleeping
= sleep
;
959 brcmf_dbg(SDIO
, "new state %s\n",
960 (sleep
? "SLEEP" : "WAKE"));
962 brcmf_dbg(SDIO
, "Exit: err=%d\n", err
);
static inline bool brcmf_sdio_valid_shared_address(u32 addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}
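/*
 * Hedged example (not part of the original source): the last word of RAM
 * still holding the NVRAM-length signature looks like "~len:len"
 * (e.g. 0xfffe0001) and is rejected by the check above, while a real
 * pointer such as 0x001e4000 is accepted.
 */
static inline bool brcmf_example_shared_addr_check(void)
{
	return !brcmf_sdio_valid_shared_address(0xfffe0001) &&
	       brcmf_sdio_valid_shared_address(0x001e4000);
}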
973 static int brcmf_sdio_readshared(struct brcmf_sdio
*bus
,
974 struct sdpcm_shared
*sh
)
979 struct sdpcm_shared_le sh_le
;
982 sdio_claim_host(bus
->sdiodev
->func1
);
983 brcmf_sdio_bus_sleep(bus
, false, false);
986 * Read last word in socram to determine
987 * address of sdpcm_shared structure
989 shaddr
= bus
->ci
->rambase
+ bus
->ci
->ramsize
- 4;
990 if (!bus
->ci
->rambase
&& brcmf_chip_sr_capable(bus
->ci
))
991 shaddr
-= bus
->ci
->srsize
;
992 rv
= brcmf_sdiod_ramrw(bus
->sdiodev
, false, shaddr
,
998 * Check if addr is valid.
999 * NVRAM length at the end of memory should have been overwritten.
1001 addr
= le32_to_cpu(addr_le
);
1002 if (!brcmf_sdio_valid_shared_address(addr
)) {
1003 brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr
);
1008 brcmf_dbg(INFO
, "sdpcm_shared address 0x%08X\n", addr
);
1010 /* Read hndrte_shared structure */
1011 rv
= brcmf_sdiod_ramrw(bus
->sdiodev
, false, addr
, (u8
*)&sh_le
,
1012 sizeof(struct sdpcm_shared_le
));
1016 sdio_release_host(bus
->sdiodev
->func1
);
1019 sh
->flags
= le32_to_cpu(sh_le
.flags
);
1020 sh
->trap_addr
= le32_to_cpu(sh_le
.trap_addr
);
1021 sh
->assert_exp_addr
= le32_to_cpu(sh_le
.assert_exp_addr
);
1022 sh
->assert_file_addr
= le32_to_cpu(sh_le
.assert_file_addr
);
1023 sh
->assert_line
= le32_to_cpu(sh_le
.assert_line
);
1024 sh
->console_addr
= le32_to_cpu(sh_le
.console_addr
);
1025 sh
->msgtrace_addr
= le32_to_cpu(sh_le
.msgtrace_addr
);
1027 if ((sh
->flags
& SDPCM_SHARED_VERSION_MASK
) > SDPCM_SHARED_VERSION
) {
1028 brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
1029 SDPCM_SHARED_VERSION
,
1030 sh
->flags
& SDPCM_SHARED_VERSION_MASK
);
1036 brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
1038 sdio_release_host(bus
->sdiodev
->func1
);
1042 static void brcmf_sdio_get_console_addr(struct brcmf_sdio
*bus
)
1044 struct sdpcm_shared sh
;
1046 if (brcmf_sdio_readshared(bus
, &sh
) == 0)
1047 bus
->console_addr
= sh
.console_addr
;
1050 static void brcmf_sdio_get_console_addr(struct brcmf_sdio
*bus
)
1055 static u32
brcmf_sdio_hostmail(struct brcmf_sdio
*bus
)
1057 struct brcmf_sdio_dev
*sdiod
= bus
->sdiodev
;
1058 struct brcmf_core
*core
= bus
->sdio_core
;
1064 brcmf_dbg(SDIO
, "Enter\n");
1066 /* Read mailbox data and ack that we did so */
1067 hmb_data
= brcmf_sdiod_readl(sdiod
,
1068 core
->base
+ SD_REG(tohostmailboxdata
),
1072 brcmf_sdiod_writel(sdiod
, core
->base
+ SD_REG(tosbmailbox
),
1075 bus
->sdcnt
.f1regdata
+= 2;
1077 /* dongle indicates the firmware has halted/crashed */
1078 if (hmb_data
& HMB_DATA_FWHALT
)
1079 brcmf_err("mailbox indicates firmware halted\n");
1081 /* Dongle recomposed rx frames, accept them again */
1082 if (hmb_data
& HMB_DATA_NAKHANDLED
) {
1083 brcmf_dbg(SDIO
, "Dongle reports NAK handled, expect rtx of %d\n",
1086 brcmf_err("unexpected NAKHANDLED!\n");
1088 bus
->rxskip
= false;
1089 intstatus
|= I_HMB_FRAME_IND
;
1093 * DEVREADY does not occur with gSPI.
1095 if (hmb_data
& (HMB_DATA_DEVREADY
| HMB_DATA_FWREADY
)) {
1097 (hmb_data
& HMB_DATA_VERSION_MASK
) >>
1098 HMB_DATA_VERSION_SHIFT
;
1099 if (bus
->sdpcm_ver
!= SDPCM_PROT_VERSION
)
1100 brcmf_err("Version mismatch, dongle reports %d, "
1102 bus
->sdpcm_ver
, SDPCM_PROT_VERSION
);
1104 brcmf_dbg(SDIO
, "Dongle ready, protocol version %d\n",
1108 * Retrieve console state address now that firmware should have
1111 brcmf_sdio_get_console_addr(bus
);
	/* Flow Control has been moved into the RX headers and this out-of-band
	 * method isn't used any more; it remains only for backward
	 * compatibility with older dongles.
	 */
1119 if (hmb_data
& HMB_DATA_FC
) {
1120 fcbits
= (hmb_data
& HMB_DATA_FCDATA_MASK
) >>
1121 HMB_DATA_FCDATA_SHIFT
;
1123 if (fcbits
& ~bus
->flowcontrol
)
1124 bus
->sdcnt
.fc_xoff
++;
1126 if (bus
->flowcontrol
& ~fcbits
)
1127 bus
->sdcnt
.fc_xon
++;
1129 bus
->sdcnt
.fc_rcvd
++;
1130 bus
->flowcontrol
= fcbits
;
1133 /* Shouldn't be any others */
1134 if (hmb_data
& ~(HMB_DATA_DEVREADY
|
1135 HMB_DATA_NAKHANDLED
|
1139 HMB_DATA_FCDATA_MASK
| HMB_DATA_VERSION_MASK
))
1140 brcmf_err("Unknown mailbox data content: 0x%02x\n",
1146 static void brcmf_sdio_rxfail(struct brcmf_sdio
*bus
, bool abort
, bool rtx
)
1148 struct brcmf_sdio_dev
*sdiod
= bus
->sdiodev
;
1149 struct brcmf_core
*core
= bus
->sdio_core
;
1155 brcmf_err("%sterminate frame%s\n",
1156 abort
? "abort command, " : "",
1157 rtx
? ", send NAK" : "");
1160 brcmf_sdiod_abort(bus
->sdiodev
, bus
->sdiodev
->func2
);
1162 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_FRAMECTRL
, SFC_RF_TERM
,
1164 bus
->sdcnt
.f1regdata
++;
1166 /* Wait until the packet has been flushed (device/FIFO stable) */
1167 for (lastrbc
= retries
= 0xffff; retries
> 0; retries
--) {
1168 hi
= brcmf_sdiod_readb(bus
->sdiodev
, SBSDIO_FUNC1_RFRAMEBCHI
,
1170 lo
= brcmf_sdiod_readb(bus
->sdiodev
, SBSDIO_FUNC1_RFRAMEBCLO
,
1172 bus
->sdcnt
.f1regdata
+= 2;
1174 if ((hi
== 0) && (lo
== 0))
1177 if ((hi
> (lastrbc
>> 8)) && (lo
> (lastrbc
& 0x00ff))) {
1178 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
1179 lastrbc
, (hi
<< 8) + lo
);
1181 lastrbc
= (hi
<< 8) + lo
;
1185 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc
);
1187 brcmf_dbg(SDIO
, "flush took %d iterations\n", 0xffff - retries
);
1191 brcmf_sdiod_writel(sdiod
, core
->base
+ SD_REG(tosbmailbox
),
1194 bus
->sdcnt
.f1regdata
++;
1199 /* Clear partial in any case */
1200 bus
->cur_read
.len
= 0;
1203 static void brcmf_sdio_txfail(struct brcmf_sdio
*bus
)
1205 struct brcmf_sdio_dev
*sdiodev
= bus
->sdiodev
;
1208 /* On failure, abort the command and terminate the frame */
1209 brcmf_err("sdio error, abort command and terminate frame\n");
1210 bus
->sdcnt
.tx_sderrs
++;
1212 brcmf_sdiod_abort(sdiodev
, sdiodev
->func2
);
1213 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_FRAMECTRL
, SFC_WF_TERM
, NULL
);
1214 bus
->sdcnt
.f1regdata
++;
1216 for (i
= 0; i
< 3; i
++) {
1217 hi
= brcmf_sdiod_readb(sdiodev
, SBSDIO_FUNC1_WFRAMEBCHI
, NULL
);
1218 lo
= brcmf_sdiod_readb(sdiodev
, SBSDIO_FUNC1_WFRAMEBCLO
, NULL
);
1219 bus
->sdcnt
.f1regdata
+= 2;
1220 if ((hi
== 0) && (lo
== 0))
/* return total length of buffer chain */
static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
{
	struct sk_buff *p;
	uint total = 0;

	skb_queue_walk(&bus->glom, p)
		total += p->len;

	return total;
}

static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
{
	struct sk_buff *cur, *next;

	skb_queue_walk_safe(&bus->glom, cur, next) {
		skb_unlink(cur, &bus->glom);
		brcmu_pkt_buf_free_skb(cur);
	}
}
/*
 * brcmfmac sdio bus specific header
 * This is the lowest layer header wrapped on the packets transmitted between
 * host and WiFi dongle which contains information needed for SDIO core and
 * firmware.
 *
 * It consists of 3 parts: hardware header, hardware extension header and
 * software header.
 * hardware header (frame tag) - 4 bytes
 * Byte 0~1: Frame length
 * Byte 2~3: Checksum, bit-wise inverse of frame length
 * hardware extension header - 8 bytes
 * Tx glom mode only, N/A for Rx or normal Tx
 * Byte 0~1: Packet length excluding hw frame tag
 * Byte 3: Frame flags, bit 0: last frame indication
 * Byte 4~5: Reserved
 * Byte 6~7: Tail padding length
 * software header - 8 bytes
 * Byte 0: Rx/Tx sequence number
 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
 * Byte 2: Length of next data frame, reserved for Tx
 * Byte 3: Data offset
 * Byte 4: Flow control bits, reserved for Tx
 * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet
 * Byte 6~7: Reserved
 */
#define SDPCM_HWHDR_LEN			4
#define SDPCM_HWEXT_LEN			8
#define SDPCM_SWHDR_LEN			8
#define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
/* software header */
#define SDPCM_SEQ_MASK			0x000000ff
#define SDPCM_SEQ_WRAP			256
#define SDPCM_CHANNEL_MASK		0x00000f00
#define SDPCM_CHANNEL_SHIFT		8
#define SDPCM_CONTROL_CHANNEL		0	/* Control */
#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
#define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
#define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
#define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
#define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
#define SDPCM_NEXTLEN_MASK		0x00ff0000
#define SDPCM_NEXTLEN_SHIFT		16
#define SDPCM_DOFFSET_MASK		0xff000000
#define SDPCM_DOFFSET_SHIFT		24
#define SDPCM_FCMASK_MASK		0x000000ff
#define SDPCM_WINDOW_MASK		0x0000ff00
#define SDPCM_WINDOW_SHIFT		8
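/*
 * Hedged sketch (not in the original file): how the masks above pull
 * individual fields out of the first software-header word; the next-frame
 * length is carried in units of 16 bytes and is shifted left by 4 where it
 * is consumed below.
 */
static inline u8 brcmf_example_swhdr_seq(u32 swheader)
{
	return (u8)(swheader & SDPCM_SEQ_MASK);
}

static inline u16 brcmf_example_swhdr_nextlen(u32 swheader)
{
	return (u16)((swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT);
}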
static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
{
	u32 hdrvalue;

	hdrvalue = *(u32 *)swheader;
	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}

static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
{
	u32 hdrvalue;
	u8 ret;

	hdrvalue = *(u32 *)swheader;
	ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);

	return (ret == SDPCM_EVENT_CHANNEL);
}
1315 static int brcmf_sdio_hdparse(struct brcmf_sdio
*bus
, u8
*header
,
1316 struct brcmf_sdio_hdrinfo
*rd
,
1317 enum brcmf_sdio_frmtype type
)
1320 u8 rx_seq
, fc
, tx_seq_max
;
1323 trace_brcmf_sdpcm_hdr(SDPCM_RX
, header
);
1326 len
= get_unaligned_le16(header
);
1327 checksum
= get_unaligned_le16(header
+ sizeof(u16
));
1328 /* All zero means no more to read */
1329 if (!(len
| checksum
)) {
1330 bus
->rxpending
= false;
1333 if ((u16
)(~(len
^ checksum
))) {
1334 brcmf_err("HW header checksum error\n");
1335 bus
->sdcnt
.rx_badhdr
++;
1336 brcmf_sdio_rxfail(bus
, false, false);
1339 if (len
< SDPCM_HDRLEN
) {
1340 brcmf_err("HW header length error\n");
1343 if (type
== BRCMF_SDIO_FT_SUPER
&&
1344 (roundup(len
, bus
->blocksize
) != rd
->len
)) {
1345 brcmf_err("HW superframe header length error\n");
1348 if (type
== BRCMF_SDIO_FT_SUB
&& len
> rd
->len
) {
1349 brcmf_err("HW subframe header length error\n");
1354 /* software header */
1355 header
+= SDPCM_HWHDR_LEN
;
1356 swheader
= le32_to_cpu(*(__le32
*)header
);
1357 if (type
== BRCMF_SDIO_FT_SUPER
&& SDPCM_GLOMDESC(header
)) {
1358 brcmf_err("Glom descriptor found in superframe head\n");
1362 rx_seq
= (u8
)(swheader
& SDPCM_SEQ_MASK
);
1363 rd
->channel
= (swheader
& SDPCM_CHANNEL_MASK
) >> SDPCM_CHANNEL_SHIFT
;
1364 if (len
> MAX_RX_DATASZ
&& rd
->channel
!= SDPCM_CONTROL_CHANNEL
&&
1365 type
!= BRCMF_SDIO_FT_SUPER
) {
1366 brcmf_err("HW header length too long\n");
1367 bus
->sdcnt
.rx_toolong
++;
1368 brcmf_sdio_rxfail(bus
, false, false);
1372 if (type
== BRCMF_SDIO_FT_SUPER
&& rd
->channel
!= SDPCM_GLOM_CHANNEL
) {
1373 brcmf_err("Wrong channel for superframe\n");
1377 if (type
== BRCMF_SDIO_FT_SUB
&& rd
->channel
!= SDPCM_DATA_CHANNEL
&&
1378 rd
->channel
!= SDPCM_EVENT_CHANNEL
) {
1379 brcmf_err("Wrong channel for subframe\n");
1383 rd
->dat_offset
= brcmf_sdio_getdatoffset(header
);
1384 if (rd
->dat_offset
< SDPCM_HDRLEN
|| rd
->dat_offset
> rd
->len
) {
1385 brcmf_err("seq %d: bad data offset\n", rx_seq
);
1386 bus
->sdcnt
.rx_badhdr
++;
1387 brcmf_sdio_rxfail(bus
, false, false);
1391 if (rd
->seq_num
!= rx_seq
) {
1392 brcmf_dbg(SDIO
, "seq %d, expected %d\n", rx_seq
, rd
->seq_num
);
1393 bus
->sdcnt
.rx_badseq
++;
1394 rd
->seq_num
= rx_seq
;
	/* no need to check the rest for subframes */
1397 if (type
== BRCMF_SDIO_FT_SUB
)
1399 rd
->len_nxtfrm
= (swheader
& SDPCM_NEXTLEN_MASK
) >> SDPCM_NEXTLEN_SHIFT
;
1400 if (rd
->len_nxtfrm
<< 4 > MAX_RX_DATASZ
) {
		/* only warn for non-glom packets */
1402 if (rd
->channel
!= SDPCM_GLOM_CHANNEL
)
1403 brcmf_err("seq %d: next length error\n", rx_seq
);
1406 swheader
= le32_to_cpu(*(__le32
*)(header
+ 4));
1407 fc
= swheader
& SDPCM_FCMASK_MASK
;
1408 if (bus
->flowcontrol
!= fc
) {
1409 if (~bus
->flowcontrol
& fc
)
1410 bus
->sdcnt
.fc_xoff
++;
1411 if (bus
->flowcontrol
& ~fc
)
1412 bus
->sdcnt
.fc_xon
++;
1413 bus
->sdcnt
.fc_rcvd
++;
1414 bus
->flowcontrol
= fc
;
1416 tx_seq_max
= (swheader
& SDPCM_WINDOW_MASK
) >> SDPCM_WINDOW_SHIFT
;
1417 if ((u8
)(tx_seq_max
- bus
->tx_seq
) > 0x40) {
1418 brcmf_err("seq %d: max tx seq number error\n", rx_seq
);
1419 tx_seq_max
= bus
->tx_seq
+ 2;
1421 bus
->tx_max
= tx_seq_max
;
static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
{
	*(__le16 *)header = cpu_to_le16(frm_length);
	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
}
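/*
 * Hedged worked example (not part of the original source): for a 64-byte
 * frame the hardware tag written above is the length 0x0040 followed by its
 * bit-wise inverse 0xffbf; on receive, brcmf_sdio_hdparse() accepts the pair
 * only when (u16)(~(len ^ checksum)) evaluates to 0.
 */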
1432 static void brcmf_sdio_hdpack(struct brcmf_sdio
*bus
, u8
*header
,
1433 struct brcmf_sdio_hdrinfo
*hd_info
)
1438 brcmf_sdio_update_hwhdr(header
, hd_info
->len
);
1439 hdr_offset
= SDPCM_HWHDR_LEN
;
1442 hdrval
= (hd_info
->len
- hdr_offset
) | (hd_info
->lastfrm
<< 24);
1443 *((__le32
*)(header
+ hdr_offset
)) = cpu_to_le32(hdrval
);
1444 hdrval
= (u16
)hd_info
->tail_pad
<< 16;
1445 *(((__le32
*)(header
+ hdr_offset
)) + 1) = cpu_to_le32(hdrval
);
1446 hdr_offset
+= SDPCM_HWEXT_LEN
;
1449 hdrval
= hd_info
->seq_num
;
1450 hdrval
|= (hd_info
->channel
<< SDPCM_CHANNEL_SHIFT
) &
1452 hdrval
|= (hd_info
->dat_offset
<< SDPCM_DOFFSET_SHIFT
) &
1454 *((__le32
*)(header
+ hdr_offset
)) = cpu_to_le32(hdrval
);
1455 *(((__le32
*)(header
+ hdr_offset
)) + 1) = 0;
1456 trace_brcmf_sdpcm_hdr(SDPCM_TX
+ !!(bus
->txglom
), header
);
1459 static u8
brcmf_sdio_rxglom(struct brcmf_sdio
*bus
, u8 rxseq
)
1464 struct sk_buff
*pfirst
, *pnext
;
1469 struct brcmf_sdio_hdrinfo rd_new
;
1471 /* If packets, issue read(s) and send up packet chain */
1472 /* Return sequence numbers consumed? */
1474 brcmf_dbg(SDIO
, "start: glomd %p glom %p\n",
1475 bus
->glomd
, skb_peek(&bus
->glom
));
1477 /* If there's a descriptor, generate the packet chain */
1479 pfirst
= pnext
= NULL
;
1480 dlen
= (u16
) (bus
->glomd
->len
);
1481 dptr
= bus
->glomd
->data
;
1482 if (!dlen
|| (dlen
& 1)) {
1483 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1488 for (totlen
= num
= 0; dlen
; num
++) {
1489 /* Get (and move past) next length */
1490 sublen
= get_unaligned_le16(dptr
);
1491 dlen
-= sizeof(u16
);
1492 dptr
+= sizeof(u16
);
1493 if ((sublen
< SDPCM_HDRLEN
) ||
1494 ((num
== 0) && (sublen
< (2 * SDPCM_HDRLEN
)))) {
1495 brcmf_err("descriptor len %d bad: %d\n",
1500 if (sublen
% bus
->sgentry_align
) {
1501 brcmf_err("sublen %d not multiple of %d\n",
1502 sublen
, bus
->sgentry_align
);
1506 /* For last frame, adjust read len so total
1507 is a block multiple */
1510 (roundup(totlen
, bus
->blocksize
) - totlen
);
1511 totlen
= roundup(totlen
, bus
->blocksize
);
1514 /* Allocate/chain packet for next subframe */
1515 pnext
= brcmu_pkt_buf_get_skb(sublen
+ bus
->sgentry_align
);
1516 if (pnext
== NULL
) {
1517 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1521 skb_queue_tail(&bus
->glom
, pnext
);
1523 /* Adhere to start alignment requirements */
1524 pkt_align(pnext
, sublen
, bus
->sgentry_align
);
1527 /* If all allocations succeeded, save packet chain
1530 brcmf_dbg(GLOM
, "allocated %d-byte packet chain for %d subframes\n",
1532 if (BRCMF_GLOM_ON() && bus
->cur_read
.len
&&
1533 totlen
!= bus
->cur_read
.len
) {
1534 brcmf_dbg(GLOM
, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
1535 bus
->cur_read
.len
, totlen
, rxseq
);
1537 pfirst
= pnext
= NULL
;
1539 brcmf_sdio_free_glom(bus
);
1543 /* Done with descriptor packet */
1544 brcmu_pkt_buf_free_skb(bus
->glomd
);
1546 bus
->cur_read
.len
= 0;
1549 /* Ok -- either we just generated a packet chain,
1550 or had one from before */
1551 if (!skb_queue_empty(&bus
->glom
)) {
1552 if (BRCMF_GLOM_ON()) {
1553 brcmf_dbg(GLOM
, "try superframe read, packet chain:\n");
1554 skb_queue_walk(&bus
->glom
, pnext
) {
1555 brcmf_dbg(GLOM
, " %p: %p len 0x%04x (%d)\n",
1556 pnext
, (u8
*) (pnext
->data
),
1557 pnext
->len
, pnext
->len
);
1561 pfirst
= skb_peek(&bus
->glom
);
1562 dlen
= (u16
) brcmf_sdio_glom_len(bus
);
1564 /* Do an SDIO read for the superframe. Configurable iovar to
1565 * read directly into the chained packet, or allocate a large
	 * packet and copy into the chain.
1568 sdio_claim_host(bus
->sdiodev
->func1
);
1569 errcode
= brcmf_sdiod_recv_chain(bus
->sdiodev
,
1571 sdio_release_host(bus
->sdiodev
->func1
);
1572 bus
->sdcnt
.f2rxdata
++;
1574 /* On failure, kill the superframe */
1576 brcmf_err("glom read of %d bytes failed: %d\n",
1579 sdio_claim_host(bus
->sdiodev
->func1
);
1580 brcmf_sdio_rxfail(bus
, true, false);
1581 bus
->sdcnt
.rxglomfail
++;
1582 brcmf_sdio_free_glom(bus
);
1583 sdio_release_host(bus
->sdiodev
->func1
);
1587 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1588 pfirst
->data
, min_t(int, pfirst
->len
, 48),
1591 rd_new
.seq_num
= rxseq
;
1593 sdio_claim_host(bus
->sdiodev
->func1
);
1594 errcode
= brcmf_sdio_hdparse(bus
, pfirst
->data
, &rd_new
,
1595 BRCMF_SDIO_FT_SUPER
);
1596 sdio_release_host(bus
->sdiodev
->func1
);
1597 bus
->cur_read
.len
= rd_new
.len_nxtfrm
<< 4;
1599 /* Remove superframe header, remember offset */
1600 skb_pull(pfirst
, rd_new
.dat_offset
);
1601 sfdoff
= rd_new
.dat_offset
;
1604 /* Validate all the subframe headers */
1605 skb_queue_walk(&bus
->glom
, pnext
) {
1606 /* leave when invalid subframe is found */
1610 rd_new
.len
= pnext
->len
;
1611 rd_new
.seq_num
= rxseq
++;
1612 sdio_claim_host(bus
->sdiodev
->func1
);
1613 errcode
= brcmf_sdio_hdparse(bus
, pnext
->data
, &rd_new
,
1615 sdio_release_host(bus
->sdiodev
->func1
);
1616 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1617 pnext
->data
, 32, "subframe:\n");
1623 /* Terminate frame on error */
1624 sdio_claim_host(bus
->sdiodev
->func1
);
1625 brcmf_sdio_rxfail(bus
, true, false);
1626 bus
->sdcnt
.rxglomfail
++;
1627 brcmf_sdio_free_glom(bus
);
1628 sdio_release_host(bus
->sdiodev
->func1
);
1629 bus
->cur_read
.len
= 0;
1633 /* Basic SD framing looks ok - process each packet (header) */
1635 skb_queue_walk_safe(&bus
->glom
, pfirst
, pnext
) {
1636 dptr
= (u8
*) (pfirst
->data
);
1637 sublen
= get_unaligned_le16(dptr
);
1638 doff
= brcmf_sdio_getdatoffset(&dptr
[SDPCM_HWHDR_LEN
]);
1640 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1642 "Rx Subframe Data:\n");
1644 __skb_trim(pfirst
, sublen
);
1645 skb_pull(pfirst
, doff
);
1647 if (pfirst
->len
== 0) {
1648 skb_unlink(pfirst
, &bus
->glom
);
1649 brcmu_pkt_buf_free_skb(pfirst
);
1653 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1655 min_t(int, pfirst
->len
, 32),
1656 "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
1657 bus
->glom
.qlen
, pfirst
, pfirst
->data
,
1658 pfirst
->len
, pfirst
->next
,
1660 skb_unlink(pfirst
, &bus
->glom
);
1661 if (brcmf_sdio_fromevntchan(&dptr
[SDPCM_HWHDR_LEN
]))
1662 brcmf_rx_event(bus
->sdiodev
->dev
, pfirst
);
1664 brcmf_rx_frame(bus
->sdiodev
->dev
, pfirst
,
1666 bus
->sdcnt
.rxglompkts
++;
1669 bus
->sdcnt
.rxglomframes
++;
static int brcmf_sdio_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
				     bool *pending)
{
	DECLARE_WAITQUEUE(wait, current);
	int timeout = DCMD_RESP_TIMEOUT;

	/* Wait until control frame is available */
	add_wait_queue(&bus->dcmd_resp_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!(*condition) && (!signal_pending(current) && timeout))
		timeout = schedule_timeout(timeout);

	if (signal_pending(current))
		*pending = true;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bus->dcmd_resp_wait, &wait);

	return timeout;
}

static int brcmf_sdio_dcmd_resp_wake(struct brcmf_sdio *bus)
{
	wake_up_interruptible(&bus->dcmd_resp_wait);

	return 0;
}
1703 brcmf_sdio_read_control(struct brcmf_sdio
*bus
, u8
*hdr
, uint len
, uint doff
)
1706 u8
*buf
= NULL
, *rbuf
;
1709 brcmf_dbg(TRACE
, "Enter\n");
1712 buf
= vzalloc(bus
->rxblen
);
1717 pad
= ((unsigned long)rbuf
% bus
->head_align
);
1719 rbuf
+= (bus
->head_align
- pad
);
1721 /* Copy the already-read portion over */
1722 memcpy(buf
, hdr
, BRCMF_FIRSTREAD
);
1723 if (len
<= BRCMF_FIRSTREAD
)
1726 /* Raise rdlen to next SDIO block to avoid tail command */
1727 rdlen
= len
- BRCMF_FIRSTREAD
;
1728 if (bus
->roundup
&& bus
->blocksize
&& (rdlen
> bus
->blocksize
)) {
1729 pad
= bus
->blocksize
- (rdlen
% bus
->blocksize
);
1730 if ((pad
<= bus
->roundup
) && (pad
< bus
->blocksize
) &&
1731 ((len
+ pad
) < bus
->sdiodev
->bus_if
->maxctl
))
1733 } else if (rdlen
% bus
->head_align
) {
1734 rdlen
+= bus
->head_align
- (rdlen
% bus
->head_align
);
1737 /* Drop if the read is too big or it exceeds our maximum */
1738 if ((rdlen
+ BRCMF_FIRSTREAD
) > bus
->sdiodev
->bus_if
->maxctl
) {
1739 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1740 rdlen
, bus
->sdiodev
->bus_if
->maxctl
);
1741 brcmf_sdio_rxfail(bus
, false, false);
1745 if ((len
- doff
) > bus
->sdiodev
->bus_if
->maxctl
) {
1746 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1747 len
, len
- doff
, bus
->sdiodev
->bus_if
->maxctl
);
1748 bus
->sdcnt
.rx_toolong
++;
1749 brcmf_sdio_rxfail(bus
, false, false);
1753 /* Read remain of frame body */
1754 sdret
= brcmf_sdiod_recv_buf(bus
->sdiodev
, rbuf
, rdlen
);
1755 bus
->sdcnt
.f2rxdata
++;
1757 /* Control frame failures need retransmission */
1759 brcmf_err("read %d control bytes failed: %d\n",
1761 bus
->sdcnt
.rxc_errors
++;
1762 brcmf_sdio_rxfail(bus
, true, true);
1765 memcpy(buf
+ BRCMF_FIRSTREAD
, rbuf
, rdlen
);
1769 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1770 buf
, len
, "RxCtrl:\n");
1772 /* Point to valid data and indicate its length */
1773 spin_lock_bh(&bus
->rxctl_lock
);
1775 brcmf_err("last control frame is being processed.\n");
1776 spin_unlock_bh(&bus
->rxctl_lock
);
1780 bus
->rxctl
= buf
+ doff
;
1781 bus
->rxctl_orig
= buf
;
1782 bus
->rxlen
= len
- doff
;
1783 spin_unlock_bh(&bus
->rxctl_lock
);
1786 /* Awake any waiters */
1787 brcmf_sdio_dcmd_resp_wake(bus
);
/* Pad read to blocksize for efficiency */
static void brcmf_sdio_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
		*pad = bus->blocksize - (*rdlen % bus->blocksize);
		if (*pad <= bus->roundup && *pad < bus->blocksize &&
		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
			*rdlen += *pad;
	} else if (*rdlen % bus->head_align) {
		*rdlen += bus->head_align - (*rdlen % bus->head_align);
	}
}
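/*
 * Hedged worked example (not in the original file): with a 512-byte block
 * size and a 700-byte read, the pad above becomes 512 - (700 % 512) = 324,
 * so the transfer is rounded up to 1024 bytes, provided the pad stays within
 * bus->roundup and the padded length still fits under MAX_RX_DATASZ.
 */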
1803 static uint
brcmf_sdio_readframes(struct brcmf_sdio
*bus
, uint maxframes
)
1805 struct sk_buff
*pkt
; /* Packet for event or data frames */
1806 u16 pad
; /* Number of pad bytes to read */
1807 uint rxleft
= 0; /* Remaining number of frames allowed */
1808 int ret
; /* Return code from calls */
1809 uint rxcount
= 0; /* Total frames read */
1810 struct brcmf_sdio_hdrinfo
*rd
= &bus
->cur_read
, rd_new
;
1813 brcmf_dbg(TRACE
, "Enter\n");
1815 /* Not finished unless we encounter no more frames indication */
1816 bus
->rxpending
= true;
1818 for (rd
->seq_num
= bus
->rx_seq
, rxleft
= maxframes
;
1819 !bus
->rxskip
&& rxleft
&& bus
->sdiodev
->state
== BRCMF_SDIOD_DATA
;
1820 rd
->seq_num
++, rxleft
--) {
1822 /* Handle glomming separately */
1823 if (bus
->glomd
|| !skb_queue_empty(&bus
->glom
)) {
1825 brcmf_dbg(GLOM
, "calling rxglom: glomd %p, glom %p\n",
1826 bus
->glomd
, skb_peek(&bus
->glom
));
1827 cnt
= brcmf_sdio_rxglom(bus
, rd
->seq_num
);
1828 brcmf_dbg(GLOM
, "rxglom returned %d\n", cnt
);
1829 rd
->seq_num
+= cnt
- 1;
1830 rxleft
= (rxleft
> cnt
) ? (rxleft
- cnt
) : 1;
1834 rd
->len_left
= rd
->len
;
		/* read header first for unknown frame length */
1836 sdio_claim_host(bus
->sdiodev
->func1
);
1838 ret
= brcmf_sdiod_recv_buf(bus
->sdiodev
,
1839 bus
->rxhdr
, BRCMF_FIRSTREAD
);
1840 bus
->sdcnt
.f2rxhdrs
++;
1842 brcmf_err("RXHEADER FAILED: %d\n",
1844 bus
->sdcnt
.rx_hdrfail
++;
1845 brcmf_sdio_rxfail(bus
, true, true);
1846 sdio_release_host(bus
->sdiodev
->func1
);
1850 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
1851 bus
->rxhdr
, SDPCM_HDRLEN
,
1854 if (brcmf_sdio_hdparse(bus
, bus
->rxhdr
, rd
,
1855 BRCMF_SDIO_FT_NORMAL
)) {
1856 sdio_release_host(bus
->sdiodev
->func1
);
1857 if (!bus
->rxpending
)
1863 if (rd
->channel
== SDPCM_CONTROL_CHANNEL
) {
1864 brcmf_sdio_read_control(bus
, bus
->rxhdr
,
1867 /* prepare the descriptor for the next read */
1868 rd
->len
= rd
->len_nxtfrm
<< 4;
1870 /* treat all packet as event if we don't know */
1871 rd
->channel
= SDPCM_EVENT_CHANNEL
;
1872 sdio_release_host(bus
->sdiodev
->func1
);
1875 rd
->len_left
= rd
->len
> BRCMF_FIRSTREAD
?
1876 rd
->len
- BRCMF_FIRSTREAD
: 0;
1877 head_read
= BRCMF_FIRSTREAD
;
1880 brcmf_sdio_pad(bus
, &pad
, &rd
->len_left
);
1882 pkt
= brcmu_pkt_buf_get_skb(rd
->len_left
+ head_read
+
1885 /* Give up on data, request rtx of events */
1886 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1887 brcmf_sdio_rxfail(bus
, false,
1888 RETRYCHAN(rd
->channel
));
1889 sdio_release_host(bus
->sdiodev
->func1
);
1892 skb_pull(pkt
, head_read
);
1893 pkt_align(pkt
, rd
->len_left
, bus
->head_align
);
1895 ret
= brcmf_sdiod_recv_pkt(bus
->sdiodev
, pkt
);
1896 bus
->sdcnt
.f2rxdata
++;
1897 sdio_release_host(bus
->sdiodev
->func1
);
1900 brcmf_err("read %d bytes from channel %d failed: %d\n",
1901 rd
->len
, rd
->channel
, ret
);
1902 brcmu_pkt_buf_free_skb(pkt
);
1903 sdio_claim_host(bus
->sdiodev
->func1
);
1904 brcmf_sdio_rxfail(bus
, true,
1905 RETRYCHAN(rd
->channel
));
1906 sdio_release_host(bus
->sdiodev
->func1
);
1911 skb_push(pkt
, head_read
);
1912 memcpy(pkt
->data
, bus
->rxhdr
, head_read
);
1915 memcpy(bus
->rxhdr
, pkt
->data
, SDPCM_HDRLEN
);
1916 rd_new
.seq_num
= rd
->seq_num
;
1917 sdio_claim_host(bus
->sdiodev
->func1
);
1918 if (brcmf_sdio_hdparse(bus
, bus
->rxhdr
, &rd_new
,
1919 BRCMF_SDIO_FT_NORMAL
)) {
1921 brcmu_pkt_buf_free_skb(pkt
);
1923 bus
->sdcnt
.rx_readahead_cnt
++;
1924 if (rd
->len
!= roundup(rd_new
.len
, 16)) {
1925 brcmf_err("frame length mismatch:read %d, should be %d\n",
1927 roundup(rd_new
.len
, 16) >> 4);
1929 brcmf_sdio_rxfail(bus
, true, true);
1930 sdio_release_host(bus
->sdiodev
->func1
);
1931 brcmu_pkt_buf_free_skb(pkt
);
1934 sdio_release_host(bus
->sdiodev
->func1
);
1935 rd
->len_nxtfrm
= rd_new
.len_nxtfrm
;
1936 rd
->channel
= rd_new
.channel
;
1937 rd
->dat_offset
= rd_new
.dat_offset
;
1939 brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
1942 bus
->rxhdr
, SDPCM_HDRLEN
,
1945 if (rd_new
.channel
== SDPCM_CONTROL_CHANNEL
) {
1946 brcmf_err("readahead on control packet %d?\n",
1948 /* Force retry w/normal header read */
1950 sdio_claim_host(bus
->sdiodev
->func1
);
1951 brcmf_sdio_rxfail(bus
, false, true);
1952 sdio_release_host(bus
->sdiodev
->func1
);
1953 brcmu_pkt_buf_free_skb(pkt
);
1958 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1959 pkt
->data
, rd
->len
, "Rx Data:\n");
1961 /* Save superframe descriptor and allocate packet frame */
1962 if (rd
->channel
== SDPCM_GLOM_CHANNEL
) {
1963 if (SDPCM_GLOMDESC(&bus
->rxhdr
[SDPCM_HWHDR_LEN
])) {
1964 brcmf_dbg(GLOM
, "glom descriptor, %d bytes:\n",
1966 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1969 __skb_trim(pkt
, rd
->len
);
1970 skb_pull(pkt
, SDPCM_HDRLEN
);
1973 brcmf_err("%s: glom superframe w/o "
1974 "descriptor!\n", __func__
);
1975 sdio_claim_host(bus
->sdiodev
->func1
);
1976 brcmf_sdio_rxfail(bus
, false, false);
1977 sdio_release_host(bus
->sdiodev
->func1
);
1979 /* prepare the descriptor for the next read */
1980 rd
->len
= rd
->len_nxtfrm
<< 4;
1982 /* treat all packet as event if we don't know */
1983 rd
->channel
= SDPCM_EVENT_CHANNEL
;
1987 /* Fill in packet len and prio, deliver upward */
1988 __skb_trim(pkt
, rd
->len
);
1989 skb_pull(pkt
, rd
->dat_offset
);
1992 brcmu_pkt_buf_free_skb(pkt
);
1993 else if (rd
->channel
== SDPCM_EVENT_CHANNEL
)
1994 brcmf_rx_event(bus
->sdiodev
->dev
, pkt
);
1996 brcmf_rx_frame(bus
->sdiodev
->dev
, pkt
,
1999 /* prepare the descriptor for the next read */
2000 rd
->len
= rd
->len_nxtfrm
<< 4;
2002 /* treat all packet as event if we don't know */
2003 rd
->channel
= SDPCM_EVENT_CHANNEL
;
2006 rxcount
= maxframes
- rxleft
;
2007 /* Message if we hit the limit */
2009 brcmf_dbg(DATA
, "hit rx limit of %d frames\n", maxframes
);
2011 brcmf_dbg(DATA
, "processed %d frames\n", rxcount
);
2012 /* Back off rxseq if awaiting rtx, update rx_seq */
2015 bus
->rx_seq
= rd
->seq_num
;
static void brcmf_sdio_wait_event_wakeup(struct brcmf_sdio *bus)
{
	wake_up_interruptible(&bus->ctrl_wait);
}
2027 static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio
*bus
, struct sk_buff
*pkt
)
2029 struct brcmf_bus_stats
*stats
;
2033 dat_buf
= (u8
*)(pkt
->data
);
2035 /* Check head padding */
2036 head_pad
= ((unsigned long)dat_buf
% bus
->head_align
);
2038 if (skb_headroom(pkt
) < head_pad
) {
2039 stats
= &bus
->sdiodev
->bus_if
->stats
;
2040 atomic_inc(&stats
->pktcowed
);
2041 if (skb_cow_head(pkt
, head_pad
)) {
2042 atomic_inc(&stats
->pktcow_failed
);
2047 skb_push(pkt
, head_pad
);
2048 dat_buf
= (u8
*)(pkt
->data
);
2050 memset(dat_buf
, 0, head_pad
+ bus
->tx_hdrlen
);
/**
 * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
 * bus layer usage.
 */
/* flag marking a dummy skb added for DMA alignment requirement */
#define ALIGN_SKB_FLAG		0x8000
/* bit mask of data length chopped from the previous packet */
#define ALIGN_SKB_CHOP_LEN_MASK	0x7fff
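/*
 * Illustrative example of the cb encoding above: a dummy alignment skb that
 * carries 3 bytes chopped from the previous packet stores
 * ALIGN_SKB_FLAG + 3 == 0x8003 in the first two bytes of sk_buff::cb.
 */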
static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
				    struct sk_buff_head *pktq,
				    struct sk_buff *pkt, u16 total_len)
{
	struct brcmf_sdio_dev *sdiodev;
	struct sk_buff *pkt_pad;
	u16 tail_pad, tail_chop, chain_pad;
	unsigned int blksize;
	bool lastfrm;
	int ntail, ret;

	sdiodev = bus->sdiodev;
	blksize = sdiodev->func2->cur_blksize;
	/* sg entry alignment should be a divisor of block size */
	WARN_ON(blksize % bus->sgentry_align);

	/* Check tail padding */
	lastfrm = skb_queue_is_last(pktq, pkt);
	tail_pad = 0;
	tail_chop = pkt->len % bus->sgentry_align;
	if (tail_chop)
		tail_pad = bus->sgentry_align - tail_chop;
	chain_pad = (total_len + tail_pad) % blksize;
	if (lastfrm && chain_pad)
		tail_pad += blksize - chain_pad;
	if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
		pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
						bus->head_align);
		if (pkt_pad == NULL)
			return -ENOMEM;
		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
		if (unlikely(ret < 0)) {
			kfree_skb(pkt_pad);
			return ret;
		}
		memcpy(pkt_pad->data,
		       pkt->data + pkt->len - tail_chop,
		       tail_chop);
		*(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
		skb_trim(pkt, pkt->len - tail_chop);
		skb_trim(pkt_pad, tail_pad + tail_chop);
		__skb_queue_after(pktq, pkt, pkt_pad);
	} else {
		ntail = pkt->data_len + tail_pad -
			(pkt->end - pkt->tail);
		if (skb_cloned(pkt) || ntail > 0)
			if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
				return -ENOMEM;
		if (skb_linearize(pkt))
			return -ENOMEM;
		__skb_put(pkt, tail_pad);
	}

	return tail_pad;
}
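/*
 * Worked example for the tail padding above (arbitrary numbers): with
 * sgentry_align == 8 and pkt->len == 1502, tail_chop == 6 and tail_pad == 2.
 * If this is the last frame and the padded chain length is not a multiple of
 * the F2 block size, tail_pad grows by (blksize - chain_pad) so the whole
 * chain ends on a block boundary.
 */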
/**
 * brcmf_sdio_txpkt_prep - packet preparation for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 * @chan: virtual channel to transmit the packet
 *
 * Processes to be applied to the packet
 *	- Align data buffer pointer
 *	- Align data buffer length
 *
 * Return: negative value if there is error
 */
static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
		      uint chan)
{
	u16 head_pad, total_len;
	struct sk_buff *pkt_next;
	u8 txseq;
	int ret;
	struct brcmf_sdio_hdrinfo hd_info = {0};

	txseq = bus->tx_seq;
	total_len = 0;
	skb_queue_walk(pktq, pkt_next) {
		/* alignment packet inserted in previous
		 * loop cycle can be skipped as it is
		 * already properly aligned and does not
		 * need an sdpcm header.
		 */
		if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
			continue;

		/* align packet data pointer */
		ret = brcmf_sdio_txpkt_hdalign(bus, pkt_next);
		if (ret < 0)
			return ret;
		head_pad = (u16)ret;
		if (head_pad)
			memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);

		total_len += pkt_next->len;

		hd_info.len = pkt_next->len;
		hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next);
		if (bus->txglom && pktq->qlen > 1) {
			ret = brcmf_sdio_txpkt_prep_sg(bus, pktq,
						       pkt_next, total_len);
			if (ret < 0)
				return ret;
			hd_info.tail_pad = (u16)ret;
			total_len += (u16)ret;
		}

		hd_info.channel = chan;
		hd_info.dat_offset = head_pad + bus->tx_hdrlen;
		hd_info.seq_num = txseq++;

		/* Now fill the header */
		brcmf_sdio_hdpack(bus, pkt_next->data, &hd_info);

		if (BRCMF_BYTES_ON() &&
		    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
		     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
			brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
					   "Tx Frame:\n");
		else if (BRCMF_HDRS_ON())
			brcmf_dbg_hex_dump(true, pkt_next->data,
					   head_pad + bus->tx_hdrlen,
					   "Tx Header:\n");
	}
	/* Hardware length tag of the first packet should be total
	 * length of the chain (including padding)
	 */
	if (bus->txglom)
		brcmf_sdio_update_hwhdr(pktq->next->data, total_len);
	return 0;
}
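/*
 * Illustrative note: brcmf_sdio_update_hwhdr() above rewrites only the first
 * packet's hardware length field so that it describes the whole padded chain,
 * e.g. three frames of 1544 bytes each (after alignment) carry a length tag
 * of 4632 in the first header rather than 1544.
 */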
/**
 * brcmf_sdio_txpkt_postp - packet post processing for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 *
 * Processes to be applied to the packet
 *	- Remove head padding
 *	- Remove tail padding
 */
static void
brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
{
	u8 *hdr;
	u32 dat_offset;
	u16 tail_pad;
	u16 dummy_flags, chop_len;
	struct sk_buff *pkt_next, *tmp, *pkt_prev;

	skb_queue_walk_safe(pktq, pkt_next, tmp) {
		dummy_flags = *(u16 *)(pkt_next->cb);
		if (dummy_flags & ALIGN_SKB_FLAG) {
			chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
			if (chop_len) {
				pkt_prev = pkt_next->prev;
				skb_put(pkt_prev, chop_len);
			}
			__skb_unlink(pkt_next, pktq);
			brcmu_pkt_buf_free_skb(pkt_next);
		} else {
			hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN;
			dat_offset = le32_to_cpu(*(__le32 *)hdr);
			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
				     SDPCM_DOFFSET_SHIFT;
			skb_pull(pkt_next, dat_offset);
			if (bus->txglom) {
				tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2));
				skb_trim(pkt_next, pkt_next->len - tail_pad);
			}
		}
	}
}
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
			    uint chan)
{
	int ret;
	struct sk_buff *pkt_next, *tmp;

	brcmf_dbg(TRACE, "Enter\n");

	ret = brcmf_sdio_txpkt_prep(bus, pktq, chan);
	if (ret)
		goto done;

	sdio_claim_host(bus->sdiodev->func1);
	ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
	bus->sdcnt.f2txdata++;

	if (ret < 0)
		brcmf_sdio_txfail(bus);

	sdio_release_host(bus->sdiodev->func1);

done:
	brcmf_sdio_txpkt_postp(bus, pktq);
	if (ret == 0)
		bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
	skb_queue_walk_safe(pktq, pkt_next, tmp) {
		__skb_unlink(pkt_next, pktq);
		brcmf_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next,
					    ret == 0);
	}
	return ret;
}
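/*
 * Sequence accounting example (illustrative numbers): sending a 3-packet
 * chain with tx_seq == 254 and a wrap value of 256 leaves
 * tx_seq == (254 + 3) % 256 == 1, matching the modulo update above.
 */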
static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
	struct sk_buff *pkt;
	struct sk_buff_head pktq;
	u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
	u32 intstatus = 0;
	int ret = 0, prec_out, i;
	uint cnt = 0;
	u8 tx_prec_map, pkt_num;

	brcmf_dbg(TRACE, "Enter\n");

	tx_prec_map = ~bus->flowcontrol;

	/* Send frames until the limit or some other event */
	for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
		pkt_num = 1;
		if (bus->txglom)
			pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
					bus->sdiodev->txglomsz);
		pkt_num = min_t(u32, pkt_num,
				brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
		__skb_queue_head_init(&pktq);
		spin_lock_bh(&bus->txq_lock);
		for (i = 0; i < pkt_num; i++) {
			pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
					      &prec_out);
			if (pkt == NULL)
				break;
			__skb_queue_tail(&pktq, pkt);
		}
		spin_unlock_bh(&bus->txq_lock);
		if (i == 0)
			break;

		ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);

		cnt += i;

		/* In poll mode, need to check for other events */
		if (!bus->intr) {
			/* Check device status, signal pending interrupt */
			sdio_claim_host(bus->sdiodev->func1);
			intstatus = brcmf_sdiod_readl(bus->sdiodev,
						      intstat_addr, &ret);
			sdio_release_host(bus->sdiodev->func1);

			bus->sdcnt.f2txdata++;
			if (ret != 0)
				break;
			if (intstatus & bus->hostintmask)
				atomic_set(&bus->ipend, 1);
		}
	}

	/* Deflow-control stack if needed */
	if ((bus->sdiodev->state == BRCMF_SDIOD_DATA) &&
	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
		bus->txoff = false;
		brcmf_proto_bcdc_txflowblock(bus->sdiodev->dev, false);
	}

	return cnt;
}
static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
	u8 doff;
	u16 pad;
	uint retries = 0;
	struct brcmf_sdio_hdrinfo hd_info = {0};
	int ret;

	brcmf_dbg(TRACE, "Enter\n");

	/* Back the pointer to make room for bus header */
	frame -= bus->tx_hdrlen;
	len += bus->tx_hdrlen;

	/* Add alignment padding (optional for ctl frames) */
	doff = ((unsigned long)frame % bus->head_align);
	if (doff) {
		frame -= doff;
		len += doff;
		memset(frame + bus->tx_hdrlen, 0, doff);
	}

	/* Round send length to next SDIO block */
	pad = 0;
	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
		pad = bus->blocksize - (len % bus->blocksize);
		if ((pad > bus->roundup) || (pad >= bus->blocksize))
			pad = 0;
	} else if (len % bus->head_align) {
		pad = bus->head_align - (len % bus->head_align);
	}
	len += pad;

	hd_info.len = len - pad;
	hd_info.channel = SDPCM_CONTROL_CHANNEL;
	hd_info.dat_offset = doff + bus->tx_hdrlen;
	hd_info.seq_num = bus->tx_seq;
	hd_info.lastfrm = true;
	hd_info.tail_pad = pad;
	brcmf_sdio_hdpack(bus, frame, &hd_info);

	if (bus->txglom)
		brcmf_sdio_update_hwhdr(frame, len);

	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
			   frame, len, "Tx Frame:\n");
	brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
			   BRCMF_HDRS_ON(),
			   frame, min_t(u16, len, 16), "TxHdr:\n");

	do {
		ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);

		if (ret < 0)
			brcmf_sdio_txfail(bus);
		else
			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
	} while (ret < 0 && retries++ < TXRETRIES);

	return ret;
}
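/*
 * Block roundup example (illustrative numbers): with blocksize == 512,
 * roundup == 512 and len == 530, pad == 512 - (530 % 512) == 494, so the
 * control frame goes out as a single block-aligned 1024-byte transfer.
 */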
static void brcmf_sdio_bus_stop(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	struct brcmf_core *core = bus->sdio_core;
	u32 local_hostintmask;
	u8 saveclk;
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus->watchdog_tsk) {
		send_sig(SIGTERM, bus->watchdog_tsk, 1);
		kthread_stop(bus->watchdog_tsk);
		bus->watchdog_tsk = NULL;
	}

	if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
		sdio_claim_host(sdiodev->func1);

		/* Enable clock for device interrupts */
		brcmf_sdio_bus_sleep(bus, false, false);

		/* Disable and clear interrupts at the chip level also */
		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
				   0, NULL);

		local_hostintmask = bus->hostintmask;
		bus->hostintmask = 0;

		/* Force backplane clocks to assure F2 interrupt propagates */
		saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
					    &err);
		if (!err)
			brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
					   (saveclk | SBSDIO_FORCE_HT), &err);
		if (err)
			brcmf_err("Failed to force clock for F2: err %d\n",
				  err);

		/* Turn off the bus (F2), free any pending packets */
		brcmf_dbg(INTR, "disable SDIO interrupts\n");
		sdio_disable_func(sdiodev->func2);

		/* Clear any pending interrupts now that F2 is disabled */
		brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
				   local_hostintmask, NULL);

		sdio_release_host(sdiodev->func1);
	}
	/* Clear the data packet queues */
	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);

	/* Clear any held glomming stuff */
	brcmu_pkt_buf_free_skb(bus->glomd);
	brcmf_sdio_free_glom(bus);

	/* Clear rx control and wake any waiters */
	spin_lock_bh(&bus->rxctl_lock);
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	brcmf_sdio_dcmd_resp_wake(bus);

	/* Reset some F2 state stuff */
	bus->rxskip = false;
	bus->tx_seq = bus->rx_seq = 0;
}
static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
{
	struct brcmf_sdio_dev *sdiodev;
	unsigned long flags;

	sdiodev = bus->sdiodev;
	if (sdiodev->oob_irq_requested) {
		spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
		if (!sdiodev->irq_en && !atomic_read(&bus->ipend)) {
			enable_irq(sdiodev->settings->bus.sdio.oob_irq_nr);
			sdiodev->irq_en = true;
		}
		spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
	}
}

static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
	struct brcmf_core *core = bus->sdio_core;
	u32 addr;
	unsigned long val;
	int ret;

	addr = core->base + SD_REG(intstatus);

	val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
	bus->sdcnt.f1regdata++;
	if (ret != 0)
		return ret;

	val &= bus->hostintmask;
	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));

	/* Clear interrupts */
	if (val) {
		brcmf_sdiod_writel(bus->sdiodev, addr, val, &ret);
		bus->sdcnt.f1regdata++;
		atomic_or(val, &bus->intstatus);
	}

	return ret;
}
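/*
 * The write-back above acknowledges only the bits that were just read (and
 * masked by hostintmask); any interrupt that fires after the read stays set
 * in the device's intstatus register and is picked up on the next pass.
 */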
2514 static void brcmf_sdio_dpc(struct brcmf_sdio
*bus
)
2516 struct brcmf_sdio_dev
*sdiod
= bus
->sdiodev
;
2518 u32 intstat_addr
= bus
->sdio_core
->base
+ SD_REG(intstatus
);
2519 unsigned long intstatus
;
2520 uint txlimit
= bus
->txbound
; /* Tx frames to send before resched */
2521 uint framecnt
; /* Temporary counter of tx/rx frames */
2524 brcmf_dbg(TRACE
, "Enter\n");
2526 sdio_claim_host(bus
->sdiodev
->func1
);
2528 /* If waiting for HTAVAIL, check status */
2529 if (!bus
->sr_enabled
&& bus
->clkstate
== CLK_PENDING
) {
2530 u8 clkctl
, devctl
= 0;
2533 /* Check for inconsistent device control */
2534 devctl
= brcmf_sdiod_readb(bus
->sdiodev
, SBSDIO_DEVICE_CTL
,
2538 /* Read CSR, if clock on switch to AVAIL, else ignore */
2539 clkctl
= brcmf_sdiod_readb(bus
->sdiodev
,
2540 SBSDIO_FUNC1_CHIPCLKCSR
, &err
);
2542 brcmf_dbg(SDIO
, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
2545 if (SBSDIO_HTAV(clkctl
)) {
2546 devctl
= brcmf_sdiod_readb(bus
->sdiodev
,
2547 SBSDIO_DEVICE_CTL
, &err
);
2548 devctl
&= ~SBSDIO_DEVCTL_CA_INT_ONLY
;
2549 brcmf_sdiod_writeb(bus
->sdiodev
,
2550 SBSDIO_DEVICE_CTL
, devctl
, &err
);
2551 bus
->clkstate
= CLK_AVAIL
;
2555 /* Make sure backplane clock is on */
2556 brcmf_sdio_bus_sleep(bus
, false, true);
2558 /* Pending interrupt indicates new device status */
2559 if (atomic_read(&bus
->ipend
) > 0) {
2560 atomic_set(&bus
->ipend
, 0);
2561 err
= brcmf_sdio_intr_rstatus(bus
);
2564 /* Start with leftover status bits */
2565 intstatus
= atomic_xchg(&bus
->intstatus
, 0);
2567 /* Handle flow-control change: read new state in case our ack
2568 * crossed another change interrupt. If change still set, assume
2569 * FC ON for safety, let next loop through do the debounce.
2571 if (intstatus
& I_HMB_FC_CHANGE
) {
2572 intstatus
&= ~I_HMB_FC_CHANGE
;
2573 brcmf_sdiod_writel(sdiod
, intstat_addr
, I_HMB_FC_CHANGE
, &err
);
2575 newstatus
= brcmf_sdiod_readl(sdiod
, intstat_addr
, &err
);
2577 bus
->sdcnt
.f1regdata
+= 2;
2578 atomic_set(&bus
->fcstate
,
2579 !!(newstatus
& (I_HMB_FC_STATE
| I_HMB_FC_CHANGE
)));
2580 intstatus
|= (newstatus
& bus
->hostintmask
);
2583 /* Handle host mailbox indication */
2584 if (intstatus
& I_HMB_HOST_INT
) {
2585 intstatus
&= ~I_HMB_HOST_INT
;
2586 intstatus
|= brcmf_sdio_hostmail(bus
);
2589 sdio_release_host(bus
->sdiodev
->func1
);
2591 /* Generally don't ask for these, can get CRC errors... */
2592 if (intstatus
& I_WR_OOSYNC
) {
2593 brcmf_err("Dongle reports WR_OOSYNC\n");
2594 intstatus
&= ~I_WR_OOSYNC
;
2597 if (intstatus
& I_RD_OOSYNC
) {
2598 brcmf_err("Dongle reports RD_OOSYNC\n");
2599 intstatus
&= ~I_RD_OOSYNC
;
2602 if (intstatus
& I_SBINT
) {
2603 brcmf_err("Dongle reports SBINT\n");
2604 intstatus
&= ~I_SBINT
;
2607 /* Would be active due to wake-wlan in gSPI */
2608 if (intstatus
& I_CHIPACTIVE
) {
2609 brcmf_dbg(INFO
, "Dongle reports CHIPACTIVE\n");
2610 intstatus
&= ~I_CHIPACTIVE
;
2613 /* Ignore frame indications if rxskip is set */
2615 intstatus
&= ~I_HMB_FRAME_IND
;
2617 /* On frame indication, read available frames */
2618 if ((intstatus
& I_HMB_FRAME_IND
) && (bus
->clkstate
== CLK_AVAIL
)) {
2619 brcmf_sdio_readframes(bus
, bus
->rxbound
);
2620 if (!bus
->rxpending
)
2621 intstatus
&= ~I_HMB_FRAME_IND
;
2624 /* Keep still-pending events for next scheduling */
2626 atomic_or(intstatus
, &bus
->intstatus
);
2628 brcmf_sdio_clrintr(bus
);
2630 if (bus
->ctrl_frame_stat
&& (bus
->clkstate
== CLK_AVAIL
) &&
2632 sdio_claim_host(bus
->sdiodev
->func1
);
2633 if (bus
->ctrl_frame_stat
) {
2634 err
= brcmf_sdio_tx_ctrlframe(bus
, bus
->ctrl_frame_buf
,
2635 bus
->ctrl_frame_len
);
2636 bus
->ctrl_frame_err
= err
;
2638 bus
->ctrl_frame_stat
= false;
2640 sdio_release_host(bus
->sdiodev
->func1
);
2641 brcmf_sdio_wait_event_wakeup(bus
);
2643 /* Send queued frames (limit 1 if rx may still be pending) */
2644 if ((bus
->clkstate
== CLK_AVAIL
) && !atomic_read(&bus
->fcstate
) &&
2645 brcmu_pktq_mlen(&bus
->txq
, ~bus
->flowcontrol
) && txlimit
&&
2647 framecnt
= bus
->rxpending
? min(txlimit
, bus
->txminmax
) :
2649 brcmf_sdio_sendfromq(bus
, framecnt
);
2652 if ((bus
->sdiodev
->state
!= BRCMF_SDIOD_DATA
) || (err
!= 0)) {
2653 brcmf_err("failed backplane access over SDIO, halting operation\n");
2654 atomic_set(&bus
->intstatus
, 0);
2655 if (bus
->ctrl_frame_stat
) {
2656 sdio_claim_host(bus
->sdiodev
->func1
);
2657 if (bus
->ctrl_frame_stat
) {
2658 bus
->ctrl_frame_err
= -ENODEV
;
2660 bus
->ctrl_frame_stat
= false;
2661 brcmf_sdio_wait_event_wakeup(bus
);
2663 sdio_release_host(bus
->sdiodev
->func1
);
2665 } else if (atomic_read(&bus
->intstatus
) ||
2666 atomic_read(&bus
->ipend
) > 0 ||
2667 (!atomic_read(&bus
->fcstate
) &&
2668 brcmu_pktq_mlen(&bus
->txq
, ~bus
->flowcontrol
) &&
2670 bus
->dpc_triggered
= true;
static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	return &bus->txq;
}

static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
{
	struct sk_buff *p;
	int eprec = -1;		/* precedence to evict from */

	/* Fast case, precedence queue is not full and we are also not
	 * exceeding total queue length
	 */
	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
		brcmu_pktq_penq(q, prec, pkt);
		return true;
	}

	/* Determine precedence from which to evict packet, if any */
	if (pktq_pfull(q, prec)) {
		eprec = prec;
	} else if (pktq_full(q)) {
		p = brcmu_pktq_peek_tail(q, &eprec);
		if (eprec > prec)
			return false;
	}

	/* Evict if needed */
	if (eprec >= 0) {
		/* Detect queueing to unconfigured precedence */
		if (eprec == prec)
			return false;	/* refuse newer (incoming) packet */
		/* Evict packet according to discard policy */
		p = brcmu_pktq_pdeq_tail(q, eprec);
		if (p == NULL)
			brcmf_err("brcmu_pktq_pdeq_tail() failed\n");
		brcmu_pkt_buf_free_skb(p);
	}

	/* Enqueue */
	p = brcmu_pktq_penq(q, prec, pkt);
	if (p == NULL)
		brcmf_err("brcmu_pktq_penq() failed\n");

	return p != NULL;
}
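/*
 * Eviction example (illustrative): when the queue is full and the tail packet
 * sits in a lower precedence than the incoming one, that tail packet is
 * dropped to make room; an incoming packet never evicts an equal or higher
 * precedence entry and is refused instead.
 */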
static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
	int ret = -EBADE;
	uint prec;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
	if (sdiodev->state != BRCMF_SDIOD_DATA)
		return -EIO;

	/* Add space for the header */
	skb_push(pkt, bus->tx_hdrlen);
	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */

	prec = prio2prec((pkt->priority & PRIOMASK));

	/* Check for existing queue, current flow-control,
	   pending event, or pending clock */
	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
	bus->sdcnt.fcqueued++;

	/* Priority based enq */
	spin_lock_bh(&bus->txq_lock);
	/* reset bus_flags in packet cb */
	*(u16 *)(pkt->cb) = 0;
	if (!brcmf_sdio_prec_enq(&bus->txq, pkt, prec)) {
		skb_pull(pkt, bus->tx_hdrlen);
		brcmf_err("out of bus->txq !!!\n");
		ret = -ENOSR;
	} else {
		ret = 0;
	}

	if (pktq_len(&bus->txq) >= TXHI) {
		bus->txoff = true;
		brcmf_proto_bcdc_txflowblock(dev, true);
	}
	spin_unlock_bh(&bus->txq_lock);

#ifdef DEBUG
	if (pktq_plen(&bus->txq, prec) > qcount[prec])
		qcount[prec] = pktq_plen(&bus->txq, prec);
#endif

	brcmf_sdio_trigger_dpc(bus);
	return ret;
}
#define CONSOLE_LINE_MAX	192

static int brcmf_sdio_readconsole(struct brcmf_sdio *bus)
{
	struct brcmf_console *c = &bus->console;
	u8 line[CONSOLE_LINE_MAX], ch;
	u32 n, idx, addr;
	int rv;

	/* Don't do anything until FWREADY updates console address */
	if (bus->console_addr == 0)
		return 0;

	/* Read console log struct */
	addr = bus->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
			       sizeof(c->log_le));
	if (rv < 0)
		return rv;

	/* Allocate console buffer (one time only) */
	if (c->buf == NULL) {
		c->bufsize = le32_to_cpu(c->log_le.buf_size);
		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
		if (c->buf == NULL)
			return -ENOMEM;
	}

	idx = le32_to_cpu(c->log_le.idx);

	/* Protect against corrupt value */
	if (idx > c->bufsize)
		return -EBADE;

	/* Skip reading the console buffer if the index pointer
	 * has not moved
	 */
	if (idx == c->last)
		return 0;

	/* Read the console buffer */
	addr = le32_to_cpu(c->log_le.buf);
	rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
	if (rv < 0)
		return rv;

	while (c->last != idx) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
			if (c->last == idx) {
				/* This would output a partial line.
				 * Instead, back up
				 * the buffer pointer and output this
				 * line next time around.
				 */
				if (c->last >= n)
					c->last -= n;
				else
					c->last = c->bufsize - n;
				goto break2;
			}
			ch = c->buf[c->last];
			c->last = (c->last + 1) % c->bufsize;
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			pr_debug("CONSOLE: %s\n", line);
		}
	}
break2:

	return 0;
}
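/*
 * Ring buffer example (illustrative numbers): with bufsize == 1024,
 * last == 1020 and idx == 4, the loop above emits the characters at offsets
 * 1020..1023 followed by 0..3, since c->last wraps with the modulo update.
 */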
static int
brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	int ret;

	brcmf_dbg(TRACE, "Enter\n");
	if (sdiodev->state != BRCMF_SDIOD_DATA)
		return -EIO;

	/* Send from dpc */
	bus->ctrl_frame_buf = msg;
	bus->ctrl_frame_len = msglen;
	wmb();
	bus->ctrl_frame_stat = true;

	brcmf_sdio_trigger_dpc(bus);
	wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat,
					 CTL_DONE_TIMEOUT);
	ret = 0;
	if (bus->ctrl_frame_stat) {
		sdio_claim_host(bus->sdiodev->func1);
		if (bus->ctrl_frame_stat) {
			brcmf_dbg(SDIO, "ctrl_frame timeout\n");
			bus->ctrl_frame_stat = false;
			ret = -ETIMEDOUT;
		}
		sdio_release_host(bus->sdiodev->func1);
	}
	if (!ret) {
		brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
			  bus->ctrl_frame_err);
		rmb();
		ret = bus->ctrl_frame_err;
	}

	if (ret)
		bus->sdcnt.tx_ctlerrs++;
	else
		bus->sdcnt.tx_ctlpkts++;

	return ret;
}
2902 static int brcmf_sdio_dump_console(struct seq_file
*seq
, struct brcmf_sdio
*bus
,
2903 struct sdpcm_shared
*sh
)
2905 u32 addr
, console_ptr
, console_size
, console_index
;
2906 char *conbuf
= NULL
;
2910 /* obtain console information from device memory */
2911 addr
= sh
->console_addr
+ offsetof(struct rte_console
, log_le
);
2912 rv
= brcmf_sdiod_ramrw(bus
->sdiodev
, false, addr
,
2913 (u8
*)&sh_val
, sizeof(u32
));
2916 console_ptr
= le32_to_cpu(sh_val
);
2918 addr
= sh
->console_addr
+ offsetof(struct rte_console
, log_le
.buf_size
);
2919 rv
= brcmf_sdiod_ramrw(bus
->sdiodev
, false, addr
,
2920 (u8
*)&sh_val
, sizeof(u32
));
2923 console_size
= le32_to_cpu(sh_val
);
2925 addr
= sh
->console_addr
+ offsetof(struct rte_console
, log_le
.idx
);
2926 rv
= brcmf_sdiod_ramrw(bus
->sdiodev
, false, addr
,
2927 (u8
*)&sh_val
, sizeof(u32
));
2930 console_index
= le32_to_cpu(sh_val
);
2932 /* allocate buffer for console data */
2933 if (console_size
<= CONSOLE_BUFFER_MAX
)
2934 conbuf
= vzalloc(console_size
+1);
2939 /* obtain the console data from device */
2940 conbuf
[console_size
] = '\0';
2941 rv
= brcmf_sdiod_ramrw(bus
->sdiodev
, false, console_ptr
, (u8
*)conbuf
,
2946 rv
= seq_write(seq
, conbuf
+ console_index
,
2947 console_size
- console_index
);
2951 if (console_index
> 0)
2952 rv
= seq_write(seq
, conbuf
, console_index
- 1);
static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus,
				struct sdpcm_shared *sh)
{
	int error;
	struct brcmf_trap_info tr;

	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
		brcmf_dbg(INFO, "no trap in firmware\n");
		return 0;
	}

	error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
				  sizeof(struct brcmf_trap_info));
	if (error < 0)
		return error;

	seq_printf(seq,
		   "dongle trap info: type 0x%x @ epc 0x%08x\n"
		   "  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
		   "  lr   0x%08x pc   0x%08x offset 0x%x\n"
		   "  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
		   "  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
		   le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
		   le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
		   le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
		   le32_to_cpu(tr.pc), sh->trap_addr,
		   le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
		   le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
		   le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
		   le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));

	return 0;
}

static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
				  struct sdpcm_shared *sh)
{
	int error = 0;
	char file[80] = "?";
	char expr[80] = "<???>";

	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
		brcmf_dbg(INFO, "firmware not built with -assert\n");
		return 0;
	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
		brcmf_dbg(INFO, "no assert in dongle\n");
		return 0;
	}

	sdio_claim_host(bus->sdiodev->func1);
	if (sh->assert_file_addr != 0) {
		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
					  sh->assert_file_addr, (u8 *)file, 80);
		if (error < 0)
			return error;
	}
	if (sh->assert_exp_addr != 0) {
		error = brcmf_sdiod_ramrw(bus->sdiodev, false,
					  sh->assert_exp_addr, (u8 *)expr, 80);
		if (error < 0)
			return error;
	}
	sdio_release_host(bus->sdiodev->func1);

	seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
		   file, sh->assert_line, expr);
	return 0;
}
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
{
	int error;
	struct sdpcm_shared sh;

	error = brcmf_sdio_readshared(bus, &sh);
	if (error < 0)
		return error;

	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
		brcmf_dbg(INFO, "firmware not built with -assert\n");
	else if (sh.flags & SDPCM_SHARED_ASSERT)
		brcmf_err("assertion in dongle\n");

	if (sh.flags & SDPCM_SHARED_TRAP)
		brcmf_err("firmware trap in dongle\n");

	return 0;
}

static int brcmf_sdio_died_dump(struct seq_file *seq, struct brcmf_sdio *bus)
{
	int error = 0;
	struct sdpcm_shared sh;

	error = brcmf_sdio_readshared(bus, &sh);
	if (error < 0)
		goto done;

	error = brcmf_sdio_assert_info(seq, bus, &sh);
	if (error < 0)
		goto done;

	error = brcmf_sdio_trap_info(seq, bus, &sh);
	if (error < 0)
		goto done;

	error = brcmf_sdio_dump_console(seq, bus, &sh);

done:
	return error;
}

static int brcmf_sdio_forensic_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_sdio *bus = bus_if->bus_priv.sdio->bus;

	return brcmf_sdio_died_dump(seq, bus);
}
3080 static int brcmf_debugfs_sdio_count_read(struct seq_file
*seq
, void *data
)
3082 struct brcmf_bus
*bus_if
= dev_get_drvdata(seq
->private);
3083 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
3084 struct brcmf_sdio_count
*sdcnt
= &sdiodev
->bus
->sdcnt
;
3087 "intrcount: %u\nlastintrs: %u\n"
3088 "pollcnt: %u\nregfails: %u\n"
3089 "tx_sderrs: %u\nfcqueued: %u\n"
3090 "rxrtx: %u\nrx_toolong: %u\n"
3091 "rxc_errors: %u\nrx_hdrfail: %u\n"
3092 "rx_badhdr: %u\nrx_badseq: %u\n"
3093 "fc_rcvd: %u\nfc_xoff: %u\n"
3094 "fc_xon: %u\nrxglomfail: %u\n"
3095 "rxglomframes: %u\nrxglompkts: %u\n"
3096 "f2rxhdrs: %u\nf2rxdata: %u\n"
3097 "f2txdata: %u\nf1regdata: %u\n"
3098 "tickcnt: %u\ntx_ctlerrs: %lu\n"
3099 "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
3100 "rx_ctlpkts: %lu\nrx_readahead: %lu\n",
3101 sdcnt
->intrcount
, sdcnt
->lastintrs
,
3102 sdcnt
->pollcnt
, sdcnt
->regfails
,
3103 sdcnt
->tx_sderrs
, sdcnt
->fcqueued
,
3104 sdcnt
->rxrtx
, sdcnt
->rx_toolong
,
3105 sdcnt
->rxc_errors
, sdcnt
->rx_hdrfail
,
3106 sdcnt
->rx_badhdr
, sdcnt
->rx_badseq
,
3107 sdcnt
->fc_rcvd
, sdcnt
->fc_xoff
,
3108 sdcnt
->fc_xon
, sdcnt
->rxglomfail
,
3109 sdcnt
->rxglomframes
, sdcnt
->rxglompkts
,
3110 sdcnt
->f2rxhdrs
, sdcnt
->f2rxdata
,
3111 sdcnt
->f2txdata
, sdcnt
->f1regdata
,
3112 sdcnt
->tickcnt
, sdcnt
->tx_ctlerrs
,
3113 sdcnt
->tx_ctlpkts
, sdcnt
->rx_ctlerrs
,
3114 sdcnt
->rx_ctlpkts
, sdcnt
->rx_readahead_cnt
);
3119 static void brcmf_sdio_debugfs_create(struct brcmf_sdio
*bus
)
3121 struct brcmf_pub
*drvr
= bus
->sdiodev
->bus_if
->drvr
;
3122 struct dentry
*dentry
= brcmf_debugfs_get_devdir(drvr
);
3124 if (IS_ERR_OR_NULL(dentry
))
3127 bus
->console_interval
= BRCMF_CONSOLE
;
3129 brcmf_debugfs_add_entry(drvr
, "forensics", brcmf_sdio_forensic_read
);
3130 brcmf_debugfs_add_entry(drvr
, "counters",
3131 brcmf_debugfs_sdio_count_read
);
3132 debugfs_create_u32("console_interval", 0644, dentry
,
3133 &bus
->console_interval
);
3136 static int brcmf_sdio_checkdied(struct brcmf_sdio
*bus
)
3141 static void brcmf_sdio_debugfs_create(struct brcmf_sdio
*bus
)
static int
brcmf_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
	int timeleft;
	uint rxlen = 0;
	bool pending;
	u8 *buf;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter\n");
	if (sdiodev->state != BRCMF_SDIOD_DATA)
		return -EIO;

	/* Wait until control frame is available */
	timeleft = brcmf_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending);

	spin_lock_bh(&bus->rxctl_lock);
	rxlen = bus->rxlen;
	memcpy(msg, bus->rxctl, min(msglen, rxlen));
	bus->rxctl = NULL;
	buf = bus->rxctl_orig;
	bus->rxctl_orig = NULL;
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	vfree(buf);

	if (rxlen) {
		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
			  rxlen, msglen);
	} else if (timeleft == 0) {
		brcmf_err("resumed on timeout\n");
		brcmf_sdio_checkdied(bus);
	} else if (pending) {
		brcmf_dbg(CTL, "cancelled\n");
		return -ERESTARTSYS;
	} else {
		brcmf_dbg(CTL, "resumed for unknown reason?\n");
		brcmf_sdio_checkdied(bus);
	}

	if (rxlen)
		bus->sdcnt.rx_ctlpkts++;
	else
		bus->sdcnt.rx_ctlerrs++;

	return rxlen ? (int)rxlen : -ETIMEDOUT;
}
3198 brcmf_sdio_verifymemory(struct brcmf_sdio_dev
*sdiodev
, u32 ram_addr
,
3199 u8
*ram_data
, uint ram_sz
)
3208 /* read back and verify */
3209 brcmf_dbg(INFO
, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr
,
3211 ram_cmp
= kmalloc(MEMBLOCK
, GFP_KERNEL
);
3212 /* do not proceed while no memory but */
3218 while (offset
< ram_sz
) {
3219 len
= ((offset
+ MEMBLOCK
) < ram_sz
) ? MEMBLOCK
:
3221 err
= brcmf_sdiod_ramrw(sdiodev
, false, address
, ram_cmp
, len
);
3223 brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3227 } else if (memcmp(ram_cmp
, &ram_data
[offset
], len
)) {
3228 brcmf_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n",
3243 brcmf_sdio_verifymemory(struct brcmf_sdio_dev
*sdiodev
, u32 ram_addr
,
3244 u8
*ram_data
, uint ram_sz
)
static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
					 const struct firmware *fw)
{
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
				(u8 *)fw->data, fw->size);
	if (err)
		brcmf_err("error %d on writing %d membytes at 0x%08x\n",
			  err, (int)fw->size, bus->ci->rambase);
	else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
					  (u8 *)fw->data, fw->size))
		err = -EIO;

	return err;
}

static int brcmf_sdio_download_nvram(struct brcmf_sdio *bus,
				     void *vars, u32 varsz)
{
	int address;
	int err;

	brcmf_dbg(TRACE, "Enter\n");

	address = bus->ci->ramsize - varsz + bus->ci->rambase;
	err = brcmf_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz);
	if (err)
		brcmf_err("error %d on writing %d nvram bytes at 0x%08x\n",
			  err, varsz, address);
	else if (!brcmf_sdio_verifymemory(bus->sdiodev, address, vars, varsz))
		err = -EIO;

	return err;
}

static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
					const struct firmware *fw,
					void *nvram, u32 nvlen)
{
	int bcmerror;
	u32 rstvec;

	sdio_claim_host(bus->sdiodev->func1);
	brcmf_sdio_clkctl(bus, CLK_AVAIL, false);

	rstvec = get_unaligned_le32(fw->data);
	brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec);

	bcmerror = brcmf_sdio_download_code_file(bus, fw);
	release_firmware(fw);
	if (bcmerror) {
		brcmf_err("dongle image file download failed\n");
		brcmf_fw_nvram_free(nvram);
		goto err;
	}

	bcmerror = brcmf_sdio_download_nvram(bus, nvram, nvlen);
	brcmf_fw_nvram_free(nvram);
	if (bcmerror) {
		brcmf_err("dongle nvram file download failed\n");
		goto err;
	}

	/* Take arm out of reset */
	if (!brcmf_chip_set_active(bus->ci, rstvec)) {
		brcmf_err("error getting out of ARM core reset\n");
		bcmerror = -EIO;
		goto err;
	}

err:
	brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
	sdio_release_host(bus->sdiodev->func1);
	return bcmerror;
}
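/*
 * NVRAM placement example (illustrative numbers): with rambase 0x180000,
 * ramsize 0xc0000 and a 0x1000-byte NVRAM image, the image is written at
 * 0x180000 + 0xc0000 - 0x1000 = 0x23f000, i.e. flush against the end of RAM.
 */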
3328 static void brcmf_sdio_sr_init(struct brcmf_sdio
*bus
)
3333 brcmf_dbg(TRACE
, "Enter\n");
3335 val
= brcmf_sdiod_readb(bus
->sdiodev
, SBSDIO_FUNC1_WAKEUPCTRL
, &err
);
3337 brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
3341 val
|= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT
;
3342 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_WAKEUPCTRL
, val
, &err
);
3344 brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
3348 /* Add CMD14 Support */
3349 brcmf_sdiod_func0_wb(bus
->sdiodev
, SDIO_CCCR_BRCM_CARDCAP
,
3350 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT
|
3351 SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT
),
3354 brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
3358 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
3359 SBSDIO_FORCE_HT
, &err
);
3361 brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
3366 bus
->sr_enabled
= true;
3367 brcmf_dbg(INFO
, "SR enabled\n");
3370 /* enable KSO bit */
3371 static int brcmf_sdio_kso_init(struct brcmf_sdio
*bus
)
3373 struct brcmf_core
*core
= bus
->sdio_core
;
3377 brcmf_dbg(TRACE
, "Enter\n");
3379 /* KSO bit added in SDIO core rev 12 */
3383 val
= brcmf_sdiod_readb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
, &err
);
3385 brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
3389 if (!(val
& SBSDIO_FUNC1_SLEEPCSR_KSO_MASK
)) {
3390 val
|= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN
<<
3391 SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT
);
3392 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_SLEEPCSR
,
3395 brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
3404 static int brcmf_sdio_bus_preinit(struct device
*dev
)
3406 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
3407 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
3408 struct brcmf_sdio
*bus
= sdiodev
->bus
;
3409 struct brcmf_core
*core
= bus
->sdio_core
;
3414 /* the commands below use the terms tx and rx from
3415 * a device perspective, ie. bus:txglom affects the
3416 * bus transfers from device to host.
3418 if (core
->rev
< 12) {
3419 /* for sdio core rev < 12, disable txgloming */
3421 err
= brcmf_iovar_data_set(dev
, "bus:txglom", &value
,
3424 /* otherwise, set txglomalign */
3425 value
= sdiodev
->settings
->bus
.sdio
.sd_sgentry_align
;
3426 /* SDIO ADMA requires at least 32 bit alignment */
3427 value
= max_t(u32
, value
, ALIGNMENT
);
3428 err
= brcmf_iovar_data_set(dev
, "bus:txglomalign", &value
,
3435 bus
->tx_hdrlen
= SDPCM_HWHDR_LEN
+ SDPCM_SWHDR_LEN
;
3436 if (sdiodev
->sg_support
) {
3437 bus
->txglom
= false;
3439 pad_size
= bus
->sdiodev
->func2
->cur_blksize
<< 1;
3440 err
= brcmf_iovar_data_set(bus
->sdiodev
->dev
, "bus:rxglom",
3441 &value
, sizeof(u32
));
3443 /* bus:rxglom is allowed to fail */
3447 bus
->tx_hdrlen
+= SDPCM_HWEXT_LEN
;
3450 brcmf_bus_add_txhdrlen(bus
->sdiodev
->dev
, bus
->tx_hdrlen
);
3456 static size_t brcmf_sdio_bus_get_ramsize(struct device
*dev
)
3458 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
3459 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
3460 struct brcmf_sdio
*bus
= sdiodev
->bus
;
3462 return bus
->ci
->ramsize
- bus
->ci
->srsize
;
3465 static int brcmf_sdio_bus_get_memdump(struct device
*dev
, void *data
,
3468 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
3469 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
3470 struct brcmf_sdio
*bus
= sdiodev
->bus
;
3476 brcmf_dbg(INFO
, "dump at 0x%08x: size=%zu\n", bus
->ci
->rambase
,
3479 address
= bus
->ci
->rambase
;
3481 sdio_claim_host(sdiodev
->func1
);
3482 while (offset
< mem_size
) {
3483 len
= ((offset
+ MEMBLOCK
) < mem_size
) ? MEMBLOCK
:
3485 err
= brcmf_sdiod_ramrw(sdiodev
, false, address
, data
, len
);
3487 brcmf_err("error %d on reading %d membytes at 0x%08x\n",
3497 sdio_release_host(sdiodev
->func1
);
void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
{
	if (!bus->dpc_triggered) {
		bus->dpc_triggered = true;
		queue_work(bus->brcmf_wq, &bus->datawork);
	}
}

void brcmf_sdio_isr(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (!bus) {
		brcmf_err("bus is null pointer, exiting\n");
		return;
	}

	/* Count the interrupt call */
	bus->sdcnt.intrcount++;
	if (in_interrupt())
		atomic_set(&bus->ipend, 1);
	else
		if (brcmf_sdio_intr_rstatus(bus)) {
			brcmf_err("failed backplane access\n");
		}

	/* Disable additional interrupts (is this needed now)? */
	if (!bus->intr)
		brcmf_err("isr w/o interrupt configured!\n");

	bus->dpc_triggered = true;
	queue_work(bus->brcmf_wq, &bus->datawork);
}
3535 static void brcmf_sdio_bus_watchdog(struct brcmf_sdio
*bus
)
3537 brcmf_dbg(TIMER
, "Enter\n");
3539 /* Poll period: check device if appropriate. */
3540 if (!bus
->sr_enabled
&&
3541 bus
->poll
&& (++bus
->polltick
>= bus
->pollrate
)) {
3544 /* Reset poll tick */
3547 /* Check device if no interrupts */
3549 (bus
->sdcnt
.intrcount
== bus
->sdcnt
.lastintrs
)) {
3551 if (!bus
->dpc_triggered
) {
3554 sdio_claim_host(bus
->sdiodev
->func1
);
3555 devpend
= brcmf_sdiod_func0_rb(bus
->sdiodev
,
3556 SDIO_CCCR_INTx
, NULL
);
3557 sdio_release_host(bus
->sdiodev
->func1
);
3558 intstatus
= devpend
& (INTR_STATUS_FUNC1
|
3562 /* If there is something, make like the ISR and
3565 bus
->sdcnt
.pollcnt
++;
3566 atomic_set(&bus
->ipend
, 1);
3568 bus
->dpc_triggered
= true;
3569 queue_work(bus
->brcmf_wq
, &bus
->datawork
);
3573 /* Update interrupt tracking */
3574 bus
->sdcnt
.lastintrs
= bus
->sdcnt
.intrcount
;
3577 /* Poll for console output periodically */
3578 if (bus
->sdiodev
->state
== BRCMF_SDIOD_DATA
&& BRCMF_FWCON_ON() &&
3579 bus
->console_interval
!= 0) {
3580 bus
->console
.count
+= jiffies_to_msecs(BRCMF_WD_POLL
);
3581 if (bus
->console
.count
>= bus
->console_interval
) {
3582 bus
->console
.count
-= bus
->console_interval
;
3583 sdio_claim_host(bus
->sdiodev
->func1
);
3584 /* Make sure backplane clock is on */
3585 brcmf_sdio_bus_sleep(bus
, false, false);
3586 if (brcmf_sdio_readconsole(bus
) < 0)
3588 bus
->console_interval
= 0;
3589 sdio_release_host(bus
->sdiodev
->func1
);
3594 /* On idle timeout clear activity flag and/or turn off clock */
3595 if (!bus
->dpc_triggered
) {
3597 if ((!bus
->dpc_running
) && (bus
->idletime
> 0) &&
3598 (bus
->clkstate
== CLK_AVAIL
)) {
3600 if (bus
->idlecount
> bus
->idletime
) {
3601 brcmf_dbg(SDIO
, "idle\n");
3602 sdio_claim_host(bus
->sdiodev
->func1
);
3603 brcmf_sdio_wd_timer(bus
, false);
3605 brcmf_sdio_bus_sleep(bus
, true, false);
3606 sdio_release_host(bus
->sdiodev
->func1
);
static void brcmf_sdio_dataworker(struct work_struct *work)
{
	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
					      datawork);

	bus->dpc_running = true;
	wmb();
	while (READ_ONCE(bus->dpc_triggered)) {
		bus->dpc_triggered = false;
		brcmf_sdio_dpc(bus);
		bus->idlecount = 0;
	}
	bus->dpc_running = false;
	if (brcmf_sdiod_freezing(bus->sdiodev)) {
		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN);
		brcmf_sdiod_try_freeze(bus->sdiodev);
		brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA);
	}
}
3637 brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev
*sdiodev
,
3638 struct brcmf_chip
*ci
, u32 drivestrength
)
3640 const struct sdiod_drive_str
*str_tab
= NULL
;
3644 u32 drivestrength_sel
= 0;
3648 if (!(ci
->cc_caps
& CC_CAP_PMU
))
3651 switch (SDIOD_DRVSTR_KEY(ci
->chip
, ci
->pmurev
)) {
3652 case SDIOD_DRVSTR_KEY(BRCM_CC_4330_CHIP_ID
, 12):
3653 str_tab
= sdiod_drvstr_tab1_1v8
;
3654 str_mask
= 0x00003800;
3657 case SDIOD_DRVSTR_KEY(BRCM_CC_4334_CHIP_ID
, 17):
3658 str_tab
= sdiod_drvstr_tab6_1v8
;
3659 str_mask
= 0x00001800;
3662 case SDIOD_DRVSTR_KEY(BRCM_CC_43143_CHIP_ID
, 17):
3663 /* note: 43143 does not support tristate */
3664 i
= ARRAY_SIZE(sdiod_drvstr_tab2_3v3
) - 1;
3665 if (drivestrength
>= sdiod_drvstr_tab2_3v3
[i
].strength
) {
3666 str_tab
= sdiod_drvstr_tab2_3v3
;
3667 str_mask
= 0x00000007;
3670 brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
3671 ci
->name
, drivestrength
);
3673 case SDIOD_DRVSTR_KEY(BRCM_CC_43362_CHIP_ID
, 13):
3674 str_tab
= sdiod_drive_strength_tab5_1v8
;
3675 str_mask
= 0x00003800;
3679 brcmf_dbg(INFO
, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n",
3680 ci
->name
, ci
->chiprev
, ci
->pmurev
);
3684 if (str_tab
!= NULL
) {
3685 struct brcmf_core
*pmu
= brcmf_chip_get_pmu(ci
);
3687 for (i
= 0; str_tab
[i
].strength
!= 0; i
++) {
3688 if (drivestrength
>= str_tab
[i
].strength
) {
3689 drivestrength_sel
= str_tab
[i
].sel
;
3693 addr
= CORE_CC_REG(pmu
->base
, chipcontrol_addr
);
3694 brcmf_sdiod_writel(sdiodev
, addr
, 1, NULL
);
3695 cc_data_temp
= brcmf_sdiod_readl(sdiodev
, addr
, NULL
);
3696 cc_data_temp
&= ~str_mask
;
3697 drivestrength_sel
<<= str_shift
;
3698 cc_data_temp
|= drivestrength_sel
;
3699 brcmf_sdiod_writel(sdiodev
, addr
, cc_data_temp
, NULL
);
3701 brcmf_dbg(INFO
, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
3702 str_tab
[i
].strength
, drivestrength
, cc_data_temp
);
3706 static int brcmf_sdio_buscoreprep(void *ctx
)
3708 struct brcmf_sdio_dev
*sdiodev
= ctx
;
3712 /* Try forcing SDIO core to do ALPAvail request only */
3713 clkset
= SBSDIO_FORCE_HW_CLKREQ_OFF
| SBSDIO_ALP_AVAIL_REQ
;
3714 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
, clkset
, &err
);
3716 brcmf_err("error writing for HT off\n");
3720 /* If register supported, wait for ALPAvail and then force ALP */
3721 /* This may take up to 15 milliseconds */
3722 clkval
= brcmf_sdiod_readb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
, NULL
);
3724 if ((clkval
& ~SBSDIO_AVBITS
) != clkset
) {
3725 brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
3730 SPINWAIT(((clkval
= brcmf_sdiod_readb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
3732 !SBSDIO_ALPAV(clkval
)),
3733 PMU_MAX_TRANSITION_DLY
);
3735 if (!SBSDIO_ALPAV(clkval
)) {
3736 brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
3741 clkset
= SBSDIO_FORCE_HW_CLKREQ_OFF
| SBSDIO_FORCE_ALP
;
3742 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
, clkset
, &err
);
3745 /* Also, disable the extra SDIO pull-ups */
3746 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_SDIOPULLUP
, 0, NULL
);
static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip,
					u32 rstvec)
{
	struct brcmf_sdio_dev *sdiodev = ctx;
	struct brcmf_core *core = sdiodev->bus->sdio_core;
	u32 reg_addr;

	/* clear all interrupts */
	reg_addr = core->base + SD_REG(intstatus);
	brcmf_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL);

	if (rstvec)
		/* Write reset vector to address 0 */
		brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
				  sizeof(rstvec));
}
static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
{
	struct brcmf_sdio_dev *sdiodev = ctx;
	u32 val, rev;

	val = brcmf_sdiod_readl(sdiodev, addr, NULL);

	/*
	 * this is a bit of special handling if reading the chipcommon chipid
	 * register. The 4339 is a next-gen of the 4335. It uses the same
	 * SDIO device id as 4335 and the chipid register returns 4335 as well.
	 * It can be identified as 4339 by looking at the chip revision. It
	 * is corrected here so the chip.c module has the right info.
	 */
	if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) &&
	    (sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
	     sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
		rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
		if (rev >= 2) {
			val &= ~CID_ID_MASK;
			val |= BRCM_CC_4339_CHIP_ID;
		}
	}

	return val;
}

static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
{
	struct brcmf_sdio_dev *sdiodev = ctx;

	brcmf_sdiod_writel(sdiodev, addr, val, NULL);
}

static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
	.prepare = brcmf_sdio_buscoreprep,
	.activate = brcmf_sdio_buscore_activate,
	.read32 = brcmf_sdio_buscore_read32,
	.write32 = brcmf_sdio_buscore_write32,
};
3810 brcmf_sdio_probe_attach(struct brcmf_sdio
*bus
)
3812 struct brcmf_sdio_dev
*sdiodev
;
3819 sdiodev
= bus
->sdiodev
;
3820 sdio_claim_host(sdiodev
->func1
);
3822 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3823 brcmf_sdiod_readl(sdiodev
, SI_ENUM_BASE
, NULL
));
3826 * Force PLL off until brcmf_chip_attach()
3827 * programs PLL control regs
3830 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
, BRCMF_INIT_CLKCTL1
,
3833 clkctl
= brcmf_sdiod_readb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
3836 if (err
|| ((clkctl
& ~SBSDIO_AVBITS
) != BRCMF_INIT_CLKCTL1
)) {
3837 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3838 err
, BRCMF_INIT_CLKCTL1
, clkctl
);
3842 bus
->ci
= brcmf_chip_attach(sdiodev
, &brcmf_sdio_buscore_ops
);
3843 if (IS_ERR(bus
->ci
)) {
3844 brcmf_err("brcmf_chip_attach failed!\n");
3849 /* Pick up the SDIO core info struct from chip.c */
3850 bus
->sdio_core
= brcmf_chip_get_core(bus
->ci
, BCMA_CORE_SDIO_DEV
);
3851 if (!bus
->sdio_core
)
3854 /* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
3855 sdiodev
->cc_core
= brcmf_chip_get_core(bus
->ci
, BCMA_CORE_CHIPCOMMON
);
3856 if (!sdiodev
->cc_core
)
3859 sdiodev
->settings
= brcmf_get_module_param(sdiodev
->dev
,
3863 if (!sdiodev
->settings
) {
3864 brcmf_err("Failed to get device parameters\n");
3867 /* platform specific configuration:
3868 * alignments must be at least 4 bytes for ADMA
3870 bus
->head_align
= ALIGNMENT
;
3871 bus
->sgentry_align
= ALIGNMENT
;
3872 if (sdiodev
->settings
->bus
.sdio
.sd_head_align
> ALIGNMENT
)
3873 bus
->head_align
= sdiodev
->settings
->bus
.sdio
.sd_head_align
;
3874 if (sdiodev
->settings
->bus
.sdio
.sd_sgentry_align
> ALIGNMENT
)
3875 bus
->sgentry_align
=
3876 sdiodev
->settings
->bus
.sdio
.sd_sgentry_align
;
3878 /* allocate scatter-gather table. sg support
3879 * will be disabled upon allocation failure.
3881 brcmf_sdiod_sgtable_alloc(sdiodev
);
3883 #ifdef CONFIG_PM_SLEEP
3884 /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
3885 * is true or when platform data OOB irq is true).
3887 if ((sdio_get_host_pm_caps(sdiodev
->func1
) & MMC_PM_KEEP_POWER
) &&
3888 ((sdio_get_host_pm_caps(sdiodev
->func1
) & MMC_PM_WAKE_SDIO_IRQ
) ||
3889 (sdiodev
->settings
->bus
.sdio
.oob_irq_supported
)))
3890 sdiodev
->bus_if
->wowl_supported
= true;
3893 if (brcmf_sdio_kso_init(bus
)) {
3894 brcmf_err("error enabling KSO\n");
3898 if (sdiodev
->settings
->bus
.sdio
.drive_strength
)
3899 drivestrength
= sdiodev
->settings
->bus
.sdio
.drive_strength
;
3901 drivestrength
= DEFAULT_SDIO_DRIVE_STRENGTH
;
3902 brcmf_sdio_drivestrengthinit(sdiodev
, bus
->ci
, drivestrength
);
3904 /* Set card control so an SDIO card reset does a WLAN backplane reset */
3905 reg_val
= brcmf_sdiod_func0_rb(sdiodev
, SDIO_CCCR_BRCM_CARDCTRL
, &err
);
3909 reg_val
|= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET
;
3911 brcmf_sdiod_func0_wb(sdiodev
, SDIO_CCCR_BRCM_CARDCTRL
, reg_val
, &err
);
3915 /* set PMUControl so a backplane reset does PMU state reload */
3916 reg_addr
= CORE_CC_REG(brcmf_chip_get_pmu(bus
->ci
)->base
, pmucontrol
);
3917 reg_val
= brcmf_sdiod_readl(sdiodev
, reg_addr
, &err
);
3921 reg_val
|= (BCMA_CC_PMU_CTL_RES_RELOAD
<< BCMA_CC_PMU_CTL_RES_SHIFT
);
3923 brcmf_sdiod_writel(sdiodev
, reg_addr
, reg_val
, &err
);
3927 sdio_release_host(sdiodev
->func1
);
3929 brcmu_pktq_init(&bus
->txq
, (PRIOMASK
+ 1), TXQLEN
);
3931 /* allocate header buffer */
3932 bus
->hdrbuf
= kzalloc(MAX_HDR_READ
+ bus
->head_align
, GFP_KERNEL
);
3935 /* Locate an appropriately-aligned portion of hdrbuf */
3936 bus
->rxhdr
= (u8
*) roundup((unsigned long)&bus
->hdrbuf
[0],
3939 /* Set the poll and/or interrupt flags */
3948 sdio_release_host(sdiodev
->func1
);
static int
brcmf_sdio_watchdog_thread(void *data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
	int wait;

	allow_signal(SIGTERM);
	/* Run until signal received */
	brcmf_sdiod_freezer_count(bus->sdiodev);
	while (1) {
		if (kthread_should_stop())
			break;
		brcmf_sdiod_freezer_uncount(bus->sdiodev);
		wait = wait_for_completion_interruptible(&bus->watchdog_wait);
		brcmf_sdiod_freezer_count(bus->sdiodev);
		brcmf_sdiod_try_freeze(bus->sdiodev);
		if (!wait) {
			brcmf_sdio_bus_watchdog(bus);
			/* Count the tick for reference */
			bus->sdcnt.tickcnt++;
			reinit_completion(&bus->watchdog_wait);
		} else {
			break;
		}
	}
	return 0;
}

static void
brcmf_sdio_watchdog(struct timer_list *t)
{
	struct brcmf_sdio *bus = from_timer(bus, t, timer);

	if (bus->watchdog_tsk) {
		complete(&bus->watchdog_wait);
		/* Reschedule the watchdog */
		if (bus->wd_active)
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL);
	}
}

static int brcmf_sdio_get_fwname(struct device *dev, u32 chip, u32 chiprev,
				 u8 *fw_name)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	int ret = 0;

	if (sdiodev->fw_name[0] != '\0')
		strlcpy(fw_name, sdiodev->fw_name, BRCMF_FW_NAME_LEN);
	else
		ret = brcmf_fw_map_chip_to_name(chip, chiprev,
						brcmf_sdio_fwnames,
						ARRAY_SIZE(brcmf_sdio_fwnames),
						fw_name, NULL);

	return ret;
}

static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
	.stop = brcmf_sdio_bus_stop,
	.preinit = brcmf_sdio_bus_preinit,
	.txdata = brcmf_sdio_bus_txdata,
	.txctl = brcmf_sdio_bus_txctl,
	.rxctl = brcmf_sdio_bus_rxctl,
	.gettxq = brcmf_sdio_bus_gettxq,
	.wowl_config = brcmf_sdio_wowl_config,
	.get_ramsize = brcmf_sdio_bus_get_ramsize,
	.get_memdump = brcmf_sdio_bus_get_memdump,
	.get_fwname = brcmf_sdio_get_fwname,
};
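/*
 * This ops table is the bus abstraction exposed to the common layer: once it
 * is hooked up via bus_if->ops in brcmf_sdio_probe(), a call such as
 * bus_if->ops->txdata resolves to brcmf_sdio_bus_txdata() above.
 */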
4024 static void brcmf_sdio_firmware_callback(struct device
*dev
, int err
,
4025 const struct firmware
*code
,
4026 void *nvram
, u32 nvram_len
)
4028 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev
);
4029 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
4030 struct brcmf_sdio
*bus
= sdiodev
->bus
;
4031 struct brcmf_sdio_dev
*sdiod
= bus
->sdiodev
;
4032 struct brcmf_core
*core
= bus
->sdio_core
;
4035 brcmf_dbg(TRACE
, "Enter: dev=%s, err=%d\n", dev_name(dev
), err
);
4043 /* try to download image and nvram to the dongle */
4044 bus
->alp_only
= true;
4045 err
= brcmf_sdio_download_firmware(bus
, code
, nvram
, nvram_len
);
4048 bus
->alp_only
= false;
4050 /* Start the watchdog timer */
4051 bus
->sdcnt
.tickcnt
= 0;
4052 brcmf_sdio_wd_timer(bus
, true);
4054 sdio_claim_host(sdiodev
->func1
);
4056 /* Make sure backplane clock is on, needed to generate F2 interrupt */
4057 brcmf_sdio_clkctl(bus
, CLK_AVAIL
, false);
4058 if (bus
->clkstate
!= CLK_AVAIL
)
4061 /* Force clocks on backplane to be sure F2 interrupt propagates */
4062 saveclk
= brcmf_sdiod_readb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
, &err
);
4064 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
4065 (saveclk
| SBSDIO_FORCE_HT
), &err
);
4068 brcmf_err("Failed to force clock for F2: err %d\n", err
);
4072 /* Enable function 2 (frame transfers) */
4073 brcmf_sdiod_writel(sdiod
, core
->base
+ SD_REG(tosbmailboxdata
),
4074 SDPCM_PROT_VERSION
<< SMB_DATA_VERSION_SHIFT
, NULL
);
4076 err
= sdio_enable_func(sdiodev
->func2
);
4078 brcmf_dbg(INFO
, "enable F2: err=%d\n", err
);
4080 /* If F2 successfully enabled, set core and enable interrupts */
4082 /* Set up the interrupt mask and enable interrupts */
4083 bus
->hostintmask
= HOSTINTMASK
;
4084 brcmf_sdiod_writel(sdiod
, core
->base
+ SD_REG(hostintmask
),
4085 bus
->hostintmask
, NULL
);
4088 brcmf_sdiod_writeb(sdiodev
, SBSDIO_WATERMARK
, 8, &err
);
4090 /* Disable F2 again */
4091 sdio_disable_func(sdiodev
->func2
);
4095 if (brcmf_chip_sr_capable(bus
->ci
)) {
4096 brcmf_sdio_sr_init(bus
);
4098 /* Restore previous clock setting */
4099 brcmf_sdiod_writeb(sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
,
4104 /* Allow full data communication using DPC from now on. */
4105 brcmf_sdiod_change_state(bus
->sdiodev
, BRCMF_SDIOD_DATA
);
4107 err
= brcmf_sdiod_intr_register(sdiodev
);
4109 brcmf_err("intr register failed:%d\n", err
);
4112 /* If we didn't come up, turn off backplane clock */
4114 brcmf_sdio_clkctl(bus
, CLK_NONE
, false);
4116 sdio_release_host(sdiodev
->func1
);
4118 err
= brcmf_bus_started(dev
);
4120 brcmf_err("dongle is not responding\n");
4126 sdio_release_host(sdiodev
->func1
);
4128 brcmf_dbg(TRACE
, "failed: dev=%s, err=%d\n", dev_name(dev
), err
);
4129 device_release_driver(&sdiodev
->func2
->dev
);
4130 device_release_driver(dev
);
4133 struct brcmf_sdio
*brcmf_sdio_probe(struct brcmf_sdio_dev
*sdiodev
)
4136 struct brcmf_sdio
*bus
;
4137 struct workqueue_struct
*wq
;
4139 brcmf_dbg(TRACE
, "Enter\n");
4141 /* Allocate private bus interface state */
4142 bus
= kzalloc(sizeof(struct brcmf_sdio
), GFP_ATOMIC
);
4146 bus
->sdiodev
= sdiodev
;
4148 skb_queue_head_init(&bus
->glom
);
4149 bus
->txbound
= BRCMF_TXBOUND
;
4150 bus
->rxbound
= BRCMF_RXBOUND
;
4151 bus
->txminmax
= BRCMF_TXMINMAX
;
4152 bus
->tx_seq
= SDPCM_SEQ_WRAP
- 1;
4154 /* single-threaded workqueue */
4155 wq
= alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM
,
4156 dev_name(&sdiodev
->func1
->dev
));
4158 brcmf_err("insufficient memory to create txworkqueue\n");
4161 brcmf_sdiod_freezer_count(sdiodev
);
4162 INIT_WORK(&bus
->datawork
, brcmf_sdio_dataworker
);
4165 /* attempt to attach to the dongle */
4166 if (!(brcmf_sdio_probe_attach(bus
))) {
4167 brcmf_err("brcmf_sdio_probe_attach failed\n");
4171 spin_lock_init(&bus
->rxctl_lock
);
4172 spin_lock_init(&bus
->txq_lock
);
4173 init_waitqueue_head(&bus
->ctrl_wait
);
4174 init_waitqueue_head(&bus
->dcmd_resp_wait
);
4176 /* Set up the watchdog timer */
4177 timer_setup(&bus
->timer
, brcmf_sdio_watchdog
, 0);
4178 /* Initialize watchdog thread */
4179 init_completion(&bus
->watchdog_wait
);
4180 bus
->watchdog_tsk
= kthread_run(brcmf_sdio_watchdog_thread
,
4181 bus
, "brcmf_wdog/%s",
4182 dev_name(&sdiodev
->func1
->dev
));
4183 if (IS_ERR(bus
->watchdog_tsk
)) {
4184 pr_warn("brcmf_watchdog thread failed to start\n");
4185 bus
->watchdog_tsk
= NULL
;
4187 /* Initialize DPC thread */
4188 bus
->dpc_triggered
= false;
4189 bus
->dpc_running
= false;
4191 /* Assign bus interface call back */
4192 bus
->sdiodev
->bus_if
->dev
= bus
->sdiodev
->dev
;
4193 bus
->sdiodev
->bus_if
->ops
= &brcmf_sdio_bus_ops
;
4194 bus
->sdiodev
->bus_if
->chip
= bus
->ci
->chip
;
4195 bus
->sdiodev
->bus_if
->chiprev
= bus
->ci
->chiprev
;
4197 /* default sdio bus header length for tx packet */
4198 bus
->tx_hdrlen
= SDPCM_HWHDR_LEN
+ SDPCM_SWHDR_LEN
;
4200 /* Attach to the common layer, reserve hdr space */
4201 ret
= brcmf_attach(bus
->sdiodev
->dev
, bus
->sdiodev
->settings
);
4203 brcmf_err("brcmf_attach failed\n");
4207 /* Query the F2 block size, set roundup accordingly */
4208 bus
->blocksize
= bus
->sdiodev
->func2
->cur_blksize
;
4209 bus
->roundup
= min(max_roundup
, bus
->blocksize
);
4211 /* Allocate buffers */
4212 if (bus
->sdiodev
->bus_if
->maxctl
) {
4213 bus
->sdiodev
->bus_if
->maxctl
+= bus
->roundup
;
4215 roundup((bus
->sdiodev
->bus_if
->maxctl
+ SDPCM_HDRLEN
),
4216 ALIGNMENT
) + bus
->head_align
;
4217 bus
->rxbuf
= kmalloc(bus
->rxblen
, GFP_ATOMIC
);
4218 if (!(bus
->rxbuf
)) {
4219 brcmf_err("rxbuf allocation failed\n");
4224 sdio_claim_host(bus
->sdiodev
->func1
);
4226 /* Disable F2 to clear any intermediate frame state on the dongle */
4227 sdio_disable_func(bus
->sdiodev
->func2
);
4229 bus
->rxflow
= false;
4231 /* Done with backplane-dependent accesses, can drop clock... */
4232 brcmf_sdiod_writeb(bus
->sdiodev
, SBSDIO_FUNC1_CHIPCLKCSR
, 0, NULL
);
4234 sdio_release_host(bus
->sdiodev
->func1
);
4236 /* ...and initialize clock/power states */
4237 bus
->clkstate
= CLK_SDONLY
;
4238 bus
->idletime
= BRCMF_IDLE_INTERVAL
;
4239 bus
->idleclock
= BRCMF_IDLE_ACTIVE
;
4242 bus
->sr_enabled
= false;
4244 brcmf_sdio_debugfs_create(bus
);
4245 brcmf_dbg(INFO
, "completed!!\n");
4247 ret
= brcmf_fw_map_chip_to_name(bus
->ci
->chip
, bus
->ci
->chiprev
,
4249 ARRAY_SIZE(brcmf_sdio_fwnames
),
4250 sdiodev
->fw_name
, sdiodev
->nvram_name
);
4254 ret
= brcmf_fw_get_firmwares(sdiodev
->dev
, BRCMF_FW_REQUEST_NVRAM
,
4255 sdiodev
->fw_name
, sdiodev
->nvram_name
,
4256 brcmf_sdio_firmware_callback
);
4258 brcmf_err("async firmware request failed: %d\n", ret
);
4265 brcmf_sdio_remove(bus
);
/* Detach and free everything */
void brcmf_sdio_remove(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus) {
		/* De-register interrupt handler */
		brcmf_sdiod_intr_unregister(bus->sdiodev);

		brcmf_detach(bus->sdiodev->dev);

		cancel_work_sync(&bus->datawork);
		if (bus->brcmf_wq)
			destroy_workqueue(bus->brcmf_wq);

		if (bus->ci) {
			if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
				sdio_claim_host(bus->sdiodev->func1);
				brcmf_sdio_wd_timer(bus, false);
				brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
				/* Leave the device in state where it is
				 * 'passive'. This is done by resetting all
				 * necessary cores.
				 */
				msleep(20);
				brcmf_chip_set_passive(bus->ci);
				brcmf_sdio_clkctl(bus, CLK_NONE, false);
				sdio_release_host(bus->sdiodev->func1);
			}
			brcmf_chip_detach(bus->ci);
		}
		if (bus->sdiodev->settings)
			brcmf_release_module_param(bus->sdiodev->settings);

		kfree(bus->rxbuf);
		kfree(bus->hdrbuf);
		kfree(bus);
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active)
{
	/* Totally stop the timer */
	if (!active && bus->wd_active) {
		del_timer_sync(&bus->timer);
		bus->wd_active = false;
		return;
	}

	/* don't start the wd until fw is loaded */
	if (bus->sdiodev->state != BRCMF_SDIOD_DATA)
		return;

	if (active) {
		if (!bus->wd_active) {
			/* Create timer again when watchdog period is
			   dynamically changed or in the first instance
			 */
			bus->timer.expires = jiffies + BRCMF_WD_POLL;
			add_timer(&bus->timer);
			bus->wd_active = true;
		} else {
			/* Re arm the timer, at last watchdog period */
			mod_timer(&bus->timer, jiffies + BRCMF_WD_POLL);
		}
	}
}

int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
	int ret;

	sdio_claim_host(bus->sdiodev->func1);
	ret = brcmf_sdio_bus_sleep(bus, sleep, false);
	sdio_release_host(bus->sdiodev->func1);