/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/pci_ids.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/bcma/bcma.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/platform_data/brcmfmac-sdio.h>
#include <asm/unaligned.h>

#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include <brcm_hw_ids.h>

#include "sdio_host.h"
#include "sdio_chip.h"
#define DCMD_RESP_TIMEOUT	2000	/* In milliseconds */

#define BRCMF_TRAP_INFO_SIZE	80

#define CBUF_LEN	(128)

/* Device console log buffer state */
#define CONSOLE_BUFFER_MAX	2024

struct rte_log_le {
	__le32 buf;		/* Can't be pointer on (64-bit) hosts */
	char *_buf_compat;	/* Redundant pointer for backward compat. */
};

struct rte_console {
	/* When there is no UART (e.g. Quickturn),
	 * the host should write a complete
	 * input line directly into cbuf and then write
	 * the length into vcons_in.
	 * This may also be used when there is a real UART
	 * (at risk of conflicting with
	 * the real UART).  vcons_out is currently unused.
	 */

	/* Output (logging) buffer
	 * Console output is written to a ring buffer log_buf at index log_idx.
	 * The host may read the output when it sees log_idx advance.
	 * Output will be lost if the output wraps around faster than the host
	 * polls.
	 */
	struct rte_log_le log_le;

	/* Console input line buffer
	 * Characters are read one at a time into cbuf
	 * until <CR> is received, then
	 * the buffer is processed as a command line.
	 * Also used for virtual UART.
	 */
};

#include <chipcommon.h>

#include "tracepoint.h"
#define TXQLEN		2048	/* bulk tx queue length */
#define TXHI		(TXQLEN - 256)	/* turn on flow control above TXHI */
#define TXLOW		(TXHI - 256)	/* turn off flow control below TXLOW */

#define TXRETRIES	2	/* # of retries for tx frames */

#define BRCMF_RXBOUND	50	/* Default for max rx frames in
				 one scheduling */

#define BRCMF_TXBOUND	20	/* Default for max tx frames in
				 one scheduling */

#define BRCMF_TXMINMAX	1	/* Max tx frames if rx still pending */

#define MEMBLOCK	2048	/* Block size used for downloading
				 of dongle image */
#define MAX_DATA_BUF	(32 * 1024)	/* Must be large enough to hold
				 biggest possible glom */

#define BRCMF_FIRSTREAD	(1 << 6)
/* SBSDIO_DEVICE_CTL */

/* 1: device will assert busy signal when receiving CMD53 */
#define SBSDIO_DEVCTL_SETBUSY		0x01
/* 1: assertion of sdio interrupt is synchronous to the sdio clock */
#define SBSDIO_DEVCTL_SPI_INTR_SYNC	0x02
/* 1: mask all interrupts to host except the chipActive (rev 8) */
#define SBSDIO_DEVCTL_CA_INT_ONLY	0x04
/* 1: isolate internal sdio signals, put external pads in tri-state; requires
 * sdio bus power cycle to clear (rev 9) */
#define SBSDIO_DEVCTL_PADS_ISO		0x08
/* Force SD->SB reset mapping (rev 11) */
#define SBSDIO_DEVCTL_SB_RST_CTL	0x30
/*   Determined by CoreControl bit */
#define SBSDIO_DEVCTL_RST_CORECTL	0x00
/*   Force backplane reset */
#define SBSDIO_DEVCTL_RST_BPRESET	0x10
/*   Force no backplane reset */
#define SBSDIO_DEVCTL_RST_NOBPRESET	0x20

/* direct(mapped) cis space */

/* MAPPED common CIS address */
#define SBSDIO_CIS_BASE_COMMON		0x1000
/* maximum bytes in one CIS */
#define SBSDIO_CIS_SIZE_LIMIT		0x200
/* cis offset addr is < 17 bits */
#define SBSDIO_CIS_OFT_ADDR_MASK	0x1FFFF

/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN	6
#define I_SMB_SW0	(1 << 0)	/* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1	(1 << 1)	/* To SB Mail S/W interrupt 1 */
#define I_SMB_SW2	(1 << 2)	/* To SB Mail S/W interrupt 2 */
#define I_SMB_SW3	(1 << 3)	/* To SB Mail S/W interrupt 3 */
#define I_SMB_SW_MASK	0x0000000f	/* To SB Mail S/W interrupts mask */
#define I_SMB_SW_SHIFT	0		/* To SB Mail S/W interrupts shift */
#define I_HMB_SW0	(1 << 4)	/* To Host Mail S/W interrupt 0 */
#define I_HMB_SW1	(1 << 5)	/* To Host Mail S/W interrupt 1 */
#define I_HMB_SW2	(1 << 6)	/* To Host Mail S/W interrupt 2 */
#define I_HMB_SW3	(1 << 7)	/* To Host Mail S/W interrupt 3 */
#define I_HMB_SW_MASK	0x000000f0	/* To Host Mail S/W interrupts mask */
#define I_HMB_SW_SHIFT	4		/* To Host Mail S/W interrupts shift */
#define I_WR_OOSYNC	(1 << 8)	/* Write Frame Out Of Sync */
#define I_RD_OOSYNC	(1 << 9)	/* Read Frame Out Of Sync */
#define	I_PC		(1 << 10)	/* descriptor error */
#define	I_PD		(1 << 11)	/* data error */
#define	I_DE		(1 << 12)	/* Descriptor protocol Error */
#define	I_RU		(1 << 13)	/* Receive descriptor Underflow */
#define	I_RO		(1 << 14)	/* Receive fifo Overflow */
#define	I_XU		(1 << 15)	/* Transmit fifo Underflow */
#define	I_RI		(1 << 16)	/* Receive Interrupt */
#define I_BUSPWR	(1 << 17)	/* SDIO Bus Power Change (rev 9) */
#define I_XMTDATA_AVAIL	(1 << 23)	/* bits in fifo */
#define	I_XI		(1 << 24)	/* Transmit Interrupt */
#define I_RF_TERM	(1 << 25)	/* Read Frame Terminate */
#define I_WF_TERM	(1 << 26)	/* Write Frame Terminate */
#define I_PCMCIA_XU	(1 << 27)	/* PCMCIA Transmit FIFO Underflow */
#define I_SBINT		(1 << 28)	/* sbintstatus Interrupt */
#define I_CHIPACTIVE	(1 << 29)	/* chip from doze to active state */
#define I_SRESET	(1 << 30)	/* CCCR RES interrupt */
#define I_IOE2		(1U << 31)	/* CCCR IOE2 Bit Changed */
#define	I_ERRORS	(I_PC | I_PD | I_DE | I_RU | I_RO | I_XU)
#define I_DMA		(I_RI | I_XI | I_ERRORS)
#define CC_CISRDY		(1 << 0)	/* CIS Ready */
#define CC_BPRESEN		(1 << 1)	/* CCCR RES signal */
#define CC_F2RDY		(1 << 2)	/* set CCCR IOR2 bit */
#define CC_CLRPADSISO		(1 << 3)	/* clear SDIO pads isolation */
#define CC_XMTDATAAVAIL_MODE	(1 << 4)
#define CC_XMTDATAAVAIL_CTRL	(1 << 5)

#define SFC_RF_TERM	(1 << 0)	/* Read Frame Terminate */
#define SFC_WF_TERM	(1 << 1)	/* Write Frame Terminate */
#define SFC_CRC4WOOS	(1 << 2)	/* CRC error for write out of sync */
#define SFC_ABORTALL	(1 << 3)	/* Abort all in-progress frames */
/*
 * Software allocation of To SB Mailbox resources
 */

/* tosbmailbox bits corresponding to intstatus bits */
#define SMB_NAK		(1 << 0)	/* Frame NAK */
#define SMB_INT_ACK	(1 << 1)	/* Host Interrupt ACK */
#define SMB_USE_OOB	(1 << 2)	/* Use OOB Wakeup */
#define SMB_DEV_INT	(1 << 3)	/* Miscellaneous Interrupt */

/* tosbmailboxdata */
#define SMB_DATA_VERSION_SHIFT	16	/* host protocol version */

/*
 * Software allocation of To Host Mailbox resources
 */

#define I_HMB_FC_STATE	I_HMB_SW0	/* Flow Control State */
#define I_HMB_FC_CHANGE	I_HMB_SW1	/* Flow Control State Changed */
#define I_HMB_FRAME_IND	I_HMB_SW2	/* Frame Indication */
#define I_HMB_HOST_INT	I_HMB_SW3	/* Miscellaneous Interrupt */

/* tohostmailboxdata */
#define HMB_DATA_NAKHANDLED	1	/* retransmit NAK'd frame */
#define HMB_DATA_DEVREADY	2	/* talk to host after enable */
#define HMB_DATA_FC		4	/* per prio flowcontrol update flag */
#define HMB_DATA_FWREADY	8	/* fw ready for protocol activity */

#define HMB_DATA_FCDATA_MASK	0xff000000
#define HMB_DATA_FCDATA_SHIFT	24

#define HMB_DATA_VERSION_MASK	0x00ff0000
#define HMB_DATA_VERSION_SHIFT	16
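
/* Illustrative decode of a tohostmailboxdata word using the masks above
 * (the value is invented for the example): hmb_data = 0x03040004 carries a
 * flow-control bitmap of (hmb_data & HMB_DATA_FCDATA_MASK) >> 24 = 0x03, a
 * protocol version of (hmb_data & HMB_DATA_VERSION_MASK) >> 16 = 0x04, and
 * the low flag bits contain HMB_DATA_FC (0x4), i.e. a per-priority
 * flow-control update.  brcmf_sdbrcm_hostmail() below performs this
 * unpacking.
 */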
/*
 * Software-defined protocol header
 */

/* Current protocol version */
#define SDPCM_PROT_VERSION	4

/*
 * Shared structure between dongle and the host.
 * The structure contains pointers to trap or assert information.
 */
#define SDPCM_SHARED_VERSION		0x0003
#define SDPCM_SHARED_VERSION_MASK	0x00FF
#define SDPCM_SHARED_ASSERT_BUILT	0x0100
#define SDPCM_SHARED_ASSERT		0x0200
#define SDPCM_SHARED_TRAP		0x0400

/* Space for header read, limit for data packets */
#define MAX_HDR_READ	(1 << 6)
#define MAX_RX_DATASZ	2048

/* Maximum milliseconds to wait for F2 to come up */
#define BRCMF_WAIT_F2RDY	3000

/* Bump up limit on waiting for HT to account for first startup;
 * if the image is doing a CRC calculation before programming the PMU
 * for HT availability, it could take a couple hundred ms more, so
 * max out at 1 second (1000000 us).
 */
#undef PMU_MAX_TRANSITION_DLY
#define PMU_MAX_TRANSITION_DLY 1000000

/* Value for ChipClockCSR during initial setup */
#define BRCMF_INIT_CLKCTL1	(SBSDIO_FORCE_HW_CLKREQ_OFF |	\
					SBSDIO_ALP_AVAIL_REQ)

/* Flags for SDH calls */
#define F2SYNC	(SDIO_REQ_4BYTE | SDIO_REQ_FIXED)

#define BRCMF_SDIO_FW_NAME	"brcm/brcmfmac-sdio.bin"
#define BRCMF_SDIO_NV_NAME	"brcm/brcmfmac-sdio.txt"
MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);

#define BRCMF_IDLE_IMMEDIATE	(-1)	/* Enter idle immediately */
#define BRCMF_IDLE_ACTIVE	0	/* Do not request any SD clock change
					 * when idle
					 */
#define BRCMF_IDLE_INTERVAL	1

#define KSO_WAIT_US 50
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
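
/* With KSO_WAIT_US = 50 and PMU_MAX_TRANSITION_DLY = 1000000, the retry
 * budget works out to 1000000 / 50 = 20000 iterations; assuming each retry
 * waits on the order of KSO_WAIT_US, the KSO handshake in
 * brcmf_sdbrcm_kso_control() is bounded by roughly one second before its
 * error code is returned to the caller.
 */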
/*
 * Conversion of 802.1D priority to precedence level
 */
static uint prio2prec(u32 prio)
{
	return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ?
	       (prio^2) : prio;
}
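
/* A sketch of what the mapping above does, assuming the usual brcmu_wifi.h
 * values PRIO_8021D_BE == 0 and PRIO_8021D_NONE == 2: XOR-ing with 2 swaps
 * those two priorities, so best-effort traffic ends up with a higher
 * precedence than "none"/background while every other priority maps
 * straight through.
 */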
/* Device console log buffer state */
struct brcmf_console {
	uint count;		/* Poll interval msec counter */
	uint log_addr;		/* Log struct address (fixed) */
	struct rte_log_le log_le;	/* Log struct (host copy) */
	uint bufsize;		/* Size of log buffer */
	u8 *buf;		/* Log buffer (host copy) */
	uint last;		/* Last buffer read index */
};
struct brcmf_trap_info {
	__le32 r9;	/* sb/v6 */
	__le32 r10;	/* sl/v7 */
	__le32 r11;	/* fp/v8 */
};
struct sdpcm_shared {
	u32 assert_file_addr;
	u32 console_addr;	/* Address of struct rte_console */
};

struct sdpcm_shared_le {
	__le32 assert_exp_addr;
	__le32 assert_file_addr;
	__le32 console_addr;	/* Address of struct rte_console */
	__le32 msgtrace_addr;
};
/* dongle SDIO bus specific header info */
struct brcmf_sdio_hdrinfo {
	u8 seq_num;
	u8 channel;
	u16 len;
	u16 len_left;
	u16 len_nxtfrm;
	u8 dat_offset;
};

/* misc chip info needed by some of the routines */
/* Private data for SDIO bus interaction */
struct brcmf_sdio {
	struct brcmf_sdio_dev *sdiodev;	/* sdio device handler */
	struct chip_info *ci;	/* Chip info struct */
	char *vars;		/* Variables (from CIS and/or other) */
	uint varsz;		/* Size of variables buffer */

	u32 ramsize;		/* Size of RAM in SOCRAM (bytes) */

	u32 hostintmask;	/* Copy of Host Interrupt Mask */
	atomic_t intstatus;	/* Intstatus bits (events) pending */
	atomic_t fcstate;	/* State of dongle flow-control */

	uint blocksize;		/* Block size of SDIO transfers */
	uint roundup;		/* Max roundup limit */

	struct pktq txq;	/* Queue length used for flow-control */
	u8 flowcontrol;		/* per prio flow control bitmask */
	u8 tx_seq;		/* Transmit sequence number (next) */
	u8 tx_max;		/* Maximum transmit sequence allowed */

	u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
	u8 *rxhdr;		/* Header of current rx frame (in hdrbuf) */
	u8 rx_seq;		/* Receive sequence number (expected) */
	struct brcmf_sdio_hdrinfo cur_read;
				/* info of current read frame */
	bool rxskip;		/* Skip receive (awaiting NAK ACK) */
	bool rxpending;		/* Data frame pending in dongle */

	uint rxbound;		/* Rx frames to read before resched */
	uint txbound;		/* Tx frames to send before resched */

	struct sk_buff *glomd;	/* Packet containing glomming descriptor */
	struct sk_buff_head glom; /* Packet list for glommed superframe */
	uint glomerr;		/* Glom packet read errors */

	u8 *rxbuf;		/* Buffer for receiving control packets */
	uint rxblen;		/* Allocated length of rxbuf */
	u8 *rxctl;		/* Aligned pointer into rxbuf */
	u8 *rxctl_orig;		/* pointer for freeing rxctl */
	uint rxlen;		/* Length of valid data in buffer */
	spinlock_t rxctl_lock;	/* protection lock for ctrl frame resources */

	u8 sdpcm_ver;		/* Bus protocol reported by dongle */

	bool intr;		/* Use interrupts */
	bool poll;		/* Use polling */
	atomic_t ipend;		/* Device interrupt is pending */
	uint spurious;		/* Count of spurious interrupts */
	uint pollrate;		/* Ticks between device polls */
	uint polltick;		/* Tick counter */

	uint console_interval;
	struct brcmf_console console;	/* Console output polling support */
	uint console_addr;	/* Console address from shared struct */

	uint clkstate;		/* State of sd and backplane clock(s) */
	bool activity;		/* Activity flag for clock down */
	s32 idletime;		/* Control for activity timeout */
	s32 idlecount;		/* Activity timeout counter */
	s32 idleclock;		/* How to set bus driver when idle */
	bool rxflow_mode;	/* Rx flow control mode */
	bool rxflow;		/* Is rx flow control on */
	bool alp_only;		/* Don't use HT clock (ALP only) */

	bool ctrl_frame_stat;

	wait_queue_head_t ctrl_wait;
	wait_queue_head_t dcmd_resp_wait;

	struct timer_list timer;
	struct completion watchdog_wait;
	struct task_struct *watchdog_tsk;

	struct workqueue_struct *brcmf_wq;
	struct work_struct datawork;

	const struct firmware *firmware;

	bool txoff;		/* Transmit flow-controlled */
	struct brcmf_sdio_count sdcnt;
	bool sr_enabled;	/* SaveRestore enabled */
	bool sleeping;		/* SDIO bus sleeping */

	u8 tx_hdrlen;		/* sdio bus header length for tx packet */
};
#define CLK_PENDING	2

static int qcount[NUMPRIO];

#define DEFAULT_SDIO_DRIVE_STRENGTH	6	/* in milliamps */

#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)

/* Retry count for register access failures */
static const uint retry_limit = 2;

/* Limit on rounding up frames */
static const uint max_roundup = 512;

enum brcmf_sdio_frmtype {
	BRCMF_SDIO_FT_NORMAL,
	BRCMF_SDIO_FT_SUPER,
	BRCMF_SDIO_FT_SUB,
};
static void pkt_align(struct sk_buff *p, int len, int align)
{
	uint datalign;
	datalign = (unsigned long)(p->data);
	datalign = roundup(datalign, (align)) - datalign;
	if (datalign)
		skb_pull(p, datalign);
	__skb_trim(p, len);
}
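
/* Example of the alignment logic above (values chosen for illustration):
 * if p->data sits at an address ending in 0x06 and align is 4, datalign
 * becomes roundup(6, 4) - 6 = 2, so two bytes are pulled and the payload
 * then starts on a 4-byte boundary.
 */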
/* To check if there's window offered */
static bool data_ok(struct brcmf_sdio *bus)
{
	return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}
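
/* The window arithmetic above is done modulo 256, matching SDPCM_SEQ_WRAP.
 * For example, tx_max = 0x05 and tx_seq = 0xfe gives (u8)(0x05 - 0xfe) = 7
 * frames of credit; the "& 0x80" test treats a difference of 128 or more as
 * a bogus (wrapped-past) window and reports no room to send.
 */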
/*
 * Reads a register in the SDIO hardware block. This block occupies a series of
 * addresses on the 32 bit backplane bus.
 */
static int
r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
	int ret;
	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);

	*regvar = brcmf_sdio_regrl(bus->sdiodev,
				   bus->ci->c_inf[idx].base + offset, &ret);

	return ret;
}

static int
w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
	int ret;
	u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);

	brcmf_sdio_regwl(bus->sdiodev,
			 bus->ci->c_inf[idx].base + reg_offset,
			 regval, &ret);

	return ret;
}
static int
brcmf_sdbrcm_kso_control(struct brcmf_sdio *bus, bool on)
{
	u8 wr_val = 0, rd_val, cmp_val, bmask;
	int err = 0;
	int try_cnt = 0;

	brcmf_dbg(TRACE, "Enter\n");

	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
	/* 1st KSO write goes to AOS wake up core if device is asleep */
	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
			 wr_val, &err);
	if (err) {
		brcmf_err("SDIO_AOS KSO write error: %d\n", err);
		return err;
	}

	if (on) {
		/* device WAKEUP through KSO:
		 * write bit 0 & read back until
		 * both bits 0 (kso bit) & 1 (dev on status) are set
		 */
		cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
			  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
		bmask = cmp_val;
		usleep_range(2000, 3000);
	} else {
		/* Put device to sleep, turn off KSO */
		cmp_val = 0;
		/* only check for bit0, bit1(dev on status) may not
		 * get cleared right away
		 */
		bmask = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK;
	}

	do {
		/* reliable KSO bit set/clr:
		 * the sdiod sleep write access is synced to PMU 32khz clk
		 * just one write attempt may fail,
		 * read it back until it matches written value
		 */
		rd_val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
					  &err);
		if (((rd_val & bmask) == cmp_val) && !err)
			break;
		brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
			  try_cnt, MAX_KSO_ATTEMPTS, err);
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
				 wr_val, &err);
	} while (try_cnt++ < MAX_KSO_ATTEMPTS);

	return err;
}
#define PKT_AVAILABLE()		(intstatus & I_HMB_FRAME_IND)

#define HOSTINTMASK		(I_HMB_SW_MASK | I_CHIPACTIVE)
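
/* HOSTINTMASK limits the interrupt sources the host reacts to: the four
 * software mailbox bits (I_HMB_SW_MASK) plus the chip-active event.
 * PKT_AVAILABLE() then just tests the I_HMB_FRAME_IND mailbox bit within a
 * sampled intstatus value to decide whether receive frames are pending.
 */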
/* Turn backplane clock on or off */
static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
	int err;
	u8 clkctl, clkreq, devctl;
	unsigned long timeout;

	brcmf_dbg(SDIO, "Enter\n");

	if (bus->sr_enabled) {
		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
		return 0;
	}

	if (on) {
		/* Request HT Avail */
		clkreq =
		    bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;

		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				 clkreq, &err);
		if (err) {
			brcmf_err("HT Avail request error: %d\n", err);
			return -EBADE;
		}

		/* Check current status */
		clkctl = brcmf_sdio_regrb(bus->sdiodev,
					  SBSDIO_FUNC1_CHIPCLKCSR, &err);
		if (err) {
			brcmf_err("HT Avail read error: %d\n", err);
			return -EBADE;
		}

		/* Go to pending and await interrupt if appropriate */
		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
			/* Allow only clock-available interrupt */
			devctl = brcmf_sdio_regrb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			if (err) {
				brcmf_err("Devctl error setting CA: %d\n",
					  err);
				return -EBADE;
			}

			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					 devctl, &err);
			brcmf_dbg(SDIO, "CLKCTL: set PENDING\n");
			bus->clkstate = CLK_PENDING;
			return 0;
		} else if (bus->clkstate == CLK_PENDING) {
			/* Cancel CA-only interrupt filter */
			devctl = brcmf_sdio_regrb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					 devctl, &err);
		}

		/* Otherwise, wait here (polling) for HT Avail */
		timeout = jiffies +
			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY/1000);
		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
			clkctl = brcmf_sdio_regrb(bus->sdiodev,
						  SBSDIO_FUNC1_CHIPCLKCSR,
						  &err);
			if (time_after(jiffies, timeout))
				break;
			usleep_range(5000, 10000);
		}
		if (err) {
			brcmf_err("HT Avail request error: %d\n", err);
			return -EBADE;
		}
		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
			brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
				  PMU_MAX_TRANSITION_DLY, clkctl);
			return -EBADE;
		}

		/* Mark clock available */
		bus->clkstate = CLK_AVAIL;
		brcmf_dbg(SDIO, "CLKCTL: turned ON\n");

#if defined(DEBUG)
		if (!bus->alp_only) {
			if (SBSDIO_ALPONLY(clkctl))
				brcmf_err("HT Clock should be on\n");
		}
#endif				/* defined (DEBUG) */

		bus->activity = true;
	} else {
		clkreq = 0;

		if (bus->clkstate == CLK_PENDING) {
			/* Cancel CA-only interrupt filter */
			devctl = brcmf_sdio_regrb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					 devctl, &err);
		}

		bus->clkstate = CLK_SDONLY;
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				 clkreq, &err);
		brcmf_dbg(SDIO, "CLKCTL: turned OFF\n");
		if (err) {
			brcmf_err("Failed access turning clock off: %d\n",
				  err);
			return -EBADE;
		}
	}
	return 0;
}
/* Change idle/active SD state */
static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
{
	brcmf_dbg(SDIO, "Enter\n");

	if (on)
		bus->clkstate = CLK_SDONLY;
	else
		bus->clkstate = CLK_NONE;

	return 0;
}
/* Transition SD and backplane clock readiness */
static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
	uint oldstate = bus->clkstate;

	brcmf_dbg(SDIO, "Enter\n");

	/* Early exit if we're already there */
	if (bus->clkstate == target) {
		if (target == CLK_AVAIL) {
			brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
			bus->activity = true;
		}
		return 0;
	}

	switch (target) {
	case CLK_AVAIL:
		/* Make sure SD clock is available */
		if (bus->clkstate == CLK_NONE)
			brcmf_sdbrcm_sdclk(bus, true);
		/* Now request HT Avail on the backplane */
		brcmf_sdbrcm_htclk(bus, true, pendok);
		brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
		bus->activity = true;
		break;

	case CLK_SDONLY:
		/* Remove HT request, or bring up SD clock */
		if (bus->clkstate == CLK_NONE)
			brcmf_sdbrcm_sdclk(bus, true);
		else if (bus->clkstate == CLK_AVAIL)
			brcmf_sdbrcm_htclk(bus, false, false);
		else
			brcmf_err("request for %d -> %d\n",
				  bus->clkstate, target);
		brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
		break;

	case CLK_NONE:
		/* Make sure to remove HT request */
		if (bus->clkstate == CLK_AVAIL)
			brcmf_sdbrcm_htclk(bus, false, false);
		/* Now remove the SD clock */
		brcmf_sdbrcm_sdclk(bus, false);
		brcmf_sdbrcm_wd_timer(bus, 0);
		break;
	}

	brcmf_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);

	return 0;
}
static int
brcmf_sdbrcm_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
	int err = 0;

	brcmf_dbg(TRACE, "Enter\n");
	brcmf_dbg(SDIO, "request %s currently %s\n",
		  (sleep ? "SLEEP" : "WAKE"),
		  (bus->sleeping ? "SLEEP" : "WAKE"));

	/* If SR is enabled control bus state with KSO */
	if (bus->sr_enabled) {
		/* Done if we're already in the requested state */
		if (sleep == bus->sleeping)
			goto end;

		/* Going to sleep */
		if (sleep) {
			/* Don't sleep if something is pending */
			if (atomic_read(&bus->intstatus) ||
			    atomic_read(&bus->ipend) > 0 ||
			    (!atomic_read(&bus->fcstate) &&
			     brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
			     data_ok(bus)))
				return -EBUSY;

			err = brcmf_sdbrcm_kso_control(bus, false);
			/* disable watchdog */
			if (!err)
				brcmf_sdbrcm_wd_timer(bus, 0);
		} else {
			err = brcmf_sdbrcm_kso_control(bus, true);
		}
		if (!err) {
			/* Change state */
			bus->sleeping = sleep;
			brcmf_dbg(SDIO, "new state %s\n",
				  (sleep ? "SLEEP" : "WAKE"));
		} else {
			brcmf_err("error while changing bus sleep state %d\n",
				  err);
			return err;
		}
	}

end:
	/* control clocks */
	if (sleep) {
		if (!bus->sr_enabled)
			brcmf_sdbrcm_clkctl(bus, CLK_NONE, pendok);
	} else {
		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, pendok);
	}

	return err;
}
static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
	u32 intstatus = 0;
	u32 hmb_data;
	u8 fcbits;
	int ret;

	brcmf_dbg(SDIO, "Enter\n");

	/* Read mailbox data and ack that we did so */
	ret = r_sdreg32(bus, &hmb_data,
			offsetof(struct sdpcmd_regs, tohostmailboxdata));

	if (ret == 0)
		w_sdreg32(bus, SMB_INT_ACK,
			  offsetof(struct sdpcmd_regs, tosbmailbox));
	bus->sdcnt.f1regdata += 2;

	/* Dongle recomposed rx frames, accept them again */
	if (hmb_data & HMB_DATA_NAKHANDLED) {
		brcmf_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n",
			  bus->rx_seq);
		if (!bus->rxskip)
			brcmf_err("unexpected NAKHANDLED!\n");

		bus->rxskip = false;
		intstatus |= I_HMB_FRAME_IND;
	}

	/*
	 * DEVREADY does not occur with gSPI.
	 */
	if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) {
		bus->sdpcm_ver =
		    (hmb_data & HMB_DATA_VERSION_MASK) >>
		    HMB_DATA_VERSION_SHIFT;
		if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
			brcmf_err("Version mismatch, dongle reports %d, "
				  "expecting %d\n",
				  bus->sdpcm_ver, SDPCM_PROT_VERSION);
		else
			brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
				  bus->sdpcm_ver);
	}

	/*
	 * Flow Control has been moved into the RX headers and this out of band
	 * method isn't used any more.
	 * Remaining backward compatible with older dongles.
	 */
	if (hmb_data & HMB_DATA_FC) {
		fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >>
			 HMB_DATA_FCDATA_SHIFT;

		if (fcbits & ~bus->flowcontrol)
			bus->sdcnt.fc_xoff++;

		if (bus->flowcontrol & ~fcbits)
			bus->sdcnt.fc_xon++;

		bus->sdcnt.fc_rcvd++;
		bus->flowcontrol = fcbits;
	}

	/* Shouldn't be any others */
	if (hmb_data & ~(HMB_DATA_DEVREADY |
			 HMB_DATA_NAKHANDLED |
			 HMB_DATA_FC |
			 HMB_DATA_FWREADY |
			 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
		brcmf_err("Unknown mailbox data content: 0x%02x\n",
			  hmb_data);

	return intstatus;
}
static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
	uint retries = 0;
	u16 lastrbc;
	u8 hi, lo;
	int err;

	brcmf_err("%sterminate frame%s\n",
		  abort ? "abort command, " : "",
		  rtx ? ", send NAK" : "");

	if (abort)
		brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);

	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
			 SFC_RF_TERM, &err);
	bus->sdcnt.f1regdata++;

	/* Wait until the packet has been flushed (device/FIFO stable) */
	for (lastrbc = retries = 0xffff; retries > 0; retries--) {
		hi = brcmf_sdio_regrb(bus->sdiodev,
				      SBSDIO_FUNC1_RFRAMEBCHI, &err);
		lo = brcmf_sdio_regrb(bus->sdiodev,
				      SBSDIO_FUNC1_RFRAMEBCLO, &err);
		bus->sdcnt.f1regdata += 2;

		if ((hi == 0) && (lo == 0))
			break;

		if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
			brcmf_err("count growing: last 0x%04x now 0x%04x\n",
				  lastrbc, (hi << 8) + lo);
		}
		lastrbc = (hi << 8) + lo;
	}

	if (!retries)
		brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
	else
		brcmf_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries);

	if (rtx) {
		err = w_sdreg32(bus, SMB_NAK,
				offsetof(struct sdpcmd_regs, tosbmailbox));

		bus->sdcnt.f1regdata++;
		if (err == 0)
			bus->rxskip = true;
	}

	/* Clear partial in any case */
	bus->cur_read.len = 0;

	/* If we can't reach the device, signal failure */
	if (err)
		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
/* return total length of buffer chain */
static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
{
	struct sk_buff *p;
	uint total;

	total = 0;
	skb_queue_walk(&bus->glom, p)
		total += p->len;
	return total;
}
static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
{
	struct sk_buff *cur, *next;

	skb_queue_walk_safe(&bus->glom, cur, next) {
		skb_unlink(cur, &bus->glom);
		brcmu_pkt_buf_free_skb(cur);
	}
}
/*
 * brcmfmac sdio bus specific header
 * This is the lowest layer header wrapped on the packets transmitted between
 * host and WiFi dongle which contains information needed for SDIO core and
 * firmware
 *
 * It consists of 2 parts: hw header and software header
 * hardware header (frame tag) - 4 bytes
 * Byte 0~1: Frame length
 * Byte 2~3: Checksum, bit-wise inverse of frame length
 * software header - 8 bytes
 * Byte 0: Rx/Tx sequence number
 * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
 * Byte 2: Length of next data frame, reserved for Tx
 * Byte 3: Data offset
 * Byte 4: Flow control bits, reserved for Tx
 * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet
 * Byte 6~7: Reserved
 */
#define SDPCM_HWHDR_LEN			4
#define SDPCM_SWHDR_LEN			8
#define SDPCM_HDRLEN			(SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
/* software header */
#define SDPCM_SEQ_MASK			0x000000ff
#define SDPCM_SEQ_WRAP			256
#define SDPCM_CHANNEL_MASK		0x00000f00
#define SDPCM_CHANNEL_SHIFT		8
#define SDPCM_CONTROL_CHANNEL		0	/* Control */
#define SDPCM_EVENT_CHANNEL		1	/* Async Event Indication */
#define SDPCM_DATA_CHANNEL		2	/* Data Xmit/Recv */
#define SDPCM_GLOM_CHANNEL		3	/* Coalesced packets */
#define SDPCM_TEST_CHANNEL		15	/* Test/debug packets */
#define SDPCM_GLOMDESC(p)		(((u8 *)p)[1] & 0x80)
#define SDPCM_NEXTLEN_MASK		0x00ff0000
#define SDPCM_NEXTLEN_SHIFT		16
#define SDPCM_DOFFSET_MASK		0xff000000
#define SDPCM_DOFFSET_SHIFT		24
#define SDPCM_FCMASK_MASK		0x000000ff
#define SDPCM_WINDOW_MASK		0x0000ff00
#define SDPCM_WINDOW_SHIFT		8
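
/* Worked example of the software-header bit layout above (value invented for
 * illustration): a first sw-header word of 0x0c600205 decodes as sequence
 * number 0x05, channel 2 (SDPCM_DATA_CHANNEL), next-frame length field 0x60
 * (which brcmf_sdio_hdparse() scales by 16 to 1536 bytes) and data offset
 * 0x0c, i.e. the payload starts right after the 12-byte SDPCM header.
 */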
static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
{
	u32 hdrvalue;
	hdrvalue = *(u32 *)swheader;
	return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}
static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
			      struct brcmf_sdio_hdrinfo *rd,
			      enum brcmf_sdio_frmtype type)
{
	u16 len, checksum;
	u8 rx_seq, fc, tx_seq_max;
	u32 swheader;

	/* hw header */
	len = get_unaligned_le16(header);
	checksum = get_unaligned_le16(header + sizeof(u16));
	/* All zero means no more to read */
	if (!(len | checksum)) {
		bus->rxpending = false;
		return -ENODATA;
	}
	if ((u16)(~(len ^ checksum))) {
		brcmf_err("HW header checksum error\n");
		bus->sdcnt.rx_badhdr++;
		brcmf_sdbrcm_rxfail(bus, false, false);
		return -EIO;
	}
	if (len < SDPCM_HDRLEN) {
		brcmf_err("HW header length error\n");
		return -EPROTO;
	}
	if (type == BRCMF_SDIO_FT_SUPER &&
	    (roundup(len, bus->blocksize) != rd->len)) {
		brcmf_err("HW superframe header length error\n");
		return -EPROTO;
	}
	if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
		brcmf_err("HW subframe header length error\n");
		return -EPROTO;
	}
	rd->len = len;

	/* software header */
	header += SDPCM_HWHDR_LEN;
	swheader = le32_to_cpu(*(__le32 *)header);
	if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
		brcmf_err("Glom descriptor found in superframe head\n");
		rd->len = 0;
		return -EINVAL;
	}
	rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
	rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
	if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
	    type != BRCMF_SDIO_FT_SUPER) {
		brcmf_err("HW header length too long\n");
		bus->sdcnt.rx_toolong++;
		brcmf_sdbrcm_rxfail(bus, false, false);
		rd->len = 0;
		return -EPROTO;
	}
	if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
		brcmf_err("Wrong channel for superframe\n");
		rd->len = 0;
		return -EINVAL;
	}
	if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
	    rd->channel != SDPCM_EVENT_CHANNEL) {
		brcmf_err("Wrong channel for subframe\n");
		rd->len = 0;
		return -EINVAL;
	}
	rd->dat_offset = brcmf_sdio_getdatoffset(header);
	if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
		brcmf_err("seq %d: bad data offset\n", rx_seq);
		bus->sdcnt.rx_badhdr++;
		brcmf_sdbrcm_rxfail(bus, false, false);
		rd->len = 0;
		return -ENXIO;
	}
	if (rd->seq_num != rx_seq) {
		brcmf_err("seq %d: sequence number error, expect %d\n",
			  rx_seq, rd->seq_num);
		bus->sdcnt.rx_badseq++;
		rd->seq_num = rx_seq;
	}
	/* no need to check the reset for subframe */
	if (type == BRCMF_SDIO_FT_SUB)
		return 0;
	rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
	if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
		/* only warn for non-glom packet */
		if (rd->channel != SDPCM_GLOM_CHANNEL)
			brcmf_err("seq %d: next length error\n", rx_seq);
		rd->len_nxtfrm = 0;
	}
	swheader = le32_to_cpu(*(__le32 *)(header + 4));
	fc = swheader & SDPCM_FCMASK_MASK;
	if (bus->flowcontrol != fc) {
		if (~bus->flowcontrol & fc)
			bus->sdcnt.fc_xoff++;
		if (bus->flowcontrol & ~fc)
			bus->sdcnt.fc_xon++;
		bus->sdcnt.fc_rcvd++;
		bus->flowcontrol = fc;
	}
	tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
	if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
		brcmf_err("seq %d: max tx seq number error\n", rx_seq);
		tx_seq_max = bus->tx_seq + 2;
	}
	bus->tx_max = tx_seq_max;

	return 0;
}
static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
{
	*(__le16 *)header = cpu_to_le16(frm_length);
	*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
}
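
/* Example of the frame-tag rule ("checksum is the bit-wise inverse of the
 * frame length") implemented above: for a 1536-byte frame frm_length is
 * 0x0600, so the header carries 0x0600 followed by ~0x0600 = 0xf9ff; the
 * receive side in brcmf_sdio_hdparse() checks that len ^ checksum is all
 * ones before trusting the header.
 */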
static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
			      struct brcmf_sdio_hdrinfo *hd_info)
{
	u32 sw_header;

	brcmf_sdio_update_hwhdr(header, hd_info->len);

	sw_header = bus->tx_seq;
	sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
		     SDPCM_CHANNEL_MASK;
	sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
		     SDPCM_DOFFSET_MASK;
	*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
	*(((__le32 *)header) + 2) = 0;
}
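
/* Note on the layout produced above: the 4-byte hardware frame tag is
 * followed by two 32-bit software-header words, so SDPCM_HDRLEN (12 bytes)
 * of every transmit frame is consumed by the bus header before the payload,
 * which is why the transmit path sets hd_info->dat_offset to the header
 * length plus any head padding.
 */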
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
	u16 dlen, totlen;
	u8 *dptr, num = 0;
	u16 sublen;
	struct sk_buff *pfirst, *pnext;
	int errcode;
	u8 doff, sfdoff;
	u16 align = 0;

	struct brcmf_sdio_hdrinfo rd_new;

	/* If packets, issue read(s) and send up packet chain */
	/* Return sequence numbers consumed? */

	brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
		  bus->glomd, skb_peek(&bus->glom));

	if (bus->sdiodev->pdata)
		align = bus->sdiodev->pdata->sd_sgentry_align;
	if (align < 4)
		align = 4;

	/* If there's a descriptor, generate the packet chain */
	if (bus->glomd) {
		pfirst = pnext = NULL;
		dlen = (u16) (bus->glomd->len);
		dptr = bus->glomd->data;
		if (!dlen || (dlen & 1)) {
			brcmf_err("bad glomd len(%d), ignore descriptor\n",
				  dlen);
			dlen = 0;
		}

		for (totlen = num = 0; dlen; num++) {
			/* Get (and move past) next length */
			sublen = get_unaligned_le16(dptr);
			dlen -= sizeof(u16);
			dptr += sizeof(u16);
			if ((sublen < SDPCM_HDRLEN) ||
			    ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
				brcmf_err("descriptor len %d bad: %d\n",
					  num, sublen);
				pnext = NULL;
				break;
			}
			if (sublen % align) {
				brcmf_err("sublen %d not multiple of %d\n",
					  sublen, align);
			}
			totlen += sublen;

			/* For last frame, adjust read len so total
				 is a block multiple */
			if (!dlen) {
				sublen +=
				    (roundup(totlen, bus->blocksize) - totlen);
				totlen = roundup(totlen, bus->blocksize);
			}

			/* Allocate/chain packet for next subframe */
			pnext = brcmu_pkt_buf_get_skb(sublen + align);
			if (pnext == NULL) {
				brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
					  num, sublen);
				break;
			}
			skb_queue_tail(&bus->glom, pnext);

			/* Adhere to start alignment requirements */
			pkt_align(pnext, sublen, align);
		}

		/* If all allocations succeeded, save packet chain
			 in bus structure */
		if (pnext) {
			brcmf_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n",
				  totlen, num);
			if (BRCMF_GLOM_ON() && bus->cur_read.len &&
			    totlen != bus->cur_read.len) {
				brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
					  bus->cur_read.len, totlen, rxseq);
			}
			pfirst = pnext = NULL;
		} else {
			brcmf_sdbrcm_free_glom(bus);
			num = 0;
		}

		/* Done with descriptor packet */
		brcmu_pkt_buf_free_skb(bus->glomd);
		bus->glomd = NULL;
		bus->cur_read.len = 0;
	}

	/* Ok -- either we just generated a packet chain,
		 or had one from before */
	if (!skb_queue_empty(&bus->glom)) {
		if (BRCMF_GLOM_ON()) {
			brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
			skb_queue_walk(&bus->glom, pnext) {
				brcmf_dbg(GLOM, "    %p: %p len 0x%04x (%d)\n",
					  pnext, (u8 *) (pnext->data),
					  pnext->len, pnext->len);
			}
		}

		pfirst = skb_peek(&bus->glom);
		dlen = (u16) brcmf_sdbrcm_glom_len(bus);

		/* Do an SDIO read for the superframe.  Configurable iovar to
		 * read directly into the chained packet, or allocate a large
		 * packet and copy into the chain.
		 */
		sdio_claim_host(bus->sdiodev->func[1]);
		errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
						  bus->sdiodev->sbwad,
						  SDIO_FUNC_2, F2SYNC,
						  &bus->glom);
		sdio_release_host(bus->sdiodev->func[1]);
		bus->sdcnt.f2rxdata++;

		/* On failure, kill the superframe, allow a couple retries */
		if (errcode < 0) {
			brcmf_err("glom read of %d bytes failed: %d\n",
				  dlen, errcode);

			sdio_claim_host(bus->sdiodev->func[1]);
			if (bus->glomerr++ < 3) {
				brcmf_sdbrcm_rxfail(bus, true, true);
			} else {
				bus->glomerr = 0;
				brcmf_sdbrcm_rxfail(bus, true, false);
				bus->sdcnt.rxglomfail++;
				brcmf_sdbrcm_free_glom(bus);
			}
			sdio_release_host(bus->sdiodev->func[1]);
			bus->cur_read.len = 0;
			return 0;
		}

		brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
				   pfirst->data, min_t(int, pfirst->len, 48),
				   "SUPERFRAME:\n");

		rd_new.seq_num = rxseq;
		rd_new.len = dlen;
		sdio_claim_host(bus->sdiodev->func[1]);
		errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
					     BRCMF_SDIO_FT_SUPER);
		sdio_release_host(bus->sdiodev->func[1]);
		bus->cur_read.len = rd_new.len_nxtfrm << 4;

		/* Remove superframe header, remember offset */
		skb_pull(pfirst, rd_new.dat_offset);
		sfdoff = rd_new.dat_offset;
		num = 0;

		/* Validate all the subframe headers */
		skb_queue_walk(&bus->glom, pnext) {
			/* leave when invalid subframe is found */
			if (errcode)
				break;

			rd_new.len = pnext->len;
			rd_new.seq_num = rxseq++;
			sdio_claim_host(bus->sdiodev->func[1]);
			errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
						     BRCMF_SDIO_FT_SUB);
			sdio_release_host(bus->sdiodev->func[1]);
			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
					   pnext->data, 32, "subframe:\n");

			num++;
		}

		if (errcode) {
			/* Terminate frame on error, request
				 a couple retries */
			sdio_claim_host(bus->sdiodev->func[1]);
			if (bus->glomerr++ < 3) {
				/* Restore superframe header space */
				skb_push(pfirst, sfdoff);
				brcmf_sdbrcm_rxfail(bus, true, true);
			} else {
				bus->glomerr = 0;
				brcmf_sdbrcm_rxfail(bus, true, false);
				bus->sdcnt.rxglomfail++;
				brcmf_sdbrcm_free_glom(bus);
			}
			sdio_release_host(bus->sdiodev->func[1]);
			bus->cur_read.len = 0;
			return 0;
		}

		/* Basic SD framing looks ok - process each packet (header) */

		skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
			dptr = (u8 *) (pfirst->data);
			sublen = get_unaligned_le16(dptr);
			doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);

			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
					   dptr, pfirst->len,
					   "Rx Subframe Data:\n");

			__skb_trim(pfirst, sublen);
			skb_pull(pfirst, doff);

			if (pfirst->len == 0) {
				skb_unlink(pfirst, &bus->glom);
				brcmu_pkt_buf_free_skb(pfirst);
				continue;
			}

			brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
					   pfirst->data,
					   min_t(int, pfirst->len, 32),
					   "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
					   bus->glom.qlen, pfirst, pfirst->data,
					   pfirst->len, pfirst->next,
					   pfirst->prev);
		}
		/* send any remaining packets up */
		if (bus->glom.qlen)
			brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);

		bus->sdcnt.rxglomframes++;
		bus->sdcnt.rxglompkts += bus->glom.qlen;
	}
	return num;
}
static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
				       bool *pending)
{
	DECLARE_WAITQUEUE(wait, current);
	int timeout = msecs_to_jiffies(DCMD_RESP_TIMEOUT);

	/* Wait until control frame is available */
	add_wait_queue(&bus->dcmd_resp_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!(*condition) && (!signal_pending(current) && timeout))
		timeout = schedule_timeout(timeout);

	if (signal_pending(current))
		*pending = true;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&bus->dcmd_resp_wait, &wait);

	return timeout;
}
static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
{
	if (waitqueue_active(&bus->dcmd_resp_wait))
		wake_up_interruptible(&bus->dcmd_resp_wait);

	return 0;
}
static void
brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
	uint rdlen, pad;
	u8 *buf = NULL, *rbuf;
	int sdret;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus->rxblen)
		buf = vzalloc(bus->rxblen);
	if (!buf)
		goto done;

	rbuf = bus->rxbuf;
	pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
	if (pad)
		rbuf += (BRCMF_SDALIGN - pad);

	/* Copy the already-read portion over */
	memcpy(buf, hdr, BRCMF_FIRSTREAD);
	if (len <= BRCMF_FIRSTREAD)
		goto gotpkt;

	/* Raise rdlen to next SDIO block to avoid tail command */
	rdlen = len - BRCMF_FIRSTREAD;
	if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
		pad = bus->blocksize - (rdlen % bus->blocksize);
		if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
		    ((len + pad) < bus->sdiodev->bus_if->maxctl))
			rdlen += pad;
	} else if (rdlen % BRCMF_SDALIGN) {
		rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
	}

	/* Satisfy length-alignment requirements */
	if (rdlen & (ALIGNMENT - 1))
		rdlen = roundup(rdlen, ALIGNMENT);

	/* Drop if the read is too big or it exceeds our maximum */
	if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
		brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
			  rdlen, bus->sdiodev->bus_if->maxctl);
		brcmf_sdbrcm_rxfail(bus, false, false);
		goto done;
	}

	if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
		brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
			  len, len - doff, bus->sdiodev->bus_if->maxctl);
		bus->sdcnt.rx_toolong++;
		brcmf_sdbrcm_rxfail(bus, false, false);
		goto done;
	}

	/* Read remainder of frame body */
	sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
				      bus->sdiodev->sbwad,
				      SDIO_FUNC_2,
				      F2SYNC, rbuf, rdlen);
	bus->sdcnt.f2rxdata++;

	/* Control frame failures need retransmission */
	if (sdret < 0) {
		brcmf_err("read %d control bytes failed: %d\n",
			  rdlen, sdret);
		bus->sdcnt.rxc_errors++;
		brcmf_sdbrcm_rxfail(bus, true, true);
		goto done;
	} else
		memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);

gotpkt:

	brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
			   buf, len, "RxCtrl:\n");

	/* Point to valid data and indicate its length */
	spin_lock_bh(&bus->rxctl_lock);
	if (bus->rxctl) {
		brcmf_err("last control frame is being processed.\n");
		spin_unlock_bh(&bus->rxctl_lock);
		vfree(buf);
		goto done;
	}
	bus->rxctl = buf + doff;
	bus->rxctl_orig = buf;
	bus->rxlen = len - doff;
	spin_unlock_bh(&bus->rxctl_lock);

done:
	/* Awake any waiters */
	brcmf_sdbrcm_dcmd_resp_wake(bus);
}
/* Pad read to blocksize for efficiency */
static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
	if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
		*pad = bus->blocksize - (*rdlen % bus->blocksize);
		if (*pad <= bus->roundup && *pad < bus->blocksize &&
		    *rdlen + *pad + BRCMF_FIRSTREAD < MAX_RX_DATASZ)
			*rdlen += *pad;
	} else if (*rdlen % BRCMF_SDALIGN) {
		*rdlen += BRCMF_SDALIGN - (*rdlen % BRCMF_SDALIGN);
	}
}
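
/* Example with typical numbers (chosen for illustration, assuming a 512-byte
 * F2 block size): a 700-byte read gives *pad = 512 - (700 % 512) = 324, and
 * since that pad fits within the roundup limit the read length is raised to
 * 1024, a whole number of blocks; a short read that stays under the block
 * size is instead only rounded up to the next BRCMF_SDALIGN boundary when
 * not already aligned.
 */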
static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
{
	struct sk_buff *pkt;		/* Packet for event or data frames */
	struct sk_buff_head pktlist;	/* needed for bus interface */
	u16 pad;		/* Number of pad bytes to read */
	uint rxleft = 0;	/* Remaining number of frames allowed */
	int ret;		/* Return code from calls */
	uint rxcount = 0;	/* Total frames read */
	struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
	u8 head_read = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* Not finished unless we encounter no more frames indication */
	bus->rxpending = true;

	for (rd->seq_num = bus->rx_seq, rxleft = maxframes;
	     !bus->rxskip && rxleft &&
	     bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
	     rd->seq_num++, rxleft--) {

		/* Handle glomming separately */
		if (bus->glomd || !skb_queue_empty(&bus->glom)) {
			u8 cnt;
			brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
				  bus->glomd, skb_peek(&bus->glom));
			cnt = brcmf_sdbrcm_rxglom(bus, rd->seq_num);
			brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
			rd->seq_num += cnt - 1;
			rxleft = (rxleft > cnt) ? (rxleft - cnt) : 1;
			continue;
		}

		rd->len_left = rd->len;
		/* read header first for unknown frame length */
		sdio_claim_host(bus->sdiodev->func[1]);
		if (!rd->len) {
			ret = brcmf_sdcard_recv_buf(bus->sdiodev,
						    bus->sdiodev->sbwad,
						    SDIO_FUNC_2, F2SYNC,
						    bus->rxhdr,
						    BRCMF_FIRSTREAD);
			bus->sdcnt.f2rxhdrs++;
			if (ret < 0) {
				brcmf_err("RXHEADER FAILED: %d\n",
					  ret);
				bus->sdcnt.rx_hdrfail++;
				brcmf_sdbrcm_rxfail(bus, true, true);
				sdio_release_host(bus->sdiodev->func[1]);
				continue;
			}

			brcmf_dbg_hex_dump(BRCMF_BYTES_ON() || BRCMF_HDRS_ON(),
					   bus->rxhdr, SDPCM_HDRLEN,
					   "RxHdr:\n");

			if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
					       BRCMF_SDIO_FT_NORMAL)) {
				sdio_release_host(bus->sdiodev->func[1]);
				if (!bus->rxpending)
					break;
				else
					continue;
			}

			if (rd->channel == SDPCM_CONTROL_CHANNEL) {
				brcmf_sdbrcm_read_control(bus, bus->rxhdr,
							  rd->len,
							  rd->dat_offset);
				/* prepare the descriptor for the next read */
				rd->len = rd->len_nxtfrm << 4;
				rd->len_nxtfrm = 0;
				/* treat all packet as event if we don't know */
				rd->channel = SDPCM_EVENT_CHANNEL;
				sdio_release_host(bus->sdiodev->func[1]);
				continue;
			}
			rd->len_left = rd->len > BRCMF_FIRSTREAD ?
				       rd->len - BRCMF_FIRSTREAD : 0;
			head_read = BRCMF_FIRSTREAD;
		}

		brcmf_pad(bus, &pad, &rd->len_left);

		pkt = brcmu_pkt_buf_get_skb(rd->len_left + head_read +
					    BRCMF_SDALIGN);
		if (!pkt) {
			/* Give up on data, request rtx of events */
			brcmf_err("brcmu_pkt_buf_get_skb failed\n");
			brcmf_sdbrcm_rxfail(bus, false,
					    RETRYCHAN(rd->channel));
			sdio_release_host(bus->sdiodev->func[1]);
			continue;
		}
		skb_pull(pkt, head_read);
		pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);

		ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
					    SDIO_FUNC_2, F2SYNC, pkt);
		bus->sdcnt.f2rxdata++;
		sdio_release_host(bus->sdiodev->func[1]);

		if (ret < 0) {
			brcmf_err("read %d bytes from channel %d failed: %d\n",
				  rd->len, rd->channel, ret);
			brcmu_pkt_buf_free_skb(pkt);
			sdio_claim_host(bus->sdiodev->func[1]);
			brcmf_sdbrcm_rxfail(bus, true,
					    RETRYCHAN(rd->channel));
			sdio_release_host(bus->sdiodev->func[1]);
			continue;
		}

		if (head_read) {
			skb_push(pkt, head_read);
			memcpy(pkt->data, bus->rxhdr, head_read);
			head_read = 0;
		} else {
			memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
			rd_new.seq_num = rd->seq_num;
			sdio_claim_host(bus->sdiodev->func[1]);
			if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
					       BRCMF_SDIO_FT_NORMAL)) {
				rd->len = 0;
				brcmu_pkt_buf_free_skb(pkt);
			}
			bus->sdcnt.rx_readahead_cnt++;
			if (rd->len != roundup(rd_new.len, 16)) {
				brcmf_err("frame length mismatch:read %d, should be %d\n",
					  rd->len,
					  roundup(rd_new.len, 16) >> 4);
				rd->len = 0;
				brcmf_sdbrcm_rxfail(bus, true, true);
				sdio_release_host(bus->sdiodev->func[1]);
				brcmu_pkt_buf_free_skb(pkt);
				continue;
			}
			sdio_release_host(bus->sdiodev->func[1]);
			rd->len_nxtfrm = rd_new.len_nxtfrm;
			rd->channel = rd_new.channel;
			rd->dat_offset = rd_new.dat_offset;

			brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
					     BRCMF_DATA_ON()) &&
					   BRCMF_HDRS_ON(),
					   bus->rxhdr, SDPCM_HDRLEN,
					   "RxHdr:\n");

			if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
				brcmf_err("readahead on control packet %d?\n",
					  rd_new.seq_num);
				/* Force retry w/normal header read */
				rd->len = 0;
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdbrcm_rxfail(bus, false, true);
				sdio_release_host(bus->sdiodev->func[1]);
				brcmu_pkt_buf_free_skb(pkt);
				continue;
			}
		}

		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
				   pkt->data, rd->len, "Rx Data:\n");

		/* Save superframe descriptor and allocate packet frame */
		if (rd->channel == SDPCM_GLOM_CHANNEL) {
			if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
				brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
					  rd->len);
				brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
						   pkt->data, rd->len,
						   "Glom Data:\n");
				__skb_trim(pkt, rd->len);
				skb_pull(pkt, SDPCM_HDRLEN);
				bus->glomd = pkt;
			} else {
				brcmf_err("%s: glom superframe w/o "
					  "descriptor!\n", __func__);
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdbrcm_rxfail(bus, false, false);
				sdio_release_host(bus->sdiodev->func[1]);
			}
			/* prepare the descriptor for the next read */
			rd->len = rd->len_nxtfrm << 4;
			rd->len_nxtfrm = 0;
			/* treat all packet as event if we don't know */
			rd->channel = SDPCM_EVENT_CHANNEL;
			continue;
		}

		/* Fill in packet len and prio, deliver upward */
		__skb_trim(pkt, rd->len);
		skb_pull(pkt, rd->dat_offset);

		/* prepare the descriptor for the next read */
		rd->len = rd->len_nxtfrm << 4;
		rd->len_nxtfrm = 0;
		/* treat all packet as event if we don't know */
		rd->channel = SDPCM_EVENT_CHANNEL;

		if (pkt->len == 0) {
			brcmu_pkt_buf_free_skb(pkt);
			continue;
		}

		skb_queue_head_init(&pktlist);
		skb_queue_tail(&pktlist, pkt);
		brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
	}

	rxcount = maxframes - rxleft;
	/* Message if we hit the limit */
	if (!rxleft)
		brcmf_dbg(DATA, "hit rx limit of %d frames\n", maxframes);
	else
		brcmf_dbg(DATA, "processed %d frames\n", rxcount);
	/* Back off rxseq if awaiting rtx, update rx_seq */
	if (bus->rxskip)
		rd->seq_num--;
	bus->rx_seq = rd->seq_num;

	return rxcount;
}
static void
brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
	if (waitqueue_active(&bus->ctrl_wait))
		wake_up_interruptible(&bus->ctrl_wait);
	return;
}
/* flag marking a dummy skb added for DMA alignment requirement */
#define DUMMY_SKB_FLAG		0x10000
/* bit mask of data length chopped from the previous packet */
#define DUMMY_SKB_CHOP_LEN_MASK	0xffff
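
/* How these two values are used (see brcmf_sdio_txpkt_prep() and
 * brcmf_sdio_txpkt_postp() below): when tail padding cannot be appended to a
 * packet, the trailing tail_chop bytes are moved into a small spare skb and
 * that skb's cb[] area is stamped with DUMMY_SKB_FLAG + tail_chop; the
 * post-processing pass recognises the flag, copies the chopped bytes back
 * onto the previous packet and frees the dummy skb.
 */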
/**
 * brcmf_sdio_txpkt_prep - packet preparation for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 * @chan: virtual channel to transmit the packet
 *
 * Processes to be applied to the packet
 *	- Align data buffer pointer
 *	- Align data buffer length
 * Return: negative value if there is error
 */
static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
		      uint chan)
{
	u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
	int ntail;
	struct sk_buff *pkt_next, *pkt_new;
	u8 *dat_buf;
	unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
	struct brcmf_sdio_hdrinfo hd_info = {0};

	/* SDIO ADMA requires at least 32 bit alignment */
	head_align = 4;
	sg_align = 4;
	if (bus->sdiodev->pdata) {
		head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
			     bus->sdiodev->pdata->sd_head_align : 4;
		sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
			   bus->sdiodev->pdata->sd_sgentry_align : 4;
	}
	/* sg entry alignment should be a divisor of block size */
	WARN_ON(blksize % sg_align);

	pkt_next = pktq->next;
	dat_buf = (u8 *)(pkt_next->data);

	/* Check head padding */
	head_pad = ((unsigned long)dat_buf % head_align);
	if (head_pad) {
		if (skb_headroom(pkt_next) < head_pad) {
			bus->sdiodev->bus_if->tx_realloc++;
			if (skb_cow(pkt_next, head_pad))
				return -ENOMEM;
		}
		skb_push(pkt_next, head_pad);
		dat_buf = (u8 *)(pkt_next->data);
		memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
	}

	/* Check tail padding */
	pkt_new = NULL;
	tail_chop = pkt_next->len % sg_align;
	tail_pad = sg_align - tail_chop;
	tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
	if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
		pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
		if (pkt_new == NULL)
			return -ENOMEM;
		memcpy(pkt_new->data,
		       pkt_next->data + pkt_next->len - tail_chop,
		       tail_chop);
		*(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
		skb_trim(pkt_next, pkt_next->len - tail_chop);
		__skb_queue_after(pktq, pkt_next, pkt_new);
	} else {
		ntail = pkt_next->data_len + tail_pad -
			(pkt_next->end - pkt_next->tail);
		if (skb_cloned(pkt_next) || ntail > 0)
			if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
				return -ENOMEM;
		if (skb_linearize(pkt_next))
			return -ENOMEM;
		dat_buf = (u8 *)(pkt_next->data);
		__skb_put(pkt_next, tail_pad);
	}

	/* Now prep the header */
	if (pkt_new)
		hd_info.len = pkt_next->len + tail_chop;
	else
		hd_info.len = pkt_next->len - tail_pad;
	hd_info.channel = chan;
	hd_info.dat_offset = head_pad + bus->tx_hdrlen;
	brcmf_sdio_hdpack(bus, dat_buf, &hd_info);

	if (BRCMF_BYTES_ON() &&
	    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
	     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
		brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
	else if (BRCMF_HDRS_ON())
		brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
				   "Tx Header:\n");

	return 0;
}
/**
 * brcmf_sdio_txpkt_postp - packet post processing for transmit
 * @bus: brcmf_sdio structure pointer
 * @pktq: packet list pointer
 *
 * Processes to be applied to the packet
 *	- Remove head padding
 *	- Remove tail padding
 */
static void
brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
{
	u8 *hdr;
	u32 dat_offset;
	u32 dummy_flags, chop_len;
	struct sk_buff *pkt_next, *tmp, *pkt_prev;

	skb_queue_walk_safe(pktq, pkt_next, tmp) {
		dummy_flags = *(u32 *)(pkt_next->cb);
		if (dummy_flags & DUMMY_SKB_FLAG) {
			chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
			if (chop_len) {
				pkt_prev = pkt_next->prev;
				memcpy(pkt_prev->data + pkt_prev->len,
				       pkt_next->data, chop_len);
				skb_put(pkt_prev, chop_len);
			}
			__skb_unlink(pkt_next, pktq);
			brcmu_pkt_buf_free_skb(pkt_next);
		} else {
			hdr = pkt_next->data + SDPCM_HWHDR_LEN;
			dat_offset = le32_to_cpu(*(__le32 *)hdr);
			dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
				     SDPCM_DOFFSET_SHIFT;
			skb_pull(pkt_next, dat_offset);
		}
	}
}
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
			      uint chan)
{
	int ret;
	int i;
	u8 hi, lo;
	struct sk_buff_head localq;

	brcmf_dbg(TRACE, "Enter\n");

	__skb_queue_head_init(&localq);
	__skb_queue_tail(&localq, pkt);
	ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
	if (ret)
		goto done;

	sdio_claim_host(bus->sdiodev->func[1]);
	ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
				    SDIO_FUNC_2, F2SYNC, &localq);
	bus->sdcnt.f2txdata++;

	if (ret < 0) {
		/* On failure, abort the command and terminate the frame */
		brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
			  ret);
		bus->sdcnt.tx_sderrs++;

		brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
				 SFC_WF_TERM, NULL);
		bus->sdcnt.f1regdata++;

		for (i = 0; i < 3; i++) {
			hi = brcmf_sdio_regrb(bus->sdiodev,
					      SBSDIO_FUNC1_WFRAMEBCHI, NULL);
			lo = brcmf_sdio_regrb(bus->sdiodev,
					      SBSDIO_FUNC1_WFRAMEBCLO, NULL);
			bus->sdcnt.f1regdata += 2;
			if ((hi == 0) && (lo == 0))
				break;
		}
	}
	sdio_release_host(bus->sdiodev->func[1]);
	if (ret == 0)
		bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;

done:
	brcmf_sdio_txpkt_postp(bus, &localq);
	__skb_dequeue_tail(&localq);
	brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
	return ret;
}
static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
	struct sk_buff *pkt;
	u32 intstatus = 0;
	int ret = 0, prec_out;
	uint cnt = 0;
	u8 tx_prec_map;

	brcmf_dbg(TRACE, "Enter\n");

	tx_prec_map = ~bus->flowcontrol;

	/* Send frames until the limit or some other event */
	for (cnt = 0; (cnt < maxframes) && data_ok(bus); cnt++) {
		spin_lock_bh(&bus->txqlock);
		pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
		if (pkt == NULL) {
			spin_unlock_bh(&bus->txqlock);
			break;
		}
		spin_unlock_bh(&bus->txqlock);

		ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);

		/* In poll mode, need to check for other events */
		if (!bus->intr && cnt) {
			/* Check device status, signal pending interrupt */
			sdio_claim_host(bus->sdiodev->func[1]);
			ret = r_sdreg32(bus, &intstatus,
					offsetof(struct sdpcmd_regs,
						 intstatus));
			sdio_release_host(bus->sdiodev->func[1]);
			bus->sdcnt.f2txdata++;
			if (ret != 0)
				break;
			if (intstatus & bus->hostintmask)
				atomic_set(&bus->ipend, 1);
		}
	}

	/* Deflow-control stack if needed */
	if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
	    bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
		bus->txoff = false;
		brcmf_txflowblock(bus->sdiodev->dev, false);
	}

	return cnt;
}
static void brcmf_sdbrcm_bus_stop(struct device *dev)
{
	u32 local_hostintmask;
	u8 saveclk;
	int err;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus->watchdog_tsk) {
		send_sig(SIGTERM, bus->watchdog_tsk, 1);
		kthread_stop(bus->watchdog_tsk);
		bus->watchdog_tsk = NULL;
	}

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Enable clock for device interrupts */
	brcmf_sdbrcm_bus_sleep(bus, false, false);

	/* Disable and clear interrupts at the chip level also */
	w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask));
	local_hostintmask = bus->hostintmask;
	bus->hostintmask = 0;

	/* Change our idea of bus state */
	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;

	/* Force clocks on backplane to be sure F2 interrupt propagates */
	saveclk = brcmf_sdio_regrb(bus->sdiodev,
				   SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err) {
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				 (saveclk | SBSDIO_FORCE_HT), &err);
	}
	if (err)
		brcmf_err("Failed to force clock for F2: err %d\n", err);

	/* Turn off the bus (F2), free any pending packets */
	brcmf_dbg(INTR, "disable SDIO interrupts\n");
	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, SDIO_FUNC_ENABLE_1,
			 NULL);

	/* Clear any pending interrupts now that F2 is disabled */
	w_sdreg32(bus, local_hostintmask,
		  offsetof(struct sdpcmd_regs, intstatus));

	/* Turn off the backplane clock (only) */
	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
	sdio_release_host(bus->sdiodev->func[1]);

	/* Clear the data packet queues */
	brcmu_pktq_flush(&bus->txq, true, NULL, NULL);

	/* Clear any held glomming stuff */
	if (bus->glomd)
		brcmu_pkt_buf_free_skb(bus->glomd);
	brcmf_sdbrcm_free_glom(bus);

	/* Clear rx control and wake any waiters */
	spin_lock_bh(&bus->rxctl_lock);
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	brcmf_sdbrcm_dcmd_resp_wake(bus);

	/* Reset some F2 state stuff */
	bus->rxskip = false;
	bus->tx_seq = bus->rx_seq = 0;
}
static inline void brcmf_sdbrcm_clrintr(struct brcmf_sdio *bus)
{
	unsigned long flags;

	if (bus->sdiodev->oob_irq_requested) {
		spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags);
		if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) {
			enable_irq(bus->sdiodev->pdata->oob_irq_nr);
			bus->sdiodev->irq_en = true;
		}
		spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags);
	}
}
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
	u8 idx;
	u32 addr;
	unsigned long val;
	int n, ret;

	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	addr = bus->ci->c_inf[idx].base +
	       offsetof(struct sdpcmd_regs, intstatus);

	ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, false);
	bus->sdcnt.f1regdata++;
	if (ret != 0)
		return ret;

	val &= bus->hostintmask;
	atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));

	/* Clear interrupts */
	if (val) {
		ret = brcmf_sdio_regrw_helper(bus->sdiodev, addr, &val, true);
		bus->sdcnt.f1regdata++;
	}

	if (ret) {
		atomic_set(&bus->intstatus, 0);
	} else if (val) {
		for_each_set_bit(n, &val, 32)
			set_bit(n, (unsigned long *)&bus->intstatus.counter);
	}

	return ret;
}
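/* Deferred procedure call: services device interrupts, reads and sends
 * frames, and marks itself for rescheduling while work remains pending.
 */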
static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
	u32 newstatus = 0;
	unsigned long intstatus;
	uint rxlimit = bus->rxbound;	/* Rx frames to read before resched */
	uint txlimit = bus->txbound;	/* Tx frames to send before resched */
	uint framecnt = 0;		/* Temporary counter of tx/rx frames */
	int err = 0, n;

	brcmf_dbg(TRACE, "Enter\n");

	sdio_claim_host(bus->sdiodev->func[1]);

	/* If waiting for HTAVAIL, check status */
	if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
		u8 clkctl, devctl = 0;

		/* Check for inconsistent device control */
		devctl = brcmf_sdio_regrb(bus->sdiodev,
					  SBSDIO_DEVICE_CTL, &err);
		if (err) {
			brcmf_err("error reading DEVCTL: %d\n", err);
			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
		}

		/* Read CSR, if clock on switch to AVAIL, else ignore */
		clkctl = brcmf_sdio_regrb(bus->sdiodev,
					  SBSDIO_FUNC1_CHIPCLKCSR, &err);
		if (err) {
			brcmf_err("error reading CSR: %d\n", err);
			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
		}

		brcmf_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
			  devctl, clkctl);

		if (SBSDIO_HTAV(clkctl)) {
			devctl = brcmf_sdio_regrb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			if (err) {
				brcmf_err("error reading DEVCTL: %d\n", err);
				bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
			}
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					 devctl, &err);
			if (err) {
				brcmf_err("error writing DEVCTL: %d\n", err);
				bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
			}
			bus->clkstate = CLK_AVAIL;
		}
	}

	/* Make sure backplane clock is on */
	brcmf_sdbrcm_bus_sleep(bus, false, true);

	/* Pending interrupt indicates new device status */
	if (atomic_read(&bus->ipend) > 0) {
		atomic_set(&bus->ipend, 0);
		err = brcmf_sdio_intr_rstatus(bus);
	}

	/* Start with leftover status bits */
	intstatus = atomic_xchg(&bus->intstatus, 0);

	/* Handle flow-control change: read new state in case our ack
	 * crossed another change interrupt.  If change still set, assume
	 * FC ON for safety, let next loop through do the debounce.
	 */
	if (intstatus & I_HMB_FC_CHANGE) {
		intstatus &= ~I_HMB_FC_CHANGE;
		err = w_sdreg32(bus, I_HMB_FC_CHANGE,
				offsetof(struct sdpcmd_regs, intstatus));

		err = r_sdreg32(bus, &newstatus,
				offsetof(struct sdpcmd_regs, intstatus));
		bus->sdcnt.f1regdata += 2;
		atomic_set(&bus->fcstate,
			   !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
		intstatus |= (newstatus & bus->hostintmask);
	}

	/* Handle host mailbox indication */
	if (intstatus & I_HMB_HOST_INT) {
		intstatus &= ~I_HMB_HOST_INT;
		intstatus |= brcmf_sdbrcm_hostmail(bus);
	}

	sdio_release_host(bus->sdiodev->func[1]);

	/* Generally don't ask for these, can get CRC errors... */
	if (intstatus & I_WR_OOSYNC) {
		brcmf_err("Dongle reports WR_OOSYNC\n");
		intstatus &= ~I_WR_OOSYNC;
	}

	if (intstatus & I_RD_OOSYNC) {
		brcmf_err("Dongle reports RD_OOSYNC\n");
		intstatus &= ~I_RD_OOSYNC;
	}

	if (intstatus & I_SBINT) {
		brcmf_err("Dongle reports SBINT\n");
		intstatus &= ~I_SBINT;
	}

	/* Would be active due to wake-wlan in gSPI */
	if (intstatus & I_CHIPACTIVE) {
		brcmf_dbg(INFO, "Dongle reports CHIPACTIVE\n");
		intstatus &= ~I_CHIPACTIVE;
	}

	/* Ignore frame indications if rxskip is set */
	if (bus->rxskip)
		intstatus &= ~I_HMB_FRAME_IND;

	/* On frame indication, read available frames */
	if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
		framecnt = brcmf_sdio_readframes(bus, rxlimit);
		if (!bus->rxpending)
			intstatus &= ~I_HMB_FRAME_IND;
		rxlimit -= min(framecnt, rxlimit);
	}

	/* Keep still-pending events for next scheduling */
	if (intstatus)
		for_each_set_bit(n, &intstatus, 32)
			set_bit(n, (unsigned long *)&bus->intstatus.counter);

	brcmf_sdbrcm_clrintr(bus);

	if (data_ok(bus) && bus->ctrl_frame_stat &&
	    (bus->clkstate == CLK_AVAIL)) {
		int i;

		sdio_claim_host(bus->sdiodev->func[1]);
		err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
					    SDIO_FUNC_2, F2SYNC,
					    bus->ctrl_frame_buf,
					    (u32)bus->ctrl_frame_len);

		if (err < 0) {
			/* On failure, abort the command and
			   terminate the frame */
			brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
				  err);
			bus->sdcnt.tx_sderrs++;

			brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);

			brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
					 SFC_WF_TERM, &err);
			bus->sdcnt.f1regdata++;

			for (i = 0; i < 3; i++) {
				u8 hi, lo;
				hi = brcmf_sdio_regrb(bus->sdiodev,
						      SBSDIO_FUNC1_WFRAMEBCHI,
						      &err);
				lo = brcmf_sdio_regrb(bus->sdiodev,
						      SBSDIO_FUNC1_WFRAMEBCLO,
						      &err);
				bus->sdcnt.f1regdata += 2;
				if ((hi == 0) && (lo == 0))
					break;
			}
		} else {
			bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
		}
		sdio_release_host(bus->sdiodev->func[1]);
		bus->ctrl_frame_stat = false;
		brcmf_sdbrcm_wait_event_wakeup(bus);
	}
	/* Send queued frames (limit 1 if rx may still be pending) */
	else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
		 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
		 data_ok(bus)) {
		framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
					    txlimit;
		framecnt = brcmf_sdbrcm_sendfromq(bus, framecnt);
		txlimit -= framecnt;
	}

	if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
		brcmf_err("failed backplane access over SDIO, halting operation\n");
		bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
		atomic_set(&bus->intstatus, 0);
	} else if (atomic_read(&bus->intstatus) ||
		   atomic_read(&bus->ipend) > 0 ||
		   (!atomic_read(&bus->fcstate) &&
		    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
		    data_ok(bus)) || PKT_AVAILABLE()) {
		atomic_inc(&bus->dpc_tskcnt);
	}

	/* If we're done for now, turn off clock request. */
	if ((bus->clkstate != CLK_PENDING)
	    && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
		bus->activity = false;
		brcmf_dbg(SDIO, "idle state\n");
		sdio_claim_host(bus->sdiodev->func[1]);
		brcmf_sdbrcm_bus_sleep(bus, true, false);
		sdio_release_host(bus->sdiodev->func[1]);
	}
}
static struct pktq *brcmf_sdbrcm_bus_gettxq(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	return &bus->txq;
}
static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
	int ret = -EBADE;
	uint prec;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	ulong flags;

	brcmf_dbg(TRACE, "Enter\n");

	/* Add space for the header */
	skb_push(pkt, bus->tx_hdrlen);
	/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */

	prec = prio2prec((pkt->priority & PRIOMASK));

	/* Check for existing queue, current flow-control,
	   pending event, or pending clock */
	brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
	bus->sdcnt.fcqueued++;

	/* Priority based enq */
	spin_lock_irqsave(&bus->txqlock, flags);
	if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
		skb_pull(pkt, bus->tx_hdrlen);
		brcmf_err("out of bus->txq !!!\n");
		ret = -ENOSR;
	} else {
		ret = 0;
	}

	if (pktq_len(&bus->txq) >= TXHI) {
		bus->txoff = true;
		brcmf_txflowblock(bus->sdiodev->dev, true);
	}
	spin_unlock_irqrestore(&bus->txqlock, flags);

#ifdef DEBUG
	if (pktq_plen(&bus->txq, prec) > qcount[prec])
		qcount[prec] = pktq_plen(&bus->txq, prec);
#endif

	if (atomic_read(&bus->dpc_tskcnt) == 0) {
		atomic_inc(&bus->dpc_tskcnt);
		queue_work(bus->brcmf_wq, &bus->datawork);
	}

	return ret;
}
#ifdef DEBUG
#define CONSOLE_LINE_MAX	192

static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
{
	struct brcmf_console *c = &bus->console;
	u8 line[CONSOLE_LINE_MAX], ch;
	u32 n, idx, addr;
	int rv;

	/* Don't do anything until FWREADY updates console address */
	if (bus->console_addr == 0)
		return 0;

	/* Read console log struct */
	addr = bus->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le,
			      sizeof(c->log_le));
	if (rv < 0)
		return rv;

	/* Allocate console buffer (one time only) */
	if (c->buf == NULL) {
		c->bufsize = le32_to_cpu(c->log_le.buf_size);
		c->buf = kmalloc(c->bufsize, GFP_ATOMIC);
		if (c->buf == NULL)
			return -ENOMEM;
	}

	idx = le32_to_cpu(c->log_le.idx);

	/* Protect against corrupt value */
	if (idx > c->bufsize)
		return -EBADE;

	/* Skip reading the console buffer if the index pointer
	   has not moved */
	if (idx == c->last)
		return 0;

	/* Read the console buffer */
	addr = le32_to_cpu(c->log_le.buf);
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize);
	if (rv < 0)
		return rv;

	while (c->last != idx) {
		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
			if (c->last == idx) {
				/* This would output a partial line.
				 * Instead, back up
				 * the buffer pointer and output this
				 * line next time around.
				 */
				if (c->last >= n)
					c->last -= n;
				else
					c->last = c->bufsize - n;
				goto break2;
			}
			ch = c->buf[c->last];
			c->last = (c->last + 1) % c->bufsize;
			if (ch == '\n')
				break;
			line[n] = ch;
		}

		if (n > 0) {
			if (line[n - 1] == '\r')
				n--;
			line[n] = 0;
			pr_debug("CONSOLE: %s\n", line);
		}
	}
break2:

	return 0;
}
#endif				/* DEBUG */
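/* Send a single prepared frame on F2; on error abort the command and
 * terminate the partial frame in the device.
 */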
static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
	int i;
	int ret;

	bus->ctrl_frame_stat = false;
	ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
				    SDIO_FUNC_2, F2SYNC, frame, len);

	if (ret < 0) {
		/* On failure, abort the command and terminate the frame */
		brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
			  ret);
		bus->sdcnt.tx_sderrs++;

		brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);

		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
				 SFC_WF_TERM, NULL);
		bus->sdcnt.f1regdata++;

		for (i = 0; i < 3; i++) {
			u8 hi, lo;
			hi = brcmf_sdio_regrb(bus->sdiodev,
					      SBSDIO_FUNC1_WFRAMEBCHI, NULL);
			lo = brcmf_sdio_regrb(bus->sdiodev,
					      SBSDIO_FUNC1_WFRAMEBCLO, NULL);
			bus->sdcnt.f1regdata += 2;
			if (hi == 0 && lo == 0)
				break;
		}
		return ret;
	}

	bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;

	return 0;
}
static int
brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
	u8 *frame;
	u16 len;
	uint retries = 0;
	u8 doff = 0;
	int ret = -1;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	struct brcmf_sdio_hdrinfo hd_info = {0};

	brcmf_dbg(TRACE, "Enter\n");

	/* Back the pointer to make room for the bus header */
	frame = msg - bus->tx_hdrlen;
	len = (msglen += bus->tx_hdrlen);

	/* Add alignment padding (optional for ctl frames) */
	doff = ((unsigned long)frame % BRCMF_SDALIGN);
	if (doff) {
		frame -= doff;
		len += doff;
		msglen += doff;
		memset(frame, 0, doff + bus->tx_hdrlen);
	}
	/* precondition: doff < BRCMF_SDALIGN */
	doff += bus->tx_hdrlen;

	/* Round send length to next SDIO block */
	if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
		u16 pad = bus->blocksize - (len % bus->blocksize);
		if ((pad <= bus->roundup) && (pad < bus->blocksize))
			len += pad;
	} else if (len % BRCMF_SDALIGN) {
		len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
	}

	/* Satisfy length-alignment requirements */
	if (len & (ALIGNMENT - 1))
		len = roundup(len, ALIGNMENT);

	/* precondition: IS_ALIGNED((unsigned long)frame, 2) */

	/* Make sure backplane clock is on */
	sdio_claim_host(bus->sdiodev->func[1]);
	brcmf_sdbrcm_bus_sleep(bus, false, false);
	sdio_release_host(bus->sdiodev->func[1]);

	hd_info.len = (u16)msglen;
	hd_info.channel = SDPCM_CONTROL_CHANNEL;
	hd_info.dat_offset = doff;
	brcmf_sdio_hdpack(bus, frame, &hd_info);

	if (!data_ok(bus)) {
		brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
			  bus->tx_max, bus->tx_seq);
		bus->ctrl_frame_stat = true;
		bus->ctrl_frame_buf = frame;
		bus->ctrl_frame_len = len;

		wait_event_interruptible_timeout(bus->ctrl_wait,
						 !bus->ctrl_frame_stat,
						 msecs_to_jiffies(2000));

		if (!bus->ctrl_frame_stat) {
			brcmf_dbg(SDIO, "ctrl_frame_stat == false\n");
			ret = 0;
		} else {
			brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
			ret = -1;
		}
	}

	if (ret == -1) {
		brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
				   frame, len, "Tx Frame:\n");
		brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
				   BRCMF_HDRS_ON(),
				   frame, min_t(u16, len, 16), "TxHdr:\n");

		do {
			sdio_claim_host(bus->sdiodev->func[1]);
			ret = brcmf_tx_frame(bus, frame, len);
			sdio_release_host(bus->sdiodev->func[1]);
		} while (ret < 0 && retries++ < TXRETRIES);
	}

	if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
	    atomic_read(&bus->dpc_tskcnt) == 0) {
		bus->activity = false;
		sdio_claim_host(bus->sdiodev->func[1]);
		brcmf_dbg(INFO, "idle\n");
		brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
		sdio_release_host(bus->sdiodev->func[1]);
	}

	if (ret)
		bus->sdcnt.tx_ctlerrs++;
	else
		bus->sdcnt.tx_ctlpkts++;

	return ret ? -EIO : 0;
}
#ifdef DEBUG
static inline bool brcmf_sdio_valid_shared_address(u32 addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}
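/* Locate and read the sdpcm_shared structure that the firmware places
 * near the end of socram, converting its fields to host byte order.
 */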
static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
				 struct sdpcm_shared *sh)
{
	u32 addr;
	int rv;
	u32 shaddr = 0;
	struct sdpcm_shared_le sh_le;
	__le32 addr_le;

	shaddr = bus->ci->rambase + bus->ramsize - 4;

	/*
	 * Read last word in socram to determine
	 * address of sdpcm_shared structure
	 */
	sdio_claim_host(bus->sdiodev->func[1]);
	brcmf_sdbrcm_bus_sleep(bus, false, false);
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
	sdio_release_host(bus->sdiodev->func[1]);
	if (rv < 0)
		return rv;

	addr = le32_to_cpu(addr_le);

	brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);

	/*
	 * Check if addr is valid.
	 * NVRAM length at the end of memory should have been overwritten.
	 */
	if (!brcmf_sdio_valid_shared_address(addr)) {
		brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
		return -EINVAL;
	}

	/* Read hndrte_shared structure */
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
			      sizeof(struct sdpcm_shared_le));
	if (rv < 0)
		return rv;

	/* Endianness */
	sh->flags = le32_to_cpu(sh_le.flags);
	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
	sh->assert_line = le32_to_cpu(sh_le.assert_line);
	sh->console_addr = le32_to_cpu(sh_le.console_addr);
	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);

	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
		brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
			  SDPCM_SHARED_VERSION,
			  sh->flags & SDPCM_SHARED_VERSION_MASK);
		return -EPROTO;
	}

	return 0;
}
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
				   struct sdpcm_shared *sh, char __user *data,
				   size_t count)
{
	u32 addr, console_ptr, console_size, console_index;
	char *conbuf = NULL;
	__le32 sh_val;
	int rv;
	loff_t pos = 0;
	int nbytes = 0;

	/* obtain console information from device memory */
	addr = sh->console_addr + offsetof(struct rte_console, log_le);
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
			      (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_ptr = le32_to_cpu(sh_val);

	addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
			      (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_size = le32_to_cpu(sh_val);

	addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, addr,
			      (u8 *)&sh_val, sizeof(u32));
	if (rv < 0)
		return rv;
	console_index = le32_to_cpu(sh_val);

	/* allocate buffer for console data */
	if (console_size <= CONSOLE_BUFFER_MAX)
		conbuf = vzalloc(console_size+1);

	if (!conbuf)
		return -ENOMEM;

	/* obtain the console data from device */
	conbuf[console_size] = '\0';
	rv = brcmf_sdio_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf,
			      console_size);
	if (rv < 0)
		goto done;

	rv = simple_read_from_buffer(data, count, &pos,
				     conbuf + console_index,
				     console_size - console_index);
	if (rv < 0)
		goto done;

	nbytes = rv;
	if (console_index > 0) {
		pos = 0;
		rv = simple_read_from_buffer(data+nbytes, count, &pos,
					     conbuf, console_index - 1);
		if (rv < 0)
			goto done;
		rv += nbytes;
	}
done:
	vfree(conbuf);
	return rv;
}
static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
				char __user *data, size_t count)
{
	int error, res;
	char buf[350];
	struct brcmf_trap_info tr;
	loff_t pos = 0;

	if ((sh->flags & SDPCM_SHARED_TRAP) == 0) {
		brcmf_dbg(INFO, "no trap in firmware\n");
		return 0;
	}

	error = brcmf_sdio_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr,
				 sizeof(struct brcmf_trap_info));
	if (error < 0)
		return error;

	res = scnprintf(buf, sizeof(buf),
			"dongle trap info: type 0x%x @ epc 0x%08x\n"
			"  cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
			"  lr   0x%08x pc   0x%08x offset 0x%x\n"
			"  r0   0x%08x r1   0x%08x r2 0x%08x r3 0x%08x\n"
			"  r4   0x%08x r5   0x%08x r6 0x%08x r7 0x%08x\n",
			le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
			le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
			le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
			le32_to_cpu(tr.pc), sh->trap_addr,
			le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
			le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
			le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
			le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));

	return simple_read_from_buffer(data, count, &pos, buf, res);
}
static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
				  struct sdpcm_shared *sh, char __user *data,
				  size_t count)
{
	int error = 0;
	char buf[200];
	char file[80] = "?";
	char expr[80] = "<???>";
	int res;
	loff_t pos = 0;

	if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
		brcmf_dbg(INFO, "firmware not built with -assert\n");
		return 0;
	} else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
		brcmf_dbg(INFO, "no assert in dongle\n");
		return 0;
	}

	sdio_claim_host(bus->sdiodev->func[1]);
	if (sh->assert_file_addr != 0) {
		error = brcmf_sdio_ramrw(bus->sdiodev, false,
					 sh->assert_file_addr, (u8 *)file, 80);
		if (error < 0)
			return error;
	}
	if (sh->assert_exp_addr != 0) {
		error = brcmf_sdio_ramrw(bus->sdiodev, false,
					 sh->assert_exp_addr, (u8 *)expr, 80);
		if (error < 0)
			return error;
	}
	sdio_release_host(bus->sdiodev->func[1]);

	res = scnprintf(buf, sizeof(buf),
			"dongle assert: %s:%d: assert(%s)\n",
			file, sh->assert_line, expr);
	return simple_read_from_buffer(data, count, &pos, buf, res);
}
static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
{
	int error;
	struct sdpcm_shared sh;

	error = brcmf_sdio_readshared(bus, &sh);
	if (error < 0)
		return error;

	if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
		brcmf_dbg(INFO, "firmware not built with -assert\n");
	else if (sh.flags & SDPCM_SHARED_ASSERT)
		brcmf_err("assertion in dongle\n");

	if (sh.flags & SDPCM_SHARED_TRAP)
		brcmf_err("firmware trap in dongle\n");

	return 0;
}
static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
				  size_t count, loff_t *ppos)
{
	int error = 0;
	struct sdpcm_shared sh;
	int nbytes = 0;
	loff_t pos = *ppos;

	if (pos != 0)
		return 0;

	error = brcmf_sdio_readshared(bus, &sh);
	if (error < 0)
		goto done;

	error = brcmf_sdio_assert_info(bus, &sh, data, count);
	if (error < 0)
		goto done;
	nbytes = error;

	error = brcmf_sdio_trap_info(bus, &sh, data+nbytes, count);
	if (error < 0)
		goto done;
	nbytes += error;

	error = brcmf_sdio_dump_console(bus, &sh, data+nbytes, count);
	if (error < 0)
		goto done;
	nbytes += error;

	error = nbytes;
	*ppos += nbytes;
done:
	return error;
}
static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
					size_t count, loff_t *ppos)
{
	struct brcmf_sdio *bus = f->private_data;
	int res;

	res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
	return (ssize_t)res;
}
static const struct file_operations brcmf_sdio_forensic_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = brcmf_sdio_forensic_read
};

static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
{
	struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
	struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);

	if (IS_ERR_OR_NULL(dentry))
		return;

	debugfs_create_file("forensics", S_IRUGO, dentry, bus,
			    &brcmf_sdio_forensic_ops);
	brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
}
#else
static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
{
	return 0;
}

static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
{
}
#endif /* DEBUG */
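/* Receive a control frame: wait for the response, copy it to the caller
 * and release the receive buffer.
 */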
static int
brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
	int timeleft;
	uint rxlen = 0;
	bool pending;
	u8 *buf;
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;

	brcmf_dbg(TRACE, "Enter\n");

	/* Wait until control frame is available */
	timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);

	spin_lock_bh(&bus->rxctl_lock);
	rxlen = bus->rxlen;
	memcpy(msg, bus->rxctl, min(msglen, rxlen));
	bus->rxctl = NULL;
	buf = bus->rxctl_orig;
	bus->rxctl_orig = NULL;
	bus->rxlen = 0;
	spin_unlock_bh(&bus->rxctl_lock);
	vfree(buf);

	if (rxlen) {
		brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
			  rxlen, msglen);
	} else if (timeleft == 0) {
		brcmf_err("resumed on timeout\n");
		brcmf_sdbrcm_checkdied(bus);
	} else if (pending) {
		brcmf_dbg(CTL, "cancelled\n");
		return -ERESTARTSYS;
	} else {
		brcmf_dbg(CTL, "resumed for unknown reason?\n");
		brcmf_sdbrcm_checkdied(bus);
	}

	if (rxlen)
		bus->sdcnt.rx_ctlpkts++;
	else
		bus->sdcnt.rx_ctlerrs++;

	return rxlen ? (int)rxlen : -ETIMEDOUT;
}
static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
	struct chip_info *ci = bus->ci;

	/* To enter download state, disable ARM and reset SOCRAM.
	 * To exit download state, simply reset ARM (default is RAM boot).
	 */
	if (enter) {
		bus->alp_only = true;

		brcmf_sdio_chip_enter_download(bus->sdiodev, ci);
	} else {
		if (!brcmf_sdio_chip_exit_download(bus->sdiodev, ci, bus->vars,
						   bus->varsz))
			return false;

		/* Allow HT Clock now that the ARM is running. */
		bus->alp_only = false;

		bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
	}

	return true;
}
static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
	if (bus->firmware->size < bus->fw_ptr + len)
		len = bus->firmware->size - bus->fw_ptr;

	memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
	bus->fw_ptr += len;
	return len;
}
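/* Stream the firmware image into device RAM in MEMBLOCK-sized,
 * BRCMF_SDALIGN-aligned chunks, capturing the CR4 reset vector if present.
 */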
static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
	int offset;
	uint len;
	u8 *memblock = NULL, *memptr;
	int ret;
	u8 idx;

	brcmf_dbg(INFO, "Enter\n");

	ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
			       &bus->sdiodev->func[2]->dev);
	if (ret) {
		brcmf_err("Fail to request firmware %d\n", ret);
		return ret;
	}
	bus->fw_ptr = 0;

	memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
	if (memblock == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
		memptr += (BRCMF_SDALIGN -
			   ((u32)(unsigned long)memblock % BRCMF_SDALIGN));

	offset = bus->ci->rambase;

	/* Download image */
	len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
	if (BRCMF_MAX_CORENUM != idx)
		memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
	while (len) {
		ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
		if (ret) {
			brcmf_err("error %d on writing %d membytes at 0x%08x\n",
				  ret, MEMBLOCK, offset);
			goto err;
		}

		offset += MEMBLOCK;
		len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
	}

err:
	kfree(memblock);

	release_firmware(bus->firmware);
	bus->fw_ptr = 0;

	return ret;
}
/*
 * ProcessVars: Takes a buffer of "<var>=<value>\n" lines read from a file
 * and ending in a NUL.
 * Removes carriage returns, empty lines, comment lines, and converts
 * newlines to NULs.
 * Shortens buffer as needed and pads with NULs.  End of buffer is marked
 * by two NULs.
 */
static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
{
	char *varbuf;
	char *dp;
	bool findNewline;
	int column;
	int ret = 0;
	uint buf_len, n, len;

	len = bus->firmware->size;
	varbuf = vmalloc(len);
	if (!varbuf)
		return -ENOMEM;

	memcpy(varbuf, bus->firmware->data, len);
	dp = varbuf;

	findNewline = false;
	column = 0;

	for (n = 0; n < len; n++) {
		if (varbuf[n] == 0)
			break;
		if (varbuf[n] == '\r')
			continue;
		if (findNewline && varbuf[n] != '\n')
			continue;
		findNewline = false;
		if (varbuf[n] == '#') {
			findNewline = true;
			continue;
		}
		if (varbuf[n] == '\n') {
			if (column == 0)
				continue;
			*dp++ = 0;
			column = 0;
			continue;
		}
		*dp++ = varbuf[n];
		column++;
	}
	buf_len = dp - varbuf;
	while (dp < varbuf + n)
		*dp++ = 0;

	kfree(bus->vars);
	/* roundup needed for download to device */
	bus->varsz = roundup(buf_len + 1, 4);
	bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
	if (bus->vars == NULL) {
		bus->varsz = 0;
		ret = -ENOMEM;
		goto err;
	}

	/* copy the processed variables and add null termination */
	memcpy(bus->vars, varbuf, buf_len);
	bus->vars[buf_len] = 0;
err:
	vfree(varbuf);
	return ret;
}
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
	int ret;

	ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
			       &bus->sdiodev->func[2]->dev);
	if (ret) {
		brcmf_err("Fail to request nvram %d\n", ret);
		return ret;
	}

	ret = brcmf_process_nvram_vars(bus);

	release_firmware(bus->firmware);

	return ret;
}
static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
	int bcmerror = -1;

	/* Keep arm in reset */
	if (!brcmf_sdbrcm_download_state(bus, true)) {
		brcmf_err("error placing ARM core in reset\n");
		goto err;
	}

	if (brcmf_sdbrcm_download_code_file(bus)) {
		brcmf_err("dongle image file download failed\n");
		goto err;
	}

	if (brcmf_sdbrcm_download_nvram(bus)) {
		brcmf_err("dongle nvram file download failed\n");
		goto err;
	}

	/* Take arm out of reset */
	if (!brcmf_sdbrcm_download_state(bus, false)) {
		brcmf_err("error getting out of ARM core reset\n");
		goto err;
	}

	bcmerror = 0;

err:
	return bcmerror;
}
static bool brcmf_sdbrcm_sr_capable(struct brcmf_sdio *bus)
{
	u32 addr, reg;

	brcmf_dbg(TRACE, "Enter\n");

	/* old chips with PMU version less than 17 don't support save restore */
	if (bus->ci->pmurev < 17)
		return false;

	/* read PMU chipcontrol register 3 */
	addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
	brcmf_sdio_regwl(bus->sdiodev, addr, 3, NULL);
	addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
	reg = brcmf_sdio_regrl(bus->sdiodev, addr, NULL);

	return (bool)reg;
}
static void brcmf_sdbrcm_sr_init(struct brcmf_sdio *bus)
{
	int err = 0;
	u8 val;

	brcmf_dbg(TRACE, "Enter\n");

	val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
	if (err) {
		brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
		return;
	}

	val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
	if (err) {
		brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
		return;
	}

	/* Add CMD14 Support */
	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
			 (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
			  SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
			 &err);
	if (err) {
		brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
		return;
	}

	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			 SBSDIO_FORCE_HT, &err);
	if (err) {
		brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
		return;
	}

	bus->sr_enabled = true;
	brcmf_dbg(INFO, "SR enabled\n");
}
/* enable KSO bit */
static int brcmf_sdbrcm_kso_init(struct brcmf_sdio *bus)
{
	u8 val;
	int err = 0;

	brcmf_dbg(TRACE, "Enter\n");

	/* KSO bit added in SDIO core rev 12 */
	if (bus->ci->c_inf[1].rev < 12)
		return 0;

	val = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
	if (err) {
		brcmf_err("error reading SBSDIO_FUNC1_SLEEPCSR\n");
		return err;
	}

	if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) {
		val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN <<
			SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
				 val, &err);
		if (err) {
			brcmf_err("error writing SBSDIO_FUNC1_SLEEPCSR\n");
			return err;
		}
	}

	return 0;
}
static bool
brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
	bool ret;

	sdio_claim_host(bus->sdiodev->func[1]);

	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);

	ret = _brcmf_sdbrcm_download_firmware(bus) == 0;

	brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);

	sdio_release_host(bus->sdiodev->func[1]);

	return ret;
}
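/* Bring the bus up: download firmware if needed, start the watchdog,
 * enable F2 and the host interrupt mask, and set up save/restore.
 */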
static int brcmf_sdbrcm_bus_init(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct brcmf_sdio *bus = sdiodev->bus;
	unsigned long timeout;
	u8 ready, enable;
	int err, ret = 0;
	u8 saveclk;

	brcmf_dbg(TRACE, "Enter\n");

	/* try to download image and nvram to the dongle */
	if (bus_if->state == BRCMF_BUS_DOWN) {
		if (!(brcmf_sdbrcm_download_firmware(bus)))
			return -1;
	}

	if (!bus->sdiodev->bus_if->drvr)
		return 0;

	/* Start the watchdog timer */
	bus->sdcnt.tickcnt = 0;
	brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Make sure backplane clock is on, needed to generate F2 interrupt */
	brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
	if (bus->clkstate != CLK_AVAIL)
		goto exit;

	/* Force clocks on backplane to be sure F2 interrupt propagates */
	saveclk = brcmf_sdio_regrb(bus->sdiodev,
				   SBSDIO_FUNC1_CHIPCLKCSR, &err);
	if (!err)
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				 (saveclk | SBSDIO_FORCE_HT), &err);
	if (err) {
		brcmf_err("Failed to force clock for F2: err %d\n", err);
		goto exit;
	}

	/* Enable function 2 (frame transfers) */
	w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
		  offsetof(struct sdpcmd_regs, tosbmailboxdata));
	enable = (SDIO_FUNC_ENABLE_1 | SDIO_FUNC_ENABLE_2);

	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);

	timeout = jiffies + msecs_to_jiffies(BRCMF_WAIT_F2RDY);
	ready = 0;
	while (enable != ready) {
		ready = brcmf_sdio_regrb(bus->sdiodev,
					 SDIO_CCCR_IORx, NULL);
		if (time_after(jiffies, timeout))
			break;
		else if (time_after(jiffies, timeout - BRCMF_WAIT_F2RDY + 50))
			/* prevent busy waiting if it takes too long */
			msleep_interruptible(20);
	}

	brcmf_dbg(INFO, "enable 0x%02x, ready 0x%02x\n", enable, ready);

	/* If F2 successfully enabled, set core and enable interrupts */
	if (ready == enable) {
		/* Set up the interrupt mask and enable interrupts */
		bus->hostintmask = HOSTINTMASK;
		w_sdreg32(bus, bus->hostintmask,
			  offsetof(struct sdpcmd_regs, hostintmask));

		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_WATERMARK, 8, &err);
	} else {
		/* Disable F2 again */
		enable = SDIO_FUNC_ENABLE_1;
		brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, enable, NULL);
		ret = -ENODEV;
	}

	if (brcmf_sdbrcm_sr_capable(bus)) {
		brcmf_sdbrcm_sr_init(bus);
	} else {
		/* Restore previous clock setting */
		brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				 saveclk, &err);
	}

	if (ret == 0) {
		ret = brcmf_sdio_intr_register(bus->sdiodev);
		if (ret != 0)
			brcmf_err("intr register failed:%d\n", ret);
	}

	/* If we didn't come up, turn off backplane clock */
	if (bus_if->state != BRCMF_BUS_DATA)
		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);

exit:
	sdio_release_host(bus->sdiodev->func[1]);

	return ret;
}
void brcmf_sdbrcm_isr(void *arg)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;

	brcmf_dbg(TRACE, "Enter\n");

	if (!bus) {
		brcmf_err("bus is null pointer, exiting\n");
		return;
	}

	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
		brcmf_err("bus is down. we have nothing to do\n");
		return;
	}
	/* Count the interrupt call */
	bus->sdcnt.intrcount++;
	if (in_interrupt())
		atomic_set(&bus->ipend, 1);
	else
		if (brcmf_sdio_intr_rstatus(bus)) {
			brcmf_err("failed backplane access\n");
			bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
		}

	/* Disable additional interrupts (is this needed now)? */
	if (!bus->intr)
		brcmf_err("isr w/o interrupt configured!\n");

	atomic_inc(&bus->dpc_tskcnt);
	queue_work(bus->brcmf_wq, &bus->datawork);
}
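/* Watchdog tick: poll the device when interrupts are not in use, drain the
 * firmware console, and drop the backplane clock after an idle period.
 */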
static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
#ifdef DEBUG
	struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
#endif	/* DEBUG */

	brcmf_dbg(TIMER, "Enter\n");

	/* Poll period: check device if appropriate. */
	if (!bus->sr_enabled &&
	    bus->poll && (++bus->polltick >= bus->pollrate)) {
		u32 intstatus = 0;

		/* Reset poll tick */
		bus->polltick = 0;

		/* Check device if no interrupts */
		if (!bus->intr ||
		    (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {

			if (atomic_read(&bus->dpc_tskcnt) == 0) {
				u8 devpend;

				sdio_claim_host(bus->sdiodev->func[1]);
				devpend = brcmf_sdio_regrb(bus->sdiodev,
							   SDIO_CCCR_INTx,
							   NULL);
				sdio_release_host(bus->sdiodev->func[1]);
				intstatus =
				    devpend & (INTR_STATUS_FUNC1 |
					       INTR_STATUS_FUNC2);
			}

			/* If there is something, make like the ISR and
			   schedule the DPC */
			if (intstatus) {
				bus->sdcnt.pollcnt++;
				atomic_set(&bus->ipend, 1);

				atomic_inc(&bus->dpc_tskcnt);
				queue_work(bus->brcmf_wq, &bus->datawork);
			}
		}

		/* Update interrupt tracking */
		bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
	}
#ifdef DEBUG
	/* Poll for console output periodically */
	if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
	    bus->console_interval != 0) {
		bus->console.count += BRCMF_WD_POLL_MS;
		if (bus->console.count >= bus->console_interval) {
			bus->console.count -= bus->console_interval;
			sdio_claim_host(bus->sdiodev->func[1]);
			/* Make sure backplane clock is on */
			brcmf_sdbrcm_bus_sleep(bus, false, false);
			if (brcmf_sdbrcm_readconsole(bus) < 0)
				/* stop on error */
				bus->console_interval = 0;
			sdio_release_host(bus->sdiodev->func[1]);
		}
	}
#endif				/* DEBUG */

	/* On idle timeout clear activity flag and/or turn off clock */
	if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
		if (++bus->idlecount >= bus->idletime) {
			bus->idlecount = 0;
			if (bus->activity) {
				bus->activity = false;
				brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
			} else {
				brcmf_dbg(SDIO, "idle\n");
				sdio_claim_host(bus->sdiodev->func[1]);
				brcmf_sdbrcm_bus_sleep(bus, true, false);
				sdio_release_host(bus->sdiodev->func[1]);
			}
		}
	}

	return (atomic_read(&bus->ipend) > 0);
}
static void brcmf_sdio_dataworker(struct work_struct *work)
{
	struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio,
					      datawork);

	while (atomic_read(&bus->dpc_tskcnt)) {
		brcmf_sdbrcm_dpc(bus);
		atomic_dec(&bus->dpc_tskcnt);
	}
}
static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	kfree(bus->rxbuf);
	bus->rxctl = bus->rxbuf = NULL;
	bus->rxlen = 0;
}
static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus->sdiodev->bus_if->maxctl) {
		bus->rxblen =
		    roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
			    ALIGNMENT) + BRCMF_SDALIGN;
		bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
		if (!(bus->rxbuf))
			return false;
	}

	return true;
}
static bool
brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
{
	u8 clkctl = 0;
	int err = 0;
	int reg_addr;
	u32 reg_val;
	u32 drivestrength;

	bus->alp_only = true;

	sdio_claim_host(bus->sdiodev->func[1]);

	pr_debug("F1 signature read @0x18000000=0x%4x\n",
		 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));

	/*
	 * Force PLL off until brcmf_sdio_chip_attach()
	 * programs PLL control regs
	 */

	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
			 BRCMF_INIT_CLKCTL1, &err);
	if (!err)
		clkctl = brcmf_sdio_regrb(bus->sdiodev,
					  SBSDIO_FUNC1_CHIPCLKCSR, &err);

	if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
		brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
			  err, BRCMF_INIT_CLKCTL1, clkctl);
		goto fail;
	}

	if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
		brcmf_err("brcmf_sdio_chip_attach failed!\n");
		goto fail;
	}

	if (brcmf_sdbrcm_kso_init(bus)) {
		brcmf_err("error enabling KSO\n");
		goto fail;
	}

	if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength))
		drivestrength = bus->sdiodev->pdata->drive_strength;
	else
		drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
	brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);

	/* Get info on the SOCRAM cores... */
	bus->ramsize = bus->ci->ramsize;
	if (!(bus->ramsize)) {
		brcmf_err("failed to find SOCRAM memory!\n");
		goto fail;
	}

	/* Set card control so an SDIO card reset does a WLAN backplane reset */
	reg_val = brcmf_sdio_regrb(bus->sdiodev,
				   SDIO_CCCR_BRCM_CARDCTRL, &err);
	if (err)
		goto fail;

	reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET;

	brcmf_sdio_regwb(bus->sdiodev,
			 SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err);
	if (err)
		goto fail;

	/* set PMUControl so a backplane reset does PMU state reload */
	reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucontrol);
	reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, &err);
	if (err)
		goto fail;

	reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);

	brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val, &err);
	if (err)
		goto fail;

	sdio_release_host(bus->sdiodev->func[1]);

	brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);

	/* Locate an appropriately-aligned portion of hdrbuf */
	bus->rxhdr = (u8 *) roundup((unsigned long)&bus->hdrbuf[0],
				    BRCMF_SDALIGN);

	/* Set the poll and/or interrupt flags */
	bus->intr = true;
	bus->poll = false;
	if (bus->poll)
		bus->pollrate = 1;

	return true;

fail:
	sdio_release_host(bus->sdiodev->func[1]);
	return false;
}
static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	sdio_claim_host(bus->sdiodev->func[1]);

	/* Disable F2 to clear any intermediate frame state on the dongle */
	brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
			 SDIO_FUNC_ENABLE_1, NULL);

	bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
	bus->rxflow = false;

	/* Done with backplane-dependent accesses, can drop clock... */
	brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);

	sdio_release_host(bus->sdiodev->func[1]);

	/* ...and initialize clock/power states */
	bus->clkstate = CLK_SDONLY;
	bus->idletime = BRCMF_IDLE_INTERVAL;
	bus->idleclock = BRCMF_IDLE_ACTIVE;

	/* Query the F2 block size, set roundup accordingly */
	bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
	bus->roundup = min(max_roundup, bus->blocksize);

	/* SR state */
	bus->sleeping = false;
	bus->sr_enabled = false;

	return true;
}
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	allow_signal(SIGTERM);
	/* Run until signal received */
	while (1) {
		if (kthread_should_stop())
			break;
		if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
			brcmf_sdbrcm_bus_watchdog(bus);
			/* Count the tick for reference */
			bus->sdcnt.tickcnt++;
		} else
			break;
	}
	return 0;
}
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)data;

	if (bus->watchdog_tsk) {
		complete(&bus->watchdog_wait);
		/* Reschedule the watchdog */
		if (bus->wd_timer_valid)
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
	}
}
static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus->ci) {
		sdio_claim_host(bus->sdiodev->func[1]);
		brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
		brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
		sdio_release_host(bus->sdiodev->func[1]);
		brcmf_sdio_chip_detach(&bus->ci);
		if (bus->vars && bus->varsz)
			kfree(bus->vars);
		bus->vars = NULL;
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}
/* Detach and free everything */
static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
	brcmf_dbg(TRACE, "Enter\n");

	if (bus) {
		/* De-register interrupt handler */
		brcmf_sdio_intr_unregister(bus->sdiodev);

		cancel_work_sync(&bus->datawork);
		if (bus->brcmf_wq)
			destroy_workqueue(bus->brcmf_wq);

		if (bus->sdiodev->bus_if->drvr) {
			brcmf_detach(bus->sdiodev->dev);
			brcmf_sdbrcm_release_dongle(bus);
		}

		brcmf_sdbrcm_release_malloc(bus);

		kfree(bus);
	}

	brcmf_dbg(TRACE, "Disconnected\n");
}
static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
	.stop = brcmf_sdbrcm_bus_stop,
	.init = brcmf_sdbrcm_bus_init,
	.txdata = brcmf_sdbrcm_bus_txdata,
	.txctl = brcmf_sdbrcm_bus_txctl,
	.rxctl = brcmf_sdbrcm_bus_rxctl,
	.gettxq = brcmf_sdbrcm_bus_gettxq,
};
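/* Allocate and initialise the SDIO bus state, attach to the dongle and the
 * common layer, and start the firmware download.
 */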
void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
	int ret;
	struct brcmf_sdio *bus;
	struct brcmf_bus_dcmd *dlst;
	u32 dngl_txglom;
	u32 txglomalign = 0;
	u8 idx;

	brcmf_dbg(TRACE, "Enter\n");

	/* We make an assumption about address window mappings:
	 * regsva == SI_ENUM_BASE*/

	/* Allocate private bus interface state */
	bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
	if (!bus)
		goto fail;

	bus->sdiodev = sdiodev;
	sdiodev->bus = bus;
	skb_queue_head_init(&bus->glom);
	bus->txbound = BRCMF_TXBOUND;
	bus->rxbound = BRCMF_RXBOUND;
	bus->txminmax = BRCMF_TXMINMAX;
	bus->tx_seq = SDPCM_SEQ_WRAP - 1;

	INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
	bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
	if (bus->brcmf_wq == NULL) {
		brcmf_err("insufficient memory to create txworkqueue\n");
		goto fail;
	}

	/* attempt to attach to the dongle */
	if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
		brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
		goto fail;
	}

	spin_lock_init(&bus->rxctl_lock);
	spin_lock_init(&bus->txqlock);
	init_waitqueue_head(&bus->ctrl_wait);
	init_waitqueue_head(&bus->dcmd_resp_wait);

	/* Set up the watchdog timer */
	init_timer(&bus->timer);
	bus->timer.data = (unsigned long)bus;
	bus->timer.function = brcmf_sdbrcm_watchdog;

	/* Initialize watchdog thread */
	init_completion(&bus->watchdog_wait);
	bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
					bus, "brcmf_watchdog");
	if (IS_ERR(bus->watchdog_tsk)) {
		pr_warn("brcmf_watchdog thread failed to start\n");
		bus->watchdog_tsk = NULL;
	}
	/* Initialize DPC thread */
	atomic_set(&bus->dpc_tskcnt, 0);

	/* Assign bus interface call back */
	bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
	bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
	bus->sdiodev->bus_if->chip = bus->ci->chip;
	bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;

	/* default sdio bus header length for tx packet */
	bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;

	/* Attach to the common layer, reserve hdr space */
	ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("brcmf_attach failed\n");
		goto fail;
	}

	/* Allocate buffers */
	if (!(brcmf_sdbrcm_probe_malloc(bus))) {
		brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
		goto fail;
	}

	if (!(brcmf_sdbrcm_probe_init(bus))) {
		brcmf_err("brcmf_sdbrcm_probe_init failed\n");
		goto fail;
	}

	brcmf_sdio_debugfs_create(bus);
	brcmf_dbg(INFO, "completed!!\n");

	/* sdio bus core specific dcmd */
	idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
	dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
	if (dlst) {
		if (bus->ci->c_inf[idx].rev < 12) {
			/* for sdio core rev < 12, disable txgloming */
			dngl_txglom = 0;
			dlst->name = "bus:txglom";
			dlst->param = (char *)&dngl_txglom;
			dlst->param_len = sizeof(u32);
		} else {
			/* otherwise, set txglomalign */
			if (sdiodev->pdata)
				txglomalign = sdiodev->pdata->sd_sgentry_align;
			/* SDIO ADMA requires at least 32 bit alignment */
			if (txglomalign < 4)
				txglomalign = 4;
			dlst->name = "bus:txglomalign";
			dlst->param = (char *)&txglomalign;
			dlst->param_len = sizeof(u32);
		}
		list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
	}

	/* if firmware path present try to download and bring up bus */
	ret = brcmf_bus_start(bus->sdiodev->dev);
	if (ret != 0) {
		brcmf_err("dongle is not responding\n");
		goto fail;
	}

	return bus;

fail:
	brcmf_sdbrcm_release(bus);
	return NULL;
}
void brcmf_sdbrcm_disconnect(void *ptr)
{
	struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;

	brcmf_dbg(TRACE, "Enter\n");

	if (bus)
		brcmf_sdbrcm_release(bus);

	brcmf_dbg(TRACE, "Disconnected\n");
}
void
brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
	/* Totally stop the timer */
	if (!wdtick && bus->wd_timer_valid) {
		del_timer_sync(&bus->timer);
		bus->wd_timer_valid = false;
		bus->save_ms = wdtick;
		return;
	}

	/* don't start the wd until fw is loaded */
	if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
		return;

	if (wdtick) {
		if (bus->save_ms != BRCMF_WD_POLL_MS) {
			if (bus->wd_timer_valid)
				/* Stop timer and restart at new value */
				del_timer_sync(&bus->timer);

			/* Create timer again when watchdog period is
			   dynamically changed or in the first instance
			 */
			bus->timer.expires =
				jiffies + BRCMF_WD_POLL_MS * HZ / 1000;
			add_timer(&bus->timer);
		} else {
			/* Re arm the timer, at last watchdog period */
			mod_timer(&bus->timer,
				  jiffies + BRCMF_WD_POLL_MS * HZ / 1000);
		}

		bus->wd_timer_valid = true;
		bus->save_ms = wdtick;
	}
}