/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
16 /* ****************** SDIO CARD Interface Functions **************************/
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/platform_data/brcmfmac-sdio.h>

#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "sdio_host.h"
41 #define SDIOH_API_ACCESS_RETRY_LIMIT 2
44 static irqreturn_t
brcmf_sdio_oob_irqhandler(int irq
, void *dev_id
)
46 struct brcmf_bus
*bus_if
= dev_get_drvdata(dev_id
);
47 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
49 brcmf_dbg(INTR
, "OOB intr triggered\n");
51 /* out-of-band interrupt is level-triggered which won't
52 * be cleared until dpc
54 if (sdiodev
->irq_en
) {
55 disable_irq_nosync(irq
);
56 sdiodev
->irq_en
= false;
59 brcmf_sdbrcm_isr(sdiodev
->bus
);
64 static void brcmf_sdio_ib_irqhandler(struct sdio_func
*func
)
66 struct brcmf_bus
*bus_if
= dev_get_drvdata(&func
->dev
);
67 struct brcmf_sdio_dev
*sdiodev
= bus_if
->bus_priv
.sdio
;
69 brcmf_dbg(INTR
, "IB intr triggered\n");
71 brcmf_sdbrcm_isr(sdiodev
->bus
);
/* dummy handler for SDIO function 2 interrupt; claimed only so the MMC
 * core enables IENx for function 2 — all real work happens via function 1.
 */
static void brcmf_sdio_dummy_irqhandler(struct sdio_func *func)
{
}
79 int brcmf_sdio_intr_register(struct brcmf_sdio_dev
*sdiodev
)
85 if ((sdiodev
->pdata
) && (sdiodev
->pdata
->oob_irq_supported
)) {
86 brcmf_dbg(SDIO
, "Enter, register OOB IRQ %d\n",
87 sdiodev
->pdata
->oob_irq_nr
);
88 ret
= request_irq(sdiodev
->pdata
->oob_irq_nr
,
89 brcmf_sdio_oob_irqhandler
,
90 sdiodev
->pdata
->oob_irq_flags
,
92 &sdiodev
->func
[1]->dev
);
94 brcmf_err("request_irq failed %d\n", ret
);
97 sdiodev
->oob_irq_requested
= true;
98 spin_lock_init(&sdiodev
->irq_en_lock
);
99 spin_lock_irqsave(&sdiodev
->irq_en_lock
, flags
);
100 sdiodev
->irq_en
= true;
101 spin_unlock_irqrestore(&sdiodev
->irq_en_lock
, flags
);
103 ret
= enable_irq_wake(sdiodev
->pdata
->oob_irq_nr
);
105 brcmf_err("enable_irq_wake failed %d\n", ret
);
108 sdiodev
->irq_wake
= true;
110 sdio_claim_host(sdiodev
->func
[1]);
112 /* must configure SDIO_CCCR_IENx to enable irq */
113 data
= brcmf_sdio_regrb(sdiodev
, SDIO_CCCR_IENx
, &ret
);
114 data
|= 1 << SDIO_FUNC_1
| 1 << SDIO_FUNC_2
| 1;
115 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_IENx
, data
, &ret
);
117 /* redirect, configure and enable io for interrupt signal */
118 data
= SDIO_SEPINT_MASK
| SDIO_SEPINT_OE
;
119 if (sdiodev
->pdata
->oob_irq_flags
& IRQF_TRIGGER_HIGH
)
120 data
|= SDIO_SEPINT_ACT_HI
;
121 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_BRCM_SEPINT
, data
, &ret
);
123 sdio_release_host(sdiodev
->func
[1]);
125 brcmf_dbg(SDIO
, "Entering\n");
126 sdio_claim_host(sdiodev
->func
[1]);
127 sdio_claim_irq(sdiodev
->func
[1], brcmf_sdio_ib_irqhandler
);
128 sdio_claim_irq(sdiodev
->func
[2], brcmf_sdio_dummy_irqhandler
);
129 sdio_release_host(sdiodev
->func
[1]);
135 int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev
*sdiodev
)
137 brcmf_dbg(SDIO
, "Entering\n");
139 if ((sdiodev
->pdata
) && (sdiodev
->pdata
->oob_irq_supported
)) {
140 sdio_claim_host(sdiodev
->func
[1]);
141 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_BRCM_SEPINT
, 0, NULL
);
142 brcmf_sdio_regwb(sdiodev
, SDIO_CCCR_IENx
, 0, NULL
);
143 sdio_release_host(sdiodev
->func
[1]);
145 if (sdiodev
->oob_irq_requested
) {
146 sdiodev
->oob_irq_requested
= false;
147 if (sdiodev
->irq_wake
) {
148 disable_irq_wake(sdiodev
->pdata
->oob_irq_nr
);
149 sdiodev
->irq_wake
= false;
151 free_irq(sdiodev
->pdata
->oob_irq_nr
,
152 &sdiodev
->func
[1]->dev
);
153 sdiodev
->irq_en
= false;
156 sdio_claim_host(sdiodev
->func
[1]);
157 sdio_release_irq(sdiodev
->func
[2]);
158 sdio_release_irq(sdiodev
->func
[1]);
159 sdio_release_host(sdiodev
->func
[1]);
166 brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev
*sdiodev
, u32 address
)
172 addr
[0] = (address
>> 8) & SBSDIO_SBADDRLOW_MASK
;
173 addr
[1] = (address
>> 16) & SBSDIO_SBADDRMID_MASK
;
174 addr
[2] = (address
>> 24) & SBSDIO_SBADDRHIGH_MASK
;
176 for (i
= 0; i
< 3; i
++) {
180 usleep_range(1000, 2000);
181 err
= brcmf_sdioh_request_byte(sdiodev
, SDIOH_WRITE
,
182 SDIO_FUNC_1
, SBSDIO_FUNC1_SBADDRLOW
+ i
,
184 } while (err
!= 0 && retry
++ < SDIOH_API_ACCESS_RETRY_LIMIT
);
187 brcmf_err("failed at addr:0x%0x\n",
188 SBSDIO_FUNC1_SBADDRLOW
+ i
);
197 brcmf_sdio_addrprep(struct brcmf_sdio_dev
*sdiodev
, uint width
, u32
*addr
)
199 uint bar0
= *addr
& ~SBSDIO_SB_OFT_ADDR_MASK
;
202 if (bar0
!= sdiodev
->sbwad
) {
203 err
= brcmf_sdcard_set_sbaddr_window(sdiodev
, bar0
);
207 sdiodev
->sbwad
= bar0
;
210 *addr
&= SBSDIO_SB_OFT_ADDR_MASK
;
213 *addr
|= SBSDIO_SB_ACCESS_2_4B_FLAG
;
219 brcmf_sdio_regrw_helper(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
220 void *data
, bool write
)
222 u8 func_num
, reg_size
;
227 * figure out how to read the register based on address range
228 * 0x00 ~ 0x7FF: function 0 CCCR and FBR
229 * 0x10000 ~ 0x1FFFF: function 1 miscellaneous registers
230 * The rest: function 1 silicon backplane core registers
232 if ((addr
& ~REG_F0_REG_MASK
) == 0) {
233 func_num
= SDIO_FUNC_0
;
235 } else if ((addr
& ~REG_F1_MISC_MASK
) == 0) {
236 func_num
= SDIO_FUNC_1
;
239 func_num
= SDIO_FUNC_1
;
242 brcmf_sdio_addrprep(sdiodev
, reg_size
, &addr
);
247 memset(data
, 0, reg_size
);
248 if (retry
) /* wait for 1 ms till bus get settled down */
249 usleep_range(1000, 2000);
251 ret
= brcmf_sdioh_request_byte(sdiodev
, write
,
252 func_num
, addr
, data
);
254 ret
= brcmf_sdioh_request_word(sdiodev
, write
,
255 func_num
, addr
, data
, 4);
256 } while (ret
!= 0 && retry
++ < SDIOH_API_ACCESS_RETRY_LIMIT
);
259 brcmf_err("failed with %d\n", ret
);
264 u8
brcmf_sdio_regrb(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, int *ret
)
269 brcmf_dbg(SDIO
, "addr:0x%08x\n", addr
);
270 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, false);
271 brcmf_dbg(SDIO
, "data:0x%02x\n", data
);
279 u32
brcmf_sdio_regrl(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, int *ret
)
284 brcmf_dbg(SDIO
, "addr:0x%08x\n", addr
);
285 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, false);
286 brcmf_dbg(SDIO
, "data:0x%08x\n", data
);
294 void brcmf_sdio_regwb(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
299 brcmf_dbg(SDIO
, "addr:0x%08x, data:0x%02x\n", addr
, data
);
300 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, true);
306 void brcmf_sdio_regwl(struct brcmf_sdio_dev
*sdiodev
, u32 addr
,
311 brcmf_dbg(SDIO
, "addr:0x%08x, data:0x%08x\n", addr
, data
);
312 retval
= brcmf_sdio_regrw_helper(sdiodev
, addr
, &data
, true);
319 * brcmf_sdio_buffrw - SDIO interface function for block data access
320 * @sdiodev: brcmfmac sdio device
321 * @fn: SDIO function number
322 * @write: direction flag
323 * @addr: dongle memory address as source/destination
326 * This function takes the respbonsibility as the interface function to MMC
327 * stack for block data access. It assumes that the skb passed down by the
328 * caller has already been padded and aligned.
330 static int brcmf_sdio_buffrw(struct brcmf_sdio_dev
*sdiodev
, uint fn
,
331 bool write
, u32 addr
, struct sk_buff_head
*pktlist
)
333 unsigned int req_sz
, func_blk_sz
, sg_cnt
, sg_data_sz
, pkt_offset
;
334 unsigned int max_blks
, max_req_sz
, orig_offset
, dst_offset
;
335 unsigned short max_seg_sz
, seg_sz
;
336 unsigned char *pkt_data
, *orig_data
, *dst_data
;
337 struct sk_buff
*pkt_next
= NULL
, *local_pkt_next
;
338 struct sk_buff_head local_list
, *target_list
;
339 struct mmc_request mmc_req
;
340 struct mmc_command mmc_cmd
;
341 struct mmc_data mmc_dat
;
343 struct scatterlist
*sgl
;
344 struct mmc_host
*host
;
350 brcmf_pm_resume_wait(sdiodev
, &sdiodev
->request_buffer_wait
);
351 if (brcmf_pm_resume_error(sdiodev
))
354 /* Single skb use the standard mmc interface */
355 if (pktlist
->qlen
== 1) {
356 pkt_next
= pktlist
->next
;
357 req_sz
= pkt_next
->len
+ 3;
361 return sdio_memcpy_toio(sdiodev
->func
[fn
], addr
,
362 ((u8
*)(pkt_next
->data
)),
365 return sdio_memcpy_fromio(sdiodev
->func
[fn
],
366 ((u8
*)(pkt_next
->data
)),
369 /* function 2 read is FIFO operation */
370 return sdio_readsb(sdiodev
->func
[fn
],
371 ((u8
*)(pkt_next
->data
)), addr
,
375 target_list
= pktlist
;
376 /* for host with broken sg support, prepare a page aligned list */
377 __skb_queue_head_init(&local_list
);
378 if (sdiodev
->pdata
&& sdiodev
->pdata
->broken_sg_support
&& !write
) {
380 skb_queue_walk(pktlist
, pkt_next
)
381 req_sz
+= pkt_next
->len
;
382 req_sz
= ALIGN(req_sz
, sdiodev
->func
[fn
]->cur_blksize
);
383 while (req_sz
> PAGE_SIZE
) {
384 pkt_next
= brcmu_pkt_buf_get_skb(PAGE_SIZE
);
385 if (pkt_next
== NULL
) {
389 __skb_queue_tail(&local_list
, pkt_next
);
392 pkt_next
= brcmu_pkt_buf_get_skb(req_sz
);
393 if (pkt_next
== NULL
) {
397 __skb_queue_tail(&local_list
, pkt_next
);
398 target_list
= &local_list
;
401 host
= sdiodev
->func
[fn
]->card
->host
;
402 func_blk_sz
= sdiodev
->func
[fn
]->cur_blksize
;
403 /* Blocks per command is limited by host count, host transfer
404 * size and the maximum for IO_RW_EXTENDED of 511 blocks.
406 max_blks
= min_t(unsigned int, host
->max_blk_count
, 511u);
407 max_req_sz
= min_t(unsigned int, host
->max_req_size
,
408 max_blks
* func_blk_sz
);
409 max_seg_sz
= min_t(unsigned short, host
->max_segs
, SG_MAX_SINGLE_ALLOC
);
410 max_seg_sz
= min_t(unsigned short, max_seg_sz
, target_list
->qlen
);
411 seg_sz
= target_list
->qlen
;
413 pkt_next
= target_list
->next
;
415 if (sg_alloc_table(&st
, max_seg_sz
, GFP_KERNEL
)) {
423 memset(&mmc_req
, 0, sizeof(struct mmc_request
));
424 memset(&mmc_cmd
, 0, sizeof(struct mmc_command
));
425 memset(&mmc_dat
, 0, sizeof(struct mmc_data
));
428 while (pkt_next
!= (struct sk_buff
*)target_list
) {
429 pkt_data
= pkt_next
->data
+ pkt_offset
;
430 sg_data_sz
= pkt_next
->len
- pkt_offset
;
431 if (sg_data_sz
> host
->max_seg_size
)
432 sg_data_sz
= host
->max_seg_size
;
433 if (sg_data_sz
> max_req_sz
- req_sz
)
434 sg_data_sz
= max_req_sz
- req_sz
;
436 sg_set_buf(sgl
, pkt_data
, sg_data_sz
);
440 req_sz
+= sg_data_sz
;
441 pkt_offset
+= sg_data_sz
;
442 if (pkt_offset
== pkt_next
->len
) {
444 pkt_next
= pkt_next
->next
;
447 if (req_sz
>= max_req_sz
|| sg_cnt
>= max_seg_sz
)
452 if (req_sz
% func_blk_sz
!= 0) {
453 brcmf_err("sg request length %u is not %u aligned\n",
454 req_sz
, func_blk_sz
);
459 mmc_dat
.sg_len
= sg_cnt
;
460 mmc_dat
.blksz
= func_blk_sz
;
461 mmc_dat
.blocks
= req_sz
/ func_blk_sz
;
462 mmc_dat
.flags
= write
? MMC_DATA_WRITE
: MMC_DATA_READ
;
463 mmc_cmd
.opcode
= SD_IO_RW_EXTENDED
;
464 mmc_cmd
.arg
= write
? 1<<31 : 0; /* write flag */
465 mmc_cmd
.arg
|= (fn
& 0x7) << 28; /* SDIO func num */
466 mmc_cmd
.arg
|= 1<<27; /* block mode */
467 /* incrementing addr for function 1 */
468 mmc_cmd
.arg
|= (fn
== 1) ? 1<<26 : 0;
469 mmc_cmd
.arg
|= (addr
& 0x1FFFF) << 9; /* address */
470 mmc_cmd
.arg
|= mmc_dat
.blocks
& 0x1FF; /* block count */
471 mmc_cmd
.flags
= MMC_RSP_SPI_R5
| MMC_RSP_R5
| MMC_CMD_ADTC
;
472 mmc_req
.cmd
= &mmc_cmd
;
473 mmc_req
.data
= &mmc_dat
;
477 mmc_set_data_timeout(&mmc_dat
, sdiodev
->func
[fn
]->card
);
478 mmc_wait_for_req(host
, &mmc_req
);
480 ret
= mmc_cmd
.error
? mmc_cmd
.error
: mmc_dat
.error
;
482 brcmf_err("CMD53 sg block %s failed %d\n",
483 write
? "write" : "read", ret
);
489 if (sdiodev
->pdata
&& sdiodev
->pdata
->broken_sg_support
&& !write
) {
490 local_pkt_next
= local_list
.next
;
492 skb_queue_walk(pktlist
, pkt_next
) {
495 req_sz
= local_pkt_next
->len
- orig_offset
;
496 req_sz
= min_t(uint
, pkt_next
->len
- dst_offset
,
498 orig_data
= local_pkt_next
->data
+ orig_offset
;
499 dst_data
= pkt_next
->data
+ dst_offset
;
500 memcpy(dst_data
, orig_data
, req_sz
);
501 orig_offset
+= req_sz
;
502 dst_offset
+= req_sz
;
503 if (orig_offset
== local_pkt_next
->len
) {
505 local_pkt_next
= local_pkt_next
->next
;
507 if (dst_offset
== pkt_next
->len
)
509 } while (!skb_queue_empty(&local_list
));
515 while ((pkt_next
= __skb_dequeue(&local_list
)) != NULL
)
516 brcmu_pkt_buf_free_skb(pkt_next
);
522 brcmf_sdcard_recv_buf(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
523 uint flags
, u8
*buf
, uint nbytes
)
525 struct sk_buff
*mypkt
;
528 mypkt
= brcmu_pkt_buf_get_skb(nbytes
);
530 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
535 err
= brcmf_sdcard_recv_pkt(sdiodev
, addr
, fn
, flags
, mypkt
);
537 memcpy(buf
, mypkt
->data
, nbytes
);
539 brcmu_pkt_buf_free_skb(mypkt
);
544 brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
545 uint flags
, struct sk_buff
*pkt
)
549 struct sk_buff_head pkt_list
;
551 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
554 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
555 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
559 skb_queue_head_init(&pkt_list
);
560 skb_queue_tail(&pkt_list
, pkt
);
561 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, &pkt_list
);
562 skb_dequeue_tail(&pkt_list
);
568 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
569 uint flags
, struct sk_buff_head
*pktq
)
575 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
576 fn
, addr
, pktq
->qlen
);
578 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
579 err
= brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
583 incr_fix
= (flags
& SDIO_REQ_FIXED
) ? SDIOH_DATA_FIX
: SDIOH_DATA_INC
;
584 err
= brcmf_sdio_buffrw(sdiodev
, fn
, false, addr
, pktq
);
591 brcmf_sdcard_send_buf(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
592 uint flags
, u8
*buf
, uint nbytes
)
594 struct sk_buff
*mypkt
;
595 struct sk_buff_head pktq
;
598 mypkt
= brcmu_pkt_buf_get_skb(nbytes
);
600 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
605 memcpy(mypkt
->data
, buf
, nbytes
);
606 __skb_queue_head_init(&pktq
);
607 __skb_queue_tail(&pktq
, mypkt
);
608 err
= brcmf_sdcard_send_pkt(sdiodev
, addr
, fn
, flags
, &pktq
);
609 __skb_dequeue_tail(&pktq
);
611 brcmu_pkt_buf_free_skb(mypkt
);
617 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev
*sdiodev
, u32 addr
, uint fn
,
618 uint flags
, struct sk_buff_head
*pktq
)
623 brcmf_dbg(SDIO
, "fun = %d, addr = 0x%x, size = %d\n",
624 fn
, addr
, pktq
->qlen
);
626 width
= (flags
& SDIO_REQ_4BYTE
) ? 4 : 2;
627 brcmf_sdio_addrprep(sdiodev
, width
, &addr
);
629 err
= brcmf_sdio_buffrw(sdiodev
, fn
, true, addr
, pktq
);
635 brcmf_sdio_ramrw(struct brcmf_sdio_dev
*sdiodev
, bool write
, u32 address
,
642 struct sk_buff_head pkt_list
;
644 dsize
= min_t(uint
, SBSDIO_SB_OFT_ADDR_LIMIT
, size
);
645 pkt
= dev_alloc_skb(dsize
);
647 brcmf_err("dev_alloc_skb failed: len %d\n", dsize
);
651 skb_queue_head_init(&pkt_list
);
653 /* Determine initial transfer parameters */
654 sdaddr
= address
& SBSDIO_SB_OFT_ADDR_MASK
;
655 if ((sdaddr
+ size
) & SBSDIO_SBWINDOW_MASK
)
656 dsize
= (SBSDIO_SB_OFT_ADDR_LIMIT
- sdaddr
);
660 sdio_claim_host(sdiodev
->func
[1]);
662 /* Do the transfer(s) */
664 /* Set the backplane window to include the start address */
665 bcmerror
= brcmf_sdcard_set_sbaddr_window(sdiodev
, address
);
669 brcmf_dbg(SDIO
, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
670 write
? "write" : "read", dsize
,
671 sdaddr
, address
& SBSDIO_SBWINDOW_MASK
);
673 sdaddr
&= SBSDIO_SB_OFT_ADDR_MASK
;
674 sdaddr
|= SBSDIO_SB_ACCESS_2_4B_FLAG
;
678 memcpy(pkt
->data
, data
, dsize
);
679 skb_queue_tail(&pkt_list
, pkt
);
680 bcmerror
= brcmf_sdio_buffrw(sdiodev
, SDIO_FUNC_1
, write
,
682 skb_dequeue_tail(&pkt_list
);
684 brcmf_err("membytes transfer failed\n");
688 memcpy(data
, pkt
->data
, dsize
);
689 skb_trim(pkt
, dsize
);
691 /* Adjust for next transfer (if any) */
697 dsize
= min_t(uint
, SBSDIO_SB_OFT_ADDR_LIMIT
, size
);
703 /* Return the window to backplane enumeration space for core access */
704 if (brcmf_sdcard_set_sbaddr_window(sdiodev
, sdiodev
->sbwad
))
705 brcmf_err("FAILED to set window back to 0x%x\n",
708 sdio_release_host(sdiodev
->func
[1]);
713 int brcmf_sdcard_abort(struct brcmf_sdio_dev
*sdiodev
, uint fn
)
715 char t_func
= (char)fn
;
716 brcmf_dbg(SDIO
, "Enter\n");
718 /* issue abort cmd52 command through F0 */
719 brcmf_sdioh_request_byte(sdiodev
, SDIOH_WRITE
, SDIO_FUNC_0
,
720 SDIO_CCCR_ABORT
, &t_func
);
722 brcmf_dbg(SDIO
, "Exit\n");
726 int brcmf_sdio_probe(struct brcmf_sdio_dev
*sdiodev
)
731 ret
= brcmf_sdioh_attach(sdiodev
);
737 /* try to attach to the target device */
738 sdiodev
->bus
= brcmf_sdbrcm_probe(regs
, sdiodev
);
740 brcmf_err("device attach failed\n");
747 brcmf_sdio_remove(sdiodev
);
751 EXPORT_SYMBOL(brcmf_sdio_probe
);
753 int brcmf_sdio_remove(struct brcmf_sdio_dev
*sdiodev
)
755 sdiodev
->bus_if
->state
= BRCMF_BUS_DOWN
;
758 brcmf_sdbrcm_disconnect(sdiodev
->bus
);
762 brcmf_sdioh_detach(sdiodev
);
768 EXPORT_SYMBOL(brcmf_sdio_remove
);
770 void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev
*sdiodev
, bool enable
)
773 brcmf_sdbrcm_wd_timer(sdiodev
->bus
, BRCMF_WD_POLL_MS
);
775 brcmf_sdbrcm_wd_timer(sdiodev
->bus
, 0);