Linux 4.19.133 - drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
/*
 * Copyright (c) 2010 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* ****************** SDIO CARD Interface Functions **************************/
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "core.h"
#include "common.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK	0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY	3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32  /* max rx frames in glom chain */
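/* Descriptive note on the freezer bookkeeping below, inferred from its use
 * in the CONFIG_PM_SLEEP helpers later in this file:
 * @freezing: set while a system suspend is in progress.
 * @thread_count: number of bus threads registered with the freezer.
 * @frozen_count: number of threads parked in brcmf_sdiod_try_freeze().
 * @thread_freeze: wait queue the suspend path sleeps on until all froze.
 * @resumed: completion that releases the frozen threads on resume.
 */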
struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus);

	return IRQ_HANDLED;
}
static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}
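/* Two interrupt schemes are supported: an out-of-band GPIO interrupt
 * described by platform data (oob_irq_supported), or the in-band SDIO
 * interrupt claimed through the MMC core. The OOB path additionally
 * routes the signal via the SDIO_CCCR_BRCM_SEPINT register in function 0.
 */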
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		spin_lock_init(&sdiodev->irq_en_lock);
		sdiodev->irq_en = true;

		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func1->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		sdiodev->irq_wake = true;

		sdio_claim_host(sdiodev->func1);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = CORE_CC_REG(SI_ENUM_BASE, gpiocontrol);
			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
					   0xf, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
			SDIO_CCCR_IEN_FUNC0;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
				     data, &ret);
		sdio_release_host(sdiodev->func1);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func1);
		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}
void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
		  sdiodev->oob_irq_requested,
		  sdiodev->sd_irq_requested);

	if (sdiodev->oob_irq_requested) {
		struct brcmfmac_sdio_pd *pdata;

		pdata = &sdiodev->settings->bus.sdio;
		sdio_claim_host(sdiodev->func1);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func1);

		sdiodev->oob_irq_requested = false;
		if (sdiodev->irq_wake) {
			disable_irq_wake(pdata->oob_irq_nr);
			sdiodev->irq_wake = false;
		}
		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
		sdiodev->irq_en = false;
		sdiodev->oob_irq_requested = false;
	}

	if (sdiodev->sd_irq_requested) {
		sdio_claim_host(sdiodev->func1);
		sdio_release_irq(sdiodev->func2);
		sdio_release_irq(sdiodev->func1);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = false;
	}
}
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}
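/* The SDIO core exposes only a 32 KiB window into the backplane address
 * space. The window base is programmed one byte at a time into the three
 * consecutive SBSDIO_FUNC1_SBADDRLOW/MID/HIGH registers and cached in
 * sdiodev->sbwad so an unchanged window is not rewritten.
 */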
static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
					    u32 addr)
{
	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
	int err = 0, i;

	if (bar0 == sdiodev->sbwad)
		return 0;

	v = bar0 >> 8;

	for (i = 0 ; i < 3 && !err ; i++, v >>= 8)
		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
				   v & 0xff, &err);

	if (!err)
		sdiodev->sbwad = bar0;

	return err;
}
u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data = 0;
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	data = sdio_readl(sdiodev->func1, addr, &retval);

out:
	if (ret)
		*ret = retval;

	return data;
}
void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
			u32 data, int *ret)
{
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	sdio_writel(sdiodev->func1, data, addr, &retval);

out:
	if (ret)
		*ret = retval;
}
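/* Single-skb transfers go through the regular sdio_memcpy_fromio()/
 * sdio_readsb() helpers. Reads on function 2 use sdio_readsb(), i.e. a
 * fixed (non-incrementing) register address, presumably because F2
 * presents frame data as a FIFO rather than addressable memory.
 */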
static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
				   struct sdio_func *func, u32 addr,
				   struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skb: use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	switch (func->num) {
	case 1:
		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
					 req_sz);
		break;
	case 2:
		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
		break;
	default:
		/* bail out as things are really fishy here */
		WARN(1, "invalid sdio function number: %d\n", func->num);
		err = -ENOMEDIUM;
	}

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}
static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
				    struct sdio_func *func, u32 addr,
				    struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skb: use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}
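/* The scatter-gather path below builds a raw CMD53 (IO_RW_EXTENDED)
 * request: bit 31 selects write, bits 30:28 the function number, bit 27
 * block mode, bit 26 an incrementing address (function 1 only), bits
 * 25:9 the register address and bits 8:0 the block count.
 */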
/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb queue pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, orig_offset, dst_offset;
	unsigned short max_seg_cnt, seg_sz;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff *pkt_next = NULL, *local_pkt_next;
	struct sk_buff_head local_list, *target_list;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);
	seg_sz = target_list->qlen;
	pkt_offset = 0;
	pkt_next = target_list->next;

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	while (seg_sz) {
		req_sz = 0;
		sg_cnt = 0;
		sgl = sdiodev->sgtable.sgl;
		/* prep sg table */
		while (pkt_next != (struct sk_buff *)target_list) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);

			sg_cnt++;
			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (pkt_offset == pkt_next->len) {
				pkt_offset = 0;
				pkt_next = pkt_next->next;
			}

			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
				break;
		}
		seg_sz -= sg_cnt;

		if (req_sz % func_blk_sz != 0) {
			brcmf_err("sg request length %u is not %u aligned\n",
				  req_sz, func_blk_sz);
			ret = -ENOTBLK;
			goto exit;
		}

		mmc_dat.sg_len = sg_cnt;
		mmc_dat.blocks = req_sz / func_blk_sz;
		mmc_cmd.arg |= (addr & 0x1FFFF) << 9;	/* address */
		mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;	/* block count */
		/* incrementing addr for function 1 */
		if (func->num == 1)
			addr += req_sz;

		mmc_set_data_timeout(&mmc_dat, func->card);
		mmc_wait_for_req(func->card->host, &mmc_req);

		ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
		if (ret == -ENOMEDIUM) {
			brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
			break;
		} else if (ret != 0) {
			brcmf_err("CMD53 sg block %s failed %d\n",
				  write ? "write" : "read", ret);
			ret = -EIO;
			break;
		}
	}

	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		local_pkt_next = local_list.next;
		orig_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;
			do {
				req_sz = local_pkt_next->len - orig_offset;
				req_sz = min_t(uint, pkt_next->len - dst_offset,
					       req_sz);
				orig_data = local_pkt_next->data + orig_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);
				orig_offset += req_sz;
				dst_offset += req_sz;
				if (orig_offset == local_pkt_next->len) {
					orig_offset = 0;
					local_pkt_next = local_pkt_next->next;
				}
				if (dst_offset == pkt_next->len)
					break;
			} while (!skb_queue_empty(&local_list));
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}
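/* Receive helpers: brcmf_sdiod_recv_buf() bounces through a temporary skb,
 * brcmf_sdiod_recv_pkt() reads a single frame from function 2, and
 * brcmf_sdiod_recv_chain() handles glommed frames either skb by skb,
 * through a bounce buffer when the host lacks scatter-gather support,
 * or via brcmf_sdiod_sglist_rw().
 */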
int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}
int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);

done:
	return err;
}
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      pktq->next);
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
					    addr, pktq);

done:
	brcmu_pkt_buf_free_skb(glom_skb);
	return err;
}
int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->cc_core->base;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);

	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
out:
	brcmu_pkt_buf_free_skb(mypkt);

	return err;
}
int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1 || !sdiodev->sg_support) {
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
						       addr, skb);
			if (err)
				break;
		}
	} else {
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
					    addr, pktq);
	}

	return err;
}
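/* brcmf_sdiod_ramrw() transfers an arbitrarily sized buffer to or from
 * dongle RAM over function 1, splitting the transfer into chunks that fit
 * the 32 KiB backplane window (SBSDIO_SB_OFT_ADDR_LIMIT) and moving the
 * window between chunks.
 */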
int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int err = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func1);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
		if (err)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);

		if (write) {
			memcpy(pkt->data, data, dsize);
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
						       sdaddr, pkt);
		} else {
			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
						      sdaddr, pkt);
		}

		if (err) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	sdio_release_host(sdiodev->func1);

	return err;
}
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
	brcmf_dbg(SDIO, "Enter\n");

	/* Issue abort cmd52 command through F0 */
	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}
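/* Derive the scatter-gather limits from the host controller capabilities
 * and preallocate an sg table sized for the configured tx/rx glom depth.
 */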
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func2;
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}
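/* Suspend/resume freezer: brcmf_sdiod_freezer_on() flags the bus as
 * freezing, kicks the DPC and waits until every registered bus thread has
 * parked in brcmf_sdiod_try_freeze(), then puts the chip to sleep.
 * brcmf_sdiod_freezer_off() wakes the chip and releases the threads.
 */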
#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
	}
}

static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func1);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func1);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func1);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func1);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func2);
	sdio_disable_func(sdiodev->func2);
	sdio_release_host(sdiodev->func2);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func1);
	sdio_disable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func1->card->host->parent);
	return 0;
}
static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}
static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;

	sdio_claim_host(sdiodev->func1);

	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	}
	ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	}

	/* increase F2 timeout */
	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}
#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
						  int val)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = 0;
#endif
}
static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;
	struct device *dev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	dev = &func->dev;

	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* prohibit ACPI power management for this device */
	brcmf_sdiod_acpi_set_power_manageable(dev, 0);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func1 = func->card->sdio_func[0];
	sdiodev->func2 = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
	sdiodev->dev = &sdiodev->func1->dev;

	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func1->dev, NULL);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func1->dev, NULL);
		dev_set_drvdata(&sdiodev->func2->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}
void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
	sdiodev->wowl_enabled = enabled;
}
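/* System suspend acts on function 1 only: it keeps the card powered
 * (MMC_PM_KEEP_POWER) and, when wowl is enabled, arms either the OOB
 * interrupt or the SDIO in-band interrupt as a wake source. Resume is
 * handled on function 2 and simply thaws the freezer.
 */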
#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	brcmf_sdiod_freezer_on(sdiodev);
	brcmf_sdio_wd_timer(sdiodev->bus, 0);

	sdio_flags = MMC_PM_KEEP_POWER;
	if (sdiodev->wowl_enabled) {
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
	}
	if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
		brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
	return 0;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 2)
		return 0;

	brcmf_sdiod_freezer_off(sdiodev);
	return 0;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
#endif	/* CONFIG_PM_SLEEP */
static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif	/* CONFIG_PM_SLEEP */
		.coredump = brcmf_dev_coredump,
	},
};

void brcmf_sdio_register(void)
{
	int ret;

	ret = sdio_register_driver(&brcmf_sdmmc_driver);
	if (ret)
		brcmf_err("sdio_register_driver failed: %d\n", ret);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}