/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "gbphy.h"

struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};

#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* kernel vdd bits start at 0x80; shift down to the greybus ones at 0x01 */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif

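/*
 * Commands that carry a single data block must not be issued as a
 * multi-block transfer; single_op() identifies those opcodes.
 */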
static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}

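/*
 * Translate the capability bits reported by the module into the mmc
 * core's MMC_CAP_* and MMC_CAP2_* host flags.
 */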
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps;
	u32 caps2;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
	       ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
	       ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
	       ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
	       ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
	       ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}

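/*
 * Translate the greybus OCR voltage-range bits into the mmc core's
 * MMC_VDD_* mask.
 */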
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}

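/*
 * Fetch the module capabilities, OCR mask and frequency range, and
 * clamp the block size so that a whole transfer request (and its
 * response) still fits in the maximum greybus payload.
 */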
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	size_t data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;

	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}

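/*
 * Events may arrive before the mmc host is registered; they are queued
 * and replayed at probe time. An insertion cancels a queued removal
 * and vice versa, so only the latest card state survives.
 */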
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}

static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}

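/* Handler for unsolicited SDIO event messages sent by the module. */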
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	int ret = 0;
	u8 type = op->type;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	/* queue events received before the mmc host is up */
	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}

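/*
 * Write one chunk: copy up to data_max bytes from the scatterlist into
 * the request payload and verify the module accepted the full length.
 */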
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

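/*
 * Read one chunk: the module returns the data in the transfer
 * response, which is copied back into the scatterlist.
 */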
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

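/*
 * Split an mmc data transfer into chunks of at most data_max bytes,
 * checking between chunks whether a stop transmission was requested.
 */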
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}

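/*
 * Translate an mmc_command into a greybus command request, issue it
 * synchronously and unpack the short or long (R2) response.
 */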
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}

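/*
 * Work item that executes one queued mmc request: sbc, command, data
 * transfer and stop are issued in order, bailing out on the first
 * error.
 */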
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}

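/*
 * mmc_host_ops .request callback: the request is handed off to the
 * single-threaded workqueue. A stop transmission is flagged here too,
 * so an ongoing transfer can cancel itself between chunks.
 */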
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a request to cancel the ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);
	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}

static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};

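/*
 * Probe: allocate the mmc host, create and enable the greybus
 * connection, size the host from the module capabilities, register
 * with the mmc core and replay any events queued in the meantime.
 */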
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we make a map 1:1 between max request and segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}

static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}

static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");