// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "gbphy.h"
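/*
 * This driver exposes a remote SDIO controller, reachable over a Greybus
 * connection, as a regular mmc_host. Commands and data transfers from the
 * MMC core are translated into synchronous Greybus SDIO operations, while
 * unsolicited events from the module (card insert/remove, write protect)
 * arrive through the connection's request handler.
 */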
struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
/* kernel vdd starts at 0x80 and we need to translate to greybus ones 0x01 */
#define GB_SDIO_VDD_SHIFT	8
#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif
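/*
 * Single-block read/write commands must not be split across several Greybus
 * transfers; gb_sdio_transfer() uses this check to reject requests that pair
 * a single-block opcode with more than one block.
 */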
static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps;
	u32 caps2;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
		((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
		((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
		((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
		((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
		((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
		((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}
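/*
 * Fetch the remote controller's capabilities and translate them into
 * mmc_host fields. The per-transfer data limit is bounded by the Greybus
 * operation payload: whatever remains after the larger of the transfer
 * request/response headers is what a single data chunk may carry.
 */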
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	size_t data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}
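/*
 * Card events that arrive while the host is marked removed (e.g. before
 * mmc_add_host() has run in probe) are queued here and replayed later;
 * insert and remove cancel each other so only the latest state survives.
 */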
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}
static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}
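/*
 * Send one chunk of a write: the relevant slice of the scatterlist is copied
 * into the operation's request payload, and the response is checked to
 * confirm the remote end accepted exactly the number of bytes we sent.
 */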
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
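/*
 * Split a data transfer into data_max-sized chunks of whole blocks. Before
 * each chunk, check under the xfer spinlock whether an MMC_STOP_TRANSMISSION
 * issued through gb_mmc_request() asked us to cancel the ongoing transfer.
 */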
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}
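/*
 * All MMC requests are handed off to this work item by gb_mmc_request(), so
 * the synchronous Greybus round trips run in workqueue context rather than
 * in the MMC core's caller context. The sequence mirrors an mmc_request:
 * sbc, cmd, data, then stop, aborting at the first failure.
 */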
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check whether this is a cancel of an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			   GB_SDIO_BUSMODE_OPENDRAIN :
			   GB_SDIO_BUSMODE_PUSHPULL;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}
static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}
static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}
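/*
 * The signal voltage is carried in the set_ios request and negotiated by the
 * remote controller, so this callback only needs to report success to the
 * MMC core.
 */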
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}
static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};
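/*
 * Probe brings the connection up in two stages: TX-only while the
 * capabilities are fetched and the mmc_host is configured, then full duplex
 * once the request handler can safely run. Events that arrive before
 * mmc_add_host() completes are queued and replayed afterwards.
 */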
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we map max request size 1:1 to segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}
static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);
static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");