// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "gbphy.h"
struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* kernel vdd bits start at 0x80; translate to the greybus ones, which start at 0x01 */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif
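
/*
 * CMD17 (MMC_READ_SINGLE_BLOCK) and CMD24 (MMC_WRITE_BLOCK) move exactly one
 * data block by definition, so gb_sdio_transfer() uses this helper to reject
 * requests that pair one of these opcodes with a multi-block transfer.
 */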
static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}
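
/*
 * Translate the capability bits reported in the module's GET_CAPABILITIES
 * response into mmc_host caps/caps2 flags. Editorial note, inferred from the
 * #ifndef guard above: MMC_CAP2_CORE_RUNTIME_PM is unconditionally ORed in
 * and compiles to a no-op on kernels that do not define that capability.
 */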
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps;
	u32 caps2;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
	       ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
	       ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
	       ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
	       ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));
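
	/*
	 * Worked example (illustrative numbers, assuming a 5-byte transfer
	 * request header): a 2048-byte max operation payload leaves
	 * data_max = 2043, so a module-reported max_blk_size of 4096 gets
	 * clamped to 2043 below and then rounded down to a 1024-byte
	 * power-of-two block size.
	 */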
	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}
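
/*
 * Unsolicited event handler. While the host is marked removed (e.g. during
 * probe, before mmc_add_host() completes) events are only queued via
 * _gb_queue_event(); gb_sdio_probe() replays them once the host is live.
 */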
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}
static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}
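
/*
 * One Greybus transfer operation carries the data flags (the upper byte of
 * mmc_data->flags), a block count and block size, followed by the payload
 * bytes copied out of the scatterlist at offset 'skip'. The response echoes
 * the block count and size so the driver can verify what the module
 * actually transferred.
 */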
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
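
/*
 * Split a data transfer into operations no larger than host->data_max,
 * always a whole number of blocks per operation. A pending CMD12 (stop
 * transmission) flagged through host->xfer_stop aborts the loop between
 * chunks with -EINTR.
 */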
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;
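
		/*
		 * Worked example (illustrative numbers): with
		 * host->data_max = 2043 and blksz = 512, each pass caps len
		 * at 2043, truncates to nblocks = 3 whole blocks and sends
		 * len = 1536 bytes, advancing skip into the scatterlist
		 * until left reaches zero.
		 */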
		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}
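
/*
 * Map a struct mmc_command onto a Greybus command operation. For example,
 * CMD17 (READ_SINGLE_BLOCK) expects an R1 reply and carries data, so it is
 * sent with cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7 and cmd_type =
 * GB_SDIO_CMD_ADTC.
 */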
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}
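
/*
 * Workqueue handler that runs one mmc_request end to end, in the order the
 * MMC core expects: the optional set-block-count (sbc), the main command,
 * the data transfer, then the optional stop command. The first failure
 * short-circuits to done and completes the request with that error.
 */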
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL\n");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}
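
/*
 * .request entry point. Greybus operations are synchronous and potentially
 * slow, so the request is pushed to mrq_workqueue and completed
 * asynchronously via mmc_request_done(). CMD12 is special-cased up front:
 * xfer_stop is raised under the xfer spinlock so an in-flight
 * gb_sdio_transfer() loop can notice it and abort.
 */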
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if this is a request to cancel an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			   GB_SDIO_BUSMODE_OPENDRAIN :
			   GB_SDIO_BUSMODE_PUSHPULL;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}
static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}
static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};
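
/*
 * Probe sequence note: the connection is first enabled for TX only, so the
 * capabilities can be fetched before any unsolicited event can arrive; full
 * (RX) enablement happens only after the workqueue exists, and events that
 * slipped in while host->removed was set are replayed right after
 * mmc_add_host().
 */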
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we map the maximum request size 1:1 to the segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}
static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");