// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "gbphy.h"

struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};

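/*
 * host->lock serializes the mmc callbacks, the request work and removal,
 * while the xfer spinlock only guards xfer_stop, so a stop-transmission
 * request can interrupt an ongoing transfer loop (see gb_sdio_transfer()).
 */
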
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* kernel vdd starts at 0x80 and we need to translate to greybus ones 0x01 */
#define GB_SDIO_VDD_SHIFT	8

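/*
 * e.g. ios->vdd carries a bit index into the kernel OCR mask (whose lowest
 * voltage bit is 0x80); gb_mmc_set_ios() subtracts this shift to rebase the
 * index onto the greybus vdd mask, whose lowest bit is 0x01.
 */
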
#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif

static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}

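/*
 * Note: MMC_WRITE_BLOCK and MMC_READ_SINGLE_BLOCK transfer exactly one
 * block, which is why gb_sdio_transfer() uses single_op() above to reject
 * multi-block requests that carry these opcodes.
 */
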
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps;
	u32 caps2;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
	       ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
	       ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
	       ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
	       ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
	       ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}

static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}

static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	size_t data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
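	/*
	 * Both the transfer request and response headers must fit in the
	 * same operation payload as the data, so reserve room for the
	 * larger of the two.
	 */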
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}

static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}

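/*
 * Events that arrive while host->removed is set (e.g. during probe, before
 * mmc_add_host() completes) are queued here and later replayed by
 * gb_sdio_probe() through _gb_sdio_process_events().
 */
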
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}

static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}

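/*
 * A greybus transfer operation carries a gb_sdio_transfer_request header
 * followed by the raw block data; the module answers with the block count
 * and block size it actually moved, which must match the requested length.
 */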
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}

static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know at command time data details */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}

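/*
 * Requests are handed off to a single-threaded workqueue (created with
 * max_active == 1 in gb_sdio_probe()), so the blocking greybus operations
 * below run one mmc_request at a time, outside the mmc core's context.
 */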
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}

static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a cancel to ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}

static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

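/*
 * gb_mmc_switch_voltage() can simply succeed: the signalling voltage itself
 * is forwarded to the module by gb_mmc_set_ios() as part of the set_ios
 * request.
 */
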
static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};

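/*
 * Probe enables only the TX path at first, so the capabilities exchange can
 * run before unsolicited events are accepted; the full connection is enabled
 * just before mmc_add_host(), and any events that race with registration
 * stay queued until host->removed is cleared.
 */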
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we make a map 1:1 between max request and segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}

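/*
 * Teardown mirrors probe: mark the host as removed so new events are only
 * queued, drain the request workqueue, stop RX before removing the mmc
 * host, then shut down and destroy the connection.
 */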
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}

static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");