// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "gbphy.h"
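
/*
 * Per-connection host state.  ->lock serializes request submission and
 * host-state changes; ->xfer only protects the ->xfer_stop flag used to
 * cancel an ongoing data transfer.
 */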
struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* kernel vdd bitmask starts at 0x80; translate it to the greybus one (0x01) */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif
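
/*
 * MMC_READ_SINGLE_BLOCK (CMD17) and MMC_WRITE_BLOCK (CMD24) transfer
 * exactly one block, so requests using them must never be split into
 * multiple chunks (see gb_sdio_transfer()).
 */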
static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}
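
/* Translate the module's Greybus capability bits into MMC core caps. */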
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps = 0;
	u32 caps2 = 0;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
	       ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
	       ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
	       ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
	       ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}
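
/* Translate Greybus OCR voltage-range bits into the MMC core VDD mask. */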
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}
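
/*
 * Fetch the module capabilities and derive the host limits from them.
 * The advertised block size is additionally capped by the Greybus
 * operation payload size, since each chunk of data must fit in a single
 * transfer request/response.
 */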
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}
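
/*
 * Until the host is fully up (->removed is set), card events are only
 * queued; insert and remove cancel each other out so that just the most
 * recent state is replayed at the end of probe.
 */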
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}
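
/* Apply a card event to the mmc host, ignoring no-op transitions. */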
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}
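
/* Handler for unsolicited SDIO event requests sent by the module. */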
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}
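
/* Issue a set_ios request with the module runtime-resumed for its duration. */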
static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}
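
/*
 * Write out one chunk of data: copy it from the scatterlist into the
 * request payload at offset @skip and verify that the module consumed
 * exactly the number of blocks that were sent.
 */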
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
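
/*
 * Read in one chunk of data: the mirror image of _gb_sdio_send(), with
 * the payload copied from the response back into the scatterlist.
 */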
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
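
/*
 * Split a data transfer into chunks of at most host->data_max bytes
 * (rounded down to whole blocks), checking before each chunk whether a
 * stop transmission request has cancelled the transfer.
 */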
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	return ret;
}
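
/*
 * Map the MMC core response and command types onto a Greybus command
 * request and issue it synchronously, honouring the command's busy
 * timeout when one is set.
 */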
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	return ret;
}
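
/*
 * Workqueue handler that runs a queued mmc_request: sbc, cmd, data and
 * stop are issued in order and mmc_request_done() is called exactly
 * once, whatever the outcome.
 */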
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}
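
/* mmc_host_ops .request: hand the request over to the workqueue. */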
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a cancel to ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
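
/* mmc_host_ops .set_ios: translate struct mmc_ios into a Greybus set_ios request. */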
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			   GB_SDIO_BUSMODE_OPENDRAIN :
			   GB_SDIO_BUSMODE_PUSHPULL;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}
static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}
static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}
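
/*
 * The signal-voltage switch is carried out by the module as part of
 * set_ios, so this callback only needs to report success.
 */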
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}
static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};
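
/*
 * Probe brings the connection up in two stages: TX-only first, so that
 * the capabilities can be queried before unsolicited events are
 * processed, then fully enabled once the workqueue exists.
 */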
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we make a map 1:1 between max request and segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}
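
/*
 * Teardown mirrors probe: mark the host removed under the lock so new
 * requests fail with -ESHUTDOWN, stop RX before removing the mmc host,
 * then tear the connection down completely.
 */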
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}
static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);
static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_DESCRIPTION("SD/MMC Greybus driver");
MODULE_LICENSE("GPL v2");