// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "gbphy.h"
struct gb_sdio_host {
	struct gb_connection *connection;
	struct gbphy_device *gbphy_dev;
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct mutex lock;	/* lock for this host */
	size_t data_max;
	spinlock_t xfer;	/* lock to cancel ongoing transfer */
	bool xfer_stop;
	struct workqueue_struct *mrq_workqueue;
	struct work_struct mrqwork;
	u8 queued_events;
	bool removed;
	bool card_present;
	bool read_only;
};

#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* Kernel vdd bits start at 0x80; translate them to the Greybus ones, which start at 0x01 */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif
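
/*
 * Note: with the fallback above, MMC_CAP2_CORE_RUNTIME_PM is 0 on kernels
 * that do not provide it, so OR-ing it into caps2 in
 * _gb_sdio_set_host_caps() is then a no-op.
 */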

static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}

static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps = 0;
	u32 caps2 = 0;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
	       ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
	       ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
	       ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
	       ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}

static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}

static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));
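	/*
	 * Note: data_max is now the payload room left after the larger of
	 * the transfer request/response headers, i.e. the biggest data
	 * chunk that fits a single Greybus transfer in either direction.
	 */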

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}

static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}

static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}

static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}

static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
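	/*
	 * Note: the shift below assumes the Greybus data flags mirror the
	 * upper byte of the kernel MMC_DATA_* flags.
	 */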
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;
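
	/*
	 * Note: the transfer is split into chunks of at most data_max bytes,
	 * each rounded down to a whole number of blocks, so one mmc request
	 * may become several Greybus transfer operations.
	 */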
	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}

static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}

static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}

static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if this is a request to cancel an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);
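
	/*
	 * Note: the request is completed asynchronously; gb_sdio_mrq_work()
	 * issues the blocking Greybus operations and then calls
	 * mmc_request_done().
	 */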

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}

static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			   GB_SDIO_BUSMODE_OPENDRAIN :
			   GB_SDIO_BUSMODE_PUSHPULL;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request = gb_mmc_request,
	.set_ios = gb_mmc_set_ios,
	.get_ro = gb_mmc_get_ro,
	.get_cd = gb_mmc_get_cd,
	.start_signal_voltage_switch = gb_mmc_switch_voltage,
};

static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;
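
	/*
	 * Note: the host starts out as "removed" so that events arriving
	 * before mmc_add_host() completes are only queued; they are replayed
	 * through _gb_sdio_process_events() once the host is registered
	 * further below.
	 */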

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now, map the max request size 1:1 onto the max segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}

static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}

static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name = "sdio",
	.probe = gb_sdio_probe,
	.remove = gb_sdio_remove,
	.id_table = gb_sdio_id_table,
};
module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");