Linux 4.19.133 - drivers/staging/greybus/sdio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "gbphy.h"

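/*
 * Per-connection host state. The mutex serializes request handling, set_ios
 * and the removal path; the xfer spinlock only protects xfer_stop, used to
 * cancel a chunked transfer in flight; queued_events buffers card events that
 * arrive while 'removed' is set.
 */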
struct gb_sdio_host {
	struct gb_connection *connection;
	struct gbphy_device *gbphy_dev;
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct mutex lock;	/* lock for this host */
	size_t data_max;
	spinlock_t xfer;	/* lock to cancel ongoing transfer */
	bool xfer_stop;
	struct workqueue_struct *mrq_workqueue;
	struct work_struct mrqwork;
	u8 queued_events;
	bool removed;
	bool card_present;
	bool read_only;
};

#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* kernel vdd starts at 0x80 and we need to translate to greybus ones 0x01 */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif

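/* Commands that address exactly one block; gb_sdio_transfer() uses this to
 * reject multi-block requests issued with a single-block opcode.
 */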
static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}

static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps = 0;
	u32 caps2 = 0;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
		((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
		((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
		((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
		((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
		((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
		((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}

static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}

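/*
 * Query the module for its capabilities and derive the mmc_host limits from
 * them. data_max is the per-operation data budget: the larger of the two
 * transfer headers is subtracted from the maximum payload size so a full
 * data chunk always fits in one Greybus message.
 */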
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}

static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}

static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}

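/*
 * Handler for unsolicited requests from the module; only GB_SDIO_TYPE_EVENT
 * is accepted. Events that arrive while the host is not registered (or is
 * being removed) are queued and replayed later from probe.
 */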
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}

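/*
 * Push one chunk of a write to the module. The request carries the block
 * geometry of the chunk plus the data itself, copied out of the scatterlist
 * at offset 'skip'. data_flags forwards the upper byte of data->flags, which
 * is assumed here to match the Greybus wire encoding of the data direction.
 */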
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

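/*
 * Split a data transfer into chunks of at most host->data_max bytes (rounded
 * down to a whole number of blocks) and issue one Greybus transfer operation
 * per chunk. A pending MMC_STOP_TRANSMISSION (signalled via xfer_stop)
 * aborts the loop with -EINTR.
 */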
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}

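/*
 * Translate an mmc_command into a Greybus command operation: map the
 * response and command types to their GB_SDIO_* counterparts, send it
 * synchronously (honouring the command's busy timeout) and copy the
 * response words back into cmd->resp.
 */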
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}

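/*
 * Workqueue handler that runs a complete mmc_request: sbc, the command
 * itself, any data transfer and the stop command, in that order, and then
 * completes the request with mmc_request_done().
 */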
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}

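/*
 * .request host op. Requests are deferred to the mrq workqueue (one active
 * work item at a time); MMC_STOP_TRANSMISSION additionally sets xfer_stop so
 * an in-flight chunked transfer can bail out early.
 */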
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a cancel of an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}

static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request = gb_mmc_request,
	.set_ios = gb_mmc_set_ios,
	.get_ro = gb_mmc_get_ro,
	.get_cd = gb_mmc_get_cd,
	.start_signal_voltage_switch = gb_mmc_switch_voltage,
};

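/*
 * Probe: allocate the mmc host, bring the connection up in TX-only mode to
 * fetch the capabilities, then enable it fully and register with the MMC
 * core. Card events queued while 'removed' was set are processed at the end.
 */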
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we make a 1:1 map between max request size and segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}

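/*
 * Remove: mark the host as removed under the lock so new requests fail with
 * -ESHUTDOWN, drain and destroy the request workqueue, then tear down the
 * connection and unregister the mmc host.
 */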
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}

static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name = "sdio",
	.probe = gb_sdio_probe,
	.remove = gb_sdio_remove,
	.id_table = gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");