drivers/staging/greybus/sdio.c

/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "gbphy.h"

struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};

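/*
 * Greybus response-flag combinations corresponding to the standard MMC
 * response classes (R1/R5/R6/R7, R3/R4, R2 and R1B).
 */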
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* the kernel vdd mask starts at 0x80 and needs to be translated to the greybus one starting at 0x01 */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif

static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}

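/* Translate Greybus capability bits from GET_CAPABILITIES into mmc caps/caps2 */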
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps = 0;
	u32 caps2 = 0;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
	       ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
	       ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
	       ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
	       ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
	       ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
	       ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
	       ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
	       ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
	       ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}

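/* Translate the Greybus OCR voltage window bitmask into MMC_VDD_* bits */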
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}

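/*
 * Query the module for its capabilities, OCR mask and frequency range, and
 * size the mmc_host limits to what fits in a single Greybus payload.
 */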
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}

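/*
 * Events that arrive while host->removed is set (e.g. before mmc_add_host()
 * has completed during probe) are queued here and replayed once the host is
 * registered.
 */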
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}

static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP) {
		host->read_only = true;
	}

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}

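/* Handle unsolicited SDIO event operations (card insert/remove, write protect) */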
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}

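/*
 * Send one chunk of a write transfer: copy up to data_max bytes from the
 * request scatterlist into a Greybus transfer operation and check that the
 * module acknowledged the same number of blocks and block size.
 */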
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

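/*
 * Receive one chunk of a read transfer: issue a Greybus transfer operation
 * and copy up to data_max bytes from the response back into the scatterlist.
 */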
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

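/*
 * Perform a full data transfer by splitting it into chunks that fit within
 * host->data_max, honouring a pending stop-transmission request.
 */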
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}

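/*
 * Translate an mmc_command (response class, command type, argument) into a
 * Greybus command operation and copy the command response back.
 */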
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}

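/*
 * Workqueue handler that runs a queued mmc_request: sbc, command, data
 * transfer and stop command, then completes the request.
 */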
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}

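/*
 * mmc_host .request callback: the request is stashed in host->mrq and handed
 * off to the mrq workqueue, where the Greybus operations are issued.
 */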
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if this is a request to cancel an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}

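/* mmc_host .set_ios callback: translate struct mmc_ios into a Greybus SET_IOS request */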
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};

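/*
 * Bind a Greybus SDIO CPort: create the connection, query capabilities,
 * set up the request workqueue and register the mmc host.
 */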
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we map the maximum request size 1:1 to the segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}

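/* Unbind: mark the host as removed, drain the workqueue and tear everything down */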
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}

static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");