/*
 *  linux/drivers/mmc/core/mmc_ops.h
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "mmc_ops.h"
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        int err;
        struct mmc_command cmd;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}
int mmc_select_card(struct mmc_card *card)
{
        return _mmc_select_card(card->host, card);
}
int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}
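/*
 * SELECT/DESELECT_CARD is CMD7: selecting addresses one card by placing its
 * RCA in the upper 16 bits of the argument and expects an R1 response, while
 * deselecting broadcasts RCA 0 and gets no response at all.  That is why
 * _mmc_select_card() switches the argument and response flags on a NULL card.
 */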
int mmc_card_sleepawake(struct mmc_host *host, int sleep)
{
        struct mmc_command cmd;
        struct mmc_card *card = host->card;
        int err;

        if (sleep)
                mmc_deselect_cards(host);

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SLEEP_AWAKE;
        cmd.arg = card->rca << 16;
        if (sleep)
                cmd.arg |= 1 << 15;

        cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
                return err;

        /*
         * If the host does not wait while the card signals busy, then we will
         * have to wait the sleep/awake timeout.  Note, we cannot use the
         * SEND_STATUS command to poll the status because that command (and most
         * others) is invalid while the card sleeps.
         */
        if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
                mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));

        if (!sleep)
                err = mmc_select_card(card);

        return err;
}
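/*
 * A suspend/resume path might park the card with this helper roughly as
 * follows (sketch only; the actual call sites live elsewhere in the core):
 *
 *	err = mmc_card_sleepawake(host, 1);	// CMD5: enter Sleep state
 *	...
 *	err = mmc_card_sleepawake(host, 0);	// CMD5: wake up and re-select
 */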
int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd;

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode.  Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}
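/*
 * The combined MMC_RSP_SPI_R1 | MMC_RSP_NONE flags above are not a
 * contradiction: the core selects the SPI variant when the host is in SPI
 * mode and the native variant otherwise, so one command descriptor serves
 * both transports.  Clearing use_spi_crc reflects that after CMD0 the SPI
 * card is back to its default, CRC-off behaviour until CMD59 re-enables it.
 */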
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd;
        int i, err = 0;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}
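/*
 * The calling convention mirrors how the core probes for MMC: an ocr of 0
 * asks for a single CMD1 pass just to read back the card's OCR, while a
 * non-zero ocr keeps retrying until the card clears its busy bit.  A probe
 * sequence might look roughly like:
 *
 *	u32 ocr;
 *	err = mmc_send_op_cond(host, 0, &ocr);			// query voltages
 *	err = mmc_send_op_cond(host, ocr | (1 << 30), &ocr);	// start init, sector addressing
 */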
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
        int err;
        struct mmc_command cmd;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cid, cmd.resp, sizeof(u32) * 4);

        return 0;
}
int mmc_set_relative_addr(struct mmc_card *card)
{
        int err;
        struct mmc_command cmd;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        return 0;
}
static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}
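/*
 * "cxd" is shorthand for CSD-or-CID: on a native (non-SPI) bus both
 * registers come back in a 136-bit R2 response, so this helper simply copies
 * the four response words into the caller's buffer.  SPI hosts instead go
 * through mmc_send_cxd_data() below, which reads the register as a data
 * block.
 */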
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        struct scatterlist sg;
        void *data_buf;

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (data_buf == NULL)
                return -ENOMEM;

        memset(&mrq, 0, sizeof(struct mmc_request));
        memset(&cmd, 0, sizeof(struct mmc_command));
        memset(&data, 0, sizeof(struct mmc_data));

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID.  Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, data_buf, len);

        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else
                mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(host, &mrq);

        memcpy(buf, data_buf, len);
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}
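/*
 * The kmalloc()ed bounce buffer above is what makes the "dma onto stack"
 * comment work in practice: the scatterlist always points at heap memory,
 * and the result is copied back into whatever buffer the caller passed,
 * even if that buffer lives on the caller's stack.
 */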
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;

        if (!mmc_host_is_spi(card->host))
                return mmc_send_cxd_native(card->host, card->rca << 16,
                                csd, MMC_SEND_CSD);

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd[i]);

        return 0;
}
int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;

        if (!mmc_host_is_spi(host)) {
                if (!host->card)
                        return -EINVAL;
                return mmc_send_cxd_native(host, host->card->rca << 16,
                                cid, MMC_SEND_CID);
        }

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid[i]);

        return 0;
}
int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
        return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
                        ext_csd, 512);
}
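/*
 * The EXT_CSD register is a full 512-byte data block, so callers are
 * expected to pass a buffer of at least that size; the core typically
 * kmalloc()s 512 bytes rather than reading into an on-stack array.
 */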
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}
int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}
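/*
 * use_spi_crc is only latched when the CMD59 exchange itself succeeded, so
 * the host's idea of whether CRCs are appended stays in sync with the card;
 * mmc_go_idle() clears the flag again on reset.
 */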
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value)
{
        int err;
        struct mmc_command cmd;
        u32 status;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* Must check status to be sure of no errors */
        do {
                err = mmc_send_status(card, &status);
                if (err)
                        return err;
                if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
                        break;
                if (mmc_host_is_spi(card->host))
                        break;
        } while (R1_CURRENT_STATE(status) == 7);

        if (mmc_host_is_spi(card->host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (status & 0xFDFFA000)
                        printk(KERN_WARNING "%s: unexpected status %#x after "
                               "switch\n", mmc_hostname(card->host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }

        return 0;
}
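/*
 * MMC_SWITCH (CMD6) writes a single EXT_CSD byte.  For example, switching a
 * card to a 4-bit bus is done elsewhere in the core roughly as:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 *			 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_4);
 */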
int mmc_send_status(struct mmc_card *card, u32 *status)
{
        int err;
        struct mmc_command cmd;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        *status = cmd.resp[0];

        return 0;
}
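/*
 * In native mode the returned word is the 32-bit R1 card status (current
 * state in bits 12:9, hence the R1_CURRENT_STATE() check in mmc_switch());
 * in SPI mode it is the 16-bit R2 status with a different bit layout, so
 * callers must check mmc_host_is_spi() before interpreting it, as
 * mmc_switch() does above.
 */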