/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)

static unsigned int debug_quirks = 0;

/* For the case of multiple controllers on one platform */
static u16 chip_index = 0;
static spinlock_t index_lock;

/*
 * Different quirks to handle when the hardware deviates from a strict
 * interpretation of the SDHCI specification.
 */

/* Controller doesn't honor resets unless we touch the clock register */
#define SDHCI_QUIRK_CLOCK_BEFORE_RESET			(1<<0)
/* Controller has bad caps bits, but really supports DMA */
#define SDHCI_QUIRK_FORCE_DMA				(1<<1)
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET			(1<<2)
/* Controller doesn't like clearing the power reg before a change */
#define SDHCI_QUIRK_SINGLE_POWER_WRITE			(1<<3)
/* Controller has flaky internal state so reset it on each ios change */
#define SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS		(1<<4)
/* Controller has an unusable DMA engine */
#define SDHCI_QUIRK_BROKEN_DMA				(1<<5)
/* Controller can only DMA from 32-bit aligned addresses */
#define SDHCI_QUIRK_32BIT_DMA_ADDR			(1<<6)
/* Controller can only DMA chunk sizes that are a multiple of 32 bits */
#define SDHCI_QUIRK_32BIT_DMA_SIZE			(1<<7)
/* Controller needs to be reset after each request to stay stable */
#define SDHCI_QUIRK_RESET_AFTER_REQUEST			(1<<8)
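
/*
 * Note: the quirk masks above are OR'd together in the driver_data field of
 * the PCI ID table below, and sdhci_probe() copies that mask into
 * chip->quirks. Setting the debug_quirks module parameter replaces the
 * per-device mask wholesale, which is meant for debugging rather than
 * normal use.
 */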

static const struct pci_device_id pci_ids[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_VENDOR_ID_IBM,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_CLOCK_BEFORE_RESET |
				  SDHCI_QUIRK_FORCE_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_RICOH,
		.device		= PCI_DEVICE_ID_RICOH_R5C822,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA |
				  SDHCI_QUIRK_NO_CARD_NO_RESET,
	},

	{
		.vendor		= PCI_VENDOR_ID_TI,
		.device		= PCI_DEVICE_ID_TI_XX21_XX11_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_FORCE_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_BROKEN_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB712_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_BROKEN_DMA,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
	},

	{
		.vendor		= PCI_VENDOR_ID_ENE,
		.device		= PCI_DEVICE_ID_ENE_CB714_SD_2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_SINGLE_POWER_WRITE |
				  SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS,
	},

	{
		.vendor		= PCI_VENDOR_ID_JMICRON,
		.device		= PCI_DEVICE_ID_JMICRON_JMB38X_SD,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.driver_data	= SDHCI_QUIRK_32BIT_DMA_ADDR |
				  SDHCI_QUIRK_32BIT_DMA_SIZE |
				  SDHCI_QUIRK_RESET_AFTER_REQUEST,
	},

	{	/* Generic SD host controller */
		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
	},

	{ /* end: all zeroes */ },
};

MODULE_DEVICE_TABLE(pci, pci_ids);

static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);
static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);

static void sdhci_dumpregs(struct sdhci_host *host)
{
	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");

	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
		readl(host->ioaddr + SDHCI_DMA_ADDRESS),
		readw(host->ioaddr + SDHCI_HOST_VERSION));
	printk(KERN_DEBUG DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		readw(host->ioaddr + SDHCI_BLOCK_SIZE),
		readw(host->ioaddr + SDHCI_BLOCK_COUNT));
	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
		readl(host->ioaddr + SDHCI_ARGUMENT),
		readw(host->ioaddr + SDHCI_TRANSFER_MODE));
	printk(KERN_DEBUG DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
		readl(host->ioaddr + SDHCI_PRESENT_STATE),
		readb(host->ioaddr + SDHCI_HOST_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
		readb(host->ioaddr + SDHCI_POWER_CONTROL),
		readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
		readb(host->ioaddr + SDHCI_WAKE_UP_CONTROL),
		readw(host->ioaddr + SDHCI_CLOCK_CONTROL));
	printk(KERN_DEBUG DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
		readb(host->ioaddr + SDHCI_TIMEOUT_CONTROL),
		readl(host->ioaddr + SDHCI_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
		readl(host->ioaddr + SDHCI_INT_ENABLE),
		readl(host->ioaddr + SDHCI_SIGNAL_ENABLE));
	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
		readw(host->ioaddr + SDHCI_ACMD12_ERR),
		readw(host->ioaddr + SDHCI_SLOT_INT_STATUS));
	printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n",
		readl(host->ioaddr + SDHCI_CAPABILITIES),
		readl(host->ioaddr + SDHCI_MAX_CURRENT));

	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
}

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT))
			return;
	}

	writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL)
		host->clock = 0;

	/* Wait max 100 ms */
	timeout = 100;

	/* hw clears the bit when it's done */
	while (readb(host->ioaddr + SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}
}

static void sdhci_init(struct sdhci_host *host)
{
	u32 intmask;

	sdhci_reset(host, SDHCI_RESET_ALL);

	intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
		SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT |
		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
		SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE;

	writel(intmask, host->ioaddr + SDHCI_INT_ENABLE);
	writel(intmask, host->ioaddr + SDHCI_SIGNAL_ENABLE);
}

static void sdhci_activate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}

static void sdhci_deactivate_led(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
	return sg_virt(host->cur_sg);
}

static inline int sdhci_next_sg(struct sdhci_host* host)
{
	/*
	 * Skip to next SG entry.
	 */
	host->cur_sg++;
	host->num_sg--;

	/*
	 * Any entries left?
	 */
	if (host->num_sg > 0) {
		host->offset = 0;
		host->remain = host->cur_sg->length;
	}

	return host->num_sg;
}
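
/*
 * PIO transfers move data 32 bits at a time through the SDHCI_BUFFER
 * register, least significant byte first, while cur_sg/offset/remain
 * track the current position in the scatterlist via the helpers above.
 */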

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int size;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk_remain = 0;
	data = 0;

	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		if (chunk_remain == 0) {
			data = readl(host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		while (size) {
			*buffer = data & 0xFF;
			buffer++;
			data >>= 8;
			size--;
		}

		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	int blksize, chunk_remain;
	u32 data;
	char *buffer;
	int bytes, size;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk_remain = 4;
	data = 0;

	bytes = 0;
	buffer = sdhci_sg_to_buffer(host) + host->offset;

	while (blksize) {
		size = min(host->remain, chunk_remain);

		chunk_remain -= size;
		blksize -= size;
		host->offset += size;
		host->remain -= size;

		while (size) {
			data >>= 8;
			data |= (u32)*buffer << 24;
			buffer++;
			size--;
		}

		if (chunk_remain == 0) {
			writel(data, host->ioaddr + SDHCI_BUFFER);
			chunk_remain = min(blksize, 4);
		}

		if (host->remain == 0) {
			if (sdhci_next_sg(host) == 0) {
				BUG_ON(blksize != 0);
				return;
			}
			buffer = sdhci_sg_to_buffer(host);
		}
	}
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	BUG_ON(!host->data);

	if (host->num_sg == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		if (host->num_sg == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
{
	u8 count;
	unsigned target_timeout, current_timeout;

	WARN_ON(host->data);

	if (data == NULL)
		return;

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;

	/* timeout in us */
	target_timeout = data->timeout_ns / 1000 +
		data->timeout_clks / host->clock;

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
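	/*
	 * Worked example (illustrative): with host->timeout_clk == 1000
	 * (a 1 MHz timeout clock) the minimum timeout is
	 * (1 << 13) * 1000 / 1000 = 8192 us; a 100 ms target then needs
	 * four doublings (8.2 -> 16.4 -> 32.8 -> 65.5 -> 131 ms), so
	 * count = 4 gets written to SDHCI_TIMEOUT_CONTROL below.
	 */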
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		printk(KERN_WARNING "%s: Too large timeout requested!\n",
			mmc_hostname(host->mmc));
		count = 0xE;
	}

	writeb(count, host->ioaddr + SDHCI_TIMEOUT_CONTROL);

	if (host->flags & SDHCI_USE_DMA)
		host->flags |= SDHCI_REQ_USE_DMA;

	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
		((data->blksz * data->blocks) & 0x3))) {
		DBG("Reverting to PIO because of transfer size (%d)\n",
			data->blksz * data->blocks);
		host->flags &= ~SDHCI_REQ_USE_DMA;
	}

	/*
	 * The assumption here being that alignment is the same after
	 * translation to device address space.
	 */
	if (unlikely((host->flags & SDHCI_REQ_USE_DMA) &&
		(host->chip->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
		(data->sg->offset & 0x3))) {
		DBG("Reverting to PIO because of bad alignment\n");
		host->flags &= ~SDHCI_REQ_USE_DMA;
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int count;

		count = pci_map_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
		BUG_ON(count != 1);

		writel(sg_dma_address(data->sg), host->ioaddr + SDHCI_DMA_ADDRESS);
	} else {
		host->cur_sg = data->sg;
		host->num_sg = data->sg_len;

		host->offset = 0;
		host->remain = host->cur_sg->length;
	}

	/* We do not handle DMA boundaries, so set it to max (512 KiB) */
	writew(SDHCI_MAKE_BLKSZ(7, data->blksz),
		host->ioaddr + SDHCI_BLOCK_SIZE);
	writew(data->blocks, host->ioaddr + SDHCI_BLOCK_COUNT);
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_data *data)
{
	u16 mode;

	if (data == NULL)
		return;

	WARN_ON(!host->data);

	mode = SDHCI_TRNS_BLK_CNT_EN;
	if (data->blocks > 1)
		mode |= SDHCI_TRNS_MULTI;
	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	writew(mode, host->ioaddr + SDHCI_TRANSFER_MODE);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_data *data;
	u16 blocks;

	BUG_ON(!host->data);

	data = host->data;
	host->data = NULL;

	if (host->flags & SDHCI_REQ_USE_DMA) {
		pci_unmap_sg(host->chip->pdev, data->sg, data->sg_len,
			(data->flags & MMC_DATA_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
	}

	/*
	 * Controller doesn't count down when in single block mode.
	 */
	if (data->blocks == 1)
		blocks = (data->error == 0) ? 0 : 1;
	else
		blocks = readw(host->ioaddr + SDHCI_BLOCK_COUNT);
	data->bytes_xfered = data->blksz * (data->blocks - blocks);
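
	/*
	 * Example (illustrative): for an 8-block transfer of 512-byte blocks
	 * that ends with 3 blocks still outstanding, the block count register
	 * reads back 3 and bytes_xfered becomes 512 * (8 - 3) = 2560.
	 */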

	if (!data->error && blocks) {
		printk(KERN_ERR "%s: Controller signalled completion even "
			"though there were blocks left.\n",
			mmc_hostname(host->mmc));
		data->error = -EIO;
	}

	if (data->stop) {
		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);
		}

		sdhci_send_command(host, data->stop);
	} else
		tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (host->mrq->data && (cmd == host->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (readl(host->ioaddr + SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Controller never released "
				"inhibit bit(s).\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			tasklet_schedule(&host->finish_tasklet);
			return;
		}
		timeout--;
		mdelay(1);
	}

	mod_timer(&host->timer, jiffies + 10 * HZ);

	host->cmd = cmd;

	sdhci_prepare_data(host, cmd->data);

	writel(cmd->arg, host->ioaddr + SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd->data);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		printk(KERN_ERR "%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		tasklet_schedule(&host->finish_tasklet);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (cmd->data)
		flags |= SDHCI_CMD_DATA;

	writew(SDHCI_MAKE_CMD(cmd->opcode, flags),
		host->ioaddr + SDHCI_COMMAND);
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	int i;

	BUG_ON(host->cmd == NULL);

	if (host->cmd->flags & MMC_RSP_PRESENT) {
		if (host->cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
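			/*
			 * Each resp[] word is rebuilt from a 32-bit register
			 * read shifted up by one byte plus the adjacent byte
			 * below it, so the 120 response bits line up the way
			 * the MMC core expects them.
			 */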
			for (i = 0; i < 4; i++) {
				host->cmd->resp[i] = readl(host->ioaddr +
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					host->cmd->resp[i] |=
						readb(host->ioaddr +
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			host->cmd->resp[0] = readl(host->ioaddr + SDHCI_RESPONSE);
		}
	}

	host->cmd->error = 0;

	if (host->data && host->data_early)
		sdhci_finish_data(host);

	if (!host->cmd->data)
		tasklet_schedule(&host->finish_tasklet);

	host->cmd = NULL;
}

static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	u16 clk;
	unsigned long timeout;

	if (clock == host->clock)
		return;

	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		goto out;

	for (div = 1; div < 256; div *= 2) {
		if ((host->max_clk / div) <= clock)
			break;
	}
	div >>= 1;
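
	/*
	 * Example (illustrative): with a 48 MHz base clock and a requested
	 * 25 MHz card clock the loop above stops at div = 2 (48 / 2 <= 25),
	 * and div >>= 1 yields the value 1, which the divider field encodes
	 * as "base clock / 2" (24 MHz).
	 */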

	clk = div << SDHCI_DIVIDER_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

	/* Wait max 10 ms */
	timeout = 10;
	while (!((clk = readw(host->ioaddr + SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printk(KERN_ERR "%s: Internal clock never "
				"stabilised.\n", mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		timeout--;
		mdelay(1);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	writew(clk, host->ioaddr + SDHCI_CLOCK_CONTROL);

out:
	host->clock = clock;
}

static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr;

	if (host->power == power)
		return;

	if (power == (unsigned short)-1) {
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
		goto out;
	}

	/*
	 * Spec says that we should clear the power reg before setting
	 * a new value. Some controllers don't seem to like this though.
	 */
	if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
		writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);

	pwr = SDHCI_POWER_ON;
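
	/*
	 * "power" is the bit number of the selected MMC_VDD_* range (the
	 * ios->vdd value passed in from sdhci_set_ios()), so 1 << power
	 * recovers the voltage-range mask the switch below expects.
	 */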

	switch (1 << power) {
	case MMC_VDD_165_195:
		pwr |= SDHCI_POWER_180;
		break;
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
		pwr |= SDHCI_POWER_300;
		break;
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
		pwr |= SDHCI_POWER_330;
		break;
	default:
		BUG();
	}

	writeb(pwr, host->ioaddr + SDHCI_POWER_CONTROL);

out:
	host->power = power;
}

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	WARN_ON(host->mrq != NULL);

	sdhci_activate_led(host);

	host->mrq = mrq;

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		host->mrq->cmd->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	} else
		sdhci_send_command(host, mrq->cmd);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host;
	unsigned long flags;
	u8 ctrl;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		writel(0, host->ioaddr + SDHCI_SIGNAL_ENABLE);
		sdhci_init(host);
	}

	sdhci_set_clock(host, ios->clock);

	if (ios->power_mode == MMC_POWER_OFF)
		sdhci_set_power(host, -1);
	else
		sdhci_set_power(host, ios->vdd);

	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	else
		ctrl &= ~SDHCI_CTRL_4BITBUS;

	if (ios->timing == MMC_TIMING_SD_HS)
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->chip->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host;
	unsigned long flags;
	int present;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	present = readl(host->ioaddr + SDHCI_PRESENT_STATE);

	spin_unlock_irqrestore(&host->lock, flags);

	return !(present & SDHCI_WRITE_PROTECT);
}

static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host;
	unsigned long flags;
	u32 ier;

	host = mmc_priv(mmc);

	spin_lock_irqsave(&host->lock, flags);

	ier = readl(host->ioaddr + SDHCI_INT_ENABLE);

	ier &= ~SDHCI_INT_CARD_INT;
	if (enable)
		ier |= SDHCI_INT_CARD_INT;

	writel(ier, host->ioaddr + SDHCI_INT_ENABLE);
	writel(ier, host->ioaddr + SDHCI_SIGNAL_ENABLE);

	mmiowb();

	spin_unlock_irqrestore(&host->lock, flags);
}

static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.set_ios	= sdhci_set_ios,
	.get_ro		= sdhci_get_ro,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
};

/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/

static void sdhci_tasklet_card(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
		if (host->mrq) {
			printk(KERN_ERR "%s: Card removed during transfer!\n",
				mmc_hostname(host->mmc));
			printk(KERN_ERR "%s: Resetting controller.\n",
				mmc_hostname(host->mmc));

			sdhci_reset(host, SDHCI_RESET_CMD);
			sdhci_reset(host, SDHCI_RESET_DATA);

			host->mrq->cmd->error = -ENOMEDIUM;
			tasklet_schedule(&host->finish_tasklet);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}

static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host;
	unsigned long flags;
	struct mmc_request *mrq;

	host = (struct sdhci_host *)param;

	spin_lock_irqsave(&host->lock, flags);

	del_timer(&host->timer);

	mrq = host->mrq;

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (mrq->cmd->error ||
		(mrq->data && (mrq->data->error ||
		(mrq->data->stop && mrq->data->stop->error))) ||
		(host->chip->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {

		/* Some controllers need this kick or reset won't work here */
		if (host->chip->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) {
			unsigned int clock;

			/* This is to force an update */
			clock = host->clock;
			host->clock = 0;
			sdhci_set_clock(host, clock);
		}

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_reset(host, SDHCI_RESET_CMD);
		sdhci_reset(host, SDHCI_RESET_DATA);
	}

	host->mrq = NULL;
	host->cmd = NULL;
	host->data = NULL;

	sdhci_deactivate_led(host);

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);
}

static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		printk(KERN_ERR "%s: Timeout waiting for hardware "
			"interrupt.\n", mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->mrq->cmd->error = -ETIMEDOUT;

			tasklet_schedule(&host->finish_tasklet);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

/*****************************************************************************\
 *                                                                           *
 * Interrupt handling                                                        *
 *                                                                           *
\*****************************************************************************/

static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->cmd) {
		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
			"though no command operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & SDHCI_INT_TIMEOUT)
		host->cmd->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
			SDHCI_INT_INDEX))
		host->cmd->error = -EILSEQ;

	if (host->cmd->error)
		tasklet_schedule(&host->finish_tasklet);
	else if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}

static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	BUG_ON(intmask == 0);

	if (!host->data) {
		/*
		 * A data end interrupt is sent together with the response
		 * for the stop command.
		 */
		if (intmask & SDHCI_INT_DATA_END)
			return;

		printk(KERN_ERR "%s: Got data interrupt 0x%08x even "
			"though no data operation was in progress.\n",
			mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		host->data->error = -EILSEQ;

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 */
		if (intmask & SDHCI_INT_DMA_END)
			writel(readl(host->ioaddr + SDHCI_DMA_ADDRESS),
				host->ioaddr + SDHCI_DMA_ADDRESS);

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}

static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result;
	struct sdhci_host *host = dev_id;
	u32 intmask;
	int cardint = 0;

	spin_lock(&host->lock);

	intmask = readl(host->ioaddr + SDHCI_INT_STATUS);

	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	DBG("*** %s got interrupt: 0x%08x\n", host->slot_descr, intmask);

	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		writel(intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE),
			host->ioaddr + SDHCI_INT_STATUS);
		tasklet_schedule(&host->card_tasklet);
	}

	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);

	if (intmask & SDHCI_INT_CMD_MASK) {
		writel(intmask & SDHCI_INT_CMD_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
	}

	if (intmask & SDHCI_INT_DATA_MASK) {
		writel(intmask & SDHCI_INT_DATA_MASK,
			host->ioaddr + SDHCI_INT_STATUS);
		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
	}

	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);

	intmask &= ~SDHCI_INT_ERROR;

	if (intmask & SDHCI_INT_BUS_POWER) {
		printk(KERN_ERR "%s: Card is consuming too much power!\n",
			mmc_hostname(host->mmc));
		writel(SDHCI_INT_BUS_POWER, host->ioaddr + SDHCI_INT_STATUS);
	}

	intmask &= ~SDHCI_INT_BUS_POWER;

	if (intmask & SDHCI_INT_CARD_INT)
		cardint = 1;

	intmask &= ~SDHCI_INT_CARD_INT;

	if (intmask) {
		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
			mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);

		writel(intmask, host->ioaddr + SDHCI_INT_STATUS);
	}

	result = IRQ_HANDLED;

	mmiowb();
out:
	spin_unlock(&host->lock);

	/*
	 * We have to delay this as it calls back into the driver.
	 */
	if (cardint)
		mmc_signal_sdio_irq(host->mmc);

	return result;
}

/*****************************************************************************\
 *                                                                           *
 * Suspend/resume                                                            *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

static int sdhci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Suspending...\n");

	for (i = 0; i < chip->num_slots; i++) {
		if (!chip->hosts[i])
			continue;
		ret = mmc_suspend_host(chip->hosts[i]->mmc, state);
		if (ret) {
			for (i--; i >= 0; i--)
				mmc_resume_host(chip->hosts[i]->mmc);
			return ret;
		}
	}

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

	for (i = 0; i < chip->num_slots; i++) {
		if (!chip->hosts[i])
			continue;
		free_irq(chip->hosts[i]->irq, chip->hosts[i]);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int sdhci_resume(struct pci_dev *pdev)
{
	struct sdhci_chip *chip;
	int i, ret;

	chip = pci_get_drvdata(pdev);
	if (!chip)
		return 0;

	DBG("Resuming...\n");

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	for (i = 0; i < chip->num_slots; i++) {
		if (!chip->hosts[i])
			continue;
		if (chip->hosts[i]->flags & SDHCI_USE_DMA)
			pci_set_master(pdev);
		ret = request_irq(chip->hosts[i]->irq, sdhci_irq,
			IRQF_SHARED, chip->hosts[i]->slot_descr,
			chip->hosts[i]);
		if (ret)
			return ret;
		sdhci_init(chip->hosts[i]);
		mmiowb();
		ret = mmc_resume_host(chip->hosts[i]->mmc);
		if (ret)
			return ret;
	}

	return 0;
}

#else /* CONFIG_PM */

#define sdhci_suspend NULL
#define sdhci_resume NULL

#endif /* CONFIG_PM */

/*****************************************************************************\
 *                                                                           *
 * Device probing/removal                                                    *
 *                                                                           *
\*****************************************************************************/

static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
{
	int ret;
	unsigned int version;
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	u8 first_bar;
	unsigned int caps;

	chip = pci_get_drvdata(pdev);
	BUG_ON(!chip);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &first_bar);
	if (ret)
		return ret;

	first_bar &= PCI_SLOT_INFO_FIRST_BAR_MASK;
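
	/*
	 * In the SDHCI PCI register layout, slot N's registers live in BAR
	 * (first_bar + N) and occupy a 256-byte window; the checks below
	 * reject layouts this driver cannot handle.
	 */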

	if (first_bar > 5) {
		printk(KERN_ERR DRIVER_NAME ": Invalid first BAR. Aborting.\n");
		return -ENODEV;
	}

	if (!(pci_resource_flags(pdev, first_bar + slot) & IORESOURCE_MEM)) {
		printk(KERN_ERR DRIVER_NAME ": BAR is not iomem. Aborting.\n");
		return -ENODEV;
	}

	if (pci_resource_len(pdev, first_bar + slot) != 0x100) {
		printk(KERN_ERR DRIVER_NAME ": Invalid iomem size. "
			"You may experience problems.\n");
	}

	if ((pdev->class & 0x0000FF) == PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Vendor specific interface. Aborting.\n");
		return -ENODEV;
	}

	if ((pdev->class & 0x0000FF) > PCI_SDHCI_IFVENDOR) {
		printk(KERN_ERR DRIVER_NAME ": Unknown interface. Aborting.\n");
		return -ENODEV;
	}

	mmc = mmc_alloc_host(sizeof(struct sdhci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->chip = chip;
	chip->hosts[slot] = host;

	host->bar = first_bar + slot;

	host->addr = pci_resource_start(pdev, host->bar);
	host->irq = pdev->irq;

	DBG("slot %d at 0x%08lx, irq %d\n", slot, host->addr, host->irq);

	snprintf(host->slot_descr, 20, "sdhc%d:slot%d", chip->index, slot);

	ret = pci_request_region(pdev, host->bar, host->slot_descr);
	if (ret)
		goto free;

	host->ioaddr = ioremap_nocache(host->addr,
		pci_resource_len(pdev, host->bar));
	if (!host->ioaddr) {
		ret = -ENOMEM;
		goto release;
	}

	sdhci_reset(host, SDHCI_RESET_ALL);

	version = readw(host->ioaddr + SDHCI_HOST_VERSION);
	version = (version & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
	if (version > 1) {
		printk(KERN_ERR "%s: Unknown controller version (%d). "
			"You may experience problems.\n", host->slot_descr,
			version);
	}

	caps = readl(host->ioaddr + SDHCI_CAPABILITIES);

	if (chip->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_DMA;
	else if (!(caps & SDHCI_CAN_DO_DMA))
		DBG("Controller doesn't have DMA capability\n");
	else
		host->flags |= SDHCI_USE_DMA;

	if ((chip->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_DMA;
	}

	if (((pdev->class & 0x0000FF) != PCI_SDHCI_IFDMA) &&
		(host->flags & SDHCI_USE_DMA)) {
		printk(KERN_WARNING "%s: Will use DMA "
			"mode even though HW doesn't fully "
			"claim to support it.\n", host->slot_descr);
	}

	if (host->flags & SDHCI_USE_DMA) {
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
			printk(KERN_WARNING "%s: No suitable DMA available. "
				"Falling back to PIO.\n", host->slot_descr);
			host->flags &= ~SDHCI_USE_DMA;
		}
	}

	if (host->flags & SDHCI_USE_DMA)
		pci_set_master(pdev);
	else /* XXX: Hack to get MMC layer to avoid highmem */
		pdev->dma_mask = 0;

	host->max_clk =
		(caps & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT;
	if (host->max_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify base clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	host->max_clk *= 1000000;

	host->timeout_clk =
		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;
	if (host->timeout_clk == 0) {
		printk(KERN_ERR "%s: Hardware doesn't specify timeout clock "
			"frequency.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}
	if (caps & SDHCI_TIMEOUT_CLK_UNIT)
		host->timeout_clk *= 1000;

	/*
	 * Set host parameters.
	 */
	mmc->ops = &sdhci_ops;
	mmc->f_min = host->max_clk / 256;
	mmc->f_max = host->max_clk;
	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MULTIWRITE | MMC_CAP_SDIO_IRQ;

	if (caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	mmc->ocr_avail = 0;
	if (caps & SDHCI_CAN_VDD_330)
		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		mmc->ocr_avail |= MMC_VDD_165_195;

	if (mmc->ocr_avail == 0) {
		printk(KERN_ERR "%s: Hardware doesn't report any "
			"supported voltages.\n", host->slot_descr);
		ret = -ENODEV;
		goto unmap;
	}

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Hardware cannot do scatter lists.
	 */
	if (host->flags & SDHCI_USE_DMA)
		mmc->max_hw_segs = 1;
	else
		mmc->max_hw_segs = 16;
	mmc->max_phys_segs = 16;

	/*
	 * Maximum number of sectors in one transfer. Limited by DMA boundary
	 * size (512KiB).
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (mmc->max_blk_size >= 3) {
		printk(KERN_WARNING "%s: Invalid maximum block size, assuming 512\n",
			host->slot_descr);
		mmc->max_blk_size = 512;
	} else
		mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = 65535;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->card_tasklet,
		sdhci_tasklet_card, (unsigned long)host);
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);

	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
		host->slot_descr, host);
	if (ret)
		goto untasklet;

	sdhci_init(host);

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	mmiowb();

	mmc_add_host(mmc);

	printk(KERN_INFO "%s: SDHCI at 0x%08lx irq %d %s\n", mmc_hostname(mmc),
		host->addr, host->irq,
		(host->flags & SDHCI_USE_DMA) ? "DMA" : "PIO");

	return 0;

untasklet:
	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);
unmap:
	iounmap(host->ioaddr);
release:
	pci_release_region(pdev, host->bar);
free:
	mmc_free_host(mmc);

	return ret;
}

static void sdhci_remove_slot(struct pci_dev *pdev, int slot)
{
	struct sdhci_chip *chip;
	struct mmc_host *mmc;
	struct sdhci_host *host;

	chip = pci_get_drvdata(pdev);
	host = chip->hosts[slot];
	mmc = host->mmc;

	chip->hosts[slot] = NULL;

	mmc_remove_host(mmc);

	sdhci_reset(host, SDHCI_RESET_ALL);

	free_irq(host->irq, host);

	del_timer_sync(&host->timer);

	tasklet_kill(&host->card_tasklet);
	tasklet_kill(&host->finish_tasklet);

	iounmap(host->ioaddr);

	pci_release_region(pdev, host->bar);

	mmc_free_host(mmc);
}

static int __devinit sdhci_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int ret, i;
	u8 slots, rev;
	struct sdhci_chip *chip;

	BUG_ON(pdev == NULL);
	BUG_ON(ent == NULL);

	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);

	printk(KERN_INFO DRIVER_NAME
		": SDHCI controller found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
		(int)rev);

	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
	if (ret)
		return ret;

	slots = PCI_SLOT_INFO_SLOTS(slots) + 1;
	DBG("found %d slot(s)\n", slots);
	if (slots == 0)
		return -ENODEV;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	chip = kzalloc(sizeof(struct sdhci_chip) +
		sizeof(struct sdhci_host *) * slots, GFP_KERNEL);
	if (!chip) {
		ret = -ENOMEM;
		goto err;
	}

	chip->pdev = pdev;
	chip->quirks = ent->driver_data;

	if (debug_quirks)
		chip->quirks = debug_quirks;

	chip->num_slots = slots;
	pci_set_drvdata(pdev, chip);

	/* Assign a unique controller index for the multi-controller case */
	spin_lock(&index_lock);
	chip->index = chip_index++;
	spin_unlock(&index_lock);

	for (i = 0; i < slots; i++) {
		ret = sdhci_probe_slot(pdev, i);
		if (ret) {
			for (i--; i >= 0; i--)
				sdhci_remove_slot(pdev, i);
			goto free;
		}
	}

	return 0;

free:
	pci_set_drvdata(pdev, NULL);
	kfree(chip);

err:
	pci_disable_device(pdev);
	return ret;
}

static void __devexit sdhci_remove(struct pci_dev *pdev)
{
	int i;
	struct sdhci_chip *chip;

	chip = pci_get_drvdata(pdev);

	if (chip) {
		for (i = 0; i < chip->num_slots; i++)
			sdhci_remove_slot(pdev, i);

		pci_set_drvdata(pdev, NULL);

		kfree(chip);
	}

	pci_disable_device(pdev);
}

static struct pci_driver sdhci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= pci_ids,
	.probe		= sdhci_probe,
	.remove		= __devexit_p(sdhci_remove),
	.suspend	= sdhci_suspend,
	.resume		= sdhci_resume,
};

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	printk(KERN_INFO DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	spin_lock_init(&index_lock);

	return pci_register_driver(&sdhci_driver);
}

static void __exit sdhci_drv_exit(void)
{
	DBG("Exiting\n");

	pci_unregister_driver(&sdhci_driver);
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
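
/*
 * Example (illustrative): loading the module with "debug_quirks=0x10"
 * replaces the PCI-ID based quirk mask with SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS
 * (bit 4) for every controller, which can help when bisecting quirk issues.
 */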

MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");