drivers/mfd/rtsx_pcr.c

/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rtsx_pci.h>
#include <asm/unaligned.h>

#include "rtsx_pcr.h"

static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
	[RTSX_MS_CARD] = {
		.name = DRV_NAME_RTSX_PCI_MS,
	},
};

static const struct pci_device_id rtsx_pci_ids[] = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);

static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
{
	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
		0xFC, pcr->aspm_en);
}

static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
	rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
		0xFC, 0);
}

void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);

		if (pcr->aspm_en)
			rtsx_pci_disable_aspm(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
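
/*
 * Single-register access goes through the HAIMR register: pack the start
 * bit, the 14-bit register address, the mask and the data into one 32-bit
 * word, write it to RTSX_HAIMR and poll until the hardware clears the
 * transfer-end bit.  For writes, the byte read back must match the data
 * that was written.
 */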
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);

int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
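
/*
 * PHY registers are not memory mapped: the 16-bit value and 8-bit address
 * are staged in PHYDATA0/1 and PHYADDR through the host command queue,
 * the transaction is triggered via PHYRWCTL, and bit 7 of PHYRWCTL is
 * polled until the PHY access completes.
 */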
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	int err, i, finished = 0;
	u8 tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	return 0;
}

int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	if (pcr->ops->write_phy)
		return pcr->ops->write_phy(pcr, addr, val);

	return __rtsx_pci_write_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);

int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	int err, i, finished = 0;
	u16 data;
	u8 *ptr, tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	ptr = rtsx_pci_get_cmd_data(pcr);
	data = ((u16)ptr[1] << 8) | ptr[0];

	if (val)
		*val = data;

	return 0;
}

int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	if (pcr->ops->read_phy)
		return pcr->ops->read_phy(pcr, addr, val);

	return __rtsx_pci_read_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
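
/*
 * Abort an in-flight operation: issue STOP_CMD/STOP_DMA to the command
 * and data engines, then set bit 7 of DMACTL and RBCTL to reset the DMA
 * and ring-buffer logic.
 */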
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
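
/*
 * Queue one register access in the host command buffer.  Each entry is a
 * little-endian 32-bit word: bits 31:30 command type, bits 29:16 register
 * address, bits 15:8 mask, bits 7:0 data.
 */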
void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	unsigned long flags;
	u32 val = 0;
	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irqsave(&pcr->lock, flags);
	ptr += pcr->ci;
	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
		put_unaligned_le32(val, ptr);
		ptr++;
		pcr->ci++;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
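
/*
 * Kick off the queued command buffer.  The no-wait variant just programs
 * HCBAR/HCBCTLR and returns; rtsx_pci_send_cmd() additionally sleeps on a
 * completion signalled by the ISR and maps the hardware result to
 * -EINVAL, -ENODEV or -ETIMEDOUT.
 */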
void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);

int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
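
/*
 * Append one entry to the scatter-gather table: a little-endian 64-bit
 * descriptor with the DMA address in the upper 32 bits, the length
 * shifted left by 12, and option flags (SG_VALID, SG_TRANS_DATA, plus
 * SG_END on the final segment) in the low bits.
 */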
static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = SG_VALID | SG_TRANS_DATA;

	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);

	if (end)
		option |= SG_END;
	val = ((u64)addr << 32) | ((u64)len << 12) | option;

	put_unaligned_le64(val, ptr);
	pcr->sgi++;
}

int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
{
	int err = 0, count;

	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
	if (count < 1)
		return -EINVAL;
	pcr_dbg(pcr, "DMA mapping count: %d\n", count);

	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);

	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);

int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (pcr->remove_pci)
		return -EINVAL;

	if ((sglist == NULL) || (num_sg <= 0))
		return -EINVAL;

	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);

void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
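
/*
 * Run a full ADMA transfer: build the SG table from the already-mapped
 * scatterlist, point HDBAR at it, trigger the transfer through HDBCTLR
 * and wait (interruptible, with timeout) for the ISR to report the
 * result.
 */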
int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int count, bool read, int timeout)
{
	struct completion trans_done;
	struct scatterlist *sg;
	dma_addr_t addr;
	long timeleft;
	unsigned long flags;
	unsigned int len;
	int i, err = 0;
	u32 val;
	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;

	if (pcr->remove_pci)
		return -ENODEV;

	if ((sglist == NULL) || (count < 1))
		return -EINVAL;

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
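
/*
 * The ping-pong buffer is accessed as a register window starting at
 * PPBUF_BASE2.  Transfers are capped at 512 bytes and go through the
 * host command queue in batches of at most 256 single-register accesses.
 */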
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;

		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
		ptr += 256;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);

int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);

static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
	rtsx_pci_init_cmd(pcr);

	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	return rtsx_pci_send_cmd(pcr, 100);
}

int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_enable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_enable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);

int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_disable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_disable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);

static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}

static inline u8 double_ssc_depth(u8 depth)
{
	return ((depth > 1) ? (depth - 1) : depth);
}

static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}
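
/*
 * Reprogram the internal SSC clock for the requested card clock (in Hz).
 * The divider N is derived from the clock in MHz (or via the chip's
 * conv_clk_and_div_n hook), doubled while it falls below MIN_DIV_N_PCR
 * with the pre-divider bumped to compensate, and the SSC depth is scaled
 * to match.  In initial mode the card-side divider is forced to 128 so
 * the card sees roughly 250 kHz.
 */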
int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 n, clk_divider, mcu_cnt, div;
	u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	if (initial_mode) {
		/* We use 250k(around) here, in initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	card_clock /= 1000000;
	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
		clk, pcr->cur_clock);

	if (clk == pcr->cur_clock)
		return 0;

	if (pcr->ops->conv_clk_and_div_n)
		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
	else
		n = (u8)(clk - 2);
	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
	div = CLK_DIV_1;
	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
		if (pcr->ops->conv_clk_and_div_n) {
			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
					DIV_N_TO_CLK) * 2;
			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
					CLK_TO_DIV_N);
		} else {
			n = (n + 2) * 2 - 2;
		}
		div++;
	}
	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);

	ssc_depth = revise_ssc_depth(ssc_depth, div);
	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}

	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait SSC clock stable */
	udelay(10);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;
	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);

int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_on)
		return pcr->ops->card_power_on(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);

int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_off)
		return pcr->ops->card_power_off(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);

int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
{
	unsigned int cd_mask[] = {
		[RTSX_SD_CARD] = SD_EXIST,
		[RTSX_MS_CARD] = MS_EXIST
	};

	if (!(pcr->flags & PCR_MS_PMOS)) {
		/* When using single PMOS, accessing card is not permitted
		 * if the existing card is not the designated one.
		 */
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);

int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
	if (pcr->ops->switch_output_voltage)
		return pcr->ops->switch_output_voltage(pcr, voltage);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);

unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
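
/*
 * Delayed work scheduled by the ISR: pick up the insert/remove bits the
 * interrupt handler latched, optionally run the chip's card-detect
 * deglitch hook, update pcr->card_exist and notify the SD/MS slot
 * drivers through their card_event callbacks.
 */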
static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
			card_inserted, card_removed);

		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}

	mutex_unlock(&pcr->pcr_mutex);

	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}
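
/*
 * Interrupt handler: read and acknowledge RTSX_BIPR, latch SD/MS card
 * insert/remove events for the delayed card-detect work, complete any
 * waiter on transfer done/fail, and bail out if the register reads back
 * all ones.
 */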
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	if (pcr->card_inserted || pcr->card_removed)
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	dev_info(&(pcr->pci->dev), "%s: pcr->msi_en = %d, pci->irq = %d\n",
			__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
			pcr->msi_en ? 0 : IRQF_SHARED,
			DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	if (pcr->aspm_en)
		rtsx_pci_enable_aspm(pcr);

	mutex_unlock(&pcr->pcr_mutex);
}

#ifdef CONFIG_PM
static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);

	if (pcr->ops->force_power_down)
		pcr->ops->force_power_down(pcr, pm_state);
}
#endif
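
/*
 * Common hardware bring-up shared by probe and resume: power on the SSC
 * clock block, run the chip-specific optimize_phy/extra_init_hw hooks,
 * and program the baseline register set in one command batch.
 * pcr->card_exist is initialized here because no card-detect interrupt
 * fires for a card that is already inserted when the driver loads.
 */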
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	int err;

	pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	rtsx_pci_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 *                1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	/* Enable clk_request_n to enable clock power management */
	rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
	/* Enter L1 when host tx idle */
	rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}
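
/*
 * Per-chip setup: pick the init_params routine that matches the PCI
 * device ID, allocate the slot array, let the chip fetch its vendor
 * settings from config space, then run the common hardware init.
 */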
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
			pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
			pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
			pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}
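
/*
 * Probe: enable the PCI device, map BAR 0 (BAR 1 on the 0x525A part),
 * carve the coherent DMA buffer into the host command and SG table
 * areas, hook up the IRQ (MSI if available), initialize the chip and
 * finally register the SD/MS sub-devices as MFD cells.
 */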
static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap_nocache(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_handle;
	}

	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto disable_irq;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	pcr->remove_pci = true;

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

#ifdef CONFIG_PM

static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}

static int rtsx_pci_resume(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		goto out;
	pci_set_master(pcidev);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

	pci_disable_device(pcidev);
}

#else /* CONFIG_PM */

#define rtsx_pci_suspend NULL
#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL

#endif /* CONFIG_PM */

static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.suspend = rtsx_pci_suspend,
	.resume = rtsx_pci_resume,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");