drivers/misc/cardreader/rtsx_pcr.c
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 */
22 #include <linux/pci.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/highmem.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/idr.h>
30 #include <linux/platform_device.h>
31 #include <linux/mfd/core.h>
32 #include <linux/rtsx_pci.h>
33 #include <linux/mmc/card.h>
34 #include <asm/unaligned.h>
36 #include "rtsx_pcr.h"
38 static bool msi_en = true;
39 module_param(msi_en, bool, S_IRUGO | S_IWUSR);
40 MODULE_PARM_DESC(msi_en, "Enable MSI");
42 static DEFINE_IDR(rtsx_pci_idr);
43 static DEFINE_SPINLOCK(rtsx_pci_lock);
45 static struct mfd_cell rtsx_pcr_cells[] = {
46 [RTSX_SD_CARD] = {
47 .name = DRV_NAME_RTSX_PCI_SDMMC,
49 [RTSX_MS_CARD] = {
50 .name = DRV_NAME_RTSX_PCI_MS,
54 static const struct pci_device_id rtsx_pci_ids[] = {
55 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
56 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
57 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
58 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
59 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
60 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
61 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
62 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
63 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
64 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
65 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
66 { 0, }
69 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
71 static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
73 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
74 0xFC, pcr->aspm_en);
77 static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
79 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
80 0xFC, 0);
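/*
 * Write the requested LTR latency, byte by byte, into the MSGTXDATA
 * registers and switch LTR_CTL to software-controlled latency mode with
 * LTR transmission enabled.
 */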
83 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
85 rtsx_pci_write_register(pcr, MSGTXDATA0,
86 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
87 rtsx_pci_write_register(pcr, MSGTXDATA1,
88 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
89 rtsx_pci_write_register(pcr, MSGTXDATA2,
90 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
91 rtsx_pci_write_register(pcr, MSGTXDATA3,
92 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
93 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
94 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
96 return 0;
99 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
101 if (pcr->ops->set_ltr_latency)
102 return pcr->ops->set_ltr_latency(pcr, latency);
103 else
104 return rtsx_comm_set_ltr_latency(pcr, latency);
107 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
109 struct rtsx_cr_option *option = &pcr->option;
111 if (pcr->aspm_enabled == enable)
112 return;
114 if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
115 if (enable)
116 rtsx_pci_enable_aspm(pcr);
117 else
118 rtsx_pci_disable_aspm(pcr);
119 } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
120 u8 mask = FORCE_ASPM_VAL_MASK;
121 u8 val = 0;
123 if (enable)
124 val = pcr->aspm_en;
125 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
128 pcr->aspm_enabled = enable;
131 static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
133 if (pcr->ops->set_aspm)
134 pcr->ops->set_aspm(pcr, false);
135 else
136 rtsx_comm_set_aspm(pcr, false);
139 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
141 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
143 return 0;
146 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
148 if (pcr->ops->set_l1off_cfg_sub_d0)
149 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
152 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
154 struct rtsx_cr_option *option = &pcr->option;
156 rtsx_disable_aspm(pcr);
158 if (option->ltr_enabled)
159 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
161 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
162 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
165 static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
167 if (pcr->ops->full_on)
168 pcr->ops->full_on(pcr);
169 else
170 rtsx_comm_pm_full_on(pcr);
173 void rtsx_pci_start_run(struct rtsx_pcr *pcr)
175 /* If pci device removed, don't queue idle work any more */
176 if (pcr->remove_pci)
177 return;
179 if (pcr->state != PDEV_STAT_RUN) {
180 pcr->state = PDEV_STAT_RUN;
181 if (pcr->ops->enable_auto_blink)
182 pcr->ops->enable_auto_blink(pcr);
183 rtsx_pm_full_on(pcr);
186 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
188 EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
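/*
 * Internal register writes go through the HAIMR mailbox: address, mask
 * and data are packed into one 32-bit word, written to RTSX_HAIMR, and
 * the transfer-end bit is polled until the controller has latched the
 * value (the low byte is read back and compared against the requested data).
 */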
190 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
192 int i;
193 u32 val = HAIMR_WRITE_START;
195 val |= (u32)(addr & 0x3FFF) << 16;
196 val |= (u32)mask << 8;
197 val |= (u32)data;
199 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
201 for (i = 0; i < MAX_RW_REG_CNT; i++) {
202 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
203 if ((val & HAIMR_TRANS_END) == 0) {
204 if (data != (u8)val)
205 return -EIO;
206 return 0;
210 return -ETIMEDOUT;
212 EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
214 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
216 u32 val = HAIMR_READ_START;
217 int i;
219 val |= (u32)(addr & 0x3FFF) << 16;
220 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
222 for (i = 0; i < MAX_RW_REG_CNT; i++) {
223 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
224 if ((val & HAIMR_TRANS_END) == 0)
225 break;
228 if (i >= MAX_RW_REG_CNT)
229 return -ETIMEDOUT;
231 if (data)
232 *data = (u8)(val & 0xFF);
234 return 0;
236 EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
238 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
240 int err, i, finished = 0;
241 u8 tmp;
243 rtsx_pci_init_cmd(pcr);
245 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
246 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
247 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
248 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
250 err = rtsx_pci_send_cmd(pcr, 100);
251 if (err < 0)
252 return err;
254 for (i = 0; i < 100000; i++) {
255 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
256 if (err < 0)
257 return err;
259 if (!(tmp & 0x80)) {
260 finished = 1;
261 break;
265 if (!finished)
266 return -ETIMEDOUT;
268 return 0;
271 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
273 if (pcr->ops->write_phy)
274 return pcr->ops->write_phy(pcr, addr, val);
276 return __rtsx_pci_write_phy_register(pcr, addr, val);
278 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
280 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
282 int err, i, finished = 0;
283 u16 data;
284 u8 *ptr, tmp;
286 rtsx_pci_init_cmd(pcr);
288 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
289 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
291 err = rtsx_pci_send_cmd(pcr, 100);
292 if (err < 0)
293 return err;
295 for (i = 0; i < 100000; i++) {
296 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
297 if (err < 0)
298 return err;
300 if (!(tmp & 0x80)) {
301 finished = 1;
302 break;
306 if (!finished)
307 return -ETIMEDOUT;
309 rtsx_pci_init_cmd(pcr);
311 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
312 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
314 err = rtsx_pci_send_cmd(pcr, 100);
315 if (err < 0)
316 return err;
318 ptr = rtsx_pci_get_cmd_data(pcr);
319 data = ((u16)ptr[1] << 8) | ptr[0];
321 if (val)
322 *val = data;
324 return 0;
327 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
329 if (pcr->ops->read_phy)
330 return pcr->ops->read_phy(pcr, addr, val);
332 return __rtsx_pci_read_phy_register(pcr, addr, val);
334 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
336 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
338 if (pcr->ops->stop_cmd)
339 return pcr->ops->stop_cmd(pcr);
341 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
342 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
344 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
345 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
347 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
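/*
 * Queue one command into the host command buffer.  Each entry is a
 * little-endian 32-bit word encoding command type, register address,
 * mask and data; pcr->ci counts the queued entries and is bounded by
 * the size of the command buffer.
 */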
349 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
350 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
352 unsigned long flags;
353 u32 val = 0;
354 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
356 val |= (u32)(cmd_type & 0x03) << 30;
357 val |= (u32)(reg_addr & 0x3FFF) << 16;
358 val |= (u32)mask << 8;
359 val |= (u32)data;
361 spin_lock_irqsave(&pcr->lock, flags);
362 ptr += pcr->ci;
363 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
364 put_unaligned_le32(val, ptr);
365 ptr++;
366 pcr->ci++;
368 spin_unlock_irqrestore(&pcr->lock, flags);
370 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
372 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
374 u32 val = 1 << 31;
376 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
378 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
379 /* Hardware Auto Response */
380 val |= 0x40000000;
381 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
383 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
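/*
 * Start the queued command buffer and sleep until the transfer-done
 * interrupt completes &trans_done or the timeout (in ms) expires.  On
 * any failure other than device removal, the pending command/DMA is
 * cancelled through rtsx_pci_stop_cmd().
 */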
385 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
387 struct completion trans_done;
388 u32 val = 1 << 31;
389 long timeleft;
390 unsigned long flags;
391 int err = 0;
393 spin_lock_irqsave(&pcr->lock, flags);
395 /* set up data structures for the wakeup system */
396 pcr->done = &trans_done;
397 pcr->trans_result = TRANS_NOT_READY;
398 init_completion(&trans_done);
400 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
402 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
403 /* Hardware Auto Response */
404 val |= 0x40000000;
405 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
407 spin_unlock_irqrestore(&pcr->lock, flags);
409 /* Wait for TRANS_OK_INT */
410 timeleft = wait_for_completion_interruptible_timeout(
411 &trans_done, msecs_to_jiffies(timeout));
412 if (timeleft <= 0) {
413 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
414 err = -ETIMEDOUT;
415 goto finish_send_cmd;
418 spin_lock_irqsave(&pcr->lock, flags);
419 if (pcr->trans_result == TRANS_RESULT_FAIL)
420 err = -EINVAL;
421 else if (pcr->trans_result == TRANS_RESULT_OK)
422 err = 0;
423 else if (pcr->trans_result == TRANS_NO_DEVICE)
424 err = -ENODEV;
425 spin_unlock_irqrestore(&pcr->lock, flags);
427 finish_send_cmd:
428 spin_lock_irqsave(&pcr->lock, flags);
429 pcr->done = NULL;
430 spin_unlock_irqrestore(&pcr->lock, flags);
432 if ((err < 0) && (err != -ENODEV))
433 rtsx_pci_stop_cmd(pcr);
435 if (pcr->finish_me)
436 complete(pcr->finish_me);
438 return err;
440 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
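/*
 * Append one entry to the ADMA scatter/gather table: the 32-bit DMA
 * address goes in the upper half of the 64-bit descriptor, the length
 * and the VALID/TRANS_DATA/END option bits in the lower half, stored
 * little-endian.
 */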
442 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
443 dma_addr_t addr, unsigned int len, int end)
445 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
446 u64 val;
447 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
449 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
451 if (end)
452 option |= RTSX_SG_END;
453 val = ((u64)addr << 32) | ((u64)len << 12) | option;
455 put_unaligned_le64(val, ptr);
456 pcr->sgi++;
459 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
460 int num_sg, bool read, int timeout)
462 int err = 0, count;
464 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
465 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
466 if (count < 1)
467 return -EINVAL;
468 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
470 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
472 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
474 return err;
476 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
478 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
479 int num_sg, bool read)
481 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
483 if (pcr->remove_pci)
484 return -EINVAL;
486 if ((sglist == NULL) || (num_sg <= 0))
487 return -EINVAL;
489 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
491 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
493 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
494 int num_sg, bool read)
496 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
498 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
500 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
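/*
 * Program the scatter/gather table and start an ADMA transfer in the
 * requested direction, then wait for completion or timeout.  DMA errors
 * increment pcr->dma_error_count, which rtsx_pci_switch_clock() uses to
 * back off the SDR104 card clock on RTS5227.
 */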
502 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
503 int count, bool read, int timeout)
505 struct completion trans_done;
506 struct scatterlist *sg;
507 dma_addr_t addr;
508 long timeleft;
509 unsigned long flags;
510 unsigned int len;
511 int i, err = 0;
512 u32 val;
513 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
515 if (pcr->remove_pci)
516 return -ENODEV;
518 if ((sglist == NULL) || (count < 1))
519 return -EINVAL;
521 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
522 pcr->sgi = 0;
523 for_each_sg(sglist, sg, count, i) {
524 addr = sg_dma_address(sg);
525 len = sg_dma_len(sg);
526 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
529 spin_lock_irqsave(&pcr->lock, flags);
531 pcr->done = &trans_done;
532 pcr->trans_result = TRANS_NOT_READY;
533 init_completion(&trans_done);
534 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
535 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
537 spin_unlock_irqrestore(&pcr->lock, flags);
539 timeleft = wait_for_completion_interruptible_timeout(
540 &trans_done, msecs_to_jiffies(timeout));
541 if (timeleft <= 0) {
542 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
543 err = -ETIMEDOUT;
544 goto out;
547 spin_lock_irqsave(&pcr->lock, flags);
548 if (pcr->trans_result == TRANS_RESULT_FAIL) {
549 err = -EILSEQ;
550 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
551 pcr->dma_error_count++;
554 else if (pcr->trans_result == TRANS_NO_DEVICE)
555 err = -ENODEV;
556 spin_unlock_irqrestore(&pcr->lock, flags);
558 out:
559 spin_lock_irqsave(&pcr->lock, flags);
560 pcr->done = NULL;
561 spin_unlock_irqrestore(&pcr->lock, flags);
563 if ((err < 0) && (err != -ENODEV))
564 rtsx_pci_stop_cmd(pcr);
566 if (pcr->finish_me)
567 complete(pcr->finish_me);
569 return err;
571 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
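/*
 * Read up to 512 bytes from the ping-pong buffer starting at
 * PPBUF_BASE2, batching at most 256 register reads per command
 * sequence.
 */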
573 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
575 int err;
576 int i, j;
577 u16 reg;
578 u8 *ptr;
580 if (buf_len > 512)
581 buf_len = 512;
583 ptr = buf;
584 reg = PPBUF_BASE2;
585 for (i = 0; i < buf_len / 256; i++) {
586 rtsx_pci_init_cmd(pcr);
588 for (j = 0; j < 256; j++)
589 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
591 err = rtsx_pci_send_cmd(pcr, 250);
592 if (err < 0)
593 return err;
595 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
596 ptr += 256;
599 if (buf_len % 256) {
600 rtsx_pci_init_cmd(pcr);
602 for (j = 0; j < buf_len % 256; j++)
603 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
605 err = rtsx_pci_send_cmd(pcr, 250);
606 if (err < 0)
607 return err;
610 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
612 return 0;
614 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
616 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
618 int err;
619 int i, j;
620 u16 reg;
621 u8 *ptr;
623 if (buf_len > 512)
624 buf_len = 512;
626 ptr = buf;
627 reg = PPBUF_BASE2;
628 for (i = 0; i < buf_len / 256; i++) {
629 rtsx_pci_init_cmd(pcr);
631 for (j = 0; j < 256; j++) {
632 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
633 reg++, 0xFF, *ptr);
634 ptr++;
637 err = rtsx_pci_send_cmd(pcr, 250);
638 if (err < 0)
639 return err;
642 if (buf_len % 256) {
643 rtsx_pci_init_cmd(pcr);
645 for (j = 0; j < buf_len % 256; j++) {
646 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
647 reg++, 0xFF, *ptr);
648 ptr++;
651 err = rtsx_pci_send_cmd(pcr, 250);
652 if (err < 0)
653 return err;
656 return 0;
658 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
660 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
662 rtsx_pci_init_cmd(pcr);
664 while (*tbl & 0xFFFF0000) {
665 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
666 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
667 tbl++;
670 return rtsx_pci_send_cmd(pcr, 100);
673 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
675 const u32 *tbl;
677 if (card == RTSX_SD_CARD)
678 tbl = pcr->sd_pull_ctl_enable_tbl;
679 else if (card == RTSX_MS_CARD)
680 tbl = pcr->ms_pull_ctl_enable_tbl;
681 else
682 return -EINVAL;
684 return rtsx_pci_set_pull_ctl(pcr, tbl);
686 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
688 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
690 const u32 *tbl;
692 if (card == RTSX_SD_CARD)
693 tbl = pcr->sd_pull_ctl_disable_tbl;
694 else if (card == RTSX_MS_CARD)
695 tbl = pcr->ms_pull_ctl_disable_tbl;
696 else
697 return -EINVAL;
700 return rtsx_pci_set_pull_ctl(pcr, tbl);
702 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
704 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
706 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN;
708 if (pcr->num_slots > 1)
709 pcr->bier |= MS_INT_EN;
711 /* Enable Bus Interrupt */
712 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
714 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
717 static inline u8 double_ssc_depth(u8 depth)
719 return ((depth > 1) ? (depth - 1) : depth);
722 static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
724 if (div > CLK_DIV_1) {
725 if (ssc_depth > (div - 1))
726 ssc_depth -= (div - 1);
727 else
728 ssc_depth = SSC_DEPTH_4M;
731 return ssc_depth;
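/*
 * Derive the internal SSC clock for the requested card clock: compute
 * the divider value div_n (raising the post-divider until div_n is
 * large enough), adjust the SSC depth for clock doubling and the chosen
 * divider, and program CLK_DIV/SSC_CTL1/SSC_CTL2/SSC_DIV_N_0 while the
 * controller is temporarily parked on the low-frequency clock.
 */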
734 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
735 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
737 int err, clk;
738 u8 n, clk_divider, mcu_cnt, div;
739 static const u8 depth[] = {
740 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
741 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
742 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
743 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
744 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
747 if (initial_mode) {
/* Use a clock of around 250 kHz in the initial stage */
749 clk_divider = SD_CLK_DIVIDE_128;
750 card_clock = 30000000;
751 } else {
752 clk_divider = SD_CLK_DIVIDE_0;
754 err = rtsx_pci_write_register(pcr, SD_CFG1,
755 SD_CLK_DIVIDE_MASK, clk_divider);
756 if (err < 0)
757 return err;
759 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
760 if (card_clock == UHS_SDR104_MAX_DTR &&
761 pcr->dma_error_count &&
762 PCI_PID(pcr) == RTS5227_DEVICE_ID)
763 card_clock = UHS_SDR104_MAX_DTR -
764 (pcr->dma_error_count * 20000000);
766 card_clock /= 1000000;
767 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
769 clk = card_clock;
770 if (!initial_mode && double_clk)
771 clk = card_clock * 2;
772 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
773 clk, pcr->cur_clock);
775 if (clk == pcr->cur_clock)
776 return 0;
778 if (pcr->ops->conv_clk_and_div_n)
779 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
780 else
781 n = (u8)(clk - 2);
782 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
783 return -EINVAL;
785 mcu_cnt = (u8)(125/clk + 3);
786 if (mcu_cnt > 15)
787 mcu_cnt = 15;
789 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
790 div = CLK_DIV_1;
791 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
792 if (pcr->ops->conv_clk_and_div_n) {
793 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
794 DIV_N_TO_CLK) * 2;
795 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
796 CLK_TO_DIV_N);
797 } else {
798 n = (n + 2) * 2 - 2;
800 div++;
802 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
804 ssc_depth = depth[ssc_depth];
805 if (double_clk)
806 ssc_depth = double_ssc_depth(ssc_depth);
808 ssc_depth = revise_ssc_depth(ssc_depth, div);
809 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
811 rtsx_pci_init_cmd(pcr);
812 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
813 CLK_LOW_FREQ, CLK_LOW_FREQ);
814 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
815 0xFF, (div << 4) | mcu_cnt);
816 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
817 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
818 SSC_DEPTH_MASK, ssc_depth);
819 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
820 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
821 if (vpclk) {
822 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
823 PHASE_NOT_RESET, 0);
824 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
825 PHASE_NOT_RESET, PHASE_NOT_RESET);
828 err = rtsx_pci_send_cmd(pcr, 2000);
829 if (err < 0)
830 return err;
832 /* Wait SSC clock stable */
833 udelay(SSC_CLOCK_STABLE_WAIT);
834 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
835 if (err < 0)
836 return err;
838 pcr->cur_clock = clk;
839 return 0;
841 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
843 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
845 if (pcr->ops->card_power_on)
846 return pcr->ops->card_power_on(pcr, card);
848 return 0;
850 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
852 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
854 if (pcr->ops->card_power_off)
855 return pcr->ops->card_power_off(pcr, card);
857 return 0;
859 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
861 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
863 static const unsigned int cd_mask[] = {
864 [RTSX_SD_CARD] = SD_EXIST,
865 [RTSX_MS_CARD] = MS_EXIST
if (!(pcr->flags & PCR_MS_PMOS)) {
/* When using single PMOS, accessing card is not permitted
 * if the existing card is not the designated one.
 */
if (pcr->card_exist & (~cd_mask[card]))
return -EIO;
}
876 return 0;
878 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
880 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
882 if (pcr->ops->switch_output_voltage)
883 return pcr->ops->switch_output_voltage(pcr, voltage);
885 return 0;
887 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
889 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
891 unsigned int val;
893 val = rtsx_pci_readl(pcr, RTSX_BIPR);
894 if (pcr->ops->cd_deglitch)
895 val = pcr->ops->cd_deglitch(pcr);
897 return val;
899 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
901 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
903 struct completion finish;
905 pcr->finish_me = &finish;
906 init_completion(&finish);
908 if (pcr->done)
909 complete(pcr->done);
911 if (!pcr->remove_pci)
912 rtsx_pci_stop_cmd(pcr);
914 wait_for_completion_interruptible_timeout(&finish,
915 msecs_to_jiffies(2));
916 pcr->finish_me = NULL;
918 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
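/*
 * Delayed work scheduled from the ISR: re-read RTSX_BIPR, reconcile the
 * insert/remove bits recorded by the interrupt handler, update
 * pcr->card_exist and notify the SD/MS slot drivers through their
 * card_event callbacks.
 */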
920 static void rtsx_pci_card_detect(struct work_struct *work)
922 struct delayed_work *dwork;
923 struct rtsx_pcr *pcr;
924 unsigned long flags;
925 unsigned int card_detect = 0, card_inserted, card_removed;
926 u32 irq_status;
928 dwork = to_delayed_work(work);
929 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
931 pcr_dbg(pcr, "--> %s\n", __func__);
933 mutex_lock(&pcr->pcr_mutex);
934 spin_lock_irqsave(&pcr->lock, flags);
936 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
937 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
939 irq_status &= CARD_EXIST;
940 card_inserted = pcr->card_inserted & irq_status;
941 card_removed = pcr->card_removed;
942 pcr->card_inserted = 0;
943 pcr->card_removed = 0;
945 spin_unlock_irqrestore(&pcr->lock, flags);
947 if (card_inserted || card_removed) {
948 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
949 card_inserted, card_removed);
951 if (pcr->ops->cd_deglitch)
952 card_inserted = pcr->ops->cd_deglitch(pcr);
954 card_detect = card_inserted | card_removed;
956 pcr->card_exist |= card_inserted;
957 pcr->card_exist &= ~card_removed;
960 mutex_unlock(&pcr->pcr_mutex);
962 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
963 pcr->slots[RTSX_SD_CARD].card_event(
964 pcr->slots[RTSX_SD_CARD].p_dev);
965 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
966 pcr->slots[RTSX_MS_CARD].card_event(
967 pcr->slots[RTSX_MS_CARD].p_dev);
970 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
972 if (pcr->ops->process_ocp)
973 pcr->ops->process_ocp(pcr);
976 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
978 if (pcr->option.ocp_en)
979 rtsx_pci_process_ocp(pcr);
981 return 0;
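/*
 * Top-half interrupt handler: acknowledge RTSX_BIPR, record card
 * insert/remove events, complete a pending command/DMA waiter on
 * TRANS_OK/TRANS_FAIL, and defer card-detect processing to
 * carddet_work.
 */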
984 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
986 struct rtsx_pcr *pcr = dev_id;
987 u32 int_reg;
989 if (!pcr)
990 return IRQ_NONE;
992 spin_lock(&pcr->lock);
994 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
995 /* Clear interrupt flag */
996 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
997 if ((int_reg & pcr->bier) == 0) {
998 spin_unlock(&pcr->lock);
999 return IRQ_NONE;
1001 if (int_reg == 0xFFFFFFFF) {
1002 spin_unlock(&pcr->lock);
1003 return IRQ_HANDLED;
1006 int_reg &= (pcr->bier | 0x7FFFFF);
1008 if (int_reg & SD_OC_INT)
1009 rtsx_pci_process_ocp_interrupt(pcr);
1011 if (int_reg & SD_INT) {
1012 if (int_reg & SD_EXIST) {
1013 pcr->card_inserted |= SD_EXIST;
1014 } else {
1015 pcr->card_removed |= SD_EXIST;
1016 pcr->card_inserted &= ~SD_EXIST;
1018 pcr->dma_error_count = 0;
1021 if (int_reg & MS_INT) {
1022 if (int_reg & MS_EXIST) {
1023 pcr->card_inserted |= MS_EXIST;
1024 } else {
1025 pcr->card_removed |= MS_EXIST;
1026 pcr->card_inserted &= ~MS_EXIST;
1030 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1031 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1032 pcr->trans_result = TRANS_RESULT_FAIL;
1033 if (pcr->done)
1034 complete(pcr->done);
1035 } else if (int_reg & TRANS_OK_INT) {
1036 pcr->trans_result = TRANS_RESULT_OK;
1037 if (pcr->done)
1038 complete(pcr->done);
1042 if (pcr->card_inserted || pcr->card_removed)
1043 schedule_delayed_work(&pcr->carddet_work,
1044 msecs_to_jiffies(200));
1046 spin_unlock(&pcr->lock);
1047 return IRQ_HANDLED;
1050 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1052 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1053 __func__, pcr->msi_en, pcr->pci->irq);
1055 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1056 pcr->msi_en ? 0 : IRQF_SHARED,
1057 DRV_NAME_RTSX_PCI, pcr)) {
1058 dev_err(&(pcr->pci->dev),
1059 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1060 pcr->pci->irq);
1061 return -1;
1064 pcr->irq = pcr->pci->irq;
1065 pci_intx(pcr->pci, !pcr->msi_en);
1067 return 0;
1070 static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1072 if (pcr->ops->set_aspm)
1073 pcr->ops->set_aspm(pcr, true);
1074 else
1075 rtsx_comm_set_aspm(pcr, true);
1078 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1080 struct rtsx_cr_option *option = &pcr->option;
1082 if (option->ltr_enabled) {
1083 u32 latency = option->ltr_l1off_latency;
1085 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1086 mdelay(option->l1_snooze_delay);
1088 rtsx_set_ltr_latency(pcr, latency);
1091 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1092 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1094 rtsx_enable_aspm(pcr);
1097 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1099 if (pcr->ops->power_saving)
1100 pcr->ops->power_saving(pcr);
1101 else
1102 rtsx_comm_pm_power_saving(pcr);
1105 static void rtsx_pci_idle_work(struct work_struct *work)
1107 struct delayed_work *dwork = to_delayed_work(work);
1108 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1110 pcr_dbg(pcr, "--> %s\n", __func__);
1112 mutex_lock(&pcr->pcr_mutex);
1114 pcr->state = PDEV_STAT_IDLE;
1116 if (pcr->ops->disable_auto_blink)
1117 pcr->ops->disable_auto_blink(pcr);
1118 if (pcr->ops->turn_off_led)
1119 pcr->ops->turn_off_led(pcr);
1121 rtsx_pm_power_saving(pcr);
1123 mutex_unlock(&pcr->pcr_mutex);
1126 #ifdef CONFIG_PM
1127 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1129 if (pcr->ops->turn_off_led)
1130 pcr->ops->turn_off_led(pcr);
1132 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1133 pcr->bier = 0;
1135 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1136 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1138 if (pcr->ops->force_power_down)
1139 pcr->ops->force_power_down(pcr, pm_state);
1141 #endif
1143 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
1145 u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
1147 if (pcr->ops->enable_ocp)
1148 pcr->ops->enable_ocp(pcr);
1149 else
1150 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
1154 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
1156 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
1158 if (pcr->ops->disable_ocp)
1159 pcr->ops->disable_ocp(pcr);
1160 else
1161 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1164 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
1166 if (pcr->ops->init_ocp) {
1167 pcr->ops->init_ocp(pcr);
1168 } else {
1169 struct rtsx_cr_option *option = &(pcr->option);
1171 if (option->ocp_en) {
1172 u8 val = option->sd_400mA_ocp_thd;
1174 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
1175 rtsx_pci_write_register(pcr, REG_OCPPARA1,
1176 SD_OCP_TIME_MASK, SD_OCP_TIME_800);
1177 rtsx_pci_write_register(pcr, REG_OCPPARA2,
1178 SD_OCP_THD_MASK, val);
1179 rtsx_pci_write_register(pcr, REG_OCPGLITCH,
1180 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
1181 rtsx_pci_enable_ocp(pcr);
1182 } else {
1183 /* OC power down */
1184 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
1185 OC_POWER_DOWN);
1190 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1192 if (pcr->ops->get_ocpstat)
1193 return pcr->ops->get_ocpstat(pcr, val);
1194 else
1195 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1198 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1200 if (pcr->ops->clear_ocpstat) {
1201 pcr->ops->clear_ocpstat(pcr);
1202 } else {
1203 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1204 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1206 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
1207 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1211 int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1213 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1214 MS_CLK_EN | SD40_CLK_EN, 0);
1215 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
1217 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1219 msleep(50);
1221 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1223 return 0;
1226 int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1228 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1229 MS_CLK_EN | SD40_CLK_EN, 0);
1231 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1233 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1234 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1236 return 0;
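/*
 * Hardware bring-up used at probe and resume time: power up the SSC
 * clock, run the common register initialization command sequence, apply
 * the per-chip extra_init_hw hook, and sample the initial card-present
 * state (no card-detect interrupt fires for a card already inserted
 * when the driver loads).
 */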
1239 static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1241 int err;
1243 pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
1244 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1246 rtsx_pci_enable_bus_int(pcr);
1248 /* Power on SSC */
1249 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1250 if (err < 0)
1251 return err;
1253 /* Wait SSC power stable */
1254 udelay(200);
1256 rtsx_pci_disable_aspm(pcr);
1257 if (pcr->ops->optimize_phy) {
1258 err = pcr->ops->optimize_phy(pcr);
1259 if (err < 0)
1260 return err;
1263 rtsx_pci_init_cmd(pcr);
1265 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1266 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1268 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1269 /* Disable card clock */
1270 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1271 /* Reset delink mode */
1272 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1273 /* Card driving select */
1274 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1275 0xFF, pcr->card_drive_sel);
1276 /* Enable SSC Clock */
1277 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1278 0xFF, SSC_8X_EN | SSC_SEL_4M);
1279 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1280 /* Disable cd_pwr_save */
1281 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1282 /* Clear Link Ready Interrupt */
1283 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1284 LINK_RDY_INT, LINK_RDY_INT);
/* Enlarge the estimation window of PERST# glitch
 * to reduce the chance of invalid card interrupt
 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
/* Update RC oscillator to 400k
 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
 * 1: 2M  0: 400k
 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
/* Set interrupt write clear
 * bit 1: U_elbi_if_rd_clr_en
 *   1: ELBI interrupt flags [31:22] & [7:0] are cleared on read
 *   0: ELBI interrupt flags [31:22] & [7:0] can only be cleared by write
 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1301 err = rtsx_pci_send_cmd(pcr, 100);
1302 if (err < 0)
1303 return err;
1305 switch (PCI_PID(pcr)) {
1306 case PID_5250:
1307 case PID_524A:
1308 case PID_525A:
1309 case PID_5260:
1310 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1311 break;
1312 default:
1313 break;
1316 /* Enable clk_request_n to enable clock power management */
1317 rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
1318 /* Enter L1 when host tx idle */
1319 rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
1321 if (pcr->ops->extra_init_hw) {
1322 err = pcr->ops->extra_init_hw(pcr);
1323 if (err < 0)
1324 return err;
/* No CD interrupt if probing driver with card inserted.
 * So we need to initialize pcr->card_exist here.
 */
if (pcr->ops->cd_deglitch)
1331 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1332 else
1333 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1335 return 0;
1338 static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1340 int err;
1342 spin_lock_init(&pcr->lock);
1343 mutex_init(&pcr->pcr_mutex);
1345 switch (PCI_PID(pcr)) {
1346 default:
1347 case 0x5209:
1348 rts5209_init_params(pcr);
1349 break;
1351 case 0x5229:
1352 rts5229_init_params(pcr);
1353 break;
1355 case 0x5289:
1356 rtl8411_init_params(pcr);
1357 break;
1359 case 0x5227:
1360 rts5227_init_params(pcr);
1361 break;
1363 case 0x522A:
1364 rts522a_init_params(pcr);
1365 break;
1367 case 0x5249:
1368 rts5249_init_params(pcr);
1369 break;
1371 case 0x524A:
1372 rts524a_init_params(pcr);
1373 break;
1375 case 0x525A:
1376 rts525a_init_params(pcr);
1377 break;
1379 case 0x5287:
1380 rtl8411b_init_params(pcr);
1381 break;
1383 case 0x5286:
1384 rtl8402_init_params(pcr);
1385 break;
1386 case 0x5260:
1387 rts5260_init_params(pcr);
1388 break;
1391 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1392 PCI_PID(pcr), pcr->ic_version);
1394 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1395 GFP_KERNEL);
1396 if (!pcr->slots)
1397 return -ENOMEM;
1399 if (pcr->ops->fetch_vendor_settings)
1400 pcr->ops->fetch_vendor_settings(pcr);
1402 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1403 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1404 pcr->sd30_drive_sel_1v8);
1405 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1406 pcr->sd30_drive_sel_3v3);
1407 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1408 pcr->card_drive_sel);
1409 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1411 pcr->state = PDEV_STAT_IDLE;
1412 err = rtsx_pci_init_hw(pcr);
1413 if (err < 0) {
1414 kfree(pcr->slots);
1415 return err;
1418 return 0;
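/*
 * PCI probe: map BAR 0 (BAR 1 on the RTS525A), allocate the shared
 * command/scatter-gather DMA buffer, set up MSI or shared legacy
 * interrupts, initialize the chip, and register the SD/MS MFD child
 * devices.
 */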
1421 static int rtsx_pci_probe(struct pci_dev *pcidev,
1422 const struct pci_device_id *id)
1424 struct rtsx_pcr *pcr;
1425 struct pcr_handle *handle;
1426 u32 base, len;
1427 int ret, i, bar = 0;
1429 dev_dbg(&(pcidev->dev),
1430 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1431 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1432 (int)pcidev->revision);
1434 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1435 if (ret < 0)
1436 return ret;
1438 ret = pci_enable_device(pcidev);
1439 if (ret)
1440 return ret;
1442 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1443 if (ret)
1444 goto disable;
1446 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1447 if (!pcr) {
1448 ret = -ENOMEM;
1449 goto release_pci;
1452 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1453 if (!handle) {
1454 ret = -ENOMEM;
1455 goto free_pcr;
1457 handle->pcr = pcr;
1459 idr_preload(GFP_KERNEL);
1460 spin_lock(&rtsx_pci_lock);
1461 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1462 if (ret >= 0)
1463 pcr->id = ret;
1464 spin_unlock(&rtsx_pci_lock);
1465 idr_preload_end();
1466 if (ret < 0)
1467 goto free_handle;
1469 pcr->pci = pcidev;
1470 dev_set_drvdata(&pcidev->dev, handle);
1472 if (CHK_PCI_PID(pcr, 0x525A))
1473 bar = 1;
1474 len = pci_resource_len(pcidev, bar);
1475 base = pci_resource_start(pcidev, bar);
1476 pcr->remap_addr = ioremap_nocache(base, len);
1477 if (!pcr->remap_addr) {
1478 ret = -ENOMEM;
1479 goto free_handle;
1482 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1483 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1484 GFP_KERNEL);
1485 if (pcr->rtsx_resv_buf == NULL) {
1486 ret = -ENXIO;
1487 goto unmap;
1489 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1490 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1491 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1492 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1494 pcr->card_inserted = 0;
1495 pcr->card_removed = 0;
1496 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1497 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1499 pcr->msi_en = msi_en;
1500 if (pcr->msi_en) {
1501 ret = pci_enable_msi(pcidev);
1502 if (ret)
1503 pcr->msi_en = false;
1506 ret = rtsx_pci_acquire_irq(pcr);
1507 if (ret < 0)
1508 goto disable_msi;
1510 pci_set_master(pcidev);
1511 synchronize_irq(pcr->irq);
1513 ret = rtsx_pci_init_chip(pcr);
1514 if (ret < 0)
1515 goto disable_irq;
1517 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1518 rtsx_pcr_cells[i].platform_data = handle;
1519 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1521 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1522 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1523 if (ret < 0)
1524 goto disable_irq;
1526 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1528 return 0;
1530 disable_irq:
1531 free_irq(pcr->irq, (void *)pcr);
1532 disable_msi:
1533 if (pcr->msi_en)
1534 pci_disable_msi(pcr->pci);
1535 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1536 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1537 unmap:
1538 iounmap(pcr->remap_addr);
1539 free_handle:
1540 kfree(handle);
1541 free_pcr:
1542 kfree(pcr);
1543 release_pci:
1544 pci_release_regions(pcidev);
1545 disable:
1546 pci_disable_device(pcidev);
1548 return ret;
1551 static void rtsx_pci_remove(struct pci_dev *pcidev)
1553 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1554 struct rtsx_pcr *pcr = handle->pcr;
1556 pcr->remove_pci = true;
1558 /* Disable interrupts at the pcr level */
1559 spin_lock_irq(&pcr->lock);
1560 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1561 pcr->bier = 0;
1562 spin_unlock_irq(&pcr->lock);
1564 cancel_delayed_work_sync(&pcr->carddet_work);
1565 cancel_delayed_work_sync(&pcr->idle_work);
1567 mfd_remove_devices(&pcidev->dev);
1569 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1570 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1571 free_irq(pcr->irq, (void *)pcr);
1572 if (pcr->msi_en)
1573 pci_disable_msi(pcr->pci);
1574 iounmap(pcr->remap_addr);
1576 pci_release_regions(pcidev);
1577 pci_disable_device(pcidev);
1579 spin_lock(&rtsx_pci_lock);
1580 idr_remove(&rtsx_pci_idr, pcr->id);
1581 spin_unlock(&rtsx_pci_lock);
1583 kfree(pcr->slots);
1584 kfree(pcr);
1585 kfree(handle);
1587 dev_dbg(&(pcidev->dev),
1588 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1589 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1592 #ifdef CONFIG_PM
1594 static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
1596 struct pcr_handle *handle;
1597 struct rtsx_pcr *pcr;
1599 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1601 handle = pci_get_drvdata(pcidev);
1602 pcr = handle->pcr;
1604 cancel_delayed_work(&pcr->carddet_work);
1605 cancel_delayed_work(&pcr->idle_work);
1607 mutex_lock(&pcr->pcr_mutex);
1609 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1611 pci_save_state(pcidev);
1612 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
1613 pci_disable_device(pcidev);
1614 pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
1616 mutex_unlock(&pcr->pcr_mutex);
1617 return 0;
1620 static int rtsx_pci_resume(struct pci_dev *pcidev)
1622 struct pcr_handle *handle;
1623 struct rtsx_pcr *pcr;
1624 int ret = 0;
1626 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1628 handle = pci_get_drvdata(pcidev);
1629 pcr = handle->pcr;
1631 mutex_lock(&pcr->pcr_mutex);
1633 pci_set_power_state(pcidev, PCI_D0);
1634 pci_restore_state(pcidev);
1635 ret = pci_enable_device(pcidev);
1636 if (ret)
1637 goto out;
1638 pci_set_master(pcidev);
1640 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1641 if (ret)
1642 goto out;
1644 ret = rtsx_pci_init_hw(pcr);
1645 if (ret)
1646 goto out;
1648 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1650 out:
1651 mutex_unlock(&pcr->pcr_mutex);
1652 return ret;
1655 static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1657 struct pcr_handle *handle;
1658 struct rtsx_pcr *pcr;
1660 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1662 handle = pci_get_drvdata(pcidev);
1663 pcr = handle->pcr;
1664 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1666 pci_disable_device(pcidev);
1667 free_irq(pcr->irq, (void *)pcr);
1668 if (pcr->msi_en)
1669 pci_disable_msi(pcr->pci);
1672 #else /* CONFIG_PM */
1674 #define rtsx_pci_suspend NULL
1675 #define rtsx_pci_resume NULL
1676 #define rtsx_pci_shutdown NULL
1678 #endif /* CONFIG_PM */
1680 static struct pci_driver rtsx_pci_driver = {
1681 .name = DRV_NAME_RTSX_PCI,
1682 .id_table = rtsx_pci_ids,
1683 .probe = rtsx_pci_probe,
1684 .remove = rtsx_pci_remove,
1685 .suspend = rtsx_pci_suspend,
1686 .resume = rtsx_pci_resume,
1687 .shutdown = rtsx_pci_shutdown,
1689 module_pci_driver(rtsx_pci_driver);
1691 MODULE_LICENSE("GPL");
1692 MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1693 MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");