// SPDX-License-Identifier: GPL-2.0
/*
 * Tegra20 External Memory Controller driver
 *
 * Author: Dmitry Osipenko <digetx@gmail.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>

#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>

#include "../jedec_ddr.h"
#include "../of_memory.h"

#include "mc.h"
#define EMC_INTSTATUS				0x000
#define EMC_INTMASK				0x004
#define EMC_DBG					0x008
#define EMC_ADR_CFG_0				0x010
#define EMC_TIMING_CONTROL			0x028
#define EMC_RC					0x02c
#define EMC_RFC					0x030
#define EMC_RAS					0x034
#define EMC_RP					0x038
#define EMC_R2W					0x03c
#define EMC_W2R					0x040
#define EMC_R2P					0x044
#define EMC_W2P					0x048
#define EMC_RD_RCD				0x04c
#define EMC_WR_RCD				0x050
#define EMC_RRD					0x054
#define EMC_REXT				0x058
#define EMC_WDV					0x05c
#define EMC_QUSE				0x060
#define EMC_QRST				0x064
#define EMC_QSAFE				0x068
#define EMC_RDV					0x06c
#define EMC_REFRESH				0x070
#define EMC_BURST_REFRESH_NUM			0x074
#define EMC_PDEX2WR				0x078
#define EMC_PDEX2RD				0x07c
#define EMC_PCHG2PDEN				0x080
#define EMC_ACT2PDEN				0x084
#define EMC_AR2PDEN				0x088
#define EMC_RW2PDEN				0x08c
#define EMC_TXSR				0x090
#define EMC_TCKE				0x094
#define EMC_TFAW				0x098
#define EMC_TRPAB				0x09c
#define EMC_TCLKSTABLE				0x0a0
#define EMC_TCLKSTOP				0x0a4
#define EMC_TREFBW				0x0a8
#define EMC_QUSE_EXTRA				0x0ac
#define EMC_ODT_WRITE				0x0b0
#define EMC_ODT_READ				0x0b4
#define EMC_MRR					0x0ec
#define EMC_FBIO_CFG5				0x104
#define EMC_FBIO_CFG6				0x114
#define EMC_STAT_CONTROL			0x160
#define EMC_STAT_LLMC_CONTROL			0x178
#define EMC_STAT_PWR_CLOCK_LIMIT		0x198
#define EMC_STAT_PWR_CLOCKS			0x19c
#define EMC_STAT_PWR_COUNT			0x1a0
#define EMC_AUTO_CAL_INTERVAL			0x2a8
#define EMC_CFG_2				0x2b8
#define EMC_CFG_DIG_DLL				0x2bc
#define EMC_DLL_XFORM_DQS			0x2c0
#define EMC_DLL_XFORM_QUSE			0x2c4
#define EMC_ZCAL_REF_CNT			0x2e0
#define EMC_ZCAL_WAIT_CNT			0x2e4
#define EMC_CFG_CLKTRIM_0			0x2d0
#define EMC_CFG_CLKTRIM_1			0x2d4
#define EMC_CFG_CLKTRIM_2			0x2d8

#define EMC_CLKCHANGE_REQ_ENABLE		BIT(0)
#define EMC_CLKCHANGE_PD_ENABLE			BIT(1)
#define EMC_CLKCHANGE_SR_ENABLE			BIT(2)

#define EMC_TIMING_UPDATE			BIT(0)

#define EMC_REFRESH_OVERFLOW_INT		BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT		BIT(4)
#define EMC_MRR_DIVLD_INT			BIT(5)

#define EMC_DBG_READ_MUX_ASSEMBLY		BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE		BIT(1)
#define EMC_DBG_FORCE_UPDATE			BIT(2)
#define EMC_DBG_READ_DQM_CTRL			BIT(9)
#define EMC_DBG_CFG_PRIORITY			BIT(24)

#define EMC_FBIO_CFG5_DRAM_WIDTH_X16		BIT(4)
#define EMC_FBIO_CFG5_DRAM_TYPE			GENMASK(1, 0)

#define EMC_MRR_DEV_SELECTN			GENMASK(31, 30)
#define EMC_MRR_MRR_MA				GENMASK(23, 16)
#define EMC_MRR_MRR_DATA			GENMASK(15, 0)

#define EMC_ADR_CFG_0_EMEM_NUMDEV		GENMASK(25, 24)

#define EMC_PWR_GATHER_CLEAR			(1 << 8)
#define EMC_PWR_GATHER_DISABLE			(2 << 8)
#define EMC_PWR_GATHER_ENABLE			(3 << 8)

enum emc_dram_type {
	DRAM_TYPE_RESERVED,
	DRAM_TYPE_DDR1,
	DRAM_TYPE_LPDDR2,
	DRAM_TYPE_DDR2,
};

static const u16 emc_timing_registers[] = {
	EMC_RC,
	EMC_RFC,
	EMC_RAS,
	EMC_RP,
	EMC_R2W,
	EMC_W2R,
	EMC_R2P,
	EMC_W2P,
	EMC_RD_RCD,
	EMC_WR_RCD,
	EMC_RRD,
	EMC_REXT,
	EMC_WDV,
	EMC_QUSE,
	EMC_QRST,
	EMC_QSAFE,
	EMC_RDV,
	EMC_REFRESH,
	EMC_BURST_REFRESH_NUM,
	EMC_PDEX2WR,
	EMC_PDEX2RD,
	EMC_PCHG2PDEN,
	EMC_ACT2PDEN,
	EMC_AR2PDEN,
	EMC_RW2PDEN,
	EMC_TXSR,
	EMC_TCKE,
	EMC_TFAW,
	EMC_TRPAB,
	EMC_TCLKSTABLE,
	EMC_TCLKSTOP,
	EMC_TREFBW,
	EMC_QUSE_EXTRA,
	EMC_FBIO_CFG6,
	EMC_ODT_WRITE,
	EMC_ODT_READ,
	EMC_FBIO_CFG5,
	EMC_CFG_DIG_DLL,
	EMC_DLL_XFORM_DQS,
	EMC_DLL_XFORM_QUSE,
	EMC_ZCAL_REF_CNT,
	EMC_ZCAL_WAIT_CNT,
	EMC_AUTO_CAL_INTERVAL,
	EMC_CFG_CLKTRIM_0,
	EMC_CFG_CLKTRIM_1,
	EMC_CFG_CLKTRIM_2,
};

struct emc_timing {
	unsigned long rate;
	u32 data[ARRAY_SIZE(emc_timing_registers)];
};
enum emc_rate_request_type {
	EMC_RATE_DEVFREQ,
	EMC_RATE_DEBUG,
	EMC_RATE_ICC,
	EMC_RATE_TYPE_MAX,
};

struct emc_rate_request {
	unsigned long min_rate;
	unsigned long max_rate;
};

struct tegra_emc {
	struct device *dev;
	struct tegra_mc *mc;
	struct icc_provider provider;
	struct notifier_block clk_nb;
	struct clk *clk;
	void __iomem *regs;
	unsigned int dram_bus_width;

	struct emc_timing *timings;
	unsigned int num_timings;

	struct {
		struct dentry *root;
		unsigned long min_rate;
		unsigned long max_rate;
	} debugfs;

	/*
	 * There are multiple sources in the EMC driver which could request
	 * a min/max clock rate; these rates are contained in this array.
	 */
	struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];

	/* protect shared rate-change code path */
	struct mutex rate_lock;

	struct devfreq_simple_ondemand_data ondemand_data;

	/* memory chip identity information */
	union lpddr2_basic_config4 basic_conf4;
	unsigned int manufacturer_id;
	unsigned int revision_id1;
	unsigned int revision_id2;

	bool mrr_error;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
{
	struct tegra_emc *emc = data;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	u32 status;

	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
	if (!status)
		return IRQ_NONE;

	/* notify about HW problem */
	if (status & EMC_REFRESH_OVERFLOW_INT)
		dev_err_ratelimited(emc->dev,
				    "refresh request overflow timeout\n");

	/* clear interrupts */
	writel_relaxed(status, emc->regs + EMC_INTSTATUS);

	return IRQ_HANDLED;
}
static struct emc_timing *tegra_emc_find_timing(struct tegra_emc *emc,
						unsigned long rate)
{
	struct emc_timing *timing = NULL;
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate >= rate) {
			timing = &emc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu\n", rate);
		return NULL;
	}

	return timing;
}
static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
{
	struct emc_timing *timing = tegra_emc_find_timing(emc, rate);
	unsigned int i;

	if (!timing)
		return -EINVAL;

	dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
		__func__, timing->rate, rate);

	/* program shadow registers */
	for (i = 0; i < ARRAY_SIZE(timing->data); i++)
		writel_relaxed(timing->data[i],
			       emc->regs + emc_timing_registers[i]);

	/* wait until programming has settled */
	readl_relaxed(emc->regs + emc_timing_registers[i - 1]);

	return 0;
}
static int emc_complete_timing_change(struct tegra_emc *emc, bool flush)
{
	int err;
	u32 v;

	dev_dbg(emc->dev, "%s: flush %d\n", __func__, flush);

	if (flush) {
		/* manually initiate memory timing update */
		writel_relaxed(EMC_TIMING_UPDATE,
			       emc->regs + EMC_TIMING_CONTROL);
		return 0;
	}

	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
						v & EMC_CLKCHANGE_COMPLETE_INT,
						1, 100);
	if (err) {
		dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
		return err;
	}

	return 0;
}
static int tegra_emc_clk_change_notify(struct notifier_block *nb,
				       unsigned long msg, void *data)
{
	struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
	struct clk_notifier_data *cnd = data;
	int err;

	switch (msg) {
	case PRE_RATE_CHANGE:
		err = emc_prepare_timing_change(emc, cnd->new_rate);
		break;

	case ABORT_RATE_CHANGE:
		err = emc_prepare_timing_change(emc, cnd->old_rate);
		if (err)
			break;

		err = emc_complete_timing_change(emc, true);
		break;

	case POST_RATE_CHANGE:
		err = emc_complete_timing_change(emc, false);
		break;

	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}
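/*
 * For reference, a rate change driven through the notifier above goes
 * roughly like this (all names refer to functions in this file):
 *
 *	clk_set_rate(emc->clk, rate)
 *	  PRE_RATE_CHANGE:  emc_prepare_timing_change() programs the EMC
 *	                    shadow registers for the new rate
 *	  (CAR switches the EMC clock source/divider in hardware)
 *	  POST_RATE_CHANGE: emc_complete_timing_change() polls for the
 *	                    EMC-CAR handshake-completion interrupt
 *
 * ABORT_RATE_CHANGE re-programs the old-rate timing and flushes it by
 * manually triggering EMC_TIMING_CONTROL.
 */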
static int load_one_timing_from_dt(struct tegra_emc *emc,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	u32 rate;
	int err;

	if (!of_device_is_compatible(node, "nvidia,tegra20-emc-table")) {
		dev_err(emc->dev, "incompatible DT node: %pOF\n", node);
		return -EINVAL;
	}

	err = of_property_read_u32(node, "clock-frequency", &rate);
	if (err) {
		dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
			node, err);
		return err;
	}

	err = of_property_read_u32_array(node, "nvidia,emc-registers",
					 timing->data,
					 ARRAY_SIZE(emc_timing_registers));
	if (err) {
		dev_err(emc->dev,
			"timing %pOF: failed to read emc timing data: %d\n",
			node, err);
		return err;
	}

	/*
	 * The EMC clock rate is twice the bus rate, and the bus rate is
	 * measured in kHz.
	 */
	timing->rate = rate * 2 * 1000;

	dev_dbg(emc->dev, "%s: %pOF: EMC rate %lu\n",
		__func__, node, timing->rate);

	return 0;
}
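/*
 * For illustration, a timing node of the shape parsed above (the values
 * are made up rather than taken from a real board):
 *
 *	emc-table@333000 {
 *		compatible = "nvidia,tegra20-emc-table";
 *		clock-frequency = <333000>;	(bus rate in kHz)
 *		nvidia,emc-registers = <...>;	(one cell per entry of
 *						 emc_timing_registers[])
 *	};
 *
 * A "clock-frequency" of 333000 kHz yields
 * timing->rate = 333000 * 2 * 1000 = 666000000 Hz (666 MHz EMC clock).
 */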
static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;

	if (a->rate > b->rate)
		return 1;

	return 0;
}
static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
					  struct device_node *node)
{
	struct emc_timing *timing;
	int child_count;
	int err;

	child_count = of_get_child_count(node);
	if (!child_count) {
		dev_err(emc->dev, "no memory timings in DT node: %pOF\n", node);
		return -EINVAL;
	}

	emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
				    GFP_KERNEL);
	if (!emc->timings)
		return -ENOMEM;

	timing = emc->timings;

	for_each_child_of_node_scoped(node, child) {
		if (of_node_name_eq(child, "lpddr2"))
			continue;

		err = load_one_timing_from_dt(emc, timing++, child);
		if (err)
			return err;

		emc->num_timings++;
	}

	sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
	     NULL);

	dev_info_once(emc->dev,
		      "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
		      emc->num_timings,
		      tegra_read_ram_code(),
		      emc->timings[0].rate / 1000000,
		      emc->timings[emc->num_timings - 1].rate / 1000000);

	return 0;
}
static struct device_node *
tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	struct device_node *np;
	u32 value, ram_code;
	int err;

	if (emc->mrr_error) {
		dev_warn(dev, "memory timings skipped due to MRR error\n");
		return NULL;
	}

	if (of_get_child_count(dev->of_node) == 0) {
		dev_info_once(dev, "device-tree doesn't have memory timings\n");
		return NULL;
	}

	if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
		return of_node_get(dev->of_node);

	ram_code = tegra_read_ram_code();

	for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np;
	     np = of_find_node_by_name(np, "emc-tables")) {
		err = of_property_read_u32(np, "nvidia,ram-code", &value);
		if (err || value != ram_code) {
			struct device_node *lpddr2_np;
			bool cfg_mismatches = false;

			lpddr2_np = of_find_node_by_name(np, "lpddr2");
			if (lpddr2_np) {
				const struct lpddr2_info *info;

				info = of_lpddr2_get_info(lpddr2_np, dev);
				if (info) {
					if (info->manufacturer_id >= 0 &&
					    info->manufacturer_id != emc->manufacturer_id)
						cfg_mismatches = true;

					if (info->revision_id1 >= 0 &&
					    info->revision_id1 != emc->revision_id1)
						cfg_mismatches = true;

					if (info->revision_id2 >= 0 &&
					    info->revision_id2 != emc->revision_id2)
						cfg_mismatches = true;

					if (info->density != emc->basic_conf4.density)
						cfg_mismatches = true;

					if (info->io_width != emc->basic_conf4.io_width)
						cfg_mismatches = true;

					if (info->arch_type != emc->basic_conf4.arch_type)
						cfg_mismatches = true;
				} else {
					dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
					cfg_mismatches = true;
				}

				of_node_put(lpddr2_np);
			} else {
				cfg_mismatches = true;
			}

			if (cfg_mismatches)
				continue;
		}

		return np;
	}

	dev_err(dev, "no memory timings for RAM code %u found in device tree\n",
		ram_code);

	return NULL;
}
static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
					unsigned int emem_dev,
					unsigned int register_addr,
					unsigned int *register_data)
{
	u32 memory_dev = emem_dev ? 1 : 2;
	u32 val, mr_mask = 0xff;
	int err;

	/* clear data-valid interrupt status */
	writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);

	/* issue mode register read request */
	val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
	val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);

	writel_relaxed(val, emc->regs + EMC_MRR);

	/* wait for the LPDDR2 data-valid interrupt */
	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
						val & EMC_MRR_DIVLD_INT,
						1, 100);
	if (err) {
		dev_err(emc->dev, "mode register %u read failed: %d\n",
			register_addr, err);
		emc->mrr_error = true;
		return err;
	}

	/* read out mode register data */
	val = readl_relaxed(emc->regs + EMC_MRR);
	*register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;

	return 0;
}
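/*
 * Example: emc_read_lpddr_mode_register(emc, 0, 5, &id) issues a mode
 * register read of MR5 (the JEDEC LPDDR2 Manufacturer ID register) on
 * the first memory device. The helper below uses exactly this to read
 * MR5..MR8 (manufacturer, revisions and basic configuration).
 */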
static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
				      unsigned int emem_dev,
				      bool print_out)
{
	/* these registers are standard for all LPDDR JEDEC memory chips */
	emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
	emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
	emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
	emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);

	if (!print_out)
		return;

	dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
		 emem_dev, emc->manufacturer_id,
		 lpddr2_jedec_manufacturer(emc->manufacturer_id),
		 emc->revision_id1, emc->revision_id2,
		 4 >> emc->basic_conf4.arch_type,
		 64 << emc->basic_conf4.density,
		 32 >> emc->basic_conf4.io_width);
}
static int emc_setup_hw(struct tegra_emc *emc)
{
	u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	static bool print_sdram_info_once;
	enum emc_dram_type dram_type;
	const char *dram_type_str;
	unsigned int emem_numdev;

	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);

	/*
	 * Depending on the memory type, DRAM should enter either self-refresh
	 * or power-down state on EMC clock change.
	 */
	if (!(emc_cfg & EMC_CLKCHANGE_PD_ENABLE) &&
	    !(emc_cfg & EMC_CLKCHANGE_SR_ENABLE)) {
		dev_err(emc->dev,
			"bootloader didn't specify DRAM auto-suspend mode\n");
		return -EINVAL;
	}

	/* enable EMC and CAR to handshake on PLL divider/source changes */
	emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;
	writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);

	/* initialize interrupt */
	writel_relaxed(intmask, emc->regs + EMC_INTMASK);
	writel_relaxed(intmask, emc->regs + EMC_INTSTATUS);

	/* ensure that unwanted debug features are disabled */
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
	emc_dbg |= EMC_DBG_CFG_PRIORITY;
	emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
	emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
	emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);

	if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
		emc->dram_bus_width = 16;
	else
		emc->dram_bus_width = 32;

	dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);

	switch (dram_type) {
	case DRAM_TYPE_RESERVED:
		dram_type_str = "INVALID";
		break;
	case DRAM_TYPE_DDR1:
		dram_type_str = "DDR1";
		break;
	case DRAM_TYPE_LPDDR2:
		dram_type_str = "LPDDR2";
		break;
	case DRAM_TYPE_DDR2:
		dram_type_str = "DDR2";
		break;
	}

	emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
	emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;

	dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
		      emc->dram_bus_width, emem_numdev, dram_type_str,
		      emem_numdev == 2 ? "devices" : "device");

	if (dram_type == DRAM_TYPE_LPDDR2) {
		while (emem_numdev--)
			emc_read_lpddr_sdram_info(emc, emem_numdev,
						  !print_sdram_info_once);
		print_sdram_info_once = true;
	}

	return 0;
}
static long emc_round_rate(unsigned long rate,
			   unsigned long min_rate,
			   unsigned long max_rate,
			   void *arg)
{
	struct emc_timing *timing = NULL;
	struct tegra_emc *emc = arg;
	unsigned int i;

	if (!emc->num_timings)
		return clk_get_rate(emc->clk);

	min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
			continue;

		if (emc->timings[i].rate > max_rate) {
			i = max(i, 1u) - 1;

			if (emc->timings[i].rate < min_rate)
				break;
		}

		if (emc->timings[i].rate < min_rate)
			continue;

		timing = &emc->timings[i];
		break;
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
			rate, min_rate, max_rate);
		return -EINVAL;
	}

	return timing->rate;
}
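/*
 * Rounding example (hypothetical rates): with timings of 166.5, 333 and
 * 666 MHz and an unconstrained min/max range, a 400 MHz request rounds
 * up to the 666 MHz timing, while a request above 666 MHz is served by
 * the highest timing.
 */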
static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
{
	unsigned int i;

	for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
		emc->requested_rate[i].min_rate = 0;
		emc->requested_rate[i].max_rate = ULONG_MAX;
	}
}
static int emc_request_rate(struct tegra_emc *emc,
			    unsigned long new_min_rate,
			    unsigned long new_max_rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = emc->requested_rate;
	unsigned long min_rate = 0, max_rate = ULONG_MAX;
	unsigned int i;
	int err;

	/* select minimum and maximum rates among the requested rates */
	for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
		if (i == type) {
			min_rate = max(new_min_rate, min_rate);
			max_rate = min(new_max_rate, max_rate);
		} else {
			min_rate = max(req->min_rate, min_rate);
			max_rate = min(req->max_rate, max_rate);
		}
	}

	if (min_rate > max_rate) {
		dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
				    __func__, type, min_rate, max_rate);
		return -ERANGE;
	}

	/*
	 * EMC rate-changes should go via the OPP API because it manages
	 * voltage changes.
	 */
	err = dev_pm_opp_set_rate(emc->dev, min_rate);
	if (err)
		return err;

	emc->requested_rate[type].min_rate = new_min_rate;
	emc->requested_rate[type].max_rate = new_max_rate;

	return 0;
}
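/*
 * Aggregation example (hypothetical numbers): if devfreq currently
 * requests a 300 MHz floor and the ICC path a 100 MHz floor, while
 * debugfs leaves its request at [0, ULONG_MAX], the effective range
 * becomes [300 MHz, ULONG_MAX] and the clock is driven to the 300 MHz
 * floor via dev_pm_opp_set_rate().
 */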
static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = &emc->requested_rate[type];
	int ret;

	mutex_lock(&emc->rate_lock);
	ret = emc_request_rate(emc, rate, req->max_rate, type);
	mutex_unlock(&emc->rate_lock);

	return ret;
}
static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = &emc->requested_rate[type];
	int ret;

	mutex_lock(&emc->rate_lock);
	ret = emc_request_rate(emc, req->min_rate, rate, type);
	mutex_unlock(&emc->rate_lock);

	return ret;
}
/*
 * The memory controller driver exposes some files in debugfs that can be used
 * to control the EMC frequency. The top-level directory can be found here:
 *
 * /sys/kernel/debug/emc
 *
 * It contains the following files:
 *
 * - available_rates: This file contains a list of valid, space-separated
 *   EMC frequencies.
 *
 * - min_rate: Writing a value to this file sets the given frequency as the
 *   floor of the permitted range. If this is higher than the currently
 *   configured EMC frequency, this will cause the frequency to be
 *   increased so that it stays within the valid range.
 *
 * - max_rate: Similarly to the min_rate file, writing a value to this file
 *   sets the given frequency as the ceiling of the permitted range. If
 *   the value is lower than the currently configured EMC frequency, this
 *   will cause the frequency to be decreased so that it stays within the
 *   valid range.
 */
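/*
 * Illustrative shell usage (the rates shown depend on the board's
 * timing tables):
 *
 *	# cat /sys/kernel/debug/emc/available_rates
 *	166500000 333000000 666000000
 *	# echo 333000000 > /sys/kernel/debug/emc/min_rate
 *
 * Writes that don't exactly match one of the available rates are
 * rejected with EINVAL by the validation helper below.
 */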
static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++)
		if (rate == emc->timings[i].rate)
			return true;

	return false;
}
static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
{
	struct tegra_emc *emc = s->private;
	const char *prefix = "";
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
		prefix = " ";
	}

	seq_puts(s, "\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);
static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.min_rate;

	return 0;
}

static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.min_rate = rate;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
			tegra_emc_debug_min_rate_get,
			tegra_emc_debug_min_rate_set, "%llu\n");
static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.max_rate;

	return 0;
}

static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.max_rate = rate;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
			tegra_emc_debug_max_rate_get,
			tegra_emc_debug_max_rate_set, "%llu\n");
static void tegra_emc_debugfs_init(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	unsigned int i;
	int err;

	emc->debugfs.min_rate = ULONG_MAX;
	emc->debugfs.max_rate = 0;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < emc->debugfs.min_rate)
			emc->debugfs.min_rate = emc->timings[i].rate;

		if (emc->timings[i].rate > emc->debugfs.max_rate)
			emc->debugfs.max_rate = emc->timings[i].rate;
	}

	if (!emc->num_timings) {
		emc->debugfs.min_rate = clk_get_rate(emc->clk);
		emc->debugfs.max_rate = emc->debugfs.min_rate;
	}

	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
				 emc->debugfs.max_rate);
	if (err < 0) {
		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
			emc->debugfs.min_rate, emc->debugfs.max_rate,
			emc->clk);
		return;
	}

	emc->debugfs.root = debugfs_create_dir("emc", NULL);

	debugfs_create_file("available_rates", 0444, emc->debugfs.root,
			    emc, &tegra_emc_debug_available_rates_fops);
	debugfs_create_file("min_rate", 0644, emc->debugfs.root,
			    emc, &tegra_emc_debug_min_rate_fops);
	debugfs_create_file("max_rate", 0644, emc->debugfs.root,
			    emc, &tegra_emc_debug_max_rate_fops);
}
static inline struct tegra_emc *
to_tegra_emc_provider(struct icc_provider *provider)
{
	return container_of(provider, struct tegra_emc, provider);
}
static struct icc_node_data *
emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
	struct icc_provider *provider = data;
	struct icc_node_data *ndata;
	struct icc_node *node;

	/* External Memory is the only possible ICC route */
	list_for_each_entry(node, &provider->nodes, node_list) {
		if (node->id != TEGRA_ICC_EMEM)
			continue;

		ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
		if (!ndata)
			return ERR_PTR(-ENOMEM);

		/*
		 * SRC and DST nodes should have matching TAG in order to have
		 * it set by default for a requested path.
		 */
		ndata->tag = TEGRA_MC_ICC_TAG_ISO;
		ndata->node = node;

		return ndata;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
	unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
	unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
	unsigned long long rate = max(avg_bw, peak_bw);
	unsigned int dram_data_bus_width_bytes;
	int err;

	/*
	 * Tegra20 EMC runs on x2 clock rate of SDRAM bus because DDR data
	 * is sampled on both clock edges. This means that the EMC clock rate
	 * equals the peak data-rate.
	 */
	dram_data_bus_width_bytes = emc->dram_bus_width / 8;
	do_div(rate, dram_data_bus_width_bytes);
	rate = min_t(u64, rate, U32_MAX);

	err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
	if (err)
		return err;

	return 0;
}
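/*
 * Worked example (hypothetical bandwidth): a 1 GB/s peak request on a
 * 32-bit (4-byte) DRAM bus needs 250 M transfers/s. Since DDR moves
 * data on both clock edges and the EMC clock runs at twice the SDRAM
 * bus clock, that transfer rate maps directly to a 250 MHz EMC clock
 * floor here.
 */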
static int tegra_emc_interconnect_init(struct tegra_emc *emc)
{
	const struct tegra_mc_soc *soc;
	struct icc_node *node;
	int err;

	emc->mc = devm_tegra_memory_controller_get(emc->dev);
	if (IS_ERR(emc->mc))
		return PTR_ERR(emc->mc);

	soc = emc->mc->soc;

	emc->provider.dev = emc->dev;
	emc->provider.set = emc_icc_set;
	emc->provider.data = &emc->provider;
	emc->provider.aggregate = soc->icc_ops->aggregate;
	emc->provider.xlate_extended = emc_of_icc_xlate_extended;

	icc_provider_init(&emc->provider);

	/* create External Memory Controller node */
	node = icc_node_create(TEGRA_ICC_EMC);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto err_msg;
	}

	node->name = "External Memory Controller";
	icc_node_add(node, &emc->provider);

	/* link External Memory Controller to External Memory (DRAM) */
	err = icc_link_create(node, TEGRA_ICC_EMEM);
	if (err)
		goto remove_nodes;

	/* create External Memory node */
	node = icc_node_create(TEGRA_ICC_EMEM);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto remove_nodes;
	}

	node->name = "External Memory (DRAM)";
	icc_node_add(node, &emc->provider);

	err = icc_provider_register(&emc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&emc->provider);
err_msg:
	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);

	return err;
}
static void devm_tegra_emc_unset_callback(void *data)
{
	tegra20_clk_set_emc_round_callback(NULL, NULL);
}
static void devm_tegra_emc_unreg_clk_notifier(void *data)
{
	struct tegra_emc *emc = data;

	clk_notifier_unregister(emc->clk, &emc->clk_nb);
}
static int tegra_emc_init_clk(struct tegra_emc *emc)
{
	int err;

	tegra20_clk_set_emc_round_callback(emc_round_rate, emc);

	err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
				       NULL);
	if (err)
		return err;

	emc->clk = devm_clk_get(emc->dev, NULL);
	if (IS_ERR(emc->clk)) {
		dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
		return PTR_ERR(emc->clk);
	}

	err = clk_notifier_register(emc->clk, &emc->clk_nb);
	if (err) {
		dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
		return err;
	}

	err = devm_add_action_or_reset(emc->dev,
				       devm_tegra_emc_unreg_clk_notifier, emc);
	if (err)
		return err;

	return 0;
}
static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
				    u32 flags)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;
	unsigned long rate;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}

	rate = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
}
static int tegra_emc_devfreq_get_dev_status(struct device *dev,
					    struct devfreq_dev_status *stat)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	/* freeze counters */
	writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);

	/*
	 * busy_time: number of clocks during which an EMC request was accepted
	 * total_time: number of clocks PWR_GATHER control was set to ENABLE
	 */
	stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
	stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
	stat->current_frequency = clk_get_rate(emc->clk);

	/* clear counters and restart */
	writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
	writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);

	return 0;
}
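/*
 * The simple_ondemand governor derives load as busy_time / total_time
 * from the counters above. E.g. (hypothetical readings) PWR_COUNT =
 * 200000 with PWR_CLOCKS = 1000000 reads back as 20% load, right at
 * the upthreshold configured in tegra_emc_devfreq_init(); anything
 * above it makes the governor raise the EMC rate.
 */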
static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
	.polling_ms = 30,
	.target = tegra_emc_devfreq_target,
	.get_dev_status = tegra_emc_devfreq_get_dev_status,
};
static int tegra_emc_devfreq_init(struct tegra_emc *emc)
{
	struct devfreq *devfreq;

	/*
	 * PWR_COUNT is 1/2 of PWR_CLOCKS at maximum, hence the up-threshold
	 * should be less than 50. Secondly, multiple active memory clients
	 * may cause over 20% of lost clock cycles due to stalls caused by
	 * competing memory accesses. This means that the threshold should be
	 * set to less than 30 in order to have a properly working governor.
	 */
	emc->ondemand_data.upthreshold = 20;

	/*
	 * Reset the statistics-gathering state, select global bandwidth for
	 * the statistics collection mode and set the clocks counter
	 * saturation limit to maximum.
	 */
	writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
	writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
	writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);

	devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &emc->ondemand_data);
	if (IS_ERR(devfreq)) {
		dev_err(emc->dev, "failed to initialize devfreq: %pe\n", devfreq);
		return PTR_ERR(devfreq);
	}

	return 0;
}
static int tegra_emc_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {};
	struct device_node *np;
	struct tegra_emc *emc;
	int irq, err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "please update your device tree\n");
		return irq;
	}

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc)
		return -ENOMEM;

	mutex_init(&emc->rate_lock);
	emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
	emc->dev = &pdev->dev;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	np = tegra_emc_find_node_by_ram_code(emc);
	if (np) {
		err = tegra_emc_load_timings_from_dt(emc, np);
		of_node_put(np);
		if (err)
			return err;
	}

	err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
			       dev_name(&pdev->dev), emc);
	if (err) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		return err;
	}

	err = tegra_emc_init_clk(emc);
	if (err)
		return err;

	opp_params.init_state = true;

	err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
	if (err)
		return err;

	platform_set_drvdata(pdev, emc);
	tegra_emc_rate_requests_init(emc);
	tegra_emc_debugfs_init(emc);
	tegra_emc_interconnect_init(emc);
	tegra_emc_devfreq_init(emc);

	/*
	 * Don't allow the kernel module to be unloaded. Unloading adds some
	 * extra complexity which isn't really worth the effort in the case
	 * of this driver.
	 */
	try_module_get(THIS_MODULE);

	return 0;
}
static const struct of_device_id tegra_emc_of_match[] = {
	{ .compatible = "nvidia,tegra20-emc", },
	{},
};
MODULE_DEVICE_TABLE(of, tegra_emc_of_match);

static struct platform_driver tegra_emc_driver = {
	.probe = tegra_emc_probe,
	.driver = {
		.name = "tegra20-emc",
		.of_match_table = tegra_emc_of_match,
		.suppress_bind_attrs = true,
		.sync_state = icc_sync_state,
	},
};
module_platform_driver(tegra_emc_driver);

MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL v2");