// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/tegra-icc.h>

#include <soc/tegra/fuse.h>

#include "mc.h"

static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
	{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
	{ .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_114_SOC
	{ .compatible = "nvidia,tegra114-mc", .data = &tegra114_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_124_SOC
	{ .compatible = "nvidia,tegra124-mc", .data = &tegra124_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_132_SOC
	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_210_SOC
	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_186_SOC
	{ .compatible = "nvidia,tegra186-mc", .data = &tegra186_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_194_SOC
	{ .compatible = "nvidia,tegra194-mc", .data = &tegra194_mc_soc },
#endif
#ifdef CONFIG_ARCH_TEGRA_234_SOC
	{ .compatible = "nvidia,tegra234-mc", .data = &tegra234_mc_soc },
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);

static void tegra_mc_devm_action_put_device(void *data)
{
	struct tegra_mc *mc = data;

	put_device(mc->dev);
}

/**
 * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
 * @dev: device pointer for the consumer device
 *
 * This function will search for the Memory Controller node in a device-tree
 * and retrieve the Memory Controller handle.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
 */
struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
{
	struct platform_device *pdev;
	struct device_node *np;
	struct tegra_mc *mc;
	int err;

	np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	mc = platform_get_drvdata(pdev);
	if (!mc) {
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
	if (err)
		return ERR_PTR(err);

	return mc;
}
EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);

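/*
 * Illustrative sketch (not part of this driver): a consumer driver whose
 * device node carries a "nvidia,memory-controller" phandle could obtain
 * the MC handle like this; the reference taken on the MC device is dropped
 * automatically by the devres action when the consumer is unbound:
 *
 *	struct tegra_mc *mc;
 *
 *	mc = devm_tegra_memory_controller_get(&pdev->dev);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 */
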
int tegra_mc_probe_device(struct tegra_mc *mc, struct device *dev)
{
	if (mc->soc->ops && mc->soc->ops->probe_device)
		return mc->soc->ops->probe_device(mc, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_probe_device);

int tegra_mc_get_carveout_info(struct tegra_mc *mc, unsigned int id,
			       phys_addr_t *base, u64 *size)
{
	u32 offset;

	if (id < 1 || id >= mc->soc->num_carveouts)
		return -EINVAL;

	if (id < 6)
		offset = 0xc0c + 0x50 * (id - 1);
	else
		offset = 0x2004 + 0x50 * (id - 6);

	*base = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x0);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	*base |= (phys_addr_t)mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x4) << 32;
#endif

	if (size)
		*size = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, offset + 0x8) << 17;

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_get_carveout_info);

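/*
 * Worked example for the offset arithmetic above: carveout 1 lives at
 * offset 0xc0c and carveout 2 at 0xc5c (stride 0x50), while carveouts 6
 * and up start at 0x2004. The low/high base words sit at +0x0/+0x4 and
 * the size word at +0x8 is in 128 KiB units, hence the shift by 17.
 */
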
static int tegra_mc_block_dma_common(struct tegra_mc *mc,
				     const struct tegra_mc_reset *rst)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&mc->lock, flags);

	value = mc_readl(mc, rst->control) | BIT(rst->bit);
	mc_writel(mc, value, rst->control);

	spin_unlock_irqrestore(&mc->lock, flags);

	return 0;
}

static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
				       const struct tegra_mc_reset *rst)
{
	return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
}

static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
				       const struct tegra_mc_reset *rst)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&mc->lock, flags);

	value = mc_readl(mc, rst->control) & ~BIT(rst->bit);
	mc_writel(mc, value, rst->control);

	spin_unlock_irqrestore(&mc->lock, flags);

	return 0;
}

static int tegra_mc_reset_status_common(struct tegra_mc *mc,
					const struct tegra_mc_reset *rst)
{
	return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
}

const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
	.block_dma = tegra_mc_block_dma_common,
	.dma_idling = tegra_mc_dma_idling_common,
	.unblock_dma = tegra_mc_unblock_dma_common,
	.reset_status = tegra_mc_reset_status_common,
};

static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
{
	return container_of(rcdev, struct tegra_mc, reset);
}

static const struct tegra_mc_reset *tegra_mc_reset_find(struct tegra_mc *mc,
							unsigned long id)
{
	unsigned int i;

	for (i = 0; i < mc->soc->num_resets; i++)
		if (mc->soc->resets[i].id == id)
			return &mc->soc->resets[i];

	return NULL;
}

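/*
 * Hot-reset assertion below follows the common Tegra MC sequence: bail out
 * early if the reset is already asserted, block new client DMA requests,
 * poll until the outstanding requests have drained, then assert the hot
 * reset so that requests queued before arbitration are cleared.
 */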
static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int retries = 500;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	/* DMA flushing will fail if reset is already asserted */
	if (rst_ops->reset_status) {
		/* check whether reset is asserted */
		if (rst_ops->reset_status(mc, rst))
			return 0;
	}

	if (rst_ops->block_dma) {
		/* block clients DMA requests */
		err = rst_ops->block_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to block %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->dma_idling) {
		/* wait for completion of the outstanding DMA requests */
		while (!rst_ops->dma_idling(mc, rst)) {
			if (!retries--) {
				dev_err(mc->dev, "failed to flush %s DMA\n",
					rst->name);
				return -EBUSY;
			}

			usleep_range(10, 100);
		}
	}

	if (rst_ops->hotreset_assert) {
		/* clear clients DMA requests sitting before arbitration */
		err = rst_ops->hotreset_assert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev,
				      unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;
	int err;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	if (rst_ops->hotreset_deassert) {
		/* take out client from hot reset */
		err = rst_ops->hotreset_deassert(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to deassert hot reset %s: %d\n",
				rst->name, err);
			return err;
		}
	}

	if (rst_ops->unblock_dma) {
		/* allow new DMA requests to proceed to arbitration */
		err = rst_ops->unblock_dma(mc, rst);
		if (err) {
			dev_err(mc->dev, "failed to unblock %s DMA: %d\n",
				rst->name, err);
			return err;
		}
	}

	return 0;
}

static int tegra_mc_hotreset_status(struct reset_controller_dev *rcdev,
				    unsigned long id)
{
	struct tegra_mc *mc = reset_to_mc(rcdev);
	const struct tegra_mc_reset_ops *rst_ops;
	const struct tegra_mc_reset *rst;

	rst = tegra_mc_reset_find(mc, id);
	if (!rst)
		return -ENODEV;

	rst_ops = mc->soc->reset_ops;
	if (!rst_ops)
		return -ENODEV;

	return rst_ops->reset_status(mc, rst);
}

static const struct reset_control_ops tegra_mc_reset_ops = {
	.assert = tegra_mc_hotreset_assert,
	.deassert = tegra_mc_hotreset_deassert,
	.status = tegra_mc_hotreset_status,
};

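/*
 * Illustrative sketch (hypothetical consumer): with the MC registered as a
 * reset controller (one specifier cell, see tegra_mc_reset_setup() below),
 * a client driver could hot-reset its module through the generic reset API:
 *
 *	struct reset_control *rst;
 *
 *	rst = devm_reset_control_get_exclusive(dev, NULL);
 *	if (IS_ERR(rst))
 *		return PTR_ERR(rst);
 *
 *	err = reset_control_assert(rst);	(blocks and flushes DMA)
 *	...
 *	err = reset_control_deassert(rst);	(unblocks DMA)
 */
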
static int tegra_mc_reset_setup(struct tegra_mc *mc)
{
	int err;

	mc->reset.ops = &tegra_mc_reset_ops;
	mc->reset.owner = THIS_MODULE;
	mc->reset.of_node = mc->dev->of_node;
	mc->reset.of_reset_n_cells = 1;
	mc->reset.nr_resets = mc->soc->num_resets;

	err = reset_controller_register(&mc->reset);
	if (err < 0)
		return err;

	return 0;
}

int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
{
	unsigned int i;
	struct tegra_mc_timing *timing = NULL;

	for (i = 0; i < mc->num_timings; i++) {
		if (mc->timings[i].rate == rate) {
			timing = &mc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(mc->dev, "no memory timing registered for rate %lu\n",
			rate);
		return -EINVAL;
	}

	for (i = 0; i < mc->soc->num_emem_regs; ++i)
		mc_writel(mc, timing->emem_data[i], mc->soc->emem_regs[i]);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);

unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
	u8 dram_count;

	dram_count = mc_readl(mc, MC_EMEM_ADR_CFG);
	dram_count &= MC_EMEM_ADR_CFG_EMEM_NUMDEV;
	/* the NUMDEV field is 0-based, so add one for the device count */
	dram_count++;

	return dram_count;
}
EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);

#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
    defined(CONFIG_ARCH_TEGRA_114_SOC) || \
    defined(CONFIG_ARCH_TEGRA_124_SOC) || \
    defined(CONFIG_ARCH_TEGRA_132_SOC) || \
    defined(CONFIG_ARCH_TEGRA_210_SOC)
static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
{
	unsigned long long tick;
	unsigned int i;
	u32 value;

	/* compute the number of MC clock cycles per tick */
	tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
	do_div(tick, NSEC_PER_SEC);

	value = mc_readl(mc, MC_EMEM_ARB_CFG);
	value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
	value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
	mc_writel(mc, value, MC_EMEM_ARB_CFG);

	/* write latency allowance defaults */
	for (i = 0; i < mc->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &mc->soc->clients[i];
		u32 value;

		value = mc_readl(mc, client->regs.la.reg);
		value &= ~(client->regs.la.mask << client->regs.la.shift);
		value |= (client->regs.la.def & client->regs.la.mask) << client->regs.la.shift;
		mc_writel(mc, value, client->regs.la.reg);
	}

	/* latch new values */
	mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);

	return 0;
}

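/*
 * Worked example for the cycles-per-tick computation above, assuming the
 * 30 ns MC tick set in tegra_mc_probe() and a hypothetical 408 MHz MC
 * clock: tick = 30 * 408000000 / NSEC_PER_SEC, i.e. roughly 12 MC clock
 * cycles per latency-allowance tick.
 */
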
static int load_one_timing(struct tegra_mc *mc,
			   struct tegra_mc_timing *timing,
			   struct device_node *node)
{
	int err;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;
	timing->emem_data = devm_kcalloc(mc->dev, mc->soc->num_emem_regs,
					 sizeof(u32), GFP_KERNEL);
	if (!timing->emem_data)
		return -ENOMEM;

	err = of_property_read_u32_array(node, "nvidia,emem-configuration",
					 timing->emem_data,
					 mc->soc->num_emem_regs);
	if (err) {
		dev_err(mc->dev,
			"timing %pOFn: failed to read EMEM configuration\n",
			node);
		return err;
	}

	return 0;
}

static int load_timings(struct tegra_mc *mc, struct device_node *node)
{
	struct tegra_mc_timing *timing;
	int child_count = of_get_child_count(node);
	int i = 0, err;

	mc->timings = devm_kcalloc(mc->dev, child_count, sizeof(*timing),
				   GFP_KERNEL);
	if (!mc->timings)
		return -ENOMEM;

	mc->num_timings = child_count;

	for_each_child_of_node_scoped(node, child) {
		timing = &mc->timings[i++];

		err = load_one_timing(mc, timing, child);
		if (err)
			return err;
	}

	return 0;
}

static int tegra_mc_setup_timings(struct tegra_mc *mc)
{
	u32 ram_code, node_ram_code;
	int err;

	ram_code = tegra_read_ram_code();

	mc->num_timings = 0;

	for_each_child_of_node_scoped(mc->dev->of_node, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err || (node_ram_code != ram_code))
			continue;

		err = load_timings(mc, node);
		if (err)
			return err;
		break;
	}

	if (mc->num_timings == 0)
		dev_warn(mc->dev,
			 "no memory timings for RAM code %u registered\n",
			 ram_code);

	return 0;
}

int tegra30_mc_probe(struct tegra_mc *mc)
{
	int err;

	mc->clk = devm_clk_get_optional(mc->dev, "mc");
	if (IS_ERR(mc->clk)) {
		dev_err(mc->dev, "failed to get MC clock: %ld\n", PTR_ERR(mc->clk));
		return PTR_ERR(mc->clk);
	}

	/* ensure that debug features are disabled */
	mc_writel(mc, 0x00000000, MC_TIMING_CONTROL_DBG);

	err = tegra_mc_setup_latency_allowance(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup latency allowance: %d\n", err);
		return err;
	}

	err = tegra_mc_setup_timings(mc);
	if (err < 0) {
		dev_err(mc->dev, "failed to setup timings: %d\n", err);
		return err;
	}

	return 0;
}

const struct tegra_mc_ops tegra30_mc_ops = {
	.probe = tegra30_mc_probe,
	.handle_irq = tegra30_mc_handle_irq,
};
#endif

static int mc_global_intstatus_to_channel(const struct tegra_mc *mc, u32 status,
					  unsigned int *mc_channel)
{
	if ((status & mc->soc->ch_intmask) == 0)
		return -EINVAL;

	*mc_channel = __ffs((status & mc->soc->ch_intmask) >>
			    mc->soc->global_intstatus_channel_shift);

	return 0;
}

static u32 mc_channel_to_global_intstatus(const struct tegra_mc *mc,
					  unsigned int channel)
{
	return BIT(channel) << mc->soc->global_intstatus_channel_shift;
}

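/*
 * Worked example for the two helpers above, assuming a hypothetical SoC
 * with ch_intmask 0x00000f00 and global_intstatus_channel_shift 8: a
 * global status of 0x00000200 decodes to channel 1, and channel 1 maps
 * back to the global-status bit BIT(1) << 8 = 0x00000200.
 */
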
irqreturn_t tegra30_mc_handle_irq(int irq, void *data)
{
	struct tegra_mc *mc = data;
	unsigned int bit, channel;
	unsigned long status;

	if (mc->soc->num_channels) {
		u32 global_status;
		int err;

		global_status = mc_ch_readl(mc, MC_BROADCAST_CHANNEL, MC_GLOBAL_INTSTATUS);
		err = mc_global_intstatus_to_channel(mc, global_status, &channel);
		if (err < 0) {
			dev_err_ratelimited(mc->dev, "unknown interrupt channel 0x%08x\n",
					    global_status);
			return IRQ_NONE;
		}

		/* mask all interrupts to avoid flooding */
		status = mc_ch_readl(mc, channel, MC_INTSTATUS) & mc->soc->intmask;
	} else {
		status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
	}

	if (!status)
		return IRQ_NONE;

	for_each_set_bit(bit, &status, 32) {
		const char *error = tegra_mc_status_names[bit] ?: "unknown";
		const char *client = "unknown", *desc;
		const char *direction, *secure;
		u32 status_reg, addr_reg;
		u32 intmask = BIT(bit);
		phys_addr_t addr = 0;
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		u32 addr_hi_reg = 0;
#endif
		unsigned int i;
		char perm[7];
		u8 id, type;
		u32 value;

		switch (intmask) {
		case MC_INT_DECERR_VPR:
			status_reg = MC_ERR_VPR_STATUS;
			addr_reg = MC_ERR_VPR_ADR;
			break;

		case MC_INT_SECERR_SEC:
			status_reg = MC_ERR_SEC_STATUS;
			addr_reg = MC_ERR_SEC_ADR;
			break;

		case MC_INT_DECERR_MTS:
			status_reg = MC_ERR_MTS_STATUS;
			addr_reg = MC_ERR_MTS_ADR;
			break;

		case MC_INT_DECERR_GENERALIZED_CARVEOUT:
			status_reg = MC_ERR_GENERALIZED_CARVEOUT_STATUS;
			addr_reg = MC_ERR_GENERALIZED_CARVEOUT_ADR;
			break;

		case MC_INT_DECERR_ROUTE_SANITY:
			status_reg = MC_ERR_ROUTE_SANITY_STATUS;
			addr_reg = MC_ERR_ROUTE_SANITY_ADR;
			break;

		default:
			status_reg = MC_ERR_STATUS;
			addr_reg = MC_ERR_ADR;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
			if (mc->soc->has_addr_hi_reg)
				addr_hi_reg = MC_ERR_ADR_HI;
#endif
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, status_reg);
		else
			value = mc_readl(mc, status_reg);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (mc->soc->num_address_bits > 32) {
			if (addr_hi_reg) {
				if (mc->soc->num_channels)
					addr = mc_ch_readl(mc, channel, addr_hi_reg);
				else
					addr = mc_readl(mc, addr_hi_reg);
			} else {
				addr = ((value >> MC_ERR_STATUS_ADR_HI_SHIFT) &
					MC_ERR_STATUS_ADR_HI_MASK);
			}
			addr <<= 32;
		}
#endif

		if (value & MC_ERR_STATUS_RW)
			direction = "write";
		else
			direction = "read";

		if (value & MC_ERR_STATUS_SECURITY)
			secure = "secure ";
		else
			secure = "";

		id = value & mc->soc->client_id_mask;

		for (i = 0; i < mc->soc->num_clients; i++) {
			if (mc->soc->clients[i].id == id) {
				client = mc->soc->clients[i].name;
				break;
			}
		}

		type = (value & MC_ERR_STATUS_TYPE_MASK) >>
		       MC_ERR_STATUS_TYPE_SHIFT;
		desc = tegra_mc_error_names[type];

		switch (value & MC_ERR_STATUS_TYPE_MASK) {
		case MC_ERR_STATUS_TYPE_INVALID_SMMU_PAGE:
			perm[0] = ' ';
			perm[1] = '[';

			if (value & MC_ERR_STATUS_READABLE)
				perm[2] = 'R';
			else
				perm[2] = '-';

			if (value & MC_ERR_STATUS_WRITABLE)
				perm[3] = 'W';
			else
				perm[3] = '-';

			if (value & MC_ERR_STATUS_NONSECURE)
				perm[4] = '-';
			else
				perm[4] = 'S';

			perm[5] = ']';
			perm[6] = '\0';
			break;

		default:
			perm[0] = '\0';
			break;
		}

		if (mc->soc->num_channels)
			value = mc_ch_readl(mc, channel, addr_reg);
		else
			value = mc_readl(mc, addr_reg);
		addr |= value;

		dev_err_ratelimited(mc->dev, "%s: %s%s @%pa: %s (%s%s)\n",
				    client, secure, direction, &addr, error,
				    desc, perm);
	}

	/* clear interrupts */
	if (mc->soc->num_channels) {
		mc_ch_writel(mc, channel, status, MC_INTSTATUS);
		mc_ch_writel(mc, MC_BROADCAST_CHANNEL,
			     mc_channel_to_global_intstatus(mc, channel),
			     MC_GLOBAL_INTSTATUS);
	} else {
		mc_writel(mc, status, MC_INTSTATUS);
	}

	return IRQ_HANDLED;
}

const char *const tegra_mc_status_names[32] = {
	[ 1] = "External interrupt",
	[ 6] = "EMEM address decode error",
	[ 7] = "GART page fault",
	[ 8] = "Security violation",
	[ 9] = "EMEM arbitration error",
	[10] = "Page fault",
	[11] = "Invalid APB ASID update",
	[12] = "VPR violation",
	[13] = "Secure carveout violation",
	[16] = "MTS carveout violation",
	[17] = "Generalized carveout violation",
	[20] = "Route Sanity error",
};

const char *const tegra_mc_error_names[8] = {
	[2] = "EMEM decode error",
	[3] = "TrustZone violation",
	[4] = "Carveout violation",
	[6] = "SMMU translation error",
};

struct icc_node *tegra_mc_icc_xlate(const struct of_phandle_args *spec, void *data)
{
	struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
	struct icc_node *node;

	list_for_each_entry(node, &mc->provider.nodes, node_list) {
		if (node->id == spec->args[0])
			return node;
	}

	/*
	 * If a client driver calls devm_of_icc_get() before the MC driver
	 * is probed, then return EPROBE_DEFER to the client driver.
	 */
	return ERR_PTR(-EPROBE_DEFER);
}

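/*
 * Illustrative sketch (hypothetical consumer): a client driver can look up
 * its interconnect path and request bandwidth through the standard ICC API;
 * if it runs before this provider has probed, the xlate above makes the
 * lookup retry via -EPROBE_DEFER:
 *
 *	struct icc_path *path;
 *
 *	path = devm_of_icc_get(dev, "dma-mem");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *
 *	err = icc_set_bw(path, kBps_to_icc(1000), kBps_to_icc(2000));
 */
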
static int tegra_mc_icc_get(struct icc_node *node, u32 *average, u32 *peak)
{
	*average = 0;
	*peak = 0;

	return 0;
}

static int tegra_mc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	return 0;
}

const struct tegra_mc_icc_ops tegra_mc_icc_ops = {
	.xlate = tegra_mc_icc_xlate,
	.aggregate = icc_std_aggregate,
	.get_bw = tegra_mc_icc_get,
	.set = tegra_mc_icc_set,
};

/*
 * The Memory Controller (MC) has a number of Memory Clients that issue
 * memory bandwidth allocation requests to the MC interconnect provider.
 * The MC provider aggregates the requests and sends the aggregated request
 * up to the External Memory Controller (EMC) interconnect provider, which
 * re-configures the hardware interface to the External Memory (EMEM) in
 * accordance with the required bandwidth. Each MC interconnect node
 * represents an individual Memory Client.
 *
 * Memory interconnect topology:
 *
 *               +----+
 * +--------+    |    |
 * | client +--->+    |
 * +--------+    |    |
 *               |    |    +-----+    +------+
 *    ...        | MC +--->+ EMC +--->+ EMEM |
 *               |    |    +-----+    +------+
 * +--------+    |    |
 * | client +--->+    |
 * +--------+    |    |
 *               +----+
 */
static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
{
	struct icc_node *node;
	unsigned int i;
	int err;

	/* older device-trees don't have interconnect properties */
	if (!device_property_present(mc->dev, "#interconnect-cells") ||
	    !mc->soc->icc_ops)
		return 0;

	mc->provider.dev = mc->dev;
	mc->provider.data = &mc->provider;
	mc->provider.set = mc->soc->icc_ops->set;
	mc->provider.aggregate = mc->soc->icc_ops->aggregate;
	mc->provider.get_bw = mc->soc->icc_ops->get_bw;
	mc->provider.xlate = mc->soc->icc_ops->xlate;
	mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;

	icc_provider_init(&mc->provider);

	/* create Memory Controller node */
	node = icc_node_create(TEGRA_ICC_MC);
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->name = "Memory Controller";
	icc_node_add(node, &mc->provider);

	/* link Memory Controller to External Memory Controller */
	err = icc_link_create(node, TEGRA_ICC_EMC);
	if (err)
		goto remove_nodes;

	for (i = 0; i < mc->soc->num_clients; i++) {
		/* create MC client node */
		node = icc_node_create(mc->soc->clients[i].id);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto remove_nodes;
		}

		node->name = mc->soc->clients[i].name;
		icc_node_add(node, &mc->provider);

		/* link Memory Client to Memory Controller */
		err = icc_link_create(node, TEGRA_ICC_MC);
		if (err)
			goto remove_nodes;

		node->data = (struct tegra_mc_client *)&(mc->soc->clients[i]);
	}

	err = icc_provider_register(&mc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&mc->provider);

	return err;
}

static void tegra_mc_num_channel_enabled(struct tegra_mc *mc)
{
	unsigned int i;
	u32 value;

	value = mc_ch_readl(mc, 0, MC_EMEM_ADR_CFG_CHANNEL_ENABLE);
	if (value <= 0) {
		mc->num_channels = mc->soc->num_channels;
		return;
	}

	for (i = 0; i < 32; i++) {
		if (value & BIT(i))
			mc->num_channels++;
	}
}

static int tegra_mc_probe(struct platform_device *pdev)
{
	struct tegra_mc *mc;
	u64 mask;
	int err;

	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	platform_set_drvdata(pdev, mc);
	spin_lock_init(&mc->lock);
	mc->soc = of_device_get_match_data(&pdev->dev);
	mc->dev = &pdev->dev;

	mask = DMA_BIT_MASK(mc->soc->num_address_bits);

	err = dma_coerce_mask_and_coherent(&pdev->dev, mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	/* length of MC tick in nanoseconds */
	mc->tick = 30;

	mc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mc->regs))
		return PTR_ERR(mc->regs);

	mc->debugfs.root = debugfs_create_dir("mc", NULL);

	if (mc->soc->ops && mc->soc->ops->probe) {
		err = mc->soc->ops->probe(mc);
		if (err < 0)
			return err;
	}

	tegra_mc_num_channel_enabled(mc);

	if (mc->soc->ops && mc->soc->ops->handle_irq) {
		mc->irq = platform_get_irq(pdev, 0);
		if (mc->irq < 0)
			return mc->irq;

		WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");

		if (mc->soc->num_channels)
			mc_ch_writel(mc, MC_BROADCAST_CHANNEL, mc->soc->intmask,
				     MC_INTMASK);
		else
			mc_writel(mc, mc->soc->intmask, MC_INTMASK);

		err = devm_request_irq(&pdev->dev, mc->irq, mc->soc->ops->handle_irq, 0,
				       dev_name(&pdev->dev), mc);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq,
				err);
			return err;
		}
	}

	if (mc->soc->reset_ops) {
		err = tegra_mc_reset_setup(mc);
		if (err < 0)
			dev_err(&pdev->dev, "failed to register reset controller: %d\n", err);
	}

	err = tegra_mc_interconnect_setup(mc);
	if (err < 0)
		dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
			err);

	if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
		mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
		if (IS_ERR(mc->smmu)) {
			dev_err(&pdev->dev, "failed to probe SMMU: %ld\n",
				PTR_ERR(mc->smmu));
			mc->smmu = NULL;
		}
	}

	return 0;
}

static void tegra_mc_sync_state(struct device *dev)
{
	struct tegra_mc *mc = dev_get_drvdata(dev);

	/* check whether ICC provider is registered */
	if (mc->provider.dev == dev)
		icc_sync_state(dev);
}

static struct platform_driver tegra_mc_driver = {
	.driver = {
		.name = "tegra-mc",
		.of_match_table = tegra_mc_of_match,
		.suppress_bind_attrs = true,
		.sync_state = tegra_mc_sync_state,
	},
	.prevent_deferred_probe = true,
	.probe = tegra_mc_probe,
};

static int tegra_mc_init(void)
{
	return platform_driver_register(&tegra_mc_driver);
}
arch_initcall(tegra_mc_init);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra Memory Controller driver");