/* linux/arch/arm/plat-s5p/sysmmu.c
 *
 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/export.h>

#include <asm/pgtable.h>

#include <mach/map.h>
#include <mach/regs-sysmmu.h>
#include <plat/sysmmu.h>

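/*
 * Values written to the S5P_MMU_CTRL register of each System MMU instance:
 * CTRL_ENABLE turns translation on, CTRL_BLOCK appears to stall translation
 * while keeping the MMU enabled (used below around page table base updates
 * and TLB flushes), and CTRL_DISABLE turns it off. The exact bit layout is
 * defined by the System MMU hardware.
 */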
#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

static struct device *dev;

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	S5P_PAGE_FAULT_ADDR,
	S5P_AR_FAULT_ADDR,
	S5P_AW_FAULT_ADDR,
	S5P_DEFAULT_SLAVE_ADDR,
	S5P_AR_FAULT_ADDR,
	S5P_AR_FAULT_ADDR,
	S5P_AW_FAULT_ADDR,
	S5P_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT"
};

static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
		enum S5P_SYSMMU_INTERRUPT_TYPE itype,
		unsigned long pgtable_base,
		unsigned long fault_addr);

/*
 * Each System MMU instance owns two adjacent bits in sysmmu_states.
 * The System MMU is enabled when both bits are set and disabled otherwise.
 */
static unsigned long sysmmu_states;

static inline void set_sysmmu_active(sysmmu_ips ips)
{
	sysmmu_states |= 3 << (ips * 2);
}

static inline void set_sysmmu_inactive(sysmmu_ips ips)
{
	sysmmu_states &= ~(3 << (ips * 2));
}

static inline int is_sysmmu_active(sysmmu_ips ips)
{
	return sysmmu_states & (3 << (ips * 2));
}

static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];

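/*
 * Block/unblock a System MMU instance around state changes. Writing
 * CTRL_BLOCK appears to stall address translation without disabling the
 * MMU; callers below block before changing the page table base or flushing
 * the TLB, and unblock (write CTRL_ENABLE) afterwards.
 */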
static inline void sysmmu_block(sysmmu_ips ips)
{
	__raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
	dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
}

static inline void sysmmu_unblock(sysmmu_ips ips)
{
	__raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
	dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
}

static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
{
	__raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
	dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
}

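/*
 * Program the first-level page table base of a System MMU instance and
 * flush its TLB. A NULL pgd falls back to the kernel's ZERO_PAGE and a
 * 4KB first-level table configuration, presumably so the MMU always has a
 * valid table to walk; the meaning of the S5P_MMU_CFG values follows the
 * inline comments below.
 */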
static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
{
	if (unlikely(pgd == 0)) {
		pgd = (unsigned long)ZERO_PAGE(0);
		__raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
	} else {
		__raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
	}

	__raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);

	dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
					sysmmu_ips_name[ips], pgd);
	__sysmmu_tlb_invalidate(ips);
}

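/*
 * Register a per-instance fault handler. The handler is called from the
 * IRQ handler below with the fault type, the current page table base and
 * the faulting address; a non-zero return value means the fault was
 * resolved and translation should be retried, zero means it was not
 * handled.
 */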
void sysmmu_set_fault_handler(sysmmu_ips ips,
		int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
			unsigned long pgtable_base,
			unsigned long fault_addr))
{
	BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
	fault_handlers[ips] = handler;
}

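/*
 * Interrupt handler for System MMU faults. It picks the lowest pending
 * fault from S5P_INT_STATUS, reports it, and gives a registered fault
 * handler a chance to resolve it. Only when the handler reports success is
 * the interrupt cleared before the MMU is unblocked; otherwise the fault
 * is logged as unhandled.
 */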
static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
{
	/* The System MMU is blocked when this interrupt occurs. */
	unsigned long base = 0;
	sysmmu_ips ips = (sysmmu_ips)dev_id;
	enum S5P_SYSMMU_INTERRUPT_TYPE itype;

	itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
		__ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));

	BUG_ON(!((itype >= 0) && (itype < 8)));

	dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
							sysmmu_ips_name[ips]);

	if (fault_handlers[ips]) {
		unsigned long addr;

		base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
		addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);

		if (fault_handlers[ips](itype, base, addr)) {
			__raw_writel(1 << itype,
					sysmmusfrs[ips] + S5P_INT_CLEAR);
			dev_notice(dev, "%s from %s is resolved."
					" Retrying translation.\n",
				sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
		} else {
			base = 0;
		}
	}

	sysmmu_unblock(ips);

	if (!base)
		dev_notice(dev, "%s from %s is not handled.\n",
			sysmmu_fault_name[itype], sysmmu_ips_name[ips]);

	return IRQ_HANDLED;
}

void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
{
	if (is_sysmmu_active(ips)) {
		sysmmu_block(ips);
		__sysmmu_set_ptbase(ips, pgd);
		sysmmu_unblock(ips);
	} else {
		dev_dbg(dev, "%s is disabled. "
			"Skipping page table base initialization.\n",
						sysmmu_ips_name[ips]);
	}
}

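/*
 * s5p_sysmmu_enable()/s5p_sysmmu_disable() drive the full on/off sequence
 * for one instance: the instance clock is enabled via sysmmu_clk_enable()
 * before the page table base is programmed and translation is turned on,
 * and disabled again after translation is turned off. The sysmmu_states
 * bitmap keeps the calls idempotent: enabling an already-enabled instance
 * (or disabling a disabled one) is only logged.
 */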
void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
{
	if (!is_sysmmu_active(ips)) {
		sysmmu_clk_enable(ips);

		__sysmmu_set_ptbase(ips, pgd);

		__raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);

		set_sysmmu_active(ips);
		dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
	} else {
		dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
	}
}

void s5p_sysmmu_disable(sysmmu_ips ips)
{
	if (is_sysmmu_active(ips)) {
		__raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
		set_sysmmu_inactive(ips);
		sysmmu_clk_disable(ips);
		dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
	} else {
		dev_dbg(dev, "%s is already disabled.\n",
						sysmmu_ips_name[ips]);
	}
}

void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
{
	if (is_sysmmu_active(ips)) {
		sysmmu_block(ips);
		__sysmmu_tlb_invalidate(ips);
		sysmmu_unblock(ips);
	} else {
		dev_dbg(dev, "%s is disabled. "
			"Skipping TLB invalidation.\n", sysmmu_ips_name[ips]);
	}
}

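/*
 * Probe walks every System MMU instance: it initializes and gates the
 * instance clock, claims and maps the instance's register window, and
 * installs s5p_sysmmu_irq() with the instance id as dev_id. Note that the
 * error path only unwinds the resources of the iteration that failed;
 * regions and mappings claimed by earlier iterations are left in place.
 */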
static int s5p_sysmmu_probe(struct platform_device *pdev)
{
	int i, ret;
	struct resource *res, *mem;

	dev = &pdev->dev;

	for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
		int irq;

		sysmmu_clk_init(dev, i);
		sysmmu_clk_disable(i);

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_err(dev, "Failed to get the resource of %s.\n",
							sysmmu_ips_name[i]);
			ret = -ENODEV;
			goto err_res;
		}

		mem = request_mem_region(res->start, resource_size(res),
					pdev->name);
		if (!mem) {
			dev_err(dev, "Failed to request the memory region of %s.\n",
							sysmmu_ips_name[i]);
			ret = -EBUSY;
			goto err_res;
		}

		sysmmusfrs[i] = ioremap(res->start, resource_size(res));
		if (!sysmmusfrs[i]) {
			dev_err(dev, "Failed to ioremap() for %s.\n",
							sysmmu_ips_name[i]);
			ret = -ENXIO;
			goto err_reg;
		}

		irq = platform_get_irq(pdev, i);
		if (irq <= 0) {
			dev_err(dev, "Failed to get the IRQ resource of %s.\n",
							sysmmu_ips_name[i]);
			ret = -ENOENT;
			goto err_map;
		}

		if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
						pdev->name, (void *)i)) {
			dev_err(dev, "Failed to request IRQ for %s.\n",
							sysmmu_ips_name[i]);
			ret = -ENOENT;
			goto err_map;
		}
	}

	return 0;

err_map:
	iounmap(sysmmusfrs[i]);
err_reg:
	release_mem_region(mem->start, resource_size(mem));
err_res:
	return ret;
}

static int s5p_sysmmu_remove(struct platform_device *pdev)
{
	return 0;
}

int s5p_sysmmu_runtime_suspend(struct device *dev)
{
	return 0;
}

int s5p_sysmmu_runtime_resume(struct device *dev)
{
	return 0;
}

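/*
 * remove() and the runtime PM callbacks are stubs; the driver keeps its
 * mappings and IRQs for the lifetime of the system. Registration happens
 * at arch_initcall time, presumably so the System MMUs are ready before
 * their client drivers probe.
 */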
const struct dev_pm_ops s5p_sysmmu_pm_ops = {
	.runtime_suspend	= s5p_sysmmu_runtime_suspend,
	.runtime_resume		= s5p_sysmmu_runtime_resume,
};

static struct platform_driver s5p_sysmmu_driver = {
	.probe		= s5p_sysmmu_probe,
	.remove		= s5p_sysmmu_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= "s5p-sysmmu",
		.pm		= &s5p_sysmmu_pm_ops,
	}
};

static int __init s5p_sysmmu_init(void)
{
	return platform_driver_register(&s5p_sysmmu_driver);
}
arch_initcall(s5p_sysmmu_init);