// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/sparc/mm/leon_m.c
 *
 * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
 *
 * do srmmu probe in software
 *
 */
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/asi.h>
#include <asm/leon.h>
#include <asm/tlbflush.h>

#include "mm_32.h"
/* Non-zero => flush caches on every context switch; init_leon() lowers
 * this to 0 when leon_flush_needed() decides flushing can be skipped. */
int leon_flush_during_switch = 1;

/* Debug knob: non-zero enables verbose printk tracing in leon_swprobe(). */
static int srmmu_swprobe_trace;
24 static inline unsigned long leon_get_ctable_ptr(void)
28 __asm__
__volatile__("lda [%1] %2, %0\n\t" :
30 "r" (SRMMU_CTXTBL_PTR
),
31 "i" (ASI_LEON_MMUREGS
));
32 return (retval
& SRMMU_CTX_PMASK
) << 4;
36 unsigned long leon_swprobe(unsigned long vaddr
, unsigned long *paddr
)
40 unsigned int pgd
, pmd
, ped
;
42 unsigned int lvl
, pte
, paddrbase
;
44 unsigned int paddr_calc
;
48 if (srmmu_swprobe_trace
)
49 printk(KERN_INFO
"swprobe: trace on\n");
51 ctxtbl
= leon_get_ctable_ptr();
53 if (srmmu_swprobe_trace
)
54 printk(KERN_INFO
"swprobe: leon_get_ctable_ptr returned 0=>0\n");
57 if (!_pfn_valid(PFN(ctxtbl
))) {
58 if (srmmu_swprobe_trace
)
60 "swprobe: !_pfn_valid(%x)=>0\n",
65 ctx
= srmmu_get_context();
66 if (srmmu_swprobe_trace
)
67 printk(KERN_INFO
"swprobe: --- ctx (%x) ---\n", ctx
);
69 pgd
= LEON_BYPASS_LOAD_PA(ctxtbl
+ (ctx
* 4));
71 if (((pgd
& SRMMU_ET_MASK
) == SRMMU_ET_PTE
)) {
72 if (srmmu_swprobe_trace
)
73 printk(KERN_INFO
"swprobe: pgd is entry level 3\n");
76 paddrbase
= pgd
& _SRMMU_PTE_PMASK_LEON
;
79 if (((pgd
& SRMMU_ET_MASK
) != SRMMU_ET_PTD
)) {
80 if (srmmu_swprobe_trace
)
81 printk(KERN_INFO
"swprobe: pgd is invalid => 0\n");
85 if (srmmu_swprobe_trace
)
86 printk(KERN_INFO
"swprobe: --- pgd (%x) ---\n", pgd
);
88 ptr
= (pgd
& SRMMU_PTD_PMASK
) << 4;
89 ptr
+= ((((vaddr
) >> LEON_PGD_SH
) & LEON_PGD_M
) * 4);
90 if (!_pfn_valid(PFN(ptr
)))
93 pmd
= LEON_BYPASS_LOAD_PA(ptr
);
94 if (((pmd
& SRMMU_ET_MASK
) == SRMMU_ET_PTE
)) {
95 if (srmmu_swprobe_trace
)
96 printk(KERN_INFO
"swprobe: pmd is entry level 2\n");
99 paddrbase
= pmd
& _SRMMU_PTE_PMASK_LEON
;
102 if (((pmd
& SRMMU_ET_MASK
) != SRMMU_ET_PTD
)) {
103 if (srmmu_swprobe_trace
)
104 printk(KERN_INFO
"swprobe: pmd is invalid => 0\n");
108 if (srmmu_swprobe_trace
)
109 printk(KERN_INFO
"swprobe: --- pmd (%x) ---\n", pmd
);
111 ptr
= (pmd
& SRMMU_PTD_PMASK
) << 4;
112 ptr
+= (((vaddr
>> LEON_PMD_SH
) & LEON_PMD_M
) * 4);
113 if (!_pfn_valid(PFN(ptr
))) {
114 if (srmmu_swprobe_trace
)
115 printk(KERN_INFO
"swprobe: !_pfn_valid(%x)=>0\n",
120 ped
= LEON_BYPASS_LOAD_PA(ptr
);
122 if (((ped
& SRMMU_ET_MASK
) == SRMMU_ET_PTE
)) {
123 if (srmmu_swprobe_trace
)
124 printk(KERN_INFO
"swprobe: ped is entry level 1\n");
127 paddrbase
= ped
& _SRMMU_PTE_PMASK_LEON
;
130 if (((ped
& SRMMU_ET_MASK
) != SRMMU_ET_PTD
)) {
131 if (srmmu_swprobe_trace
)
132 printk(KERN_INFO
"swprobe: ped is invalid => 0\n");
136 if (srmmu_swprobe_trace
)
137 printk(KERN_INFO
"swprobe: --- ped (%x) ---\n", ped
);
139 ptr
= (ped
& SRMMU_PTD_PMASK
) << 4;
140 ptr
+= (((vaddr
>> LEON_PTE_SH
) & LEON_PTE_M
) * 4);
141 if (!_pfn_valid(PFN(ptr
)))
144 ptr
= LEON_BYPASS_LOAD_PA(ptr
);
145 if (((ptr
& SRMMU_ET_MASK
) == SRMMU_ET_PTE
)) {
146 if (srmmu_swprobe_trace
)
147 printk(KERN_INFO
"swprobe: ptr is entry level 0\n");
150 paddrbase
= ptr
& _SRMMU_PTE_PMASK_LEON
;
153 if (srmmu_swprobe_trace
)
154 printk(KERN_INFO
"swprobe: ptr is invalid => 0\n");
161 (vaddr
& ~(-1 << LEON_PTE_SH
)) | ((pte
& ~0xff) << 4);
165 (vaddr
& ~(-1 << LEON_PMD_SH
)) | ((pte
& ~0xff) << 4);
169 (vaddr
& ~(-1 << LEON_PGD_SH
)) | ((pte
& ~0xff) << 4);
176 if (srmmu_swprobe_trace
)
177 printk(KERN_INFO
"swprobe: padde %x\n", paddr_calc
);
/* Flush the entire instruction cache with the SPARC "flush" instruction. */
void leon_flush_icache_all(void)
{
	__asm__ __volatile__(" flush ");	/*iflush*/
}
188 void leon_flush_dcache_all(void)
190 __asm__
__volatile__("sta %%g0, [%%g0] %0\n\t" : :
191 "i"(ASI_LEON_DFLUSH
) : "memory");
194 void leon_flush_pcache_all(struct vm_area_struct
*vma
, unsigned long page
)
196 if (vma
->vm_flags
& VM_EXEC
)
197 leon_flush_icache_all();
198 leon_flush_dcache_all();
201 void leon_flush_cache_all(void)
203 __asm__
__volatile__(" flush "); /*iflush*/
204 __asm__
__volatile__("sta %%g0, [%%g0] %0\n\t" : :
205 "i"(ASI_LEON_DFLUSH
) : "memory");
208 void leon_flush_tlb_all(void)
210 leon_flush_cache_all();
211 __asm__
__volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
212 "i"(ASI_LEON_MMUFLUSH
) : "memory");
215 /* get all cache regs */
216 void leon3_getCacheRegs(struct leon3_cacheregs
*regs
)
218 unsigned long ccr
, iccr
, dccr
;
222 /* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
223 __asm__
__volatile__("lda [%%g0] %3, %0\n\t"
225 "lda [%%g1] %3, %1\n\t"
227 "lda [%%g1] %3, %2\n\t"
228 : "=r"(ccr
), "=r"(iccr
), "=r"(dccr
)
230 : "i"(ASI_LEON_CACHEREGS
) /* input */
231 : "g1" /* clobber list */
/* Due to virtual cache we need to check cache configuration if
 * it is possible to skip flushing in some cases.
 *
 * Leon2 and Leon3 differ in their way of telling cache information
 *
 */
244 int __init
leon_flush_needed(void)
246 int flush_needed
= -1;
247 unsigned int ssize
, sets
;
249 { "direct mapped", "2-way associative", "3-way associative",
253 struct leon3_cacheregs cregs
;
254 leon3_getCacheRegs(&cregs
);
255 sets
= (cregs
.dccr
& LEON3_XCCR_SETS_MASK
) >> 24;
256 /* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
257 ssize
= 1 << ((cregs
.dccr
& LEON3_XCCR_SSIZE_MASK
) >> 20);
259 printk(KERN_INFO
"CACHE: %s cache, set size %dk\n",
260 sets
> 3 ? "unknown" : setStr
[sets
], ssize
);
261 if ((ssize
<= (PAGE_SIZE
/ 1024)) && (sets
== 0)) {
262 /* Set Size <= Page size ==>
263 flush on every context switch not needed. */
265 printk(KERN_INFO
"CACHE: not flushing on every context switch\n");
270 void leon_switch_mm(void)
272 flush_tlb_mm((void *)0);
273 if (leon_flush_during_switch
)
274 leon_flush_cache_all();
/* sparc32_cachetlb_ops hook: per-MM cache flush => flush everything. */
static void leon_flush_cache_mm(struct mm_struct *mm)
{
	leon_flush_cache_all();
}
/* sparc32_cachetlb_ops hook: per-page cache flush. */
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	leon_flush_pcache_all(vma, page);
}
/* sparc32_cachetlb_ops hook: range cache flush => flush everything. */
static void leon_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	leon_flush_cache_all();
}
/* sparc32_cachetlb_ops hook: per-MM TLB flush => flush the whole TLB. */
static void leon_flush_tlb_mm(struct mm_struct *mm)
{
	leon_flush_tlb_all();
}
/* sparc32_cachetlb_ops hook: per-page TLB flush => flush the whole TLB. */
static void leon_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long page)
{
	leon_flush_tlb_all();
}
/* sparc32_cachetlb_ops hook: range TLB flush => flush the whole TLB. */
static void leon_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	leon_flush_tlb_all();
}
/* sparc32_cachetlb_ops hook: write page back to RAM => full cache flush. */
static void leon_flush_page_to_ram(unsigned long page)
{
	leon_flush_cache_all();
}
/* sparc32_cachetlb_ops hook: flush after writing signal trampoline insns. */
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
	leon_flush_cache_all();
}
/* sparc32_cachetlb_ops hook: make page visible to DMA => dcache flush. */
static void leon_flush_page_for_dma(unsigned long page)
{
	leon_flush_dcache_all();
}
327 void __init
poke_leonsparc(void)
331 static const struct sparc32_cachetlb_ops leon_ops
= {
332 .cache_all
= leon_flush_cache_all
,
333 .cache_mm
= leon_flush_cache_mm
,
334 .cache_page
= leon_flush_cache_page
,
335 .cache_range
= leon_flush_cache_range
,
336 .tlb_all
= leon_flush_tlb_all
,
337 .tlb_mm
= leon_flush_tlb_mm
,
338 .tlb_page
= leon_flush_tlb_page
,
339 .tlb_range
= leon_flush_tlb_range
,
340 .page_to_ram
= leon_flush_page_to_ram
,
341 .sig_insns
= leon_flush_sig_insns
,
342 .page_for_dma
= leon_flush_page_for_dma
,
345 void __init
init_leon(void)
348 sparc32_cachetlb_ops
= &leon_ops
;
349 poke_srmmu
= poke_leonsparc
;
351 leon_flush_during_switch
= leon_flush_needed();