// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

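/*
 * Page-table walk helpers: each returns the (non-zero) PTE value when the
 * virtual address is mapped and present, or 0 otherwise.  They are used by
 * the access checks below and by the ranged cache maintenance that must not
 * touch unmapped user pages.
 */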
int va_kernel_present(unsigned long addr)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        p4d = p4d_offset(pgd_offset_k(addr), addr);
        pud = pud_offset(p4d, addr);
        pmd = pmd_offset(pud, addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset_map(pmd, addr);
                                        pte = *ptep;
                                        if (pte_present(pte))
                                                return pte;
                                }
                        }
                }
        }
        return 0;
}

int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode: the page must be present and readable */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /* superuser mode is always readable, so we only
                 * check whether it is present or not */
                return (!!va_kernel_present(addr));
        }
        return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

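/*
 * Whole-cache maintenance.  The index-based (IX) CCTL operations below walk
 * every line of the L1 I-cache (ways * sets * line_size bytes of index
 * space); the loop body is unrolled four lines per iteration.
 */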
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end = line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;

        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: Ensure the section 2 & 3 program code execution after */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: Confirm the writeback all level is done in CPU and L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: Writeback whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

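/*
 * With a write-through D-cache (CONFIG_CPU_DCACHE_WRITETHROUGH) memory is
 * always up to date, so the writeback step is compiled out and only the
 * invalidate remains.
 */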
void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;

        local_irq_save(flags);
        cpu_dcache_wb_all();
#endif
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}

void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

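/*
 * Writeback-invalidate one page: each line is written back (unless the
 * D-cache is write-through) and then invalidated, walking the page from its
 * end back to its start.
 */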
void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}

void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}

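/*
 * Ranged maintenance on user VMAs: ranges larger than eight pages are
 * handled with a full-cache flush; otherwise the partial first and last
 * pages and each whole page in between are flushed individually, but only
 * when va_present() shows the page is actually mapped.
 */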
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}

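/*
 * Issue a CCTL single-line command to the external L2 cache controller for
 * every line in the physical range, then post an L2 sync so the operation
 * completes before returning.  Without CONFIG_CACHE_L2 this collapses to a
 * no-op.
 */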
#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end,
                                  unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;
                /* TODO Can use PAGE mode to optimize if range is larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & (~(line_size - 1));
                p_end = (p_end + line_size - 1) & (~(line_size - 1));
                cmd = (p_start & ~(line_size - 1)) | op | CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif

void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}

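/*
 * For DMA from a device: if the buffer does not start or end on a cache-line
 * boundary, the partial lines at the edges are written back and invalidated
 * first so that unrelated data sharing those lines is not lost, then the
 * whole aligned range is invalidated in L1 and L2.
 */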
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}

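/*
 * Disable both L1 caches, write back and invalidate their contents, then
 * branch to the supplied reset entry point with the jr.toff instruction.
 */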
void cpu_reset(unsigned long reset)
{
        u32 tmp;

        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;

        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}