arch/nds32/mm/proc.c (linux/fpc-iii.git, blob 837ae7728830cced2ddd0c9692d51e9b4dca6602)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];
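
/*
 * Page-table walk helpers. va_kernel_present() walks the kernel page
 * tables for a virtual address and returns the PTE value when the page
 * is present, or 0 otherwise; va_present() does the same walk on a
 * user mm.
 */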
int va_kernel_present(unsigned long addr)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        p4d = p4d_offset(pgd_offset_k(addr), addr);
        pud = pud_offset(p4d, addr);
        pmd = pmd_offset(pud, addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd)) {
                                        ptep = pte_offset_map(pmd, addr);
                                        pte = *ptep;
                                        if (pte_present(pte))
                                                return pte;
                                }
                        }
                }
        }
        return 0;
}
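
/*
 * Probe whether a virtual address is readable/writable for the mode
 * recorded in pt_regs: user addresses are checked against the current
 * mm's PTE permission bits, kernel addresses against the kernel page
 * tables.
 */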
int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /* superuser mode is always readable, so we only
                 * need to check whether it is present */
                return !!va_kernel_present(addr);
        }
        return ret;
}

int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

/*
 * All
 */
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end =
            line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
        __nds32__isb();
}

void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}

#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;

        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: Ensure the section 2 & 3 program code execution after */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: Confirm the writeback all level is done in CPU and L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: Writeback whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;

        local_irq_save(flags);
#endif
        cpu_dcache_wb_all();
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}
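
/*
 * The page-granularity helpers below operate on one PAGE_SIZE region.
 * Their loops are unrolled four cache lines per iteration, which
 * assumes PAGE_SIZE is a multiple of four cache lines.
 */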

/*
 * Page
 */
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}
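
/*
 * The range helpers below step through [start, end) one cache line at
 * a time; cpu_cache_wbinval_range() is the wrapper that first aligns
 * the range to D-cache/I-cache line boundaries.
 */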

/*
 * Range
 */
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}
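
/*
 * Flush a user range, touching only pages that are actually mapped
 * (checked with va_present()). Ranges larger than 8 pages fall back to
 * whole-cache maintenance; otherwise the partial first and last pages
 * are handled with range operations and every fully covered page with
 * the page-granularity helpers.
 */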
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}
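
/*
 * Issue a single-line L2 CCTL command for every cache line in the
 * (physical) range, then post an L2 sync command so the operation is
 * observed as complete before returning. Compiles away to a no-op when
 * the kernel is built without CONFIG_CACHE_L2.
 */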
#ifdef CONFIG_CACHE_L2
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;

                /* TODO: use PAGE mode to optimize ranges larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & (~(line_size - 1));
                p_end = (p_end + line_size - 1) & (~(line_size - 1));
                cmd = (p_start & ~(line_size - 1)) | op | CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif

/*
 * DMA
 */
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}
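
/*
 * Invalidate a DMA-from-device range in L1 and L2. If either end of
 * the range is not cache-line aligned, the boundary line is written
 * back and invalidated rather than just invalidated, presumably so
 * that unrelated data sharing that line is not lost.
 */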
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;

        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}
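
/*
 * cpu_reset(): disable interrupts and both L1 caches, write back and
 * invalidate the D-cache, invalidate the I-cache, then branch to the
 * given reset address via jr.toff.
 */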
void cpu_reset(unsigned long reset)
{
        u32 tmp;

        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}
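
/*
 * cpu_switch_mm(): install the new context ID in TLB_MISC and point
 * the hardware page-table base register (L1_PPTB) at the physical
 * address of the new mm's pgd.
 */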
void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;

        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}