treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / arch / unicore32 / mm / flush.c
blob65954f8d89a298de08d616c9d5d2feb602f0da23
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/arch/unicore32/mm/flush.c
5 * Code specific to PKUnity SoC and UniCore ISA
7 * Copyright (C) 2001-2010 GUAN Xue-tao
8 */
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/pagemap.h>
13 #include <asm/cacheflush.h>
14 #include <asm/tlbflush.h>
/*
 * Called on context switch / mm teardown.  The D-cache here is VIPT
 * non-aliasing (see flush_ptrace_access below), so no per-mm cache
 * maintenance is required: this hook is intentionally a no-op.
 */
void flush_cache_mm(struct mm_struct *mm)
{
}
20 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
21 unsigned long end)
23 if (vma->vm_flags & VM_EXEC)
24 __flush_icache_all();
/*
 * Flush caches for a single user page of @vma.  Intentionally a no-op:
 * the VIPT non-aliasing D-cache needs no per-page maintenance here.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
		      unsigned long pfn)
{
}
32 static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
33 unsigned long uaddr, void *kaddr, unsigned long len)
35 /* VIPT non-aliasing D-cache */
36 if (vma->vm_flags & VM_EXEC) {
37 unsigned long addr = (unsigned long)kaddr;
39 __cpuc_coherent_kern_range(addr, addr + len);
/*
 * Copy user data from/to a page which is mapped into a different
 * processes address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 *
 * The copy must land in the kernel mapping first, then
 * flush_ptrace_access() makes the new bytes coherent for any
 * executable user mapping — do not reorder the two calls.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}
58 void __flush_dcache_page(struct address_space *mapping, struct page *page)
61 * Writeback any data associated with the kernel mapping of this
62 * page. This ensures that data in the physical page is mutually
63 * coherent with the kernels mapping.
65 __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
69 * Ensure cache coherency between kernel mapping and userspace mapping
70 * of this page.
72 void flush_dcache_page(struct page *page)
74 struct address_space *mapping;
77 * The zero page is never written to, so never has any dirty
78 * cache lines, and therefore never needs to be flushed.
80 if (page == ZERO_PAGE(0))
81 return;
83 mapping = page_mapping_file(page);
85 if (mapping && !mapping_mapped(mapping))
86 clear_bit(PG_dcache_clean, &page->flags);
87 else {
88 __flush_dcache_page(mapping, page);
89 if (mapping)
90 __flush_icache_all();
91 set_bit(PG_dcache_clean, &page->flags);
94 EXPORT_SYMBOL(flush_dcache_page);