// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
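
/*
 * Per-CPU cache maintenance hooks. These all default to no-ops and are
 * pointed at the CPU-specific implementations by cpu_cache_init().
 */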
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
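
/* Region flush stub, kept in place while the cache is disabled or unprobed. */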
static inline void noop__flush_region(void *start, int size)
{
}
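
/*
 * Run a cache maintenance function on every online CPU. Only SHX3 parts
 * need a cross-CPU IPI for this; everywhere else the local call suffices.
 */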
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();

        /* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
        /*
         * It's possible that this gets called early on when IRQs are
         * still disabled due to ioremapping by the boot CPU, so don't
         * even attempt IPIs unless there are other CPUs online.
         */
        if (num_online_cpus() > 1)
                smp_call_function(func, info, wait);
#endif

        func(info);

        preempt_enable();
}
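
/*
 * Write into a page that may also be user-mapped while keeping the D-cache
 * coherent: copy through a coherent kernel mapping when the page is mapped
 * and clean, otherwise write the kernel copy and mark the page for a
 * deferred flush.
 */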
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}
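
/*
 * Read-side counterpart of copy_to_user_page(): read through a coherent
 * mapping when the source page may have live user-space aliases.
 */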
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
            test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        clear_bit(PG_dcache_clean, &page->flags);
        }
}
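
/*
 * Copy a (possibly highmem) user page. The source is read through a
 * coherent mapping when it may alias in the D-cache, and the destination
 * is purged when it aliases the user address or backs an executable
 * mapping.
 */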
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);

        if (boot_cpu_data.dcache.n_aliases && page_mapcount(from) &&
            test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
            (vma->vm_flags & VM_EXEC))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
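
/* Zero a user page, purging the kernel mapping if it aliases vaddr. */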
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);
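
/*
 * PTE update hook: if the page's kernel mapping may still hold stale
 * lines (PG_dcache_clean not yet set), purge them before the user
 * mapping is used.
 */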
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}
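
/* Resolve a D-cache alias between an anonymous page and a user address. */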
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
                    test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}
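
/*
 * SMP-safe front ends: each of these fans the matching local_flush_*()
 * handler out to every online CPU via cacheop_on_each_cpu().
 */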
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
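
/* Derive a cache's alias mask and alias count from its probed geometry. */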
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
        c->alias_mask = 0;
#endif
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
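
/* Log the probed I-cache, D-cache and optional L2 geometry at boot. */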
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);

        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}
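
/*
 * Boot-time setup: compute alias information for each cache and hand off
 * to the CPU-family-specific init code, unless caching is disabled in
 * hardware, in which case the no-op handlers are left in place.
 */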
void __init cpu_cache_init(void)
{
        unsigned int cache_disabled = 0;

#ifdef SH_CCR
        cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region = noop__flush_region;
        __flush_purge_region = noop__flush_region;
        __flush_invalidate_region = noop__flush_region;

        /*
         * No flushing is necessary in the disabled cache case so we can
         * just keep the noop functions in local_flush_..() and __flush_..()
         */
        if (unlikely(cache_disabled))
                goto skip;

        if (boot_cpu_data.type == CPU_J2) {
                extern void __weak j2_cache_init(void);

                j2_cache_init();
        } else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();

                if ((boot_cpu_data.type == CPU_SH7786) ||
                    (boot_cpu_data.type == CPU_SHX3)) {
                        extern void __weak shx3_cache_init(void);

                        shx3_cache_init();
                }
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

skip:
        emit_cache_params();
}