// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
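
/*
 * Per-family cache flush primitives. Every pointer starts out as
 * cache_noop and is redirected to the real routine by the family
 * specific *_cache_init() called from cpu_cache_init() at boot.
 */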
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_folio)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_folio)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
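
/*
 * Low-level region operations: write-back flushes dirty lines to
 * memory, purge writes back and invalidates, and invalidate discards
 * lines without writing them back. Selected at boot as well, and
 * exported for use by drivers.
 */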
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
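
/* Fallback region op, kept in place while the cache is disabled. */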
static inline void noop__flush_region(void *start, int size)
{
}
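
/*
 * Run a cache operation on every online CPU, including the local one.
 */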
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
				       int wait)
{
	preempt_disable();

	/* Needing IPI for cross-core flush is SHX3-specific. */
#ifdef CONFIG_CPU_SHX3
	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);
#endif

	func(info);

	preempt_enable();
}
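
/*
 * Used by access_process_vm()/ptrace to write into another process'
 * pages. With an aliasing D-cache (n_aliases != 0) and a clean, mapped
 * folio, the store goes through a kernel mapping of the same cache
 * colour as the user address via kmap_coherent(); otherwise a plain
 * copy is done and the folio loses its PG_dcache_clean state.
 */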
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
	    test_bit(PG_dcache_clean, &folio->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &folio->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
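
/*
 * Read-side counterpart: reads through a colour-matched mapping when
 * the folio is mapped and known clean, and otherwise does a plain copy
 * and clears PG_dcache_clean.
 */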
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	struct folio *folio = page_folio(page);

	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
	    test_bit(PG_dcache_clean, &folio->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &folio->flags);
	}
}
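
/*
 * COW/fork helper: copy one user page into another, colour-matching
 * the source when the D-cache aliases, then purge the destination's
 * kernel-side lines if they could alias the user address or the VMA
 * is executable.
 */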
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && folio_mapped(src) &&
	    test_bit(PG_dcache_clean, &src->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
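
/* Zero a user page, purging kernel-side lines if they could alias. */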
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);
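
/*
 * Called on PTE set-up. The first time a page is mapped to userspace
 * after the kernel touched it (PG_dcache_clean was unset), the kernel
 * side cache lines are purged so the new mapping starts out coherent.
 */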
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (pfn_valid(pfn)) {
		struct folio *folio = page_folio(pfn_to_page(pfn));
		int dirty = !test_and_set_bit(PG_dcache_clean, &folio->flags);
		if (dirty)
			__flush_purge_region(folio_address(folio),
					     folio_size(folio));
	}
}
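
/*
 * Flush an anonymous page before its contents are read through the
 * kernel mapping (e.g. by get_user_pages()); only needed when the
 * kernel and user addresses can actually alias.
 */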
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	struct folio *folio = page_folio(page);
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
		    test_bit(PG_dcache_clean, &folio->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region(folio_address(folio),
					     folio_size(folio));
	}
}
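
/*
 * The entry points below are thin SMP wrappers: each packs its
 * arguments into a struct flusher_data as needed and broadcasts the
 * corresponding local_flush_*() operation via cacheop_on_each_cpu().
 */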
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_folio(struct folio *folio)
{
	cacheop_on_each_cpu(local_flush_dcache_folio, folio, 1);
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	/* Nothing uses the VMA, so just pass the folio along */
	cacheop_on_each_cpu(local_flush_icache_folio, page_folio(page), 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
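
/*
 * alias_mask covers the cache index bits above the page offset. For
 * instance, with 4 KiB pages and a way of 512 sets * 32-byte lines
 * (16 KiB), alias_mask = ((512 - 1) << 5) & ~0xfff = 0x3000 and
 * n_aliases = (0x3000 >> 12) + 1 = 4 cache colours.
 */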
static void compute_alias(struct cache_info *c)
{
#ifdef CONFIG_MMU
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
#else
	c->alias_mask = 0;
#endif
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
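
/*
 * Boot-time entry point: compute the alias parameters for each cache,
 * install the noop region ops, then let the family-specific init
 * routine replace the function pointers declared at the top of this
 * file. When the cache is disabled everything stays a noop.
 */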
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.type == CPU_J2) {
		j2_cache_init();
	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			shx3_cache_init();
		}
	}

skip:
	emit_cache_params();
}