/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)

/*
 * When nonzero, use the _PAGE_ACCESSED bit to try to reduce the number
 * of page flushes done by flush_cache_page_if_present. There are some
 * pros and cons in using this option. It may increase the risk of
 * random segmentation faults.
 */
#define CONFIG_FLUSH_PAGE_ACCESSED	0
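
/*
 * How this option is consumed below: when nonzero,
 * flush_cache_page_if_present() walks the page table with get_ptep() and
 * only flushes pages whose PTE passes pte_needs_flush() (present, accessed
 * and cacheable), and ptep_clear_flush_young() also flushes the page when
 * it clears the accessed bit.  When zero, flush_cache_page_if_present()
 * instead probes the translation with get_upa().
 */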

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

static void flush_kernel_dcache_page_addr(const void *addr);

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
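
/*
 * Typical usage, as in flush_kernel_dcache_page_addr() and
 * __flush_tlb_range() below:
 *
 *	purge_tlb_start(flags);
 *	pdtlb(SR_KERNEL, addr);
 *	purge_tlb_end(flags);
 *
 * purge_tlb_start()/purge_tlb_end() (asm/tlbflush.h) are expected to take
 * pa_tlb_flush_lock when pa_serialize_tlb_flushes is set, so that only one
 * PxTLB purge broadcast is in flight at a time.
 */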

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}

/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
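	/*
	 * Both forms are equivalent:
	 *	(1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *		= cc_line << (4 + cc_shift + cc_block - 1)
	 *		= cc_line << (3 + cc_block + cc_shift)
	 * For example, with cc_line = 4, cc_block = 1 and cc_shift = 2
	 * (illustrative values only), both give 4 << 6 = 256 bytes.
	 */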
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

	/* stride needs to be non-zero, otherwise cache flushes will not work */
	WARN_ON(cache_info.dc_size && dcache_stride == 0);
	WARN_ON(cache_info.ic_size && icache_stride == 0);

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;

	/*
	 * The TLB is the engine of coherence on parisc.  The CPU is
	 * entitled to speculate any page with a TLB mapping, so here
	 * we kill the mapping then flush the page along a special flush
	 * only alias mapping. This guarantees that the page is no-longer
	 * in the cache for any process and nor may it be speculatively
	 * read in (until the user or kernel specifically accesses it,
	 * of course).
	 */
	flush_tlb_page(vma, vmaddr);

	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
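
/*
 * __flush_cache_page() is the common helper used below by
 * flush_cache_page(), flush_anon_page(), flush_dcache_folio(),
 * flush_cache_page_if_present(), copy_user_highpage(),
 * copy_to_user_page(), copy_from_user_page() and ptep_clear_flush().
 */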

static void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Purge TLB entry to remove translation on all CPUs */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Use tmpalias flush to prevent data cache move-in */
	preempt_disable();
	flush_dcache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}

static void flush_kernel_icache_page_addr(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Purge TLB entry to remove translation on all CPUs */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Use tmpalias flush to prevent instruction cache move-in */
	preempt_disable();
	flush_icache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}

void kunmap_flush_on_unmap(const void *addr)
{
	flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page_addr(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}

/*
 * Walk page directory for MM to find PTEP pointer for address ADDR.
 */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
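
/*
 * For example, a present, accessed and cacheable PTE needs a flush, while
 * a PTE that is not present, not yet accessed, or mapped with
 * _PAGE_NO_CACHE does not.
 */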

/*
 * Return user physical address. Returns 0 if page is not present.
 */
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	/* Save context */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif

	/* Set context for lpa_user */
	switch_mm_irqs_off(NULL, mm, NULL);
	pa = lpa_user(addr);

	/* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	return pa;
}
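
/*
 * get_upa() is used by flush_cache_page_if_present() below (in the
 * !CONFIG_FLUSH_PAGE_ACCESSED case) to translate a user virtual address
 * with the lpa instruction while the target mm is temporarily switched in.
 */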

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing.
	 */
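	/*
	 * For example (SHM_COLOUR is 4 MB), mappings of the same file page
	 * at 0x41001000 and 0x42401000 have equal colour bits
	 * (addr & (SHM_COLOUR - 1) == 0x1000), so flushing one address is
	 * enough for both on such machines.
	 */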
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		/* Avoid flushing pages that fall outside the vma */
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
			for (i = 0; i < nr; i++)
				__flush_cache_page(vma,
					addr + i * PAGE_SIZE,
					(pfn + i) * PAGE_SIZE);
			/*
			 * Software is allowed to have any number
			 * of private mappings to a page.
			 */
			if (!(vma->vm_flags & VM_SHARED))
				continue;
			if (old_addr)
				pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
					old_addr, addr, vma->vm_file);
			if (nr == folio_nr_pages(folio))
				old_addr = addr;
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5 MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
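
/*
 * These thresholds select between per-page and whole-cache/TLB flushes:
 * ranges of at least parisc_cache_flush_threshold bytes make
 * flush_cache_mm()/flush_cache_range() fall back to flushing the whole
 * cache, and ranges of at least parisc_tlb_flush_threshold bytes make
 * __flush_tlb_range() flush the whole TLB.  Both values are tuned at boot
 * by parisc_setup_cache_timing() below.
 */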

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD);
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
	bool needs_flush = false;
	pte_t *ptep, pte;

	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		pte = ptep_get(ptep);
		needs_flush = pte_needs_flush(pte);
		pte_unmap(ptep);
	}
	if (needs_flush)
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
	struct mm_struct *mm = vma->vm_mm;
	unsigned long physaddr = get_upa(mm, vmaddr);

	if (physaddr)
		__flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	__flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
	memcpy(dst, src, len);
	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	__flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
	memcpy(dst, src, len);
	flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		flush_cache_page_if_present(vma, addr);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    || mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
		return;
	}

	flush_cache_pages(vma, start & PAGE_MASK, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	__flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
	__flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
	return 1;
}

/*
 * After a PTE is cleared, we have no way to flush the cache for
 * the physical page. On PA8800 and PA8900 processors, these lines
 * can cause random cache corruption. Thus, we must flush the cache
 * as well as the TLB when clearing a PTE that's valid.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte = ptep_get_and_clear(mm, addr, ptep);
	unsigned long pfn = pte_pfn(pte);

	if (pfn_valid(pfn))
		__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	else if (pte_accessible(mm, pte))
		flush_tlb_page(vma, addr);

	return pte;
}

/*
 * The physical address for pages in the ioremap case can be obtained
 * from the vm_struct struct. I wasn't able to successfully handle the
 * vmalloc and vmap cases. We have an array of struct page pointers in
 * the uninitialized vmalloc case but the flush failed using page_to_pfn.
 */
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	unsigned long addr, physaddr;
	struct vm_struct *vm;

	/* Prevent cache move-in */
	flush_tlb_kernel_range(start, end);

	if (end - start >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
		flush_cache_all();
		return;
	}

	vm = find_vm_area((void *)start);
	if (WARN_ON_ONCE(!vm)) {
		flush_cache_all();
		return;
	}

	/* The physical addresses of IOREMAP regions are contiguous */
	if (vm->flags & VM_IOREMAP) {
		physaddr = vm->phys_addr;
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			preempt_disable();
			flush_dcache_page_asm(physaddr, start);
			flush_icache_page_asm(physaddr, start);
			preempt_enable();
			physaddr += PAGE_SIZE;
		}
		return;
	}

	flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);

/*
 * The vm_struct has been retired and the page table is set up. The
 * last page in the range is a guard page. Its physical address can't
 * be determined using lpa, so there is no way to flush the range
 * using flush_dcache_page_asm.
 */
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	/* Prevent cache move-in */
	flush_tlb_kernel_range(start, end);
	flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);

/*
 * On systems with PA8800/PA8900 processors, there is no way to flush
 * a vmap range other than using the architected loop to flush the
 * entire cache. The page directory is not set up, so we can't use
 * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
 * L2 is physically indexed but FDCE/FICE instructions in virtual
 * mode output their virtual address on the core bus, not their
 * real address. As a result, the L2 cache index formed from the
 * virtual address will most likely not be the same as the L2 index
 * formed from the real address.
 */
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	flush_tlb_kernel_range(start, end);

	if (!static_branch_likely(&parisc_has_dcache))
		return;

	/* If interrupts are disabled, we can only do local flush */
	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
		flush_data_cache_local(NULL);
		return;
	}

	flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	flush_tlb_kernel_range(start, end);

	if (!static_branch_likely(&parisc_has_dcache))
		return;

	/* If interrupts are disabled, we can only do local flush */
	if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
		flush_data_cache_local(NULL);
		return;
	}

	flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
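		/*
		 * Flush-loop sketch: the flush instruction (fdc,m here,
		 * fic,m in the I-cache loop below) flushes one cache line
		 * at %0 and post-increments %0 by the cache stride in %3,
		 * while cmpb,<< repeats the loop until start reaches end.
		 * A fault on the user address is handled through the
		 * exception table entry and reported via 'error'.
		 */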
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fdc,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fic,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (icache_stride), "i" (SR_USER));