1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/cache.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>
10 void update_mmu_cache_range(struct vm_fault
*vmf
, struct vm_area_struct
*vma
,
11 unsigned long address
, pte_t
*pte
, unsigned int nr
)
13 unsigned long pfn
= pte_pfn(*pte
);
17 flush_tlb_page(vma
, address
);
22 folio
= page_folio(pfn_to_page(pfn
));
24 if (test_and_set_bit(PG_dcache_clean
, &folio
->flags
))
27 icache_inv_range(address
, address
+ nr
*PAGE_SIZE
);
28 for (i
= 0; i
< folio_nr_pages(folio
); i
++) {
29 unsigned long addr
= (unsigned long) kmap_local_folio(folio
,
32 dcache_wb_range(addr
, addr
+ PAGE_SIZE
);
33 if (vma
->vm_flags
& VM_EXEC
)
34 icache_inv_range(addr
, addr
+ PAGE_SIZE
);
35 kunmap_local((void *) addr
);
39 void flush_icache_deferred(struct mm_struct
*mm
)
41 unsigned int cpu
= smp_processor_id();
42 cpumask_t
*mask
= &mm
->context
.icache_stale_mask
;
44 if (cpumask_test_cpu(cpu
, mask
)) {
45 cpumask_clear_cpu(cpu
, mask
);
47 * Ensure the remote hart's writes are visible to this hart.
48 * This pairs with a barrier in flush_icache_mm.
51 local_icache_inv_all(NULL
);
55 void flush_icache_mm_range(struct mm_struct
*mm
,
56 unsigned long start
, unsigned long end
)
59 cpumask_t others
, *mask
;
63 #ifdef CONFIG_CPU_HAS_ICACHE_INS
64 if (mm
== current
->mm
) {
65 icache_inv_range(start
, end
);
71 /* Mark every hart's icache as needing a flush for this MM. */
72 mask
= &mm
->context
.icache_stale_mask
;
75 /* Flush this hart's I$ now, and mark it as flushed. */
76 cpu
= smp_processor_id();
77 cpumask_clear_cpu(cpu
, mask
);
78 local_icache_inv_all(NULL
);
81 * Flush the I$ of other harts concurrently executing, and mark them as
84 cpumask_andnot(&others
, mm_cpumask(mm
), cpumask_of(cpu
));
86 if (mm
!= current
->active_mm
|| !cpumask_empty(&others
)) {
87 on_each_cpu_mask(&others
, local_icache_inv_all
, NULL
, 1);