Merge tag 'locking-urgent-2020-12-27' of git://git.kernel.org/pub/scm/linux/kernel...
[linux/fpc-iii.git] / arch / arm / include / asm / tlb.h
blobb8cbe03ad260344abc1fa74a54bcd036b996eb37
1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
14 #ifndef __ASMARM_TLB_H
15 #define __ASMARM_TLB_H
17 #include <asm/cacheflush.h>
19 #ifndef CONFIG_MMU
21 #include <linux/pagemap.h>
23 #define tlb_flush(tlb) ((void) tlb)
25 #include <asm-generic/tlb.h>
27 #else /* !CONFIG_MMU */
29 #include <linux/swap.h>
30 #include <asm/tlbflush.h>
/*
 * Free one page-table page that was queued on the mmu_gather batch.
 * The opaque table pointer is the page itself; drop it together with
 * any swap-cache reference it may still hold.
 */
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}
37 #include <asm-generic/tlb.h>
39 static inline void
40 __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
42 pgtable_pte_page_dtor(pte);
44 #ifndef CONFIG_ARM_LPAE
46 * With the classic ARM MMU, a pte page has two corresponding pmd
47 * entries, each covering 1MB.
49 addr = (addr & PMD_MASK) + SZ_1M;
50 __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
51 #endif
53 tlb_remove_table(tlb, pte);
56 static inline void
57 __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
59 #ifdef CONFIG_ARM_LPAE
60 struct page *page = virt_to_page(pmdp);
62 pgtable_pmd_page_dtor(page);
63 tlb_remove_table(tlb, page);
64 #endif
67 #endif /* CONFIG_MMU */
68 #endif