/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>

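/*
 * Callback invoked by the generic mmu_gather code when it is finally
 * safe to free a page-table page that was batched via
 * tlb_remove_table().
 */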
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

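/*
 * Defining the tlb_flush macro before including asm-generic/tlb.h
 * tells the generic code that this architecture provides its own
 * tlb_flush(); the forward declaration lets the generic inlines call
 * it ahead of its definition below.
 */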
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

/*
 * Get the TLBI level to use on arm64. The default value is 0 if more
 * than one of the cleared_* fields is set, or if none is set.
 * arm64 doesn't support p4ds at present.
 */
static inline int tlb_get_level(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 3;

	if (tlb->cleared_pmds && !(tlb->cleared_ptes ||
				   tlb->cleared_puds ||
				   tlb->cleared_p4ds))
		return 2;

	if (tlb->cleared_puds && !(tlb->cleared_ptes ||
				   tlb->cleared_pmds ||
				   tlb->cleared_p4ds))
		return 1;

	return 0;
}

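/*
 * Flush the entries gathered in @tlb. If no page-table pages were
 * freed, only leaf entries need invalidating and __flush_tlb_range()
 * is told to use the last-level TLBI hint.
 */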
static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
	bool last_level = !tlb->freed_tables;
	unsigned long stride = tlb_get_unmap_size(tlb);
	int tlb_level = tlb_get_level(tlb);

	/*
	 * If we're tearing down the address space then we only care about
	 * invalidating the walk-cache, since the ASID allocator won't
	 * reallocate our ASID without invalidating the entire TLB.
	 */
	if (tlb->fullmm) {
		if (!last_level)
			flush_tlb_mm(tlb->mm);
		return;
	}

	__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
			  last_level, tlb_level);
}

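/*
 * Page-table pages are freed through tlb_remove_table() so that the
 * actual free is deferred until after the TLB has been flushed.
 */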
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);
}

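/*
 * The pmd/pud freeing helpers below are only needed when the
 * corresponding level is not folded, i.e. with at least three- and
 * four-level page tables respectively.
 */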
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	struct page *page = virt_to_page(pmdp);

	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif

#endif