/*
 *  linux/include/asm-arm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for large-ish pages.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
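
/*
 * A rough sketch of how this API is driven (the real callers are the
 * generic unmap paths in mm/memory.c; the sequence below is
 * illustrative only, not a call site defined by this header):
 *
 *	tlb = tlb_gather_mmu(mm, full_mm_flush);
 *	tlb_start_vma(tlb, vma);
 *		tlb_remove_tlb_entry(tlb, ptep, addr);	// per pte
 *		tlb_remove_page(tlb, page);
 *	tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 */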

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
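
/*
 * Single-entry invalidation is not used here (see the note at the top
 * of this file): the flush is instead done at vma or mm granularity by
 * tlb_end_vma()/tlb_finish_mmu(), so the per-entry hook is a no-op.
 */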
#define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
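
/*
 * Illustration of the two cases above, assuming the usual generic
 * callers (a full-process teardown and the munmap path; both live
 * outside this header, so the names here are for orientation only):
 *
 *	full teardown:	tlb_gather_mmu(mm, 1) - the per-vma hooks do
 *			nothing, and tlb_finish_mmu() issues a single
 *			flush_tlb_mm() for the whole address space.
 *	munmap:		tlb_gather_mmu(mm, 0) - each vma gets a ranged
 *			flush_cache_range()/flush_tlb_range() covering
 *			just the region being torn down.
 */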

/*
 * Pages and page tables are handed straight back to the allocator;
 * no batching is done at this level.
 */
#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb,ptep)		pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp)		pmd_free(pmdp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif