include/asm-x86/tlbflush_64.h
#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/system.h>
static inline void __flush_tlb(void)
{
	write_cr3(read_cr3());
}
static inline void __flush_tlb_all(void)
{
	unsigned long cr4 = read_cr4();
	write_cr4(cr4 & ~X86_CR4_PGE);	/* clear PGE */
	write_cr4(cr4);			/* write old PGE again and flush TLBs */
}
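/*
 * Note: reloading CR3, as __flush_tlb() does, leaves TLB entries for
 * global pages (PTE.G, used for kernel mappings) intact.  Toggling
 * CR4.PGE as above drops those as well, which is why the "flush
 * everything" path takes this detour rather than a plain CR3 write.
 */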
#define __flush_tlb_one(addr) \
	__asm__ __volatile__("invlpg (%0)" :: "r" (addr) : "memory")
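/*
 * INVLPG invalidates the TLB entry (if any) for a single linear
 * address.  The "memory" clobber stops the compiler from caching
 * memory contents across the flush, or from reordering the asm
 * relative to the page table update it is meant to publish.
 */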
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or full VMs.  For a range
 * flush we always do the full VM.  It might be worth testing whether,
 * for a small range, a few INVLPGs in a row are a win.
 */
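/*
 * For illustration, a typical caller sequence in generic mm code (an
 * illustrative sketch; ptep, entry, vma and the addresses stand in
 * for the caller's own bookkeeping):
 *
 *	set_pte(ptep, entry);			a single PTE updated
 *	flush_tlb_page(vma, address);		flush just that page
 *
 * versus, after rewriting many PTEs in [start, end):
 *
 *	flush_tlb_range(vma, start, end);	full-mm flush on x86-64,
 *						per the note above
 */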
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2
/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges.  Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350
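/*
 * Sanity check on the numbers above (assuming 4k pages and 8-byte
 * pointers): 5350 * 4k ~= 20.9MB freed between IPIs, and a
 * 5350-entry pointer array costs 5350 * 8 ~= 41.8k per CPU, i.e.
 * the "about 42k" quoted above.
 */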
#endif	/* CONFIG_SMP */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
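/*
 * Kernel mappings are typically global (PTE.G), so a plain CR3 reload
 * would not evict them; hence even a ranged kernel flush falls back
 * to flush_tlb_all(), which toggles CR4.PGE (and, in the SMP case,
 * IPIs the other CPUs).
 */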
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/*
	 * x86-64 does not keep any page table caches in a software TLB.
	 * The CPUs do in their hardware TLBs, but those are handled
	 * by the normal TLB flushing algorithms.
	 */
}
#endif /* _X8664_TLBFLUSH_H */