Linux 4.13.16
linux/fpc-iii.git: mm/vmacache.c
blob 7ffa0ee341b5dab136b34d4d79c54a040569ada0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
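/*
 * For reference, the cache this file manipulates is declared in this
 * era's headers (struct vmacache in <linux/mm_types_task.h>, the hash in
 * <linux/vmacache.h>).  A sketch of those definitions, reproduced here in
 * a comment as an aid, not authoritative:
 *
 *	#define VMACACHE_BITS	2
 *	#define VMACACHE_SIZE	(1U << VMACACHE_BITS)
 *	#define VMACACHE_MASK	(VMACACHE_SIZE - 1)
 *	#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 *
 *	struct vmacache {
 *		u32 seqnum;
 *		struct vm_area_struct *vmas[VMACACHE_SIZE];
 *	};
 *
 * So with 4K pages, an address is hashed by its page number modulo 4:
 * VMACACHE_HASH(0x7f0000003000) == (0x7f0000003 & 3) == 3, meaning
 * consecutive virtual pages land in consecutive cache slots.
 */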
/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes. We can avoid the flushing as well
	 * since the mm's seqnum was increased and we don't have
	 * to worry about other threads' seqnums. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
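/*
 * For context: the common invalidation path does not call the full flush
 * above.  Unmap paths bump mm->vmacache_seqnum via vmacache_invalidate()
 * and only fall back to vmacache_flush_all() when the u32 counter wraps
 * to zero.  A sketch of that helper as defined in this era's
 * <linux/vmacache.h> (inner comment elided):
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 */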
/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma(). The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own). There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via
 * use_mm(). That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}
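/*
 * To illustrate the foreign-mm case above (hypothetical caller, for
 * illustration only): a tracer servicing access_process_vm() reaches
 * find_vma() with the tracee's mm while current->mm is the tracer's own,
 * so vmacache_valid_mm() returns false and the tracee's vmas are neither
 * looked up in nor written into the tracer's cache:
 *
 *	struct mm_struct *foreign = get_task_mm(tracee);
 *
 *	down_read(&foreign->mmap_sem);
 *	vma = find_vma(foreign, addr);	(cache bypassed: foreign != current->mm)
 *	up_read(&foreign->mmap_sem);
 *	mmput(foreign);
 */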
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * The first attempt will always be invalid; initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
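/*
 * A worked example of the seqnum handshake, assuming threads A and B
 * share an mm whose seqnum is currently 41:
 *
 *   1. B unmaps a region; vmacache_invalidate() bumps the mm to 42.
 *   2. A calls vmacache_find(): 42 != A's cached 41, so A's slots are
 *      cleared, A resyncs to 42, and the lookup reports a miss.
 *   3. A's next successful tree walk repopulates the cache through
 *      vmacache_update(), and lookups hit again until the next bump.
 *
 * Per the locking comment on vmacache_flush_all(), writers bump the
 * seqnum with mmap_sem held exclusively, so readers holding it for read
 * observe a stable value.
 */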
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
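/*
 * A condensed sketch of how the fast path pairs with vmacache_update();
 * abbreviated from this era's find_vma() in mm/mmap.c, not the verbatim
 * implementation:
 *
 *	struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 *	{
 *		struct vm_area_struct *vma = vmacache_find(mm, addr);
 *
 *		if (likely(vma))
 *			return vma;
 *
 *		vma = <walk mm->mm_rb for the lowest vma with vm_end > addr>;
 *		if (vma)
 *			vmacache_update(addr, vma);
 *		return vma;
 *	}
 */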
#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif
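/*
 * On nommu kernels, region lookups key on exact bounds rather than a
 * containing address; the analogous caller pattern lives in mm/nommu.c
 * in this era (a static find_vma_exact() helper there, as we read it),
 * shaped roughly like:
 *
 *	vma = vmacache_find_exact(mm, addr, addr + len);
 *	if (!vma) {
 *		vma = <walk the vma tree for an exact [addr, addr+len) match>;
 *		if (vma)
 *			vmacache_update(addr, vma);
 *	}
 */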