mm/vmacache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Davidlohr Bueso.
 */
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
	/*
	 * Single threaded tasks need not iterate the entire
	 * list of processes.  We can avoid the flushing as well,
	 * since the mm's seqnum was increased and we don't have
	 * to worry about other threads' seqnums.  Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
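
/*
 * For context, a sketch (not part of this file) of the only expected
 * caller: vmacache_invalidate() in include/linux/vmacache.h bumps the
 * mm's sequence number whenever the mm's VMA set changes, and resorts
 * to this full flush only when the 32-bit counter wraps to zero.  This
 * is how it looked in kernels of this vintage; treat the exact body as
 * illustrative:
 *
 *	static inline void vmacache_invalidate(struct mm_struct *mm)
 *	{
 *		mm->vmacache_seqnum++;
 *
 *		if (unlikely(mm->vmacache_seqnum == 0))
 *			vmacache_flush_all(mm);
 *	}
 */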
/*
 * This task may be accessing a foreign mm via (for example)
 * get_user_pages()->find_vma().  The vmacache is task-local and this
 * task's vmacache pertains to a different mm (i.e., its own).  There is
 * nothing we can do here.
 *
 * Also handle the case where a kernel thread has adopted this mm via
 * use_mm().  That kernel thread's vmacache is not applicable to this mm.
 */
static inline bool vmacache_valid_mm(struct mm_struct *mm)
{
	return current->mm == mm && !(current->flags & PF_KTHREAD);
}
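
/*
 * Cache @newvma in the slot selected by @addr so a subsequent lookup of
 * a nearby address can hit.  The update is skipped when current does
 * not own @newvma's mm (see vmacache_valid_mm() above).
 */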
void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
{
	if (vmacache_valid_mm(newvma->vm_mm))
		current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
}
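
/*
 * For reference, a sketch of the cache geometry from
 * include/linux/vmacache.h as commonly defined at the time (the exact
 * constants here are an assumption about this tree): a tiny
 * direct-mapped array indexed by page number, so adjacent pages fall
 * into adjacent slots.
 *
 *	#define VMACACHE_BITS	2
 *	#define VMACACHE_SIZE	(1U << VMACACHE_BITS)
 *	#define VMACACHE_MASK	(VMACACHE_SIZE - 1)
 *	#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
 */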
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
		/*
		 * First attempt will always be invalid, initialize
		 * the new cache for this task here.
		 */
		curr->vmacache.seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}

	return true;
}
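
/*
 * Look up @addr in current's per-task cache.  Returns the cached vma
 * covering @addr, or NULL on a miss, including the case where the
 * cache is stale or pertains to a different mm and cannot be used.
 */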
struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (!vma)
			continue;
		if (WARN_ON_ONCE(vma->vm_mm != mm))
			break;
		if (vma->vm_start <= addr && vma->vm_end > addr) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
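
/*
 * A sketch of the typical caller pattern, modelled on find_vma() in
 * mm/mmap.c (simplified and abridged here, not a verbatim copy): try
 * the cache first, fall back to the rbtree walk on a miss, and
 * populate the cache on the slow path so the next lookup hits.
 *
 *	struct vm_area_struct *find_vma(struct mm_struct *mm,
 *					unsigned long addr)
 *	{
 *		struct vm_area_struct *vma;
 *
 *		vma = vmacache_find(mm, addr);
 *		if (likely(vma))
 *			return vma;
 *
 *		vma = ...walk mm->mm_rb...;
 *		if (vma)
 *			vmacache_update(addr, vma);
 *		return vma;
 *	}
 */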
#ifndef CONFIG_MMU
struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	int i;

	count_vm_vmacache_event(VMACACHE_FIND_CALLS);

	if (!vmacache_valid(mm))
		return NULL;

	for (i = 0; i < VMACACHE_SIZE; i++) {
		struct vm_area_struct *vma = current->vmacache.vmas[i];

		if (vma && vma->vm_start == start && vma->vm_end == end) {
			count_vm_vmacache_event(VMACACHE_FIND_HITS);
			return vma;
		}
	}

	return NULL;
}
#endif