/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"

static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
				    const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
					   unsigned long, unsigned long);
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list);
static void handle_remove(struct work_struct *work);

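/*
 * Only invalidate_range_start is hooked; node removal triggered by an
 * invalidation is deferred to the del_work workqueue.
 */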
static const struct mmu_notifier_ops mn_opts = {
	.invalidate_range_start = mmu_notifier_range_start,
};

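/*
 * Generate the static __mmu_int_rb_*() interval tree helpers, keyed by the
 * page-aligned [mmu_node_start(), mmu_node_last()] range of each node.
 */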
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
		     mmu_node_start, mmu_node_last, static, __mmu_int_rb);

static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
	return node->addr & PAGE_MASK;
}

static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
	return PAGE_ALIGN(node->addr + node->len) - 1;
}

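/*
 * Allocate an rb-tree handler, register it as an mmu notifier on
 * current->mm, and return it through *handler.  Returns 0 on success or a
 * negative errno on allocation/registration failure.
 */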
int hfi1_mmu_rb_register(void *ops_arg,
			 struct mmu_rb_ops *ops,
			 struct workqueue_struct *wq,
			 struct mmu_rb_handler **handler)
{
	struct mmu_rb_handler *h;
	int ret;

	h = kmalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->root = RB_ROOT_CACHED;
	h->ops = ops;
	h->ops_arg = ops_arg;
	INIT_HLIST_NODE(&h->mn.hlist);
	spin_lock_init(&h->lock);
	h->mn.ops = &mn_opts;
	INIT_WORK(&h->del_work, handle_remove);
	INIT_LIST_HEAD(&h->del_list);
	INIT_LIST_HEAD(&h->lru_list);
	h->wq = wq;

	ret = mmu_notifier_register(&h->mn, current->mm);
	if (ret) {
		kfree(h);
		return ret;
	}

	*handler = h;
	return 0;
}

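/*
 * Tear down a handler: unregister the mmu notifier, flush the deferred
 * removal work, hand every node still in the tree to the remove op, and
 * free the handler.
 */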
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *rbnode;
	struct rb_node *node;
	unsigned long flags;
	struct list_head del_list;

	/* Unregister first so we don't get any more notifications. */
	mmu_notifier_unregister(&handler->mn, handler->mn.mm);

	/*
	 * Make sure the wq delete handler is finished running.  It will not
	 * be triggered once the mmu notifiers are unregistered above.
	 */
	flush_work(&handler->del_work);

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	while ((node = rb_first_cached(&handler->root))) {
		rbnode = rb_entry(node, struct mmu_rb_node, node);
		rb_erase_cached(node, &handler->root);
		/* move from LRU list to delete list */
		list_move(&rbnode->list, &del_list);
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);

	kfree(handler);
}

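/*
 * Insert 'mnode' into the interval tree and the LRU list.  Returns -EPERM if
 * called from an mm other than the one the handler was registered on,
 * -EINVAL if a matching node is already present, or the result of the
 * driver's insert op (the node is backed out again if that op fails).
 */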
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
		       struct mmu_rb_node *mnode)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	int ret = 0;

	trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

	if (current->mm != handler->mn.mm)
		return -EPERM;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, mnode->addr, mnode->len);
	if (node) {
		ret = -EINVAL;
		goto unlock;
	}
	__mmu_int_rb_insert(mnode, &handler->root);
	list_add(&mnode->list, &handler->lru_list);

	ret = handler->ops->insert(handler->ops_arg, mnode);
	if (ret) {
		__mmu_int_rb_remove(mnode, &handler->root);
		list_del(&mnode->list); /* remove from LRU list */
	}
	mnode->handler = handler;
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	return ret;
}

/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
					   unsigned long addr,
					   unsigned long len)
{
	struct mmu_rb_node *node = NULL;

	trace_hfi1_mmu_rb_search(addr, len);
	if (!handler->ops->filter) {
		node = __mmu_int_rb_iter_first(&handler->root, addr,
					       (addr + len) - 1);
	} else {
		for (node = __mmu_int_rb_iter_first(&handler->root, addr,
						    (addr + len) - 1);
		     node;
		     node = __mmu_int_rb_iter_next(node, addr,
						   (addr + len) - 1)) {
			if (handler->ops->filter(node, addr, len))
				return node;
		}
	}
	return node;
}

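/*
 * Find the node covering [addr, addr + len).  An exact match is left in the
 * tree and reported with a false return; any other match is unlinked from
 * the tree and LRU list and reported with true.  *rb_node is set to the node
 * found, if any; calls from a foreign mm return false without touching it.
 */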
bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len,
				     struct mmu_rb_node **rb_node)
{
	struct mmu_rb_node *node;
	unsigned long flags;
	bool ret = false;

	if (current->mm != handler->mn.mm)
		return ret;

	spin_lock_irqsave(&handler->lock, flags);
	node = __mmu_rb_search(handler, addr, len);
	if (node) {
		if (node->addr == addr && node->len == len)
			goto unlock;
		__mmu_int_rb_remove(node, &handler->root);
		list_del(&node->list); /* remove from LRU list */
		ret = true;
	}
unlock:
	spin_unlock_irqrestore(&handler->lock, flags);
	*rb_node = node;
	return ret;
}

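/*
 * Walk the LRU list from the tail (least recently inserted first) and let
 * the driver's evict op decide which nodes to give up; evicted nodes are
 * pulled from the tree and passed to the remove op after the handler lock
 * is dropped.  The evict op may set 'stop' to end the walk early.
 */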
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
	struct mmu_rb_node *rbnode, *ptr;
	struct list_head del_list;
	unsigned long flags;
	bool stop = false;

	if (current->mm != handler->mn.mm)
		return;

	INIT_LIST_HEAD(&del_list);

	spin_lock_irqsave(&handler->lock, flags);
	list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list,
					 list) {
		if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
					&stop)) {
			__mmu_int_rb_remove(rbnode, &handler->root);
			/* move from LRU list to delete list */
			list_move(&rbnode->list, &del_list);
		}
		if (stop)
			break;
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	while (!list_empty(&del_list)) {
		rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
		list_del(&rbnode->list);
		handler->ops->remove(handler->ops_arg, rbnode);
	}
}

/*
 * It is up to the caller to ensure that this function does not race with the
 * mmu invalidate notifier which may be calling the user's remove callback on
 * 'node'.
 */
void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
			struct mmu_rb_node *node)
{
	unsigned long flags;

	if (current->mm != handler->mn.mm)
		return;

	/* Validity of handler and node pointers has been checked by caller. */
	trace_hfi1_mmu_rb_remove(node->addr, node->len);
	spin_lock_irqsave(&handler->lock, flags);
	__mmu_int_rb_remove(node, &handler->root);
	list_del(&node->list); /* remove from LRU list */
	spin_unlock_irqrestore(&handler->lock, flags);

	handler->ops->remove(handler->ops_arg, node);
}

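/*
 * mmu notifier callback: for each node overlapping the invalidated range,
 * ask the driver's invalidate op whether the node should be dropped.  Nodes
 * it gives up are moved to del_list and handed to the remove op from the
 * workqueue, where sleeping is allowed (see handle_remove()).
 */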
static int mmu_notifier_range_start(struct mmu_notifier *mn,
		const struct mmu_notifier_range *range)
{
	struct mmu_rb_handler *handler =
		container_of(mn, struct mmu_rb_handler, mn);
	struct rb_root_cached *root = &handler->root;
	struct mmu_rb_node *node, *ptr = NULL;
	unsigned long flags;
	bool added = false;

	spin_lock_irqsave(&handler->lock, flags);
	for (node = __mmu_int_rb_iter_first(root, range->start, range->end - 1);
	     node; node = ptr) {
		/* Guard against node removal. */
		ptr = __mmu_int_rb_iter_next(node, range->start,
					     range->end - 1);
		trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
		if (handler->ops->invalidate(handler->ops_arg, node)) {
			__mmu_int_rb_remove(node, root);
			/* move from LRU list to delete list */
			list_move(&node->list, &handler->del_list);
			added = true;
		}
	}
	spin_unlock_irqrestore(&handler->lock, flags);

	if (added)
		queue_work(handler->wq, &handler->del_work);

	return 0;
}

/*
 * Call the remove function for the given handler and the list.  This
 * is expected to be called with a delete list extracted from handler.
 * The caller should not be holding the handler lock.
 */
static void do_remove(struct mmu_rb_handler *handler,
		      struct list_head *del_list)
{
	struct mmu_rb_node *node;

	while (!list_empty(del_list)) {
		node = list_first_entry(del_list, struct mmu_rb_node, list);
		list_del(&node->list);
		handler->ops->remove(handler->ops_arg, node);
	}
}

/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed.  The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
	struct mmu_rb_handler *handler = container_of(work,
						struct mmu_rb_handler,
						del_work);
	struct list_head del_list;
	unsigned long flags;

	/* remove anything that is queued to get removed */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	do_remove(handler, &del_list);
}