arch/powerpc/mm/mmu_context_iommu.c

/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

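/*
 * Account pinned pages against the owning mm's RLIMIT_MEMLOCK: called with
 * incr=true before pages are pinned and incr=false when a region is released
 * or its registration fails.
 */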
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
		int **resultp)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

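/*
 * Try to migrate a single base page out of the CMA area before it is pinned
 * long-term. A migration failure is not fatal: the caller simply pins the
 * page where it currently is.
 */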
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CMA);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}

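/*
 * Pre-register a chunk of userspace memory for IOMMU use: pin every page,
 * record its host physical address in hpas[] and account the pages against
 * RLIMIT_MEMLOCK. Re-registering an identical (ua, entries) range only bumps
 * the reference count; a range overlapping an existing one is rejected with
 * -EINVAL.
 */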
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}

		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

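/*
 * Drop the page references taken at registration time and clear the
 * corresponding hpas[] entries.
 */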
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

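/*
 * Drop a reference to a pre-registered region. The region is only torn down
 * (unpinned, unaccounted and freed after an RCU grace period) when the last
 * user goes away; if mappings are still active at that point, the reference
 * is restored and -EBUSY is returned.
 */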
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

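/*
 * Find a pre-registered region that fully covers [ua, ua + size).
 * mm_iommu_lookup_rm() is the real-mode variant and therefore walks the
 * list locklessly.
 */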
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

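/* Find a pre-registered region by an exact (ua, entries) match. */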
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

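/*
 * Translate a userspace address inside a pre-registered region into the host
 * physical address captured at registration time. The _rm variant runs in
 * real mode, where the vmalloc'ed hpas[] array cannot be accessed through its
 * virtual address and has to be translated with vmalloc_to_phys() first.
 */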
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

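/*
 * Track active mappings of a region. mm_iommu_mapped_inc() fails with -ENXIO
 * once teardown has started (the count dropped to zero), and
 * mm_iommu_mapped_dec() never takes the count below 1, so only mm_iommu_put()
 * can drop it to zero.
 */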
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

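/* Initialise the per-mm list of pre-registered regions. */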
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}