arch/powerpc/mm/mmu_context_iommu.c
/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

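/*
 * Each pre-registered region carries two counters: @used counts
 * mm_iommu_get()/mm_iommu_put() callers and is protected by mem_list_mutex,
 * while @mapped counts live hardware (TCE) mappings and is driven by
 * mm_iommu_mapped_inc()/mm_iommu_mapped_dec(). The region is only released
 * (via RCU) once the last user has gone and no mappings remain.
 */
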
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current->pid,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

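/*
 * The helper above charges pinned pages against RLIMIT_MEMLOCK. As a rough,
 * illustrative example (numbers chosen here, not taken from this file):
 * pre-registering a 64MB region on a 64K-page kernel adds 1024 pages to
 * mm->locked_vm, and the request fails with -ENOMEM if that would exceed
 * rlimit(RLIMIT_MEMLOCK), unless the task has CAP_IPC_LOCK.
 */
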
bool mm_iommu_preregistered(void)
{
	if (!current || !current->mm)
		return false;

	return !list_empty(&current->mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
		int **resultp)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CMA);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}

long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}

		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (get_pageblock_migratetype(page) == MIGRATE_CMA) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

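/*
 * Illustrative caller sketch only; @ua and @size stand for a page-aligned
 * userspace request and are not defined in this file:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	long ret = mm_iommu_get(ua, size >> PAGE_SHIFT, &mem);
 *
 *	if (!ret) {
 *		... translate with mm_iommu_ua_to_hpa() and program TCEs ...
 *		mm_iommu_put(mem);
 *	}
 *
 * In-tree, the VFIO SPAPR TCE driver is the expected user of this interface.
 */
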
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
	call_rcu(&mem->rcu, mm_iommu_free);
}

long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem,
			&current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem,
			&current->mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

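/*
 * Translation sketch with illustrative values (not from this file): for a
 * region starting at ua = 0x10000000 on a 64K-page kernel, ua = 0x10012345
 * selects entry 1 of hpas[], and the returned *hpa is hpas[1] with the low
 * 0x2345 in-page offset bits from @ua OR-ed back in.
 */
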
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

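/*
 * Expected pairing (sketch): a driver calls mm_iommu_mapped_inc() before
 * inserting a hardware TCE that points into this region and
 * mm_iommu_mapped_dec() once that TCE is cleared; while such mappings are
 * outstanding, mm_iommu_put() refuses to release the region with -EBUSY.
 */
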
void mm_iommu_init(mm_context_t *ctx)
{
	INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
}

void mm_iommu_cleanup(mm_context_t *ctx)
{
	struct mm_iommu_table_group_mem_t *mem, *tmp;

	list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
		list_del_rcu(&mem->next);
		mm_iommu_do_free(mem);
	}
}