/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
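
/*
 * Serialises updates to each mm's list of preregistered IOMMU memory
 * regions (mm->context.iommu_group_mem_list).
 */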
static DEFINE_MUTEX(mem_list_mutex);

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};
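
/*
 * Charge or uncharge @npages pinned pages against the owning mm's
 * RLIMIT_MEMLOCK accounting (mm->locked_vm).
 */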
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
		int **resultp)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);

	return new_page;
}

static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
			NULL, 0, MIGRATE_SYNC, MR_CMA);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		/* A region with the same boundaries is already registered */
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				(mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
							PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				(mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				(mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}