/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
static DEFINE_MUTEX(mem_list_mutex);
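/*
 * Describes one region of userspace memory preregistered for IOMMU use:
 * the userspace address, the number of system pages and a vmalloc'ed
 * array of host physical addresses of the pinned pages. @used counts
 * preregistrations of the same region, @mapped counts active hardware
 * mappings (plus one while the region is alive).
 */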
struct mm_iommu_table_group_mem_t {
        struct list_head next;
        struct rcu_head rcu;
        unsigned long used;
        atomic64_t mapped;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas[] */
        u64 *hpas;              /* vmalloc'ed */
};
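/*
 * Charge or uncharge @npages against the mm's RLIMIT_MEMLOCK accounting
 * (mm->locked_vm); the increment fails if the limit would be exceeded
 * and the caller lacks CAP_IPC_LOCK.
 */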
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
                unsigned long npages, bool incr)
{
        long ret = 0, locked, lock_limit;

        down_write(&mm->mmap_sem);

        if (incr) {
                locked = mm->locked_vm + npages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        mm->locked_vm += npages;
        } else {
                if (WARN_ON_ONCE(npages > mm->locked_vm))
                        npages = mm->locked_vm;
                mm->locked_vm -= npages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
                        current->pid,
                        incr ? '+' : '-',
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);

        return ret;
}
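/*
 * Returns true if the current mm has at least one preregistered region
 * on its iommu_group_mem_list.
 */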
bool mm_iommu_preregistered(void)
{
        if (!current || !current->mm)
                return false;

        return !list_empty(&current->mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
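/*
 * Preregister @entries pages of userspace memory starting at @ua: pin the
 * pages with get_user_pages_fast(), record their host physical addresses
 * in a new descriptor and add it to the per-mm list. An identical region
 * that is already registered only gets its use count bumped; a region that
 * partially overlaps an existing one is rejected.
 */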
long mm_iommu_get(unsigned long ua, unsigned long entries,
                struct mm_iommu_table_group_mem_t **pmem)
{
        struct mm_iommu_table_group_mem_t *mem;
        long i, j, ret = 0, locked_entries = 0;
        struct page *page = NULL;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        mutex_lock(&mem_list_mutex);

        list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
                        next) {
                /* Exact match: reuse the existing registration */
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ++mem->used;
                        *pmem = mem;
                        goto unlock_exit;
                }

                /* Overlap? */
                if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
                                (ua < (mem->ua +
                                       (mem->entries << PAGE_SHIFT)))) {
                        ret = -EINVAL;
                        goto unlock_exit;
                }
        }

        ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
        if (ret)
                goto unlock_exit;

        locked_entries = entries;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
        if (!mem->hpas) {
                kfree(mem);
                ret = -ENOMEM;
                goto unlock_exit;
        }

        for (i = 0; i < entries; ++i) {
                if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
                                        1/* pages */, 1/* iswrite */, &page)) {
                        /* Undo the pinning done so far */
                        for (j = 0; j < i; ++j)
                                put_page(pfn_to_page(
                                                mem->hpas[j] >> PAGE_SHIFT));
                        vfree(mem->hpas);
                        kfree(mem);
                        ret = -EFAULT;
                        goto unlock_exit;
                }

                mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
        }

        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
        *pmem = mem;

        list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);

unlock_exit:
        if (locked_entries && ret)
                mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);

        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);
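/*
 * Teardown path: mm_iommu_unpin() releases the page references taken in
 * mm_iommu_get(), mm_iommu_do_free() frees the descriptor, and
 * mm_iommu_release() unlinks a region and defers the actual freeing to an
 * RCU callback so that concurrent lockless lookups stay safe.
 */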
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
        long i;
        struct page *page = NULL;

        for (i = 0; i < mem->entries; ++i) {
                if (!mem->hpas[i])
                        continue;

                page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
                put_page(page);
                mem->hpas[i] = 0;
        }
}
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
        mm_iommu_unpin(mem);
        vfree(mem->hpas);
        kfree(mem);
}
static void mm_iommu_free(struct rcu_head *head)
{
        struct mm_iommu_table_group_mem_t *mem = container_of(head,
                        struct mm_iommu_table_group_mem_t, rcu);

        mm_iommu_do_free(mem);
}
static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
        list_del_rcu(&mem->next);
        mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
        call_rcu(&mem->rcu, mm_iommu_free);
}
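/*
 * Drop one reference taken by mm_iommu_get(). The region is only released
 * once the last user is gone and no hardware mappings remain (@mapped
 * drops from 1 to 0).
 */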
long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
{
        long ret = 0;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        mutex_lock(&mem_list_mutex);

        if (mem->used == 0) {
                ret = -ENOENT;
                goto unlock_exit;
        }

        --mem->used;
        /* There are still users, exit */
        if (mem->used)
                goto unlock_exit;

        /* Are there still mappings? */
        if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
                ++mem->used;
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* @mapped became 0 so now mappings are disabled, release the region */
        mm_iommu_release(mem);

unlock_exit:
        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);
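/*
 * Lockless lookups on the per-mm list: mm_iommu_lookup() finds the region
 * containing the range [ua, ua + size), mm_iommu_find() requires an exact
 * match of @ua and @entries. The traversal uses the RCU list primitives,
 * so callers must be in an RCU read-side critical section or otherwise
 * prevent concurrent removal.
 */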
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
                unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem,
                        &current->mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua <= ua) &&
                                (ua + size <= mem->ua +
                                 (mem->entries << PAGE_SHIFT))) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
                unsigned long entries)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem,
                        &current->mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);
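/*
 * Translate a userspace address within a preregistered region to the host
 * physical address of the pinned page, preserving the offset within the
 * page.
 */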
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned long *hpa)
{
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        u64 *va = &mem->hpas[entry];

        if (entry >= mem->entries)
                return -EFAULT;

        *hpa = *va | (ua & ~PAGE_MASK);

        return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
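/*
 * @mapped starts at 1 and acts as "the region is still alive plus the
 * number of active hardware mappings": mm_iommu_mapped_inc() fails once
 * mm_iommu_put() has dropped it to 0, and mm_iommu_mapped_dec() never takes
 * it below 1, leaving the final 1 -> 0 transition to mm_iommu_put().
 */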
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
        if (atomic64_inc_not_zero(&mem->mapped))
                return 0;

        /* Last mm_iommu_put() has been called, no more mappings allowed */
        return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
        atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
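/*
 * Called from the powerpc mm context setup/teardown paths: initialise the
 * per-mm list of preregistered regions and free whatever is left on it
 * when the context goes away.
 */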
void mm_iommu_init(mm_context_t *ctx)
{
        INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
}
void mm_iommu_cleanup(mm_context_t *ctx)
{
        struct mm_iommu_table_group_mem_t *mem, *tmp;

        list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
                list_del_rcu(&mem->next);
                mm_iommu_do_free(mem);
        }
}