/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}
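
/*
 * Pin the user pages backing [addr, addr + size), charge them against the
 * caller's RLIMIT_MEMLOCK (pinned_vm), and record them as scatterlist
 * chunks on uiomr->chunk_list.
 */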
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	unsigned int gup_flags = FOLL_LONGTERM;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	dma_addr_t pa;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	mmap_read_lock(mm);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	gup_flags |= FOLL_WRITE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
				     PAGE_SIZE / sizeof(struct page *)),
				     gup_flags, page_list);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	mmap_read_unlock(mm);
	free_page((unsigned long) page_list);
	return ret;
}
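
/*
 * Remove the IOMMU translations for every interval on @intervals, one page
 * at a time.
 */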
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
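
/*
 * Tear down a registration: drop its intervals from the PD's interval tree,
 * unmap them from the IOMMU, and unpin the backing pages, dirtying them
 * only if the region was mapped writable.
 */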
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}
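
/*
 * Walk the sorted intervals and create IOMMU mappings for them, coalescing
 * physically contiguous pages into a single iommu_map() call where possible.
 */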
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags, GFP_ATOMIC);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags, GFP_ATOMIC);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}
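
/*
 * Register a userspace memory region with the PD: pin the pages, compute the
 * virtual-page intervals not yet mapped, map them into the IOMMU domain, and
 * insert the new interval into the PD's interval tree.
 */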
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write. This module may not unmap
	 * and then remap the entry after fixing the permission
	 * b/c this open up a small windows where hw DMA may page fault
	 * Hence, make all entries to be writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}
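
/*
 * Allocate a protection domain backed by a fresh IOMMU paging domain for
 * @dev and install the fault handler on it.
 */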
struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
{
	struct usnic_uiom_pd *pd;
	struct iommu_domain *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain)) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_CAST(domain);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}
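
/*
 * Attach @dev to the PD's IOMMU domain and add it to the PD's device list;
 * the device's IOMMU must support cache-coherent DMA.
 */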
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}
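
/*
 * Detach @dev from the PD's IOMMU domain and drop it from the PD's device
 * list.
 */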
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	kfree(uiom_dev);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}

out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)