/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/push906f.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <nvhw/class/cla0b5.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: This is ugly. Right now we are using TTM to allocate vram and we pin
 * it in vram while in use. We likely want to overhaul memory management for
 * nouveau to be more page like (not necessarily with system page size but a
 * bigger page size) at the lowest level and have some shim layer on top that
 * would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

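/*
 * Worked example of the sizing above: DMEM_CHUNK_SIZE is 2 MiB, so with a
 * 4 KiB PAGE_SIZE (an assumption; the real divisor is whatever PAGE_SHIFT
 * gives) each chunk provides DMEM_CHUNK_NPAGES = 512 device-private pages.
 */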
enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                    enum nouveau_aper, u64 dst_addr);

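/*
 * A chunk is one contiguous slice of VRAM (a pinned nouveau_bo of
 * DMEM_CHUNK_SIZE bytes) plus the dev_pagemap that gives every page in that
 * slice a device-private struct page. callocated counts the pages currently
 * handed out from this chunk.
 */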
struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

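/*
 * Per-device state: the list of allocated chunks (protected by mutex) and a
 * singly linked free list of device pages threaded through
 * page->zone_device_data (protected by lock).
 */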
struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}

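/*
 * Translate a device-private page to its physical VRAM address: the page's
 * offset within the chunk's pagemap range added to the chunk BO's offset.
 */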
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.range.start;

        return chunk->bo->offset + off;
}

static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /*
                 * FIXME wait for channel to be IDLE before finalizing.
                 */
        }
}

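/*
 * Copy a single page back to system memory on CPU fault: allocate and map a
 * system page, then use the copy engine to read the data out of VRAM. The
 * caller (nouveau_dmem_migrate_to_ram) owns the migrate_vma state.
 */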
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        struct nouveau_svmm *svmm;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        svmm = spage->zone_device_data;
        mutex_lock(&svmm->mutex);
        nouveau_svmm_invalidate(svmm, args->start, args->end);
        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;
        mutex_unlock(&svmm->mutex);

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        mutex_unlock(&svmm->mutex);
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}

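/*
 * dev_pagemap migrate_to_ram() callback: migrate exactly one device page back
 * to system memory when the CPU touches it (see the FIXME below about
 * batching more than one page per fault).
 */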
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma		= vmf->vma,
                .start		= vmf->address,
                .end		= vmf->address + PAGE_SIZE,
                .src		= &src,
                .dst		= &dst,
                .pgmap_owner	= drm->dev,
                .flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
        };

        /*
         * FIXME what we really want is to find some heuristic to migrate more
         * than just one page on CPU fault. When such a fault happens it is
         * very likely that more surrounding pages will CPU fault too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free		= nouveau_dmem_page_free,
        .migrate_to_ram		= nouveau_dmem_migrate_to_ram,
};

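/*
 * Allocate one DMEM_CHUNK_SIZE chunk: reserve a free physical address range
 * for the device-private pages, back it with a pinned VRAM BO, register the
 * pagemap, and put all but the last page on the free list. The last page is
 * returned to the caller through ppage.
 */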
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.range.start = res->start;
        chunk->pagemap.range.end = res->end;
        chunk->pagemap.nr_range = 1;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);

        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
        kfree(chunk);
out:
        return ret;
}

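/*
 * Pop a device page off the free list, allocating a new chunk when the list
 * is empty. The returned page is referenced and locked, hence "_locked".
 */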
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.range.start,
                                   range_len(&chunk->pagemap.range));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}

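/*
 * Copy npages pages between apertures using the cla0b5 copy engine. The
 * source/destination physical mode is programmed per aperture, and the copy
 * is described as a pitch-linear multi-line transfer of PAGE_SIZE-wide lines.
 */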
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_SRC_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_SRC_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, SRC_TYPE, PHYSICAL);
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                        break;
                case NOUVEAU_APER_HOST:
                        PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                                  NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                        break;
                default:
                        return -EINVAL;
                }

                launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);
        }

        PUSH_MTHD(push, NVA0B5, OFFSET_IN_UPPER,
                  NVVAL(NVA0B5, OFFSET_IN_UPPER, UPPER, upper_32_bits(src_addr)),

                                OFFSET_IN_LOWER, lower_32_bits(src_addr),

                                OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr),
                                PITCH_IN, PAGE_SIZE,
                                PITCH_OUT, PAGE_SIZE,
                                LINE_LENGTH_IN, PAGE_SIZE,
                                LINE_COUNT, npages);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

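/*
 * Clear "length" bytes at dst_addr with the copy engine's remap path: the
 * constant registers provide two 4-byte zero components per element, hence
 * the length >> 3 line length below; length should therefore be a multiple
 * of 8.
 */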
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
        u32 launch_dma = 0;
        int ret;

        ret = PUSH_WAIT(push, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, LOCAL_FB));
                break;
        case NOUVEAU_APER_HOST:
                PUSH_IMMD(push, NVA0B5, SET_DST_PHYS_MODE,
                          NVDEF(NVA0B5, SET_DST_PHYS_MODE, TARGET, COHERENT_SYSMEM));
                break;
        default:
                return -EINVAL;
        }

        launch_dma |= NVDEF(NVA0B5, LAUNCH_DMA, DST_TYPE, PHYSICAL);

        PUSH_MTHD(push, NVA0B5, SET_REMAP_CONST_A, 0,
                                SET_REMAP_CONST_B, 0,

                                SET_REMAP_COMPONENTS,
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_X, CONST_A) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, DST_Y, CONST_B) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, COMPONENT_SIZE, FOUR) |
                  NVDEF(NVA0B5, SET_REMAP_COMPONENTS, NUM_DST_COMPONENTS, TWO));

        PUSH_MTHD(push, NVA0B5, OFFSET_OUT_UPPER,
                  NVVAL(NVA0B5, OFFSET_OUT_UPPER, UPPER, upper_32_bits(dst_addr)),

                                OFFSET_OUT_LOWER, lower_32_bits(dst_addr));

        PUSH_MTHD(push, NVA0B5, LINE_LENGTH_IN, length >> 3);

        PUSH_MTHD(push, NVA0B5, LAUNCH_DMA, launch_dma |
                  NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
                  NVDEF(NVA0B5, LAUNCH_DMA, FLUSH_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SEMAPHORE_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, INTERRUPT_TYPE, NONE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, SRC_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, DST_MEMORY_LAYOUT, PITCH) |
                  NVDEF(NVA0B5, LAUNCH_DMA, MULTI_LINE_ENABLE, FALSE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, REMAP_ENABLE, TRUE) |
                  NVDEF(NVA0B5, LAUNCH_DMA, BYPASS_L2, USE_PTE_SETTING));
        return 0;
}

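/*
 * Hook up the copy/clear helpers for the copy-engine class that TTM already
 * selected; only Pascal and newer classes are supported here.
 */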
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}

void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on PASCAL or newer */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}

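/*
 * Migrate one system page into VRAM: allocate a device page, then either copy
 * the source page's contents with the copy engine or, when there is no source
 * page, clear the freshly allocated VRAM. Also builds the PFN value used
 * later to update the GPU page tables via nouveau_pfns_map().
 */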
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, unsigned long src,
                dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        dpage->zone_device_data = svmm;
        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}

static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
                                args->src[i], dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}

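/*
 * Migrate an address range [start, end) of a process into VRAM, in batches of
 * at most SG_MAX_SINGLE_ALLOC pages per migrate_vma_setup() call.
 */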
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma		= vma,
                .start		= start,
                .pgmap_owner	= drm->dev,
                .flags		= MIGRATE_VMA_SELECT_SYSTEM,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                args.end = start + (max << PAGE_SHIFT);
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}