/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>

struct nouveau_svm {
        struct nouveau_drm *drm;
        struct mutex mutex;
        struct list_head inst;

        struct nouveau_svm_fault_buffer {
                int id;
                struct nvif_object object;
                u32 entries;
                u32 getaddr;
                u32 putaddr;
                u32 get;
                u32 put;
                struct nvif_notify notify;

                struct nouveau_svm_fault {
                        u64 inst;
                        u64 addr;
                        u64 time;
                        u32 engine;
                        u8  gpc;
                        u8  hub;
                        u8  access;
                        u8  client;
                        u8  fault;
                        struct nouveau_svmm *svmm;
                } **fault;
                int fault_nr;
        } buffer[1];
};

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

struct nouveau_pfnmap_args {
        struct nvif_ioctl_v0 i;
        struct nvif_ioctl_mthd_v0 m;
        struct nvif_vmm_pfnmap_v0 p;
};

struct nouveau_ivmm {
        struct nouveau_svmm *svmm;
        u64 inst;
        struct list_head head;
};

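/* Look up the instance-to-SVMM tracking entry for a channel instance pointer. */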
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        list_for_each_entry(ivmm, &svm->inst, head) {
                if (ivmm->inst == inst)
                        return ivmm;
        }
        return NULL;
}

#define SVMM_DBG(s,f,a...)                                                     \
        NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...)                                                     \
        NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)

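/* DRM_NOUVEAU_SVM_BIND ioctl: validate the requested command and target,
 * then migrate each VMA intersecting the supplied address range to GPU VRAM.
 */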
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_svm_bind *args = data;
        unsigned target, cmd, priority;
        unsigned long addr, end;
        struct mm_struct *mm;

        args->va_start &= PAGE_MASK;
        args->va_end = ALIGN(args->va_end, PAGE_SIZE);

        /* Sanity check arguments */
        if (args->reserved0 || args->reserved1)
                return -EINVAL;
        if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
                return -EINVAL;
        if (args->va_start >= args->va_end)
                return -EINVAL;

        cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
        cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
        switch (cmd) {
        case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
                break;
        default:
                return -EINVAL;
        }

        priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
        priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

        /* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
        target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
        target &= NOUVEAU_SVM_BIND_TARGET_MASK;
        switch (target) {
        case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
                break;
        default:
                return -EINVAL;
        }

        /*
         * FIXME: refuse a non-zero stride for now; the migrate kernel
         * function needs to learn about strides first, so that we avoid
         * creating a mess within each device driver.
         */
        if (args->stride)
                return -EINVAL;

        /*
         * OK, we are asked to do something sane. For now we only support
         * migrate commands, but we will add things like memory policy
         * (what to do on page fault) and maybe some other commands.
         */

        mm = get_task_mm(current);
        mmap_read_lock(mm);

        if (!cli->svm.svmm) {
                mmap_read_unlock(mm);
                mmput(mm);
                return -EINVAL;
        }

        for (addr = args->va_start, end = args->va_end; addr < end;) {
                struct vm_area_struct *vma;
                unsigned long next;

                vma = find_vma_intersection(mm, addr, end);
                if (!vma)
                        break;

                addr = max(addr, vma->vm_start);
                next = min(vma->vm_end, end);
                /* This is a best effort so we ignore errors */
                nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
                                         next);
                addr = next;
        }

        /*
         * FIXME: return the number of pages we have migrated; again, the
         * migrate API needs updating to return that information so that we
         * can report it to user space.
         */
        args->result = 0;

        mmap_read_unlock(mm);
        mmput(mm);

        return 0;
}

/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
                if (ivmm) {
                        list_del(&ivmm->head);
                        kfree(ivmm);
                }
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
                        return -ENOMEM;
                ivmm->svmm = svmm;
                ivmm->inst = inst;

                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
        return 0;
}

/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
        if (limit > start) {
                bool super = svmm->vmm->vmm.object.client->super;

                svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
                svmm->vmm->vmm.object.client->super = super;
        }
}

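/* mmu_notifier callback: mirror CPU-side invalidations by clearing the
 * corresponding GPU PTEs, except for device-private migrations, which are
 * handled by the migration path itself.
 */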
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *update)
{
        struct nouveau_svmm *svmm =
                container_of(mn, struct nouveau_svmm, notifier);
        unsigned long start = update->start;
        unsigned long limit = update->end;

        if (!mmu_notifier_range_blockable(update))
                return -EAGAIN;

        SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

        mutex_lock(&svmm->mutex);
        if (unlikely(!svmm->vmm))
                goto out;

        /*
         * Ignore invalidation callbacks for device private pages since
         * the invalidation is handled as part of the migration process.
         */
        if (update->event == MMU_NOTIFY_MIGRATE &&
            update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
                goto out;

        if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
                if (start < svmm->unmanaged.start) {
                        nouveau_svmm_invalidate(svmm, start,
                                                svmm->unmanaged.limit);
                }
                start = svmm->unmanaged.limit;
        }

        nouveau_svmm_invalidate(svmm, start, limit);

out:
        mutex_unlock(&svmm->mutex);
        return 0;
}

static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
        kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
        .invalidate_range_start = nouveau_svmm_invalidate_range_start,
        .free_notifier = nouveau_svmm_free_notifier,
};

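/* Tear down a client's SVMM; the structure itself is freed via the
 * mmu_notifier free_notifier callback once all notifier references are gone.
 */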
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
        struct nouveau_svmm *svmm = *psvmm;
        if (svmm) {
                mutex_lock(&svmm->mutex);
                svmm->vmm = NULL;
                mutex_unlock(&svmm->mutex);
                mmu_notifier_put(&svmm->notifier);
                *psvmm = NULL;
        }
}

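/* DRM_NOUVEAU_SVM_INIT ioctl: allocate per-client SVMM state, replace the
 * client's VMM with one that has replayable faults enabled, and register an
 * mmu_notifier so CPU-side invalidations reach the GPU.
 */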
int
nouveau_svmm_init(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_svmm *svmm;
        struct drm_nouveau_svm_init *args = data;
        int ret;

        /* Allocate tracking for SVM-enabled VMM. */
        if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
                return -ENOMEM;
        svmm->vmm = &cli->svm;
        svmm->unmanaged.start = args->unmanaged_addr;
        svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
        mutex_init(&svmm->mutex);

        /* Check that SVM isn't already enabled for the client. */
        mutex_lock(&cli->mutex);
        if (cli->svm.cli) {
                ret = -EBUSY;
                goto out_free;
        }

        /* Allocate a new GPU VMM that can support SVM (managed by the
         * client, with replayable faults enabled).
         *
         * All future channel/memory allocations will make use of this
         * VMM instead of the standard one.
         */
        ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
                            cli->vmm.vmm.object.oclass, true,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
                            }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
        if (ret)
                goto out_free;

        mmap_write_lock(current->mm);
        svmm->notifier.ops = &nouveau_mn_ops;
        ret = __mmu_notifier_register(&svmm->notifier, current->mm);
        if (ret)
                goto out_mm_unlock;
        /* Note, ownership of svmm transfers to mmu_notifier */

        cli->svm.svmm = svmm;
        cli->svm.cli = cli;
        mmap_write_unlock(current->mm);
        mutex_unlock(&cli->mutex);
        return 0;

out_mm_unlock:
        mmap_write_unlock(current->mm);
out_free:
        mutex_unlock(&cli->mutex);
        kfree(svmm);
        return ret;
}

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
        SVM_DBG(svm, "replay");
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_REPLAY,
                                 &(struct gp100_vmm_fault_replay_vn) {},
                                 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
                         u64 inst, u8 hub, u8 gpc, u8 client)
{
        SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_CANCEL,
                                 &(struct gp100_vmm_fault_cancel_v0) {
                                        .hub = hub,
                                        .gpc = gpc,
                                        .client = client,
                                        .inst = inst,
                                 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
                               struct nouveau_svm_fault *fault)
{
        nouveau_svm_fault_cancel(svm, fault->inst,
                                      fault->hub,
                                      fault->gpc,
                                      fault->client);
}

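/* Sort comparator: order faults by instance pointer, then address, then
 * access type, so write faults sort ahead of reads and prefetches at the
 * same address.
 */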
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
        const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
        const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
        int ret;
        if ((ret = (s64)fa->inst - fb->inst))
                return ret;
        if ((ret = (s64)fa->addr - fb->addr))
                return ret;
        return (fa->access == 0 || fa->access == 3) -
               (fb->access == 0 || fb->access == 3);
}

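/* Decode one hardware fault buffer entry at the given offset into the
 * software fault cache, allocating the cache slot on first use.
 */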
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
                        struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
        struct nvif_object *memory = &buffer->object;
        const u32 instlo = nvif_rd32(memory, offset + 0x00);
        const u32 insthi = nvif_rd32(memory, offset + 0x04);
        const u32 addrlo = nvif_rd32(memory, offset + 0x08);
        const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
        const u32 timelo = nvif_rd32(memory, offset + 0x10);
        const u32 timehi = nvif_rd32(memory, offset + 0x14);
        const u32 engine = nvif_rd32(memory, offset + 0x18);
        const u32   info = nvif_rd32(memory, offset + 0x1c);
        const u64   inst = (u64)insthi << 32 | instlo;
        const u8     gpc = (info & 0x1f000000) >> 24;
        const u8     hub = (info & 0x00100000) >> 20;
        const u8  client = (info & 0x00007f00) >> 8;
        struct nouveau_svm_fault *fault;

        //XXX: I think we're supposed to spin waiting
        if (WARN_ON(!(info & 0x80000000)))
                return;

        nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

        if (!buffer->fault[buffer->fault_nr]) {
                fault = kmalloc(sizeof(*fault), GFP_KERNEL);
                if (WARN_ON(!fault)) {
                        nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
                        return;
                }
                buffer->fault[buffer->fault_nr] = fault;
        }

        fault = buffer->fault[buffer->fault_nr++];
        fault->inst   = inst;
        fault->addr   = (u64)addrhi << 32 | addrlo;
        fault->time   = (u64)timehi << 32 | timelo;
        fault->engine = engine;
        fault->gpc    = gpc;
        fault->hub    = hub;
        fault->access = (info & 0x000f0000) >> 16;
        fault->client = client;
        fault->fault  = (info & 0x0000001f);

        SVM_DBG(svm, "fault %016llx %016llx %02x",
                fault->inst, fault->addr, fault->access);
}

struct svm_notifier {
        struct mmu_interval_notifier notifier;
        struct nouveau_svmm *svmm;
};

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
                                         const struct mmu_notifier_range *range,
                                         unsigned long cur_seq)
{
        struct svm_notifier *sn =
                container_of(mni, struct svm_notifier, notifier);

        /*
         * serializes the update to mni->invalidate_seq done by the caller
         * and prevents invalidation of the PTE from progressing while HW is
         * being programmed. This is very hacky and only works because the
         * normal notifier that does invalidation is always called after the
         * range notifier.
         */
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&sn->svmm->mutex);
        else if (!mutex_trylock(&sn->svmm->mutex))
                return false;
        mmu_interval_set_seq(mni, cur_seq);
        mutex_unlock(&sn->svmm->mutex);
        return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
        .invalidate = nouveau_svm_range_invalidate,
};

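/* Translate the hmm_range_fault() result for a single page into the
 * nvif_vmm_pfnmap_v0 encoding the GPU VMM expects: VRAM vs. host backing,
 * writability, and a larger page size where the CPU mapping allows it.
 */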
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
                                    struct hmm_range *range,
                                    struct nouveau_pfnmap_args *args)
{
        struct page *page;

        /*
         * The address prepared here is passed through nvif_object_ioctl()
         * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
         *
         * This is all just encoding the internal hmm representation into a
         * different nouveau internal representation.
         */
        if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
                args->p.phys[0] = 0;
                return;
        }

        page = hmm_pfn_to_page(range->hmm_pfns[0]);
        /*
         * Only map compound pages to the GPU if the CPU is also mapping the
         * page as a compound page. Otherwise, the PTE protections might not be
         * consistent (e.g., CPU only maps part of a compound page).
         * Note that the underlying page might still be larger than the
         * CPU mapping (e.g., a PUD sized compound page partially mapped with
         * a PMD sized page table entry).
         */
        if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
                unsigned long addr = args->p.addr;

                args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
                               PAGE_SHIFT;
                args->p.size = 1UL << args->p.page;
                args->p.addr &= ~(args->p.size - 1);
                page -= (addr - args->p.addr) >> PAGE_SHIFT;
        }

        if (is_device_private_page(page))
                args->p.phys[0] = nouveau_dmem_page_addr(page) |
                                  NVIF_VMM_PFNMAP_V0_V |
                                  NVIF_VMM_PFNMAP_V0_VRAM;
        else
                args->p.phys[0] = page_to_phys(page) |
                                  NVIF_VMM_PFNMAP_V0_V |
                                  NVIF_VMM_PFNMAP_V0_HOST;
        if (range->hmm_pfns[0] & HMM_PFN_WRITE)
                args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}

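/* Fault the page covered by the interval notifier with hmm_range_fault()
 * and push the resulting mapping to the GPU, retrying until the notifier
 * sequence is stable (i.e. no concurrent CPU invalidation raced with us).
 */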
static int nouveau_range_fault(struct nouveau_svmm *svmm,
                               struct nouveau_drm *drm,
                               struct nouveau_pfnmap_args *args, u32 size,
                               unsigned long hmm_flags,
                               struct svm_notifier *notifier)
{
        unsigned long timeout =
                jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
        /* Have HMM fault pages within the fault window to the GPU. */
        unsigned long hmm_pfns[1];
        struct hmm_range range = {
                .notifier = &notifier->notifier,
                .start = notifier->notifier.interval_tree.start,
                .end = notifier->notifier.interval_tree.last + 1,
                .default_flags = hmm_flags,
                .hmm_pfns = hmm_pfns,
                .dev_private_owner = drm->dev,
        };
        struct mm_struct *mm = notifier->notifier.mm;
        int ret;

        while (true) {
                if (time_after(jiffies, timeout))
                        return -EBUSY;

                range.notifier_seq = mmu_interval_read_begin(range.notifier);
                mmap_read_lock(mm);
                ret = hmm_range_fault(&range);
                mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;
                        return ret;
                }

                mutex_lock(&svmm->mutex);
                if (mmu_interval_read_retry(range.notifier,
                                            range.notifier_seq)) {
                        mutex_unlock(&svmm->mutex);
                        continue;
                }
                break;
        }

        nouveau_hmm_convert_pfn(drm, &range, args);

        svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
        svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);

        return ret;
}

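/* Top-level replayable fault handler: drain the hardware fault buffer into
 * the software cache, sort and group the faults, service each group through
 * HMM, then ask the GPU to replay the faulting accesses.
 */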
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
        struct nouveau_svm_fault_buffer *buffer =
                container_of(notify, typeof(*buffer), notify);
        struct nouveau_svm *svm =
                container_of(buffer, typeof(*svm), buffer[buffer->id]);
        struct nvif_object *device = &svm->drm->client.device.object;
        struct nouveau_svmm *svmm;
        struct {
                struct nouveau_pfnmap_args i;
                u64 phys[1];
        } args;
        unsigned long hmm_flags;
        u64 inst, start, limit;
        int fi, fn;
        int replay = 0, ret;

        /* Parse available fault buffer entries into a cache, and update
         * the GET pointer so HW can reuse the entries.
         */
        SVM_DBG(svm, "fault handler");
        if (buffer->get == buffer->put) {
                buffer->put = nvif_rd32(device, buffer->putaddr);
                buffer->get = nvif_rd32(device, buffer->getaddr);
                if (buffer->get == buffer->put)
                        return NVIF_NOTIFY_KEEP;
        }
        buffer->fault_nr = 0;

        SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
        while (buffer->get != buffer->put) {
                nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
                if (++buffer->get == buffer->entries)
                        buffer->get = 0;
        }
        nvif_wr32(device, buffer->getaddr, buffer->get);
        SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

        /* Sort parsed faults by instance pointer to prevent unnecessary
         * instance to SVMM translations, followed by address and access
         * type to reduce the amount of work when handling the faults.
         */
        sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
             nouveau_svm_fault_cmp, NULL);

        /* Lookup SVMM structure for each unique instance pointer. */
        mutex_lock(&svm->mutex);
        for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
                if (!svmm || buffer->fault[fi]->inst != inst) {
                        struct nouveau_ivmm *ivmm =
                                nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
                        svmm = ivmm ? ivmm->svmm : NULL;
                        inst = buffer->fault[fi]->inst;
                        SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
                }
                buffer->fault[fi]->svmm = svmm;
        }
        mutex_unlock(&svm->mutex);

        /* Process list of faults. */
        args.i.i.version = 0;
        args.i.i.type = NVIF_IOCTL_V0_MTHD;
        args.i.m.version = 0;
        args.i.m.method = NVIF_VMM_V0_PFNMAP;
        args.i.p.version = 0;

        for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
                struct svm_notifier notifier;
                struct mm_struct *mm;

                /* Cancel any faults from non-SVM channels. */
                if (!(svmm = buffer->fault[fi]->svmm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

                /* We try and group handling of faults within a small
                 * window into a single update.
                 */
                start = buffer->fault[fi]->addr;
                limit = start + PAGE_SIZE;
                if (start < svmm->unmanaged.limit)
                        limit = min_t(u64, limit, svmm->unmanaged.start);

                /*
                 * Prepare the GPU-side update of all pages within the
                 * fault window, determining required pages and access
                 * permissions based on pending faults.
                 */
                args.i.p.addr = start;
                args.i.p.page = PAGE_SHIFT;
                args.i.p.size = PAGE_SIZE;

                /*
                 * Determine required permissions based on GPU fault
                 * access flags.
                 */
                switch (buffer->fault[fi]->access) {
                case 0: /* READ. */
                        hmm_flags = HMM_PFN_REQ_FAULT;
                        break;
                case 3: /* PREFETCH. */
                        hmm_flags = 0;
                        break;
                default:
                        hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
                        break;
                }

                mm = svmm->notifier.mm;
                if (!mmget_not_zero(mm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }

                notifier.svmm = svmm;
                ret = mmu_interval_notifier_insert(&notifier.notifier, mm,
                                                   args.i.p.addr, args.i.p.size,
                                                   &nouveau_svm_mni_ops);
                if (!ret) {
                        ret = nouveau_range_fault(svmm, svm->drm, &args.i,
                                                  sizeof(args), hmm_flags,
                                                  &notifier);
                        mmu_interval_notifier_remove(&notifier.notifier);
                }
                mmput(mm);

                limit = args.i.p.addr + args.i.p.size;
                for (fn = fi; ++fn < buffer->fault_nr; ) {
                        /* It's okay to skip over duplicate addresses from the
                         * same SVMM as faults are ordered by access type such
                         * that only the first one needs to be handled.
                         *
                         * ie. WRITE faults appear first, thus any handling of
                         * pending READ faults will already be satisfied.
                         * But if a large page is mapped, make sure subsequent
                         * fault addresses have sufficient access permission.
                         */
                        if (buffer->fault[fn]->svmm != svmm ||
                            buffer->fault[fn]->addr >= limit ||
                            (buffer->fault[fi]->access == 0 /* READ. */ &&
                             !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
                            (buffer->fault[fi]->access != 0 /* READ. */ &&
                             buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
                             !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)))
                                break;
                }

                /* If handling failed completely, cancel all faults. */
                if (ret) {
                        while (fi < fn) {
                                struct nouveau_svm_fault *fault =
                                        buffer->fault[fi++];

                                nouveau_svm_fault_cancel_fault(svm, fault);
                        }
                } else
                        replay++;
        }

        /* Issue fault replay to the GPU. */
        if (replay)
                nouveau_svm_fault_replay(svm);
        return NVIF_NOTIFY_KEEP;
}

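/* Helpers for building and submitting PFNMAP updates from a bare p.phys[]
 * array; the enclosing nouveau_pfnmap_args is recovered via container_of().
 */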
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
        return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}

u64 *
nouveau_pfns_alloc(unsigned long npages)
{
        struct nouveau_pfnmap_args *args;

        args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
        if (!args)
                return NULL;

        args->i.type = NVIF_IOCTL_V0_MTHD;
        args->m.method = NVIF_VMM_V0_PFNMAP;
        args->p.page = PAGE_SHIFT;

        return args->p.phys;
}

void
nouveau_pfns_free(u64 *pfns)
{
        struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);

        kfree(args);
}

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
                 unsigned long addr, u64 *pfns, unsigned long npages)
{
        struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
        int ret;

        args->p.addr = addr;
        args->p.size = npages << PAGE_SHIFT;

        mutex_lock(&svmm->mutex);

        svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
                                npages * sizeof(args->p.phys[0]), NULL);
        svmm->vmm->vmm.object.client->super = false;

        mutex_unlock(&svmm->mutex);
}

static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        nvif_notify_put(&buffer->notify);
}

static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nvif_object *device = &svm->drm->client.device.object;
        buffer->get = nvif_rd32(device, buffer->getaddr);
        buffer->put = nvif_rd32(device, buffer->putaddr);
        SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
        return nvif_notify_get(&buffer->notify);
}

static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        int i;

        if (buffer->fault) {
                for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
                        kfree(buffer->fault[i]);
                kvfree(buffer->fault);
        }

        nouveau_svm_fault_buffer_fini(svm, id);

        nvif_notify_dtor(&buffer->notify);
        nvif_object_dtor(&buffer->object);
}

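/* Allocate and map the hardware fault buffer object, attach its notify
 * handler, and size the software fault cache to match the buffer's entry
 * count.
 */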
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nouveau_drm *drm = svm->drm;
        struct nvif_object *device = &drm->client.device.object;
        struct nvif_clb069_v0 args = {};
        int ret;

        buffer->id = id;

        ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
                               sizeof(args), &buffer->object);
        if (ret < 0) {
                SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
                return ret;
        }

        nvif_object_map(&buffer->object, NULL, 0);
        buffer->entries = args.entries;
        buffer->getaddr = args.get;
        buffer->putaddr = args.put;

        ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
                               true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
                               &buffer->notify);
        if (ret)
                return ret;

        buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries,
                                 GFP_KERNEL);
        if (!buffer->fault)
                return -ENOMEM;

        return nouveau_svm_fault_buffer_init(svm, id);
}

void
nouveau_svm_resume(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm) {
                nouveau_svm_fault_buffer_dtor(svm, 0);
                kfree(drm->svm);
                drm->svm = NULL;
        }
}

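/* Probe for a supported replayable fault buffer class and, on Pascal and
 * earlier hardware, set up the per-device SVM state used by the fault
 * handler.
 */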
void
nouveau_svm_init(struct nouveau_drm *drm)
{
        static const struct nvif_mclass buffers[] = {
                { VOLTA_FAULT_BUFFER_A, 0 },
                { MAXWELL_FAULT_BUFFER_A, 0 },
                {}
        };
        struct nouveau_svm *svm;
        int ret;

        /* Disable on Volta and newer until channel recovery is fixed,
         * otherwise clients will have a trivial way to trash the GPU
         * for everyone.
         */
        if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
                return;

        drm->svm->drm = drm;
        mutex_init(&drm->svm->mutex);
        INIT_LIST_HEAD(&drm->svm->inst);

        ret = nvif_mclass(&drm->client.device.object, buffers);
        if (ret < 0) {
                SVM_DBG(svm, "No supported fault buffer class");
                nouveau_svm_fini(drm);
                return;
        }

        ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
        if (ret) {
                nouveau_svm_fini(drm);
                return;
        }

        SVM_DBG(svm, "Initialised");
}