/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
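
/*
 * Illustrative sketch only (not from the hardware docs; the shift value is a
 * hypothetical example): a virtual address is resolved through two levels,
 * a page directory (one entry per page table) and the page tables themselves.
 * Assuming 4KB GPU pages and a radeon_vm_block_size of 9 (512 PTEs per table):
 *
 *   pfn     = va >> 12;                page frame number
 *   pde_idx = pfn >> 9;                index into the page directory
 *   pte_idx = pfn & ((1 << 9) - 1);    index into the selected page table
 *
 * In the code below the shift is radeon_vm_block_size and the per-table
 * entry count is RADEON_VM_PTE_COUNT.
 */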

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
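
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * vm_manager.max_pfn = 1 << 20 (4GB worth of 4KB pages) and a
 * radeon_vm_block_size of 9, the helpers above give
 *
 *   radeon_vm_num_pdes()       = (1 << 20) >> 9 = 2048 directory entries
 *   radeon_vm_directory_size() = 2048 * 8 bytes = 16KB (page aligned)
 *
 * i.e. one 8-byte PDE per page table, each table covering
 * 1 << radeon_vm_block_size pages.
 */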

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx;

	list = kvmalloc_array(vm->max_pde_used + 2,
			      sizeof(struct radeon_bo_list), GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list[0].tiling_flags = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list[idx].tiling_flags = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
		return NULL;

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_radeon_vm_grab_id(i, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm_id->id = choices[i];
			trace_radeon_vm_grab_id(choices[i], ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 * @updates: last vm update that is waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring, struct radeon_fence *updates)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
	    radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {

		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
		radeon_fence_unref(&vm_id->flushed_updates);
		vm_id->flushed_updates = radeon_fence_ref(updates);
		vm_id->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
				     vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	unsigned vm_id = vm->ids[fence->ring].id;

	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->it.start = 0;
	bo_va->it.last = 0;
	bo_va->flags = 0;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_set_pages - helper to call the right asic function
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
					   count, incr, flags);

	} else {
		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
					 count, incr, flags);
	}
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = radeon_bo_reserve(bo, false);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r)
		goto error_unreserve;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
	if (r)
		goto error_unreserve;

	ib.length_dw = 0;

	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > 64);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		goto error_free;

	ib.fence->is_vm_update = true;
	radeon_bo_fence(bo, ib.fence, false);

error_free:
	radeon_ib_free(rdev, &ib);

error_unreserve:
	radeon_bo_unreserve(bo);
	return r;
}

/**
 * radeon_vm_bo_set_addr - set bos virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	struct radeon_vm *vm = bo_va->vm;
	unsigned last_pfn, pt_idx;
	uint64_t eoffset;
	int r;

	if (soffset) {
		/* make sure object fit at this offset */
		eoffset = soffset + size - 1;
		if (soffset >= eoffset) {
			r = -EINVAL;
			goto error_unreserve;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn >= rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			r = -EINVAL;
			goto error_unreserve;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	soffset /= RADEON_GPU_PAGE_SIZE;
	eoffset /= RADEON_GPU_PAGE_SIZE;
	if (soffset || eoffset) {
		struct interval_tree_node *it;
		it = interval_tree_iter_first(&vm->va, soffset, eoffset);
		if (it && it != &bo_va->it) {
			struct radeon_bo_va *tmp;
			tmp = container_of(it, struct radeon_bo_va, it);
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
				soffset, tmp->bo, tmp->it.start, tmp->it.last);
			mutex_unlock(&vm->mutex);
			r = -EINVAL;
			goto error_unreserve;
		}
	}

	if (bo_va->it.start || bo_va->it.last) {
		/* add a clone of the bo_va to clear the old address */
		struct radeon_bo_va *tmp;
		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
		if (!tmp) {
			mutex_unlock(&vm->mutex);
			r = -ENOMEM;
			goto error_unreserve;
		}
		tmp->it.start = bo_va->it.start;
		tmp->it.last = bo_va->it.last;
		tmp->vm = vm;
		tmp->bo = radeon_bo_ref(bo_va->bo);

		interval_tree_remove(&bo_va->it, &vm->va);
		spin_lock(&vm->status_lock);
		bo_va->it.start = 0;
		bo_va->it.last = 0;
		list_del_init(&bo_va->vm_status);
		list_add(&tmp->vm_status, &vm->freed);
		spin_unlock(&vm->status_lock);
	}

	if (soffset || eoffset) {
		spin_lock(&vm->status_lock);
		bo_va->it.start = soffset;
		bo_va->it.last = eoffset;
		list_add(&bo_va->vm_status, &vm->cleared);
		spin_unlock(&vm->status_lock);
		interval_tree_insert(&bo_va->it, &vm->va);
	}

	bo_va->flags = flags;

	soffset >>= radeon_vm_block_size;
	eoffset >>= radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     NULL, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;

error_unreserve:
	radeon_bo_unreserve(bo_va->bo);
	return r;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	result &= ~RADEON_GPU_PAGE_MASK;

	return result;
}

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}
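
/*
 * Example translation (derived from the checks above, shown for clarity): a
 * userspace mapping requested with
 *   RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
 *   RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED
 * is turned into the hardware PTE flags
 *   R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_SYSTEM | R600_PTE_SNOOPED.
 */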

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_vm_set_pages(rdev, &ib, last_pde,
						    last_pt, count, incr,
						    R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
				    incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_asic_vm_pad_ib(rdev, &ib);

		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
		WARN_ON(ib.length_dw > ndw);
		r = radeon_ib_schedule(rdev, &ib, NULL, false);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		ib.fence->is_vm_update = true;
		radeon_bo_fence(pd, ib.fence, false);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
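
	/*
	 * The alignment constants below follow from the fragment sizes (a
	 * derived illustration, not new hardware data): a 64KB fragment spans
	 * 16 4KB pages, i.e. 16 PTEs * 8 bytes = 0x80 bytes of page table,
	 * while a 256KB fragment spans 64 PTEs * 8 bytes = 0x200 bytes, so
	 * frag_align is the byte alignment of the PTE range being written,
	 * not of the virtual address itself.
	 */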

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ?
			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not physically contiguous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
	}
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_ptes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}

	return 0;
}

/**
 * radeon_vm_fence_pts - fence page tables after an update
 *
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @fence: fence to use
 *
 * Fence the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_fence_pts(struct radeon_vm *vm,
				uint64_t start, uint64_t end,
				struct radeon_fence *fence)
{
	unsigned i;

	start >>= radeon_vm_block_size;
	end = (end - 1) >> radeon_vm_block_size;

	for (i = start; i <= end; ++i)
		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_ib ib;
	unsigned nptes, ncmds, ndw;
	uint64_t flags;
	uint64_t addr;
	int r;

	if (!bo_va->it.start) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo_va->bo, vm);
		return -EINVAL;
	}

	spin_lock(&vm->status_lock);
	if (mem) {
		if (list_empty(&bo_va->vm_status)) {
			spin_unlock(&vm->status_lock);
			return 0;
		}
		list_del_init(&bo_va->vm_status);
	} else {
		list_del(&bo_va->vm_status);
		list_add(&bo_va->vm_status, &vm->cleared);
	}
	spin_unlock(&vm->status_lock);

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;

	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = bo_va->it.last - bo_va->it.start + 1;

	/* reserve space for one command every (1 << BLOCK_SIZE) entries
	   or 2k dwords (whatever is smaller) */
	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;

	/* padding, etc. */
	ndw = 64;

	flags = radeon_vm_page_flags(bo_va->flags);
	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & R600_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
		unsigned i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
	}

	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
				  bo_va->it.last + 1, addr,
				  radeon_vm_page_flags(bo_va->flags));
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > ndw);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	ib.fence->is_vm_update = true;
	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
	radeon_fence_unref(&bo_va->last_pt_update);
	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->freed)) {
		bo_va = list_first_entry(&vm->freed,
			struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		spin_lock(&vm->status_lock);
		list_del(&bo_va->vm_status);
		kfree(bo_va);
		if (r)
			break;
	}
	spin_unlock(&vm->status_lock);
	return r;
}

/**
 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_invalids(struct radeon_device *rdev,
			     struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va)
{
	struct radeon_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);
	if (bo_va->it.start || bo_va->it.last)
		interval_tree_remove(&bo_va->it, &vm->va);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	if (bo_va->it.start || bo_va->it.last) {
		bo_va->bo = radeon_bo_ref(bo_va->bo);
		list_add(&bo_va->vm_status, &vm->freed);
	} else {
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}
	spin_unlock(&vm->status_lock);

	mutex_unlock(&vm->mutex);
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status) &&
		    (bo_va->it.start || bo_va->it.last))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
				   RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	vm->ib_bo_va = NULL;
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	vm->va = RB_ROOT_CACHED;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->freed);
	INIT_LIST_HEAD(&vm->cleared);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(bo_va, tmp,
					     &vm->va.rb_root, it.rb) {
		interval_tree_remove(&bo_va->it, &vm->va);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			radeon_fence_unref(&bo_va->last_pt_update);
			kfree(bo_va);
		}
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		radeon_fence_unref(&vm->ids[i].flushed_updates);
		radeon_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}