drivers/gpu/drm/radeon/radeon_ttm.c (as of the io_uring-5.11-2021-01-16 merge of git://git.kernel.dk/linux-block)
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>

#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_ttm.h"

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
			      struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem);
static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm);

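/* Map a ttm_bo_device back to the radeon_device that embeds it. */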
struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

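/*
 * VRAM and GTT are both handled by TTM range managers, sized in pages
 * from the memory-controller configuration.
 */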
static int radeon_ttm_init_vram(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_VRAM,
				  false, rdev->mc.real_vram_size >> PAGE_SHIFT);
}

static int radeon_ttm_init_gtt(struct radeon_device *rdev)
{
	return ttm_range_man_init(&rdev->mman.bdev, TTM_PL_TT,
				  true, rdev->mc.gtt_size >> PAGE_SHIFT);
}

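/*
 * Choose placements for a BO that is about to be evicted.  BOs that are
 * not radeon BOs simply go to system memory; radeon BOs are steered to
 * GTT or the CPU domain depending on where they currently live.
 */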
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static const struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].mem_type == TTM_PL_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

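/* Userptr BOs must never be mapped through the DRM file, hence -EPERM. */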
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
					  filp->private_data);
}

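/*
 * Move a BO by blitting it with the copy ring.  The old and new
 * locations are translated into GPU addresses before radeon_copy() is
 * scheduled, and the resulting fence is handed to TTM for cleanup.
 */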
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict,
			struct ttm_resource *new_mem,
			struct ttm_resource *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

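/*
 * Main move callback.  Trivial transitions (unpopulated system BOs,
 * system -> TT, TT -> system) are handled without copying; direct
 * system <-> VRAM moves are split into two hops via GTT by returning
 * -EMULTIHOP; everything else is blitted, with a CPU memcpy fallback.
 */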
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
			  struct ttm_operation_ctx *ctx,
			  struct ttm_resource *new_mem,
			  struct ttm_place *hop)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_resource *old_mem = &bo->mem;
	int r;

	if (new_mem->mem_type == TTM_PL_TT) {
		r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
		if (r)
			return r;
	}

	r = ttm_bo_wait_ctx(bo, ctx);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}
	if (old_mem->mem_type == TTM_PL_SYSTEM &&
	    new_mem->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_mem);
		goto out;
	}

	if (old_mem->mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_mem);
		goto out;
	}
	if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
	    rdev->asic->copy.copy != NULL) {
		if ((old_mem->mem_type == TTM_PL_SYSTEM &&
		     new_mem->mem_type == TTM_PL_VRAM) ||
		    (old_mem->mem_type == TTM_PL_VRAM &&
		     new_mem->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}

		r = radeon_move_blit(bo, evict, new_mem, old_mem);
	} else {
		r = -ENODEV;
	}

	if (r) {
		r = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (r)
			return r;
	}

out:
	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	radeon_bo_move_notify(bo, evict, new_mem);
	return 0;
}

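/*
 * Tell TTM how to map a resource for CPU access: system memory needs no
 * setup, AGP-backed GTT is mapped through the AGP aperture, and VRAM is
 * mapped write-combined through the PCI aperture (visible part only).
 */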
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = (mem->start << PAGE_SHIFT) +
				rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
			mem->bus.caching = ttm_write_combined;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.offset += rdev->mc.aper_base;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
		if (!mem->bus.addr)
			return -ENOMEM;

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_tt		ttm;
	u64			offset;

	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	bool			bound;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
				   pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = dma_map_sgtable(rdev->dev, ttm->sg, direction, 0);
	if (r)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned);
	return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sgtable(rdev->dev, ttm->sg, direction, 0);

	for_each_sgtable_page(ttm->sg, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	return (gtt->bound);
}

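/*
 * Bind the TT pages into the GART.  Userptr pages are pinned first and
 * mapped without GART write access; cached TTs get the snooped page flag.
 */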
static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
				   struct ttm_tt *ttm,
				   struct ttm_resource *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->bound)
		return 0;

	if (gtt->userptr) {
		radeon_ttm_tt_pin_userptr(bdev, ttm);
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching == ttm_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %u pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (!gtt->bound)
		return;

	radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(bdev, ttm);
	gtt->bound = false;
}

static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_ttm_backend_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);

	ttm_tt_fini(&gtt->ttm);
	kfree(gtt);
}

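/*
 * Allocate the backing ttm_tt.  AGP systems use the generic AGP TT;
 * otherwise a radeon_ttm_tt is created with the caching mode derived
 * from the BO's RADEON_GEM_GTT_* flags.
 */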
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
{
	struct radeon_ttm_tt *gtt;
	enum ttm_caching caching;
	struct radeon_bo *rbo;
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
					 page_flags);
	}
#endif
	rbo = container_of(bo, struct radeon_bo, tbo);

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}

	if (rbo->flags & RADEON_GEM_GTT_UC)
		caching = ttm_uncached;
	else if (rbo->flags & RADEON_GEM_GTT_WC)
		caching = ttm_write_combined;
	else
		caching = ttm_cached;

	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
						  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return NULL;
#endif

	if (!ttm)
		return NULL;
	return container_of(ttm, struct radeon_ttm_tt, ttm);
}

static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		return 0;
	}

	return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
}

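/* Record the userptr address, owning mm and flags on the TT for later binding. */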
int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
			      struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_is_bound(ttm);
#endif
	return radeon_ttm_backend_is_bound(ttm);
}

static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
			      struct ttm_tt *ttm,
			      struct ttm_resource *bo_mem)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);
#endif

	if (!bo_mem)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP)
		return ttm_agp_bind(ttm, bo_mem);
#endif

	return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}

static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
				 struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		return;
	}
#endif
	radeon_ttm_backend_unbind(bdev, ttm);
}

static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_unbind(ttm);
		ttm_tt_destroy_common(bdev, ttm);
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	radeon_ttm_backend_destroy(bdev, ttm);
}

bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
			       struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static void
radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	radeon_bo_move_notify(bo, false, NULL);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.ttm_tt_destroy = &radeon_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.delete_mem_notify = &radeon_bo_delete_mem_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};

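/*
 * Create the TTM device and the VRAM/GTT managers, pin a 256KB stolen
 * VGA buffer in VRAM and register the debugfs files.
 */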
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	/* No other users of this address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
			       rdev->ddev->anon_inode->i_mapping,
			       rdev->ddev->vma_offset_manager,
			       rdev->need_swiotlb,
			       dma_addressing_limited(&rdev->pdev->dev));
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;

	r = radeon_ttm_init_vram(rdev);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stolen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stolen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stolen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));

	r = radeon_ttm_init_gtt(rdev);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stolen_vga_memory) {
		r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stolen_vga_memory);
			radeon_bo_unreserve(rdev->stolen_vga_memory);
		}
		radeon_bo_unref(&rdev->stolen_vga_memory);
	}
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_resource_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

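/*
 * Page fault handler; pm.mclk_lock is held for read across the fault so
 * the VRAM access does not race with memory reclocking.
 */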
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static struct vm_operations_struct radeon_ttm_vm_ops = {
	.fault = radeon_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int r;
	struct drm_file *file_priv = filp->private_data;
	struct radeon_device *rdev = file_priv->minor->dev->dev_private;

	if (rdev == NULL)
		return -EINVAL;

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0))
		return r;

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_resource_manager *man = ttm_manager_type(&rdev->mman.bdev, ttm_pl);
	struct drm_printer p = drm_seq_file_printer(m);

	man->func->debug(man, &p);
	return 0;
}

static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
};

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *root = minor->debugfs_root;

	rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
					      root, rdev,
					      &radeon_ttm_vram_fops);

	rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
					     root, rdev, &radeon_ttm_gtt_fops);

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}