// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
static const struct ttm_place vram_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_VRAM,
	.flags = 0
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_SYSTEM,
	.flags = 0
};

static const struct ttm_place gmr_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_GMR,
	.flags = 0
};

static const struct ttm_place mob_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = VMW_PL_MOB,
	.flags = 0
};
struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};
static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}
};

static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}
};
struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
static const struct ttm_place evictable_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_VRAM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};

static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_GMR,
		.flags = 0
	}, {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = VMW_PL_MOB,
		.flags = 0
	}
};
struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
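
/*
 * Usage sketch (illustrative, not part of this file): callers migrate a
 * buffer object into one of the placements above by handing the table to
 * ttm_bo_validate(), e.g.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret = ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 *
 * TTM tries the .placement entries in order and falls back to the
 * .busy_placement list when the preferred placements cannot be satisfied.
 */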
/*
 * The vmwgfx TTM backend. Tracks the backing pages of a buffer object
 * together with the device (GMR/MOB) binding state and, depending on the
 * DMA mapping mode, a scatter-gather table for the DMA mappings.
 */
struct vmw_ttm_tt {
	struct ttm_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
	bool bound;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}
/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}
/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset at which to start the iteration
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
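
/*
 * Usage sketch: following the __sg_page_iter_start convention, the iterator
 * must be advanced once before the first page or address is valid, so a
 * caller (see vmw_ttm_map_dma() below) typically walks all device addresses
 * of a vmw_sg_table like this:
 *
 *	struct vmw_piter iter;
 *	dma_addr_t addr;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		addr = vmw_piter_dma_addr(&iter);
 */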
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0);
}
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;
	struct scatterlist *sg;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
				vsgt->num_pages, 0,
				(unsigned long) vsgt->num_pages << PAGE_SHIFT,
				dma_get_max_seg_size(dev_priv->dev->dev),
				NULL, 0, GFP_KERNEL);
		if (IS_ERR(sg)) {
			ret = PTR_ERR(sg);
			goto out_sg_alloc_fail;
		}

		if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.orig_nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	/* Count the number of contiguous DMA regions in the mapping. */
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);

	return &vmw_tt->vsgt;
}
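
/*
 * Usage sketch (hypothetical caller): code that needs to program per-page
 * device addresses for a reserved or pinned buffer object can combine
 * vmw_bo_sg_table() with the piter helpers above:
 *
 *	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		write_pte(vmw_piter_dma_addr(&iter));
 *
 * where write_pte() stands in for whatever per-page setup the caller does,
 * e.g. filling GMR descriptors or MOB page tables.
 */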
static int vmw_ttm_bind(struct ttm_bo_device *bdev,
			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
	int ret = 0;

	if (!bo_mem)
		return -EINVAL;

	if (vmw_be->bound)
		return 0;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				   ttm->num_pages, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				   &vmw_be->vsgt, ttm->num_pages,
				   vmw_be->gmr_id);
		break;
	default:
		BUG();
	}
	vmw_be->bound = true;
	return ret;
}
static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
			   struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	if (!vmw_be->bound)
		return;

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);
	vmw_be->bound = false;
}
static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm);

	vmw_ttm_unbind(bdev, ttm);
	ttm_tt_destroy_common(bdev, ttm);
	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
static int vmw_ttm_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	/* TODO: maybe completely drop this ? */
	if (ttm_tt_is_populated(ttm))
		return 0;

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}
static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	ttm_pool_free(&bdev->pool, ttm);
}
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				      ttm_cached);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
				  ttm_cached);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}
static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = (mem->start << PAGE_SHIFT) +
			dev_priv->vram_start;
		mem->bus.is_iomem = true;
		mem->bus.caching = ttm_cached;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Unused.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_resource *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}
static int vmw_move(struct ttm_buffer_object *bo,
		    bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop)
{
	struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
	struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
	int ret;

	if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
		if (ret)
			return ret;
	}

	vmw_move_notify(bo, evict, new_mem);

	if (old_man->use_tt && new_man->use_tt) {
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			ttm_bo_assign_mem(bo, new_mem);
			return 0;
		}
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto fail;

		vmw_ttm_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->mem);
		ttm_bo_assign_mem(bo, new_mem);
		return 0;
	} else {
		ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	/*
	 * Undo the move notification: temporarily swap the old resource back
	 * into place so the subsystems are notified with the memory the
	 * buffer object actually ends up in.
	 */
	swap(*new_mem, bo->mem);
	vmw_move_notify(bo, false, new_mem);
	swap(*new_mem, bo->mem);
	return ret;
}
static void
vmw_delete_mem_notify(struct ttm_buffer_object *bo)
{
	vmw_move_notify(bo, false, NULL);
}
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.ttm_tt_destroy = &vmw_ttm_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = vmw_move,
	.verify_access = vmw_verify_access,
	.delete_mem_notify = vmw_delete_mem_notify,
	.swap_notify = vmw_swap_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
			       unsigned long bo_size,
			       struct ttm_buffer_object **bo_p)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_bo_create_kernel(dev_priv, bo_size,
				   &vmw_sys_placement,
				   &bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
	if (likely(ret == 0)) {
		struct vmw_ttm_tt *vmw_tt =
			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
		ret = vmw_ttm_map_dma(vmw_tt);
	}

	ttm_bo_unreserve(bo);

	if (likely(ret == 0))
		*bo_p = bo;

	return ret;
}