// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
static const struct ttm_place vram_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
	.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
	.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
	.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};
struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};
static const struct ttm_place vram_gmr_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}
};
static const struct ttm_place gmr_vram_placement_flags[] = {
	{
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}
};
struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};
static const struct ttm_place vram_gmr_ne_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
			 TTM_PL_FLAG_NO_EVICT
	}
};
struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};
struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};
struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
struct ttm_placement vmw_sys_ne_placement = {
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};
static const struct ttm_place evictable_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};
static const struct ttm_place nonfixed_placement_flags[] = {
	{
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
	}, {
		.flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
	}
};
struct ttm_placement vmw_evictable_placement = {
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};
struct ttm_placement vmw_mob_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};
struct ttm_placement vmw_mob_ne_placement = {
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_ne_placement_flags,
	.busy_placement = &mob_ne_placement_flags
};
struct ttm_placement vmw_nonfixed_placement = {
	.num_placement = 3,
	.placement = nonfixed_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};
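
/*
 * The ttm_placement tables above are the memory-domain preference lists
 * this driver hands to TTM when buffer objects are created or validated
 * (e.g. via ttm_bo_validate()). "placement" lists the preferred domains,
 * "busy_placement" the fallbacks used under memory pressure, and the
 * "_ne_" variants additionally set TTM_PL_FLAG_NO_EVICT for buffers that
 * must stay pinned.
 */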
/*
 * struct vmw_ttm_tt - TTM backing store for a vmwgfx buffer object,
 * including the DMA mapping state needed to make its pages visible to
 * the device.
 */
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	bool ret = __vmw_piter_non_sg_next(viter);

	return __sg_page_iter_dma_next(&viter->iter) && ret;
}
/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}
/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}
/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	viter->page = &__vmw_piter_non_sg_page;
	viter->pages = vsgt->pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
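
/*
 * Typical use of the iterator (a minimal sketch mirroring the loop in
 * vmw_ttm_map_dma() below; the surrounding caller is hypothetical):
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 *
 * Note that vmw_piter_start() alone does not yield a valid page; the
 * first vmw_piter_next() call does.
 */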
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}
/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}
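
/*
 * Sketch of the pairing implied by the comment above (the caller shown is
 * hypothetical; in this file the pairing is handled by vmw_ttm_map_dma()
 * and vmw_ttm_unmap_dma()):
 *
 *	ret = vmw_ttm_map_for_dma(vmw_tt);  // device may now DMA the pages
 *	...                                 // no CPU writes to the pages here
 *	vmw_ttm_unmap_from_dma(vmw_tt);     // CPU writes are legal again
 */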
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = __sg_alloc_table_from_pages
			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
			 dma_get_max_seg_size(dev_priv->dev->dev),
			 GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
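
/*
 * Note on the num_regions count above: DMA-contiguous pages are folded
 * into a single region, so a fully contiguous N-page mapping ends up with
 * num_regions == 1, while a completely scattered one ends up with
 * num_regions == N.
 */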
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}
/**
 * vmw_bo_unmap_dma - Tear down any device DMA mappings for buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}
/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
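
/*
 * A minimal sketch of how a caller combines the helpers above (the caller
 * shown is hypothetical; the reserve/pin requirement follows from the
 * comments on vmw_bo_map_dma() and vmw_bo_sg_table()):
 *
 *	ret = vmw_bo_map_dma(bo);	// bo already reserved or pinned
 *	if (ret == 0) {
 *		const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
 *		struct vmw_piter iter;
 *
 *		for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *			use(vmw_piter_dma_addr(&iter)); // hypothetical consumer
 *	}
 */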
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}
static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
				       ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}
static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 * one slot per bo. There is an upper limit of the number of
		 * slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction (unused here).
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	(void) ttm_bo_wait(bo, false, false);
}
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};