/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;

static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT;

static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
        TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static uint32_t evictable_placement_flags[] = {
        TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        return __sg_page_iter_next(&viter->iter);
}

/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
        return sg_page_iter_page(&viter->iter);
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}

/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->page = &__vmw_piter_non_sg_page;
                viter->addrs = vsgt->addrs;
                viter->pages = vsgt->pages;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                viter->page = &__vmw_piter_sg_page;
                __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}

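/*
 * Example (illustrative sketch, not part of the driver): a typical walk
 * over the device addresses of a mapped buffer using the iterator above.
 * It mirrors the region-counting loop in vmw_ttm_map_dma() below.
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		// hand @addr to the device, e.g. when building GMR or MOB
 *		// page tables
 *	}
 */
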
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                     DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}

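/*
 * Illustrative sketch (not driver code) of the ordering constraint described
 * above: between map and unmap the CPU must not write to the pages unless it
 * first syncs for CPU access, which we rely on being a NOP here.
 *
 *	vmw_ttm_map_for_dma(vmw_tt);
 *	// device may DMA to/from the pages; CPU writes are illegal until
 *	// either of:
 *	dma_sync_sg_for_cpu(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
 *			    DMA_BIDIRECTIONAL);
 *	// or:
 *	vmw_ttm_unmap_from_dma(vmw_tt);
 */
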
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
                                           true);
                if (unlikely(ret != 0))
                        return ret;

                ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
                                                vsgt->num_pages, 0,
                                                (unsigned long)
                                                vsgt->num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

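/*
 * Worked example (illustrative, not driver code): the loop above starts a
 * new region whenever the next DMA address is not the previous one plus
 * PAGE_SIZE. With a 4096-byte page size and the page address sequence
 *
 *	0x1000, 0x2000, 0x3000, 0x8000, 0x9000
 *
 * num_regions ends up as 2: one contiguous region covering
 * 0x1000..0x3fff and a second covering 0x8000..0x9fff.
 */
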
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return vmw_ttm_map_dma(vmw_tt);
}

/**
 * vmw_bo_unmap_dma - Tear down any device DMA mappings for buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_tt);
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}

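/*
 * Example (illustrative sketch, not part of the driver): how a caller that
 * holds a reservation on @bo would obtain and walk the device addresses.
 * program_device_page() is a hypothetical helper, not a real function.
 *
 *	const struct vmw_sg_table *vsgt;
 *	struct vmw_piter iter;
 *	int ret;
 *
 *	ret = vmw_bo_map_dma(bo);
 *	if (ret)
 *		return ret;
 *
 *	vsgt = vmw_bo_sg_table(bo);
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		program_device_page(vmw_piter_dma_addr(&iter));
 */
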
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);

        return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

static int vmw_ttm_populate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, false, true);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm);

        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
                                      dummy_read_page);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
                                  dummy_read_page);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;

out_no_init:
        kfree(vmw_be);
        return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                /*
                 * "Guest Memory Regions" is an aperture like feature with
                 * one slot per bo. There is an upper limit of the number of
                 * slots as well as the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
        return (void *)
                vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
        vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
        return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
        return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
                                      DRM_VMW_FENCE_FLAG_EXEC);
}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
        return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
                                  DRM_VMW_FENCE_FLAG_EXEC,
                                  lazy, interruptible,
                                  VMW_FENCE_WAIT_TIMEOUT);
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            struct ttm_mem_reg *mem)
{
        vmw_resource_move_notify(bo, mem);
}

/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        spin_lock(&bdev->fence_lock);
        ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
}

struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};