/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
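
/*
 * The placement flag sets and ttm_placement descriptors below tell TTM
 * where a buffer object may reside (VRAM, guest memory regions or system
 * memory), which caching mode to use, and which fallback ("busy")
 * placements to try when the preferred placement is contended.
 */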

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
	.num_placement = 3,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};
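
/*
 * Per-bo TTM backend state: the page list handed over by TTM at populate
 * time, the GMR id those pages get bound to, and a pointer back to the
 * device private structure.
 */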

struct vmw_ttm_backend {
	struct ttm_backend backend;
	struct page **pages;
	unsigned long num_pages;
	struct vmw_private *dev_priv;
	int gmr_id;
};

static int vmw_ttm_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page,
			    dma_addr_t *dma_addrs)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	vmw_be->pages = pages;
	vmw_be->num_pages = num_pages;

	return 0;
}
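
/*
 * Bind the previously populated page list to a guest memory region. The
 * memory node start offset handed back by the GMR id manager is reused
 * directly as the GMR id.
 */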

static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	vmw_be->gmr_id = bo_mem->start;

	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
			    vmw_be->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
	return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	vmw_be->pages = NULL;
	vmw_be->num_pages = 0;
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
	.populate = vmw_ttm_populate,
	.clear = vmw_ttm_clear,
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};
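
/*
 * Allocate the backend instance for a new buffer object and hook it up to
 * the backend functions above; dev_priv is recovered from the embedded
 * ttm_bo_device.
 */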

struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
	struct vmw_ttm_backend *vmw_be;

	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->backend.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

	return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
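
/*
 * Set up the memory type managers TTM will use: plain system memory,
 * "on-card" VRAM handled by TTM's range manager, and GMR ids handled by
 * the vmwgfx-specific id manager.
 */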

int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on both the
		 * number of slots and the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
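
/*
 * When TTM needs to evict one of our buffers, always send it to system
 * memory.
 */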

void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}
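
/*
 * Describe how a memory region is addressed by the CPU. Only VRAM is real
 * I/O memory, mapped relative to the device's VRAM base; system and GMR
 * memory need no bus address setup.
 */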

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */
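
/*
 * The sync_obj_* hooks below adapt TTM's opaque sync objects to the
 * driver's vmw_fence_obj API; sync_arg carries the fence flags as an
 * unsigned long.
 */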

static void *vmw_sync_obj_ref(void *sync_obj)
{
	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	unsigned long flags = (unsigned long) sync_arg;

	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      (uint32_t) flags);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			     bool lazy, bool interruptible)
{
	unsigned long flags = (unsigned long) sync_arg;

	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  (uint32_t) flags, lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}
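
/*
 * Driver callbacks handed to TTM at device init time.
 */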

struct ttm_bo_driver vmw_bo_driver = {
	.create_ttm_backend_entry = vmw_ttm_backend_init,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};