/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

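/*
 * Placement tables. TTM tries the @placement entries in order when
 * validating a buffer object and falls back to @busy_placement when it
 * has to make room under memory pressure.
 */
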
struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

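/**
 * struct vmw_ttm_backend - TTM backend state for a GMR-bound buffer.
 *
 * @backend:   Base class, embedded so container_of() can recover this struct.
 * @pages:     Page array handed to us by vmw_ttm_populate().
 * @num_pages: Number of entries in @pages.
 * @dev_priv:  Device private, needed for the GMR bind/unbind calls.
 * @gmr_id:    GMR slot this buffer occupies while bound.
 */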
struct vmw_ttm_backend {
        struct ttm_backend backend;
        struct page **pages;
        unsigned long num_pages;
        struct vmw_private *dev_priv;
        int gmr_id;
};

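/*
 * Backend callbacks: populate() caches the page array, bind() maps it
 * into the GMR whose id TTM passes in bo_mem->start, unbind() tears the
 * mapping down again, and clear()/destroy() drop the cached state.
 */
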
static int vmw_ttm_populate(struct ttm_backend *backend,
                            unsigned long num_pages, struct page **pages,
                            struct page *dummy_read_page,
                            dma_addr_t *dma_addrs)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->pages = pages;
        vmw_be->num_pages = num_pages;

        return 0;
}

static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->gmr_id = bo_mem->start;

        return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
                            vmw_be->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->pages = NULL;
        vmw_be->num_pages = 0;
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
        .populate = vmw_ttm_populate,
        .clear = vmw_ttm_clear,
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy
};

struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
        struct vmw_ttm_backend *vmw_be;

        vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->backend.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

        return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

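/*
 * Memory type initialization: the driver exposes plain system memory,
 * "on-card" VRAM managed by the generic range manager, and GMR ids
 * managed by vmw_gmrid_manager_func.
 */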
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
                /*
                 * "Guest Memory Regions" is an aperture like feature with
                 * one slot per bo. There is an upper limit of the number of
                 * slots as well as the bo size.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

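/*
 * Note: since VMW_PL_GMR space is managed by vmw_gmrid_manager_func,
 * mem->start for a GMR placement is a GMR id rather than a device
 * offset, which is why vmw_ttm_bind() above uses bo_mem->start directly
 * as the gmr_id.
 */
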
void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

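/*
 * io_mem_reserve() fills in the bus placement TTM needs to map a buffer:
 * only VRAM is I/O memory (an offset into the aperture at
 * dev_priv->vram_start); system and GMR backed buffers stay in regular
 * pages and need no bus address.
 */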
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

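/*
 * A sync object here is simply the legacy fence sequence number stored
 * in the pointer value; signaled/wait compare that sequence against the
 * device fence state.
 */
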
static void *vmw_sync_obj_ref(void *sync_obj)
{
        return sync_obj;
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        *sync_obj = NULL;
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
        struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        mutex_unlock(&dev_priv->hw_mutex);
        return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
        struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
        uint32_t sequence = (unsigned long) sync_obj;

        return vmw_fence_signaled(dev_priv, sequence);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
                             bool lazy, bool interruptible)
{
        struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
        uint32_t sequence = (unsigned long) sync_obj;

        return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}

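/*
 * TTM buffer object driver vtable wiring up the callbacks defined above.
 */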
struct ttm_bo_driver vmw_bo_driver = {
        .create_ttm_backend_entry = vmw_ttm_backend_init,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = NULL,
        .swap_notify = NULL,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};