/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>
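
/* TTM destroy callback, invoked once the last reference to the underlying
 * ttm_buffer_object is dropped: evict any backing surface, tear down the
 * fence, unlink the bo from the device's GEM object list and free it.
 */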
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct qxl_bo *bo;
        struct qxl_device *qdev;

        bo = container_of(tbo, struct qxl_bo, tbo);
        qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

        qxl_surface_evict(qdev, bo, false);
        qxl_fence_fini(&bo->fence);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}
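
/* A ttm_buffer_object belongs to this driver iff its destroy callback
 * is ours.
 */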
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &qxl_ttm_bo_destroy)
                return true;
        return false;
}
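
/* Translate a QXL GEM domain into a TTM placement list.  A pinned bo
 * additionally gets TTM_PL_FLAG_NO_EVICT; if no domain matched, fall back
 * to cached system memory so validation can always succeed.
 */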
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
        u32 c = 0;
        u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;

        qbo->placement.fpfn = 0;
        qbo->placement.lpfn = 0;
        qbo->placement.placement = qbo->placements;
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM)
                qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
        if (domain == QXL_GEM_DOMAIN_SURFACE)
                qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
        if (domain == QXL_GEM_DOMAIN_CPU)
                qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
        if (!c)
                qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
}
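
/* Allocate a qxl_bo of @size bytes (rounded up to a page), initialize its
 * GEM base object and hand it to TTM.  On failure everything allocated so
 * far is released and an errno is returned.
 */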
int qxl_bo_create(struct qxl_device *qdev,
                  unsigned long size, bool kernel, bool pinned, u32 domain,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
{
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;

        if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
                qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
        if (kernel)
                type = ttm_bo_type_kernel;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;
        bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        size = roundup(size, PAGE_SIZE);
        r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->type = domain;
        bo->pin_count = pinned ? 1 : 0;
        bo->surface_id = 0;
        qxl_fence_init(qdev, &bo->fence);
        INIT_LIST_HEAD(&bo->list);
        if (surf)
                bo->surf = *surf;

        qxl_ttm_placement_from_domain(bo, domain, pinned);

        r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, 0, !kernel, NULL, size,
                        NULL, &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                return r;
        }
        *bo_ptr = bo;
        return 0;
}
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;
        return 0;
}
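
/* Map a single page of the bo with an atomic (non-sleeping) mapping when
 * it lives in VRAM or surface memory; otherwise fall back to the regular
 * kernel mapping.  The returned pointer must be released with
 * qxl_bo_kunmap_atomic_page().
 */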
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
{
        struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        void *rptr;
        int ret;
        struct io_mapping *map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
                map = qdev->surface_mapping;
        else
                goto fallback;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
        ttm_mem_io_unlock(man);

        return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
        if (bo->kptr) {
                rptr = bo->kptr + (page_offset * PAGE_SIZE);
                return rptr;
        }

        ret = qxl_bo_kmap(bo, &rptr);
        if (ret)
                return NULL;

        rptr += page_offset * PAGE_SIZE;
        return rptr;
}
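
/* Drop the persistent kernel mapping established by qxl_bo_kmap(). */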
void qxl_bo_kunmap(struct qxl_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
}
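
/* Undo qxl_bo_kmap_atomic_page(), mirroring its VRAM/surface vs.
 * fallback-mapping split.
 */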
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
{
        struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        struct io_mapping *map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
                map = qdev->surface_mapping;
        else
                goto fallback;

        io_mapping_unmap_atomic(pmap);

        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
        ttm_mem_io_unlock(man);
        return;
fallback:
        qxl_bo_kunmap(bo);
}
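
/* Drop a reference, clearing the caller's pointer once the last
 * reference is gone.
 */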
void qxl_bo_unref(struct qxl_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;
        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}
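
/* Take an extra reference on the underlying TTM object. */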
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
        ttm_bo_reference(&bo->tbo);
        return bo;
}
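
/* Pin @bo into @domain so it cannot be evicted, optionally returning its
 * GPU offset.  An already-pinned bo just gets its pin_count bumped.
 */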
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
        int r;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = qxl_bo_gpu_offset(bo);
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, domain, true);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = qxl_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(qdev->dev, "%p pin failed\n", bo);
        return r;
}
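
/* Drop one pin reference; once the count hits zero, clear NO_EVICT from
 * every placement and revalidate so TTM may move the bo again.
 */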
int qxl_bo_unpin(struct qxl_bo *bo)
{
        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
        int r, i;

        if (!bo->pin_count) {
                dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}
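
/* Teardown helper: forcibly free any GEM objects userspace leaked.
 * Finding the list non-empty here indicates a bug, hence the dev_err()
 * noise.
 */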
void qxl_bo_force_delete(struct qxl_device *qdev)
{
        struct qxl_bo *bo, *n;

        if (list_empty(&qdev->gem.objects))
                return;
        dev_err(qdev->dev, "Userspace still has active objects !\n");
        list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
                mutex_lock(&qdev->ddev->struct_mutex);
                dev_err(qdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&qdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&qdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&qdev->ddev->struct_mutex);
        }
}
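
/* Device-level setup/teardown: bo management is delegated to TTM. */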
int qxl_bo_init(struct qxl_device *qdev)
{
        return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
        qxl_ttm_fini(qdev);
}
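
/* Surface bos are created without a surface id; allocate the id and the
 * hardware surface on first use.
 */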
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
        int ret;

        if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
                /* allocate a surface id for this surface now */
                ret = qxl_surface_id_alloc(qdev, bo);
                if (ret)
                        return ret;

                ret = qxl_hw_surface_alloc(qdev, bo, NULL);
                if (ret)
                        return ret;
        }
        return 0;
}
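
/* Evict every buffer object from surface memory (TTM_PL_PRIV0) or VRAM,
 * respectively, via TTM.
 */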
int qxl_surf_evict(struct qxl_device *qdev)
{
        return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
        return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}