/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/delay.h>

#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>

#include "qxl_object.h"
41 static struct qxl_device
*qxl_get_qdev(struct ttm_bo_device
*bdev
)
43 struct qxl_mman
*mman
;
44 struct qxl_device
*qdev
;
46 mman
= container_of(bdev
, struct qxl_mman
, bdev
);
47 qdev
= container_of(mman
, struct qxl_device
, mman
);
51 static int qxl_invalidate_caches(struct ttm_bo_device
*bdev
, uint32_t flags
)
56 static int qxl_init_mem_type(struct ttm_bo_device
*bdev
, uint32_t type
,
57 struct ttm_mem_type_manager
*man
)
59 struct qxl_device
*qdev
= qxl_get_qdev(bdev
);
60 unsigned int gpu_offset_shift
=
61 64 - (qdev
->rom
->slot_gen_bits
+ qdev
->rom
->slot_id_bits
+ 8);
62 struct qxl_memslot
*slot
;
67 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
;
68 man
->available_caching
= TTM_PL_MASK_CACHING
;
69 man
->default_caching
= TTM_PL_FLAG_CACHED
;
73 /* "On-card" video ram */
74 slot
= (type
== TTM_PL_VRAM
) ?
75 &qdev
->main_slot
: &qdev
->surfaces_slot
;
76 slot
->gpu_offset
= (uint64_t)type
<< gpu_offset_shift
;
77 man
->func
= &ttm_bo_manager_func
;
78 man
->gpu_offset
= slot
->gpu_offset
;
79 man
->flags
= TTM_MEMTYPE_FLAG_FIXED
|
80 TTM_MEMTYPE_FLAG_MAPPABLE
;
81 man
->available_caching
= TTM_PL_MASK_CACHING
;
82 man
->default_caching
= TTM_PL_FLAG_CACHED
;
85 DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type
);
91 static void qxl_evict_flags(struct ttm_buffer_object
*bo
,
92 struct ttm_placement
*placement
)
95 static const struct ttm_place placements
= {
98 .flags
= TTM_PL_MASK_CACHING
| TTM_PL_FLAG_SYSTEM
101 if (!qxl_ttm_bo_is_qxl_bo(bo
)) {
102 placement
->placement
= &placements
;
103 placement
->busy_placement
= &placements
;
104 placement
->num_placement
= 1;
105 placement
->num_busy_placement
= 1;
109 qxl_ttm_placement_from_domain(qbo
, QXL_GEM_DOMAIN_CPU
, false);
110 *placement
= qbo
->placement
;
113 int qxl_ttm_io_mem_reserve(struct ttm_bo_device
*bdev
,
114 struct ttm_mem_reg
*mem
)
116 struct ttm_mem_type_manager
*man
= &bdev
->man
[mem
->mem_type
];
117 struct qxl_device
*qdev
= qxl_get_qdev(bdev
);
119 mem
->bus
.addr
= NULL
;
121 mem
->bus
.size
= mem
->num_pages
<< PAGE_SHIFT
;
123 mem
->bus
.is_iomem
= false;
124 if (!(man
->flags
& TTM_MEMTYPE_FLAG_MAPPABLE
))
126 switch (mem
->mem_type
) {
131 mem
->bus
.is_iomem
= true;
132 mem
->bus
.base
= qdev
->vram_base
;
133 mem
->bus
.offset
= mem
->start
<< PAGE_SHIFT
;
136 mem
->bus
.is_iomem
= true;
137 mem
->bus
.base
= qdev
->surfaceram_base
;
138 mem
->bus
.offset
= mem
->start
<< PAGE_SHIFT
;
/*
 * TTM io_mem_free hook: nothing to release — qxl_ttm_io_mem_reserve()
 * only computes static BAR-relative addresses and allocates nothing.
 */
static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}
152 * TTM backend functions.
156 struct qxl_device
*qdev
;
160 static int qxl_ttm_backend_bind(struct ttm_tt
*ttm
,
161 struct ttm_mem_reg
*bo_mem
)
163 struct qxl_ttm_tt
*gtt
= (void *)ttm
;
165 gtt
->offset
= (unsigned long)(bo_mem
->start
<< PAGE_SHIFT
);
166 if (!ttm
->num_pages
) {
167 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
168 ttm
->num_pages
, bo_mem
, ttm
);
170 /* Not implemented */
/*
 * TTM backend unbind hook: GTT unbinding is not implemented for qxl;
 * always fails with -1 (mirrors qxl_ttm_backend_bind).
 */
static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return -1;
}
180 static void qxl_ttm_backend_destroy(struct ttm_tt
*ttm
)
182 struct qxl_ttm_tt
*gtt
= (void *)ttm
;
184 ttm_tt_fini(>t
->ttm
);
188 static struct ttm_backend_func qxl_backend_func
= {
189 .bind
= &qxl_ttm_backend_bind
,
190 .unbind
= &qxl_ttm_backend_unbind
,
191 .destroy
= &qxl_ttm_backend_destroy
,
194 static struct ttm_tt
*qxl_ttm_tt_create(struct ttm_buffer_object
*bo
,
197 struct qxl_device
*qdev
;
198 struct qxl_ttm_tt
*gtt
;
200 qdev
= qxl_get_qdev(bo
->bdev
);
201 gtt
= kzalloc(sizeof(struct qxl_ttm_tt
), GFP_KERNEL
);
204 gtt
->ttm
.func
= &qxl_backend_func
;
206 if (ttm_tt_init(>t
->ttm
, bo
, page_flags
)) {
213 static void qxl_move_null(struct ttm_buffer_object
*bo
,
214 struct ttm_mem_reg
*new_mem
)
216 struct ttm_mem_reg
*old_mem
= &bo
->mem
;
218 BUG_ON(old_mem
->mm_node
!= NULL
);
220 new_mem
->mm_node
= NULL
;
223 static int qxl_bo_move(struct ttm_buffer_object
*bo
, bool evict
,
224 struct ttm_operation_ctx
*ctx
,
225 struct ttm_mem_reg
*new_mem
)
227 struct ttm_mem_reg
*old_mem
= &bo
->mem
;
230 ret
= ttm_bo_wait(bo
, ctx
->interruptible
, ctx
->no_wait_gpu
);
234 if (old_mem
->mem_type
== TTM_PL_SYSTEM
&& bo
->ttm
== NULL
) {
235 qxl_move_null(bo
, new_mem
);
238 return ttm_bo_move_memcpy(bo
, ctx
, new_mem
);
241 static void qxl_bo_move_notify(struct ttm_buffer_object
*bo
,
243 struct ttm_mem_reg
*new_mem
)
246 struct qxl_device
*qdev
;
248 if (!qxl_ttm_bo_is_qxl_bo(bo
))
251 qdev
= qbo
->tbo
.base
.dev
->dev_private
;
253 if (bo
->mem
.mem_type
== TTM_PL_PRIV
&& qbo
->surface_id
)
254 qxl_surface_evict(qdev
, qbo
, new_mem
? true : false);
257 static struct ttm_bo_driver qxl_bo_driver
= {
258 .ttm_tt_create
= &qxl_ttm_tt_create
,
259 .invalidate_caches
= &qxl_invalidate_caches
,
260 .init_mem_type
= &qxl_init_mem_type
,
261 .eviction_valuable
= ttm_bo_eviction_valuable
,
262 .evict_flags
= &qxl_evict_flags
,
263 .move
= &qxl_bo_move
,
264 .io_mem_reserve
= &qxl_ttm_io_mem_reserve
,
265 .io_mem_free
= &qxl_ttm_io_mem_free
,
266 .move_notify
= &qxl_bo_move_notify
,
269 int qxl_ttm_init(struct qxl_device
*qdev
)
272 int num_io_pages
; /* != rom->num_io_pages, we include surface0 */
274 /* No others user of address space so set it to 0 */
275 r
= ttm_bo_device_init(&qdev
->mman
.bdev
,
277 qdev
->ddev
.anon_inode
->i_mapping
,
278 qdev
->ddev
.vma_offset_manager
,
281 DRM_ERROR("failed initializing buffer object driver(%d).\n", r
);
284 /* NOTE: this includes the framebuffer (aka surface 0) */
285 num_io_pages
= qdev
->rom
->ram_header_offset
/ PAGE_SIZE
;
286 r
= ttm_bo_init_mm(&qdev
->mman
.bdev
, TTM_PL_VRAM
,
289 DRM_ERROR("Failed initializing VRAM heap.\n");
292 r
= ttm_bo_init_mm(&qdev
->mman
.bdev
, TTM_PL_PRIV
,
293 qdev
->surfaceram_size
/ PAGE_SIZE
);
295 DRM_ERROR("Failed initializing Surfaces heap.\n");
298 DRM_INFO("qxl: %uM of VRAM memory size\n",
299 (unsigned int)qdev
->vram_size
/ (1024 * 1024));
300 DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
301 ((unsigned int)num_io_pages
* PAGE_SIZE
) / (1024 * 1024));
302 DRM_INFO("qxl: %uM of Surface memory size\n",
303 (unsigned int)qdev
->surfaceram_size
/ (1024 * 1024));
307 void qxl_ttm_fini(struct qxl_device
*qdev
)
309 ttm_bo_clean_mm(&qdev
->mman
.bdev
, TTM_PL_VRAM
);
310 ttm_bo_clean_mm(&qdev
->mman
.bdev
, TTM_PL_PRIV
);
311 ttm_bo_device_release(&qdev
->mman
.bdev
);
312 DRM_INFO("qxl: ttm finalized\n");
315 #define QXL_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs show callback: dump a memory manager's drm_mm allocation map.
 * The target drm_mm is carried in the drm_info_node's data pointer; the
 * global TTM LRU lock serializes against concurrent mm mutation.
 */
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_printer p = drm_seq_file_printer(m);

	spin_lock(&ttm_bo_glob.lru_lock);
	drm_mm_print(mm, &p);
	spin_unlock(&ttm_bo_glob.lru_lock);
	return 0;
}
#endif
/*
 * Register debugfs entries that dump the VRAM and surfaces (PRIV) memory
 * managers via qxl_mm_dump_table().  Entry 0 is the VRAM mm, entry 1 the
 * surfaces mm; the man[].priv pointer is the range manager's drm_mm.
 * Compiles to a no-op returning 0 without CONFIG_DEBUG_FS.
 */
int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
	unsigned int i;

	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
		else
			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
		qxl_mem_types_list[i].driver_features = 0;
		if (i == 0)
			qxl_mem_types_list[i].data =
				qdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			qxl_mem_types_list[i].data =
				qdev->mman.bdev.man[TTM_PL_PRIV].priv;
	}
	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#else
	return 0;
#endif
}