/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/qxl_drm.h>
#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/delay.h>

static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
        struct qxl_mman *mman;
        struct qxl_device *qdev;

        mman = container_of(bdev, struct qxl_mman, bdev);
        qdev = container_of(mman, struct qxl_device, mman);
        return qdev;
}

static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

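/*
 * On first mmap we copy TTM's vm_ops, swap in our own fault handler, and
 * install the copy on the VMA (see qxl_mmap() below).  The wrapper lets us
 * bail out with VM_FAULT_NOPAGE when the VMA has no buffer object attached
 * before handing the fault to TTM's original handler.
 */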
static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        vm_fault_t ret;

        bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
        if (bo == NULL)
                return VM_FAULT_NOPAGE;
        ret = ttm_vm_ops->fault(vmf);
        return ret;
}

int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct qxl_device *qdev;
        int r;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        qdev = file_priv->minor->dev->dev_private;
        if (qdev == NULL) {
                DRM_ERROR(
                 "filp->private_data->minor->dev->dev_private == NULL\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
                         filp->private_data, vma->vm_pgoff);

        r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
        if (unlikely(r != 0))
                return r;
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                qxl_ttm_vm_ops = *ttm_vm_ops;
                qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
        }
        vma->vm_ops = &qxl_ttm_vm_ops;
        return 0;
}

static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

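/*
 * The VRAM and PRIV heaps are backed by qxl memslots.  The device forms
 * GPU addresses by placing a slot's id in the topmost address bits, so
 * shifting the memory type by 64 - (slot_gen_bits + slot_id_bits + 8)
 * yields a per-slot base offset that cannot collide between heaps.
 */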
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man)
{
        struct qxl_device *qdev = qxl_get_qdev(bdev);
        unsigned int gpu_offset_shift =
                64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits + 8);
        struct qxl_memslot *slot;

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
        case TTM_PL_PRIV:
                /* "On-card" video ram */
                slot = (type == TTM_PL_VRAM) ?
                        &qdev->main_slot : &qdev->surfaces_slot;
                slot->gpu_offset = (uint64_t)type << gpu_offset_shift;
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = slot->gpu_offset;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
                return -EINVAL;
        }
        return 0;
}

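/*
 * Eviction placement: buffer objects that are not qxl BOs are pushed to
 * cacheable system memory, while qxl BOs are re-placed in the CPU domain
 * via qxl_ttm_placement_from_domain() so their contents stay reachable
 * after eviction.
 */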
static void qxl_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        struct qxl_bo *qbo;
        static const struct ttm_place placements = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
        };

        if (!qxl_ttm_bo_is_qxl_bo(bo)) {
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        qbo = to_qxl_bo(bo);
        qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
        *placement = qbo->placement;
}

static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct qxl_bo *qbo = to_qxl_bo(bo);

        return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
                                          filp->private_data);
}

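/*
 * io_mem_reserve translates a TTM placement into a physical aperture for
 * CPU mapping: VRAM placements map at vram_base, surface placements
 * (TTM_PL_PRIV) at surfaceram_base, each plus the BO's page offset.
 * System memory needs no aperture, so it is left with is_iomem = false.
 */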
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct qxl_device *qdev = qxl_get_qdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.is_iomem = true;
                mem->bus.base = qdev->vram_base;
                mem->bus.offset = mem->start << PAGE_SHIFT;
                break;
        case TTM_PL_PRIV:
                mem->bus.is_iomem = true;
                mem->bus.base = qdev->surfaceram_base;
                mem->bus.offset = mem->start << PAGE_SHIFT;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
                                struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct qxl_ttm_tt {
        struct ttm_tt           ttm;
        struct qxl_device       *qdev;
        u64                     offset;
};

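/*
 * qxl has no GTT to bind pages into, so bind/unbind are stubs: bind only
 * records the offset (and warns on an empty binding), and both return -1
 * because the operation is not implemented for this device.
 */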
static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
                                struct ttm_mem_reg *bo_mem)
{
        struct qxl_ttm_tt *gtt = (void *)ttm;

        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
        /* Not implemented */
        return -1;
}

static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
        /* Not implemented */
        return -1;
}

static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
{
        struct qxl_ttm_tt *gtt = (void *)ttm;

        ttm_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func qxl_backend_func = {
        .bind = &qxl_ttm_backend_bind,
        .unbind = &qxl_ttm_backend_unbind,
        .destroy = &qxl_ttm_backend_destroy,
};

static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct qxl_device *qdev;
        struct qxl_ttm_tt *gtt;

        qdev = qxl_get_qdev(bo->bdev);
        gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
        if (gtt == NULL)
                return NULL;
        gtt->ttm.func = &qxl_backend_func;
        gtt->qdev = qdev;
        if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm;
}

static void qxl_move_null(struct ttm_buffer_object *bo,
                          struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

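/*
 * Fast path: a move out of TTM_PL_SYSTEM with no populated ttm is pure
 * bookkeeping, so qxl_move_null() simply adopts the new placement; every
 * other move falls back to TTM's generic memcpy-based move.
 */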
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;
        int ret;

        ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (ret)
                return ret;

        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                qxl_move_null(bo, new_mem);
                return 0;
        }
        return ttm_bo_move_memcpy(bo, ctx, new_mem);
}

static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
                               bool evict,
                               struct ttm_mem_reg *new_mem)
{
        struct qxl_bo *qbo;
        struct qxl_device *qdev;

        if (!qxl_ttm_bo_is_qxl_bo(bo))
                return;
        qbo = to_qxl_bo(bo);
        qdev = qbo->gem_base.dev->dev_private;

        if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
                qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}

static struct ttm_bo_driver qxl_bo_driver = {
        .ttm_tt_create = &qxl_ttm_tt_create,
        .invalidate_caches = &qxl_invalidate_caches,
        .init_mem_type = &qxl_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &qxl_evict_flags,
        .move = &qxl_bo_move,
        .verify_access = &qxl_verify_access,
        .io_mem_reserve = &qxl_ttm_io_mem_reserve,
        .io_mem_free = &qxl_ttm_io_mem_free,
        .move_notify = &qxl_bo_move_notify,
};

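/*
 * Two fixed heaps are registered with TTM at init time: TTM_PL_VRAM
 * covers the main memslot up to the ram header (which includes the
 * framebuffer, aka surface 0), and TTM_PL_PRIV covers the dedicated
 * surfaces RAM.
 */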
int qxl_ttm_init(struct qxl_device *qdev)
{
        int r;
        int num_io_pages; /* != rom->num_io_pages, we include surface0 */

        /* No other users of the address space, so set it to 0 */
        r = ttm_bo_device_init(&qdev->mman.bdev,
                               &qxl_bo_driver,
                               qdev->ddev.anon_inode->i_mapping,
                               DRM_FILE_PAGE_OFFSET, 0);
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        /* NOTE: this includes the framebuffer (aka surface 0) */
        num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
        r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM, num_io_pages);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
                           qdev->surfaceram_size / PAGE_SIZE);
        if (r) {
                DRM_ERROR("Failed initializing Surfaces heap.\n");
                return r;
        }
        DRM_INFO("qxl: %uM of VRAM memory size\n",
                 (unsigned int)qdev->vram_size / (1024 * 1024));
        DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
                 ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
        DRM_INFO("qxl: %uM of Surface memory size\n",
                 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
        return 0;
}

void qxl_ttm_fini(struct qxl_device *qdev)
{
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
        ttm_bo_device_release(&qdev->mman.bdev);
        DRM_INFO("qxl: ttm finalized\n");
}

#define QXL_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct qxl_device *rdev = dev->dev_private;
        struct ttm_bo_global *glob = rdev->mman.bdev.glob;
        struct drm_printer p = drm_seq_file_printer(m);

        spin_lock(&glob->lru_lock);
        drm_mm_print(mm, &p);
        spin_unlock(&glob->lru_lock);
        return 0;
}
#endif

int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
        static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
        static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
        unsigned int i;

        for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
                if (i == 0)
                        sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
                else
                        sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
                qxl_mem_types_list[i].name = qxl_mem_types_names[i];
                qxl_mem_types_list[i].show = &qxl_mm_dump_table;
                qxl_mem_types_list[i].driver_features = 0;
                if (i == 0)
                        qxl_mem_types_list[i].data =
                                qdev->mman.bdev.man[TTM_PL_VRAM].priv;
                else
                        qxl_mem_types_list[i].data =
                                qdev->mman.bdev.man[TTM_PL_PRIV].priv;
        }
        return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#else
        return 0;
#endif
}