/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include <drm/ttm/ttm_page_alloc.h>

#include "mgag200_drv.h"
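/* Recover the driver-private mga_device from an embedded ttm_bo_device. */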
static inline struct mga_device *
mgag200_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct mga_device, ttm.bdev);
}
static int
mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}
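/*
 * TTM's memory-accounting and BO globals are shared across drivers and
 * refcounted through the drm_global mechanism; take both references here
 * and drop them again in mgag200_ttm_global_release().
 */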
static int mgag200_ttm_global_init(struct mga_device *ast)
{
        struct drm_global_reference *global_ref;
        int r;

        global_ref = &ast->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &mgag200_ttm_mem_global_init;
        global_ref->release = &mgag200_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        ast->ttm.bo_global_ref.mem_glob =
                ast->ttm.mem_global_ref.object;
        global_ref = &ast->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&ast->ttm.mem_global_ref);
                return r;
        }
        return 0;
}
static void
mgag200_ttm_global_release(struct mga_device *ast)
{
        if (ast->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
        drm_global_item_unref(&ast->ttm.mem_global_ref);
        ast->ttm.mem_global_ref.release = NULL;
}
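/* TTM destroy callback: release the embedded GEM object and free the bo. */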
static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct mgag200_bo *bo;

        bo = container_of(tbo, struct mgag200_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}
static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &mgag200_bo_ttm_destroy)
                return true;
        return false;
}
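/*
 * Describe the memory types buffers may be placed in: cacheable system
 * pages and write-combined VRAM managed by the generic range manager.
 */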
static int
mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                        TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}
static void
mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct mgag200_bo *mgabo = mgag200_bo(bo);

        if (!mgag200_ttm_bo_is_mgag200_bo(bo))
                return;

        mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
        *pl = mgabo->placement;
}
static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct mgag200_bo *mgabo = mgag200_bo(bo);

        return drm_vma_node_verify_access(&mgabo->gem.vma_node,
                                          filp->private_data);
}
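/* Set up CPU access to a memory region; VRAM is reached through PCI BAR 0. */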
static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                      struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct mga_device *mdev = mgag200_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func mgag200_tt_backend_func = {
        .destroy = &mgag200_ttm_backend_destroy,
};
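/*
 * No device-private TT state is needed: allocate a bare ttm_tt, hook up
 * the destroy-only backend, and let ttm_tt_init() populate the rest.
 */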
static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_buffer_object *bo,
                                            uint32_t page_flags)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
        if (tt == NULL)
                return NULL;
        tt->func = &mgag200_tt_backend_func;
        if (ttm_tt_init(tt, bo, page_flags)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}
struct ttm_bo_driver mgag200_bo_driver = {
        .ttm_tt_create = mgag200_ttm_tt_create,
        .init_mem_type = mgag200_bo_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = mgag200_bo_evict_flags,
        .verify_access = mgag200_bo_verify_access,
        .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
        .io_mem_free = &mgag200_ttm_io_mem_free,
};
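/*
 * Bring up TTM for the device: take the global references, initialise the
 * BO device, create a VRAM range manager sized from mdev->mc.vram_size,
 * and set up a write-combining mapping of the framebuffer BAR.
 */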
int mgag200_mm_init(struct mga_device *mdev)
{
        int ret;
        struct drm_device *dev = mdev->dev;
        struct ttm_bo_device *bdev = &mdev->ttm.bdev;

        ret = mgag200_ttm_global_init(mdev);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&mdev->ttm.bdev,
                                 mdev->ttm.bo_global_ref.ref.object,
                                 &mgag200_bo_driver,
                                 dev->anon_inode->i_mapping,
                                 DRM_FILE_PAGE_OFFSET,
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
                return ret;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                return ret;
        }

        arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
                                   pci_resource_len(dev->pdev, 0));

        mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                                         pci_resource_len(dev->pdev, 0));

        return 0;
}
void mgag200_mm_fini(struct mga_device *mdev)
{
        struct drm_device *dev = mdev->dev;

        ttm_bo_device_release(&mdev->ttm.bdev);

        mgag200_ttm_global_release(mdev);

        arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
                                pci_resource_len(dev->pdev, 0));
        arch_phys_wc_del(mdev->fb_mtrr);
        mdev->fb_mtrr = 0;
}
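/*
 * Build the bo's placement list from TTM_PL_FLAG_* domain flags, falling
 * back to cacheable system memory if no domain was requested.
 */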
void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
{
        u32 c = 0;
        unsigned i;

        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
        if (domain & TTM_PL_FLAG_VRAM)
                bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;
        for (i = 0; i < c; ++i) {
                bo->placements[i].fpfn = 0;
                bo->placements[i].lpfn = 0;
        }
}
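/* Allocate a GEM-backed TTM buffer object that can live in VRAM or system RAM. */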
int mgag200_bo_create(struct drm_device *dev, int size, int align,
                      uint32_t flags, struct mgag200_bo **pmgabo)
{
        struct mga_device *mdev = dev->dev_private;
        struct mgag200_bo *mgabo;
        size_t acc_size;
        int ret;

        mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
        if (!mgabo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &mgabo->gem, size);
        if (ret) {
                kfree(mgabo);
                return ret;
        }

        mgabo->bo.bdev = &mdev->ttm.bdev;

        mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
                                       sizeof(struct mgag200_bo));

        ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
                          ttm_bo_type_device, &mgabo->placement,
                          align >> PAGE_SHIFT, false, acc_size,
                          NULL, NULL, mgag200_bo_ttm_destroy);
        if (ret)
                return ret;

        *pmgabo = mgabo;
        return 0;
}
static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
{
        return bo->bo.offset;
}
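/*
 * Pin a bo into the requested domain by marking its placements NO_EVICT and
 * revalidating it; nested pins just bump the pin count.
 */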
int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        struct ttm_operation_ctx ctx = { false, false };
        int i, ret;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = mgag200_bo_gpu_offset(bo);
                return 0;
        }

        mgag200_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret)
                return ret;

        bo->pin_count = 1;
        if (gpu_addr)
                *gpu_addr = mgag200_bo_gpu_offset(bo);
        return 0;
}
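/* Drop a pin reference; the last unpin clears NO_EVICT and revalidates. */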
int mgag200_bo_unpin(struct mgag200_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        int i;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
}
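/*
 * Drop a pin reference and, once fully unpinned, move the bo back to system
 * memory, tearing down any kernel mapping first.
 */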
int mgag200_bo_push_sysram(struct mgag200_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
        if (ret) {
                DRM_ERROR("pushing to VRAM failed\n");
                return ret;
        }
        return 0;
}
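/* mmap handler: reject offsets outside the TTM range, then hand off to ttm_bo_mmap(). */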
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct mga_device *mdev;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return -EINVAL;

        file_priv = filp->private_data;
        mdev = file_priv->minor->dev->dev_private;
        return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
}