/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include "drmP.h"
#include "cirrus_drv.h"
#include <ttm/ttm_page_alloc.h>

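/* Recover the cirrus_device that embeds a given TTM bo device. */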
static inline struct cirrus_device *
cirrus_bdev(struct ttm_bo_device *bd)
{
        return container_of(bd, struct cirrus_device, ttm.bdev);
}

static int
cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

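/*
 * Grab references on the global TTM memory-accounting and BO objects.
 * The BO subsystem depends on the accounting object, so it is set up
 * second, and the accounting reference is dropped again on failure.
 */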
static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
{
        struct drm_global_reference *global_ref;
        int r;

        global_ref = &cirrus->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &cirrus_ttm_mem_global_init;
        global_ref->release = &cirrus_ttm_mem_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM memory accounting "
                          "subsystem.\n");
                return r;
        }

        cirrus->ttm.bo_global_ref.mem_glob =
                cirrus->ttm.mem_global_ref.object;
        global_ref = &cirrus->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;
        r = drm_global_item_ref(global_ref);
        if (r != 0) {
                DRM_ERROR("Failed setting up TTM BO subsystem.\n");
                drm_global_item_unref(&cirrus->ttm.mem_global_ref);
                return r;
        }
        return 0;
}

void
cirrus_ttm_global_release(struct cirrus_device *cirrus)
{
        if (cirrus->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
        drm_global_item_unref(&cirrus->ttm.mem_global_ref);
        cirrus->ttm.mem_global_ref.release = NULL;
}

static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
        struct cirrus_bo *bo;

        bo = container_of(tbo, struct cirrus_bo, bo);

        drm_gem_object_release(&bo->gem);
        kfree(bo);
}

bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &cirrus_bo_ttm_destroy)
                return true;
        return false;
}

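/*
 * Describe the memory pools TTM may place buffers in: cacheable system
 * RAM, and fixed, mappable VRAM defaulting to write-combined caching.
 */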
static int
cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                        struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->func = &ttm_bo_manager_func;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct cirrus_bo *cirrusbo = cirrus_bo(bo);

        if (!cirrus_ttm_bo_is_cirrus_bo(bo))
                return;

        cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
        *pl = cirrusbo->placement;
}

static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

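/*
 * Make a memory region CPU-accessible: system RAM needs no setup, while
 * VRAM is exposed as I/O memory relative to PCI BAR 0.
 */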
static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                     struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct cirrus_device *cirrus = cirrus_bdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int cirrus_bo_move(struct ttm_buffer_object *bo,
                          bool evict, bool interruptible,
                          bool no_wait_reserve, bool no_wait_gpu,
                          struct ttm_mem_reg *new_mem)
{
        int r;

        r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
        return r;
}

static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
{
        ttm_tt_fini(tt);
        kfree(tt);
}

static struct ttm_backend_func cirrus_tt_backend_func = {
        .destroy = &cirrus_ttm_backend_destroy,
};

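/*
 * Allocate the TTM backing-store object for a BO; the backing pages
 * themselves come from the common TTM page pool via the
 * populate/unpopulate hooks below.
 */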
struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
                                    unsigned long size, uint32_t page_flags,
                                    struct page *dummy_read_page)
{
        struct ttm_tt *tt;

        tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
        if (tt == NULL)
                return NULL;
        tt->func = &cirrus_tt_backend_func;
        if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }
        return tt;
}

static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
{
        return ttm_pool_populate(ttm);
}

static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        ttm_pool_unpopulate(ttm);
}

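/* Wire the helpers above into TTM's driver callback table. */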
struct ttm_bo_driver cirrus_bo_driver = {
        .ttm_tt_create = cirrus_ttm_tt_create,
        .ttm_tt_populate = cirrus_ttm_tt_populate,
        .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
        .init_mem_type = cirrus_bo_init_mem_type,
        .evict_flags = cirrus_bo_evict_flags,
        .move = cirrus_bo_move,
        .verify_access = cirrus_bo_verify_access,
        .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
        .io_mem_free = &cirrus_ttm_io_mem_free,
};

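/*
 * Bring up the memory manager: global TTM state, the bo device, a VRAM
 * pool sized to the detected video memory, and a write-combining MTRR
 * over the VRAM aperture.
 */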
int cirrus_mm_init(struct cirrus_device *cirrus)
{
        int ret;
        struct drm_device *dev = cirrus->dev;
        struct ttm_bo_device *bdev = &cirrus->ttm.bdev;

        ret = cirrus_ttm_global_init(cirrus);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&cirrus->ttm.bdev,
                                 cirrus->ttm.bo_global_ref.ref.object,
                                 &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
                                 true);
        if (ret) {
                DRM_ERROR("Error initialising bo driver; %d\n", ret);
                return ret;
        }

        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             cirrus->mc.vram_size >> PAGE_SHIFT);
        if (ret) {
                DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
                return ret;
        }

        cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                                       pci_resource_len(dev->pdev, 0),
                                       DRM_MTRR_WC);

        cirrus->mm_inited = true;
        return 0;
}

void cirrus_mm_fini(struct cirrus_device *cirrus)
{
        struct drm_device *dev = cirrus->dev;

        if (!cirrus->mm_inited)
                return;

        ttm_bo_device_release(&cirrus->ttm.bdev);

        cirrus_ttm_global_release(cirrus);

        if (cirrus->fb_mtrr >= 0) {
                drm_mtrr_del(cirrus->fb_mtrr,
                             pci_resource_start(dev->pdev, 0),
                             pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
                cirrus->fb_mtrr = -1;
        }
}

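/*
 * Build the placement list for the requested domains; with no domain
 * requested, fall back to cacheable system memory.
 */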
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
{
        u32 c = 0;

        bo->placement.fpfn = 0;
        bo->placement.lpfn = 0;
        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
        if (domain & TTM_PL_FLAG_VRAM)
                bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
        if (domain & TTM_PL_FLAG_SYSTEM)
                bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        bo->placement.num_placement = c;
        bo->placement.num_busy_placement = c;
}

int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{
        int ret;

        ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("reserve failed %p\n", bo);
                return ret;
        }
        return 0;
}

void cirrus_bo_unreserve(struct cirrus_bo *bo)
{
        ttm_bo_unreserve(&bo->bo);
}

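/*
 * Allocate a GEM-backed buffer object and hand it to TTM, allowing
 * placement in either VRAM or system memory.
 */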
int cirrus_bo_create(struct drm_device *dev, int size, int align,
                     uint32_t flags, struct cirrus_bo **pcirrusbo)
{
        struct cirrus_device *cirrus = dev->dev_private;
        struct cirrus_bo *cirrusbo;
        size_t acc_size;
        int ret;

        cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
        if (!cirrusbo)
                return -ENOMEM;

        ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
        if (ret) {
                kfree(cirrusbo);
                return ret;
        }

        cirrusbo->gem.driver_private = NULL;
        cirrusbo->bo.bdev = &cirrus->ttm.bdev;

        cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

        acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
                                       sizeof(struct cirrus_bo));

        ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
                          ttm_bo_type_device, &cirrusbo->placement,
                          align >> PAGE_SHIFT, 0, false, NULL, acc_size,
                          NULL, cirrus_bo_ttm_destroy);
        if (ret)
                return ret;

        *pcirrusbo = cirrusbo;
        return 0;
}

static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
{
        return bo->bo.offset;
}

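/*
 * Pin a BO into the given placement and mark it non-evictable; pinning
 * is refcounted, and the GPU offset is returned when requested.
 */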
int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
        int i, ret;

        if (bo->pin_count) {
                /* already pinned: just take another reference */
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = cirrus_bo_gpu_offset(bo);
                return 0;
        }

        cirrus_ttm_placement(bo, pl_flag);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
        if (ret)
                return ret;

        bo->pin_count = 1;
        if (gpu_addr)
                *gpu_addr = cirrus_bo_gpu_offset(bo);
        return 0;
}

int cirrus_bo_unpin(struct cirrus_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
        if (ret)
                return ret;

        return 0;
}

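/*
 * Drop a pin reference and, on the last one, force the BO back into
 * system RAM, releasing any kernel mapping first.
 */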
int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
        int i, ret;

        if (!bo->pin_count) {
                DRM_ERROR("unpin bad %p\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;

        if (bo->kmap.virtual)
                ttm_bo_kunmap(&bo->kmap);

        cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
        if (ret) {
                DRM_ERROR("pushing to system RAM failed\n");
                return ret;
        }
        return 0;
}

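/*
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM
 * maps; everything above is handled by TTM.
 */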
int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct cirrus_device *cirrus;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_mmap(filp, vma);

        file_priv = filp->private_data;
        cirrus = file_priv->minor->dev->dev_private;
        return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
}