/* Hisilicon Hibmc SoC drm driver
 *
 * Based on the bochs drm driver.
 *
 * Copyright (c) 2016 Huawei Limited.
 *
 * Author:
 *	Rongrong Zou <zourongrong@huawei.com>
 *	Rongrong Zou <zourongrong@gmail.com>
 *	Jianhua Li <lijianhua@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <drm/drm_atomic_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

#include "hibmc_drm_drv.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static inline struct hibmc_drm_private *
hibmc_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct hibmc_drm_private, bdev);
}
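
/*
 * TTM global state (memory accounting and the BO subsystem) is shared
 * across drivers; the helpers below take and drop references on the
 * DRM_GLOBAL_TTM_MEM and DRM_GLOBAL_TTM_BO items for this device.
 */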
static int
hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
{
	int ret;

	hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
	hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
	hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
	hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
	ret = drm_global_item_ref(&hibmc->mem_global_ref);
	if (ret) {
		DRM_ERROR("could not get ref on ttm global: %d\n", ret);
		return ret;
	}

	hibmc->bo_global_ref.mem_glob =
		hibmc->mem_global_ref.object;
	hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
	hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
	hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
	hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
	ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
	if (ret) {
		DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
		drm_global_item_unref(&hibmc->mem_global_ref);
		return ret;
	}
	return 0;
}

static void
hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
{
	drm_global_item_unref(&hibmc->bo_global_ref.ref);
	drm_global_item_unref(&hibmc->mem_global_ref);
	hibmc->mem_global_ref.release = NULL;
}

static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &hibmc_bo_ttm_destroy;
}

static int
hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		       struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("unsupported memory type %u\n", type);
		return -EINVAL;
	}
	return 0;
}
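
/*
 * Build the TTM placement list for a BO from the requested domains.
 * VRAM placements are write-combined; if no domain is requested the
 * BO falls back to cacheable system memory.
 */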
void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
{
	u32 count = 0;
	u32 i;

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[count++].flags = TTM_PL_FLAG_WC |
			TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[count++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	if (!count)
		bo->placements[count++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = count;
	bo->placement.num_busy_placement = count;
	for (i = 0; i < count; i++) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
}

static void
hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct hibmc_bo *hibmcbo = hibmc_bo(bo);

	if (!hibmc_ttm_bo_is_hibmc_bo(bo))
		return;

	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
	*pl = hibmcbo->placement;
}

static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
				  struct file *filp)
{
	struct hibmc_bo *hibmcbo = hibmc_bo(bo);

	return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
					  filp->private_data);
}

static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func hibmc_tt_backend_func = {
	.destroy = &hibmc_ttm_backend_destroy,
};

static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
					  unsigned long size,
					  u32 page_flags,
					  struct page *dummy_read_page)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt) {
		DRM_ERROR("failed to allocate ttm_tt\n");
		return NULL;
	}
	tt->func = &hibmc_tt_backend_func;
	ret = ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page);
	if (ret) {
		DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int hibmc_ttm_tt_populate(struct ttm_tt *ttm,
				 struct ttm_operation_ctx *ctx)
{
	return ttm_pool_populate(ttm, ctx);
}

static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver hibmc_bo_driver = {
	.ttm_tt_create		= hibmc_ttm_tt_create,
	.ttm_tt_populate	= hibmc_ttm_tt_populate,
	.ttm_tt_unpopulate	= hibmc_ttm_tt_unpopulate,
	.init_mem_type		= hibmc_bo_init_mem_type,
	.evict_flags		= hibmc_bo_evict_flags,
	.move			= NULL,
	.verify_access		= hibmc_bo_verify_access,
	.io_mem_reserve		= &hibmc_ttm_io_mem_reserve,
	.io_mem_free		= NULL,
};
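
/*
 * Memory-manager bring-up: take the TTM global references, initialize
 * the ttm_bo_device, then register a VRAM manager sized to the
 * device framebuffer memory (hibmc->fb_size).
 */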
int hibmc_mm_init(struct hibmc_drm_private *hibmc)
{
	int ret;
	struct drm_device *dev = hibmc->dev;
	struct ttm_bo_device *bdev = &hibmc->bdev;

	ret = hibmc_ttm_global_init(hibmc);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&hibmc->bdev,
				 hibmc->bo_global_ref.ref.object,
				 &hibmc_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		hibmc_ttm_global_release(hibmc);
		DRM_ERROR("error initializing bo driver: %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     hibmc->fb_size >> PAGE_SHIFT);
	if (ret) {
		hibmc_ttm_global_release(hibmc);
		DRM_ERROR("failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	hibmc->mm_inited = true;
	return 0;
}

void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
{
	if (!hibmc->mm_inited)
		return;

	ttm_bo_device_release(&hibmc->bdev);
	hibmc_ttm_global_release(hibmc);
	hibmc->mm_inited = false;
}

static void hibmc_bo_unref(struct hibmc_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	*bo = NULL;
}

int hibmc_bo_create(struct drm_device *dev, int size, int align,
		    u32 flags, struct hibmc_bo **phibmcbo)
{
	struct hibmc_drm_private *hibmc = dev->dev_private;
	struct hibmc_bo *hibmcbo;
	size_t acc_size;
	int ret;

	hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
	if (!hibmcbo) {
		DRM_ERROR("failed to allocate hibmcbo\n");
		return -ENOMEM;
	}
	ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
	if (ret) {
		DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
		kfree(hibmcbo);
		return ret;
	}

	hibmcbo->bo.bdev = &hibmc->bdev;

	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
				       sizeof(struct hibmc_bo));

	ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
			  ttm_bo_type_device, &hibmcbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, NULL, hibmc_bo_ttm_destroy);
	if (ret) {
		hibmc_bo_unref(&hibmcbo);
		DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
		return ret;
	}

	*phibmcbo = hibmcbo;
	return 0;
}
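
/*
 * Pinning is reference counted: the first pin validates the BO into
 * the requested placement with TTM_PL_FLAG_NO_EVICT set, and the last
 * unpin clears the flag and revalidates so the BO can be evicted again.
 */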
int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = bo->bo.offset;
		return 0;
	}

	hibmc_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = bo->bo.offset;
	return 0;
}

int hibmc_bo_unpin(struct hibmc_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
	if (ret) {
		DRM_ERROR("validate failed for unpin: %d\n", ret);
		return ret;
	}

	return 0;
}

int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct hibmc_drm_private *hibmc;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	hibmc = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &hibmc->bdev);
}
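
/*
 * GEM helpers: hibmc_gem_create page-aligns the requested size and
 * backs the object with a TTM BO; hibmc_dumb_create additionally
 * computes a 16-byte-aligned pitch for dumb framebuffers.
 */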
int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
		     struct drm_gem_object **obj)
{
	struct hibmc_bo *hibmcbo;
	int ret;

	*obj = NULL;

	size = PAGE_ALIGN(size);
	if (size == 0) {
		DRM_ERROR("error: zero size\n");
		return -EINVAL;
	}

	ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object: %d\n", ret);
		return ret;
	}
	*obj = &hibmcbo->gem;
	return 0;
}

int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	u32 handle;
	int ret;

	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
	args->size = args->pitch * args->height;

	ret = hibmc_gem_create(dev, args->size, false,
			       &gobj);
	if (ret) {
		DRM_ERROR("failed to create GEM object: %d\n", ret);
		return ret;
	}

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_put_unlocked(gobj);
	if (ret) {
		DRM_ERROR("failed to unreference GEM object: %d\n", ret);
		return ret;
	}

	args->handle = handle;
	return 0;
}

void hibmc_gem_free_object(struct drm_gem_object *obj)
{
	struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);

	hibmc_bo_unref(&hibmcbo);
}

static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}

int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
			   u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	struct hibmc_bo *bo;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = gem_to_hibmc_bo(obj);
	*offset = hibmc_bo_mmap_offset(bo);

	drm_gem_object_put_unlocked(obj);
	return 0;
}
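
/*
 * Framebuffer handling: hibmc_framebuffer wraps a drm_framebuffer plus
 * its backing GEM object; the mode-config funcs at the end of the file
 * use the atomic helpers for check/commit.
 */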
static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);

	drm_gem_object_put_unlocked(hibmc_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(hibmc_fb);
}

static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
	.destroy = hibmc_user_framebuffer_destroy,
};

struct hibmc_framebuffer *
hibmc_framebuffer_init(struct drm_device *dev,
		       const struct drm_mode_fb_cmd2 *mode_cmd,
		       struct drm_gem_object *obj)
{
	struct hibmc_framebuffer *hibmc_fb;
	int ret;

	hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
	if (!hibmc_fb) {
		DRM_ERROR("failed to allocate hibmc_fb\n");
		return ERR_PTR(-ENOMEM);
	}

	drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd);
	hibmc_fb->obj = obj;
	ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
	if (ret) {
		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
		kfree(hibmc_fb);
		return ERR_PTR(ret);
	}

	return hibmc_fb;
}

static struct drm_framebuffer *
hibmc_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct hibmc_framebuffer *hibmc_fb;

	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
			 mode_cmd->width, mode_cmd->height,
			 (mode_cmd->pixel_format) & 0xff,
			 (mode_cmd->pixel_format >> 8) & 0xff,
			 (mode_cmd->pixel_format >> 16) & 0xff,
			 (mode_cmd->pixel_format >> 24) & 0xff);

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
	if (IS_ERR(hibmc_fb)) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR((long)hibmc_fb);
	}
	return &hibmc_fb->fb;
}

const struct drm_mode_config_funcs hibmc_mode_funcs = {
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = hibmc_user_framebuffer_create,
};