/*
 * Hisilicon Hibmc SoC drm driver
 *
 * Based on the bochs drm driver.
 *
 * Copyright (c) 2016 Huawei Limited.
 *
 * Author:
 *	Rongrong Zou <zourongrong@huawei.com>
 *	Rongrong Zou <zourongrong@gmail.com>
 *	Jianhua Li <lijianhua@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#include <drm/drm_atomic_helper.h>
#include <drm/ttm/ttm_page_alloc.h>

#include "hibmc_drm_drv.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
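/*
 * TTM buffer objects get mmap offsets at or above 4 GiB
 * (DRM_FILE_PAGE_OFFSET in pages); hibmc_mmap() below rejects anything
 * lower before handing the rest of the offset space to TTM.
 */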
static inline struct hibmc_drm_private *
hibmc_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct hibmc_drm_private, bdev);
}
static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}
static bool hibmc_ttm_bo_is_hibmc_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &hibmc_bo_ttm_destroy;
}
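/*
 * Describe each TTM memory pool to the core: system RAM is fully
 * cacheable, while VRAM is a fixed, mappable PCI aperture handed to the
 * range manager with write-combined caching as the default.
 */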
static int
hibmc_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		       struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("unsupported memory type %u\n", type);
		return -EINVAL;
	}
	return 0;
}
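/*
 * Translate a TTM_PL_FLAG_* domain mask into the bo's placement list;
 * fpfn/lpfn of zero means the buffer may live anywhere within the
 * chosen pool.
 */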
void hibmc_ttm_placement(struct hibmc_bo *bo, int domain)
{
	u32 count = 0;
	u32 i;

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[count++].flags = TTM_PL_FLAG_WC |
			TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[count++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	if (!count)
		bo->placements[count++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = count;
	bo->placement.num_busy_placement = count;
	for (i = 0; i < count; i++) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
}
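/*
 * On eviction, ask TTM to migrate the buffer's backing store out of
 * VRAM into system memory.
 */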
static void
hibmc_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct hibmc_bo *hibmcbo = hibmc_bo(bo);

	if (!hibmc_ttm_bo_is_hibmc_bo(bo))
		return;

	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_SYSTEM);
	*pl = hibmcbo->placement;
}
static int hibmc_bo_verify_access(struct ttm_buffer_object *bo,
				  struct file *filp)
{
	struct hibmc_bo *hibmcbo = hibmc_bo(bo);

	return drm_vma_node_verify_access(&hibmcbo->gem.vma_node,
					  filp->private_data);
}
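/*
 * Tell TTM how to reach a buffer with the CPU: system pages need no I/O
 * mapping, while a VRAM page sits at BAR0's base plus its offset within
 * the aperture.
 */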
static int hibmc_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct hibmc_drm_private *hibmc = hibmc_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(hibmc->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void hibmc_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func hibmc_tt_backend_func = {
	.destroy = &hibmc_ttm_backend_destroy,
};
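/*
 * Allocate the ttm_tt that tracks the system-memory pages backing a bo
 * while it lives outside VRAM.
 */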
static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_buffer_object *bo,
					  u32 page_flags)
{
	struct ttm_tt *tt;
	int ret;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt) {
		DRM_ERROR("failed to allocate ttm_tt\n");
		return NULL;
	}
	tt->func = &hibmc_tt_backend_func;
	ret = ttm_tt_init(tt, bo, page_flags);
	if (ret) {
		DRM_ERROR("failed to initialize ttm_tt: %d\n", ret);
		kfree(tt);
		return NULL;
	}
	return tt;
}
struct ttm_bo_driver hibmc_bo_driver = {
	.ttm_tt_create		= hibmc_ttm_tt_create,
	.init_mem_type		= hibmc_bo_init_mem_type,
	.evict_flags		= hibmc_bo_evict_flags,
	.move			= NULL,
	.verify_access		= hibmc_bo_verify_access,
	.io_mem_reserve		= &hibmc_ttm_io_mem_reserve,
};
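/*
 * Bring up the memory manager: register the bo driver with TTM first,
 * then create a VRAM pool sized to the framebuffer memory the device
 * exposes.
 */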
int hibmc_mm_init(struct hibmc_drm_private *hibmc)
{
	int ret;
	struct drm_device *dev = hibmc->dev;
	struct ttm_bo_device *bdev = &hibmc->bdev;

	ret = ttm_bo_device_init(&hibmc->bdev,
				 &hibmc_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("error initializing bo driver: %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     hibmc->fb_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	hibmc->mm_inited = true;
	return 0;
}
void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
{
	if (!hibmc->mm_inited)
		return;

	ttm_bo_device_release(&hibmc->bdev);
	hibmc->mm_inited = false;
}
static void hibmc_bo_unref(struct hibmc_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	*bo = NULL;
}
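/*
 * Create a bo: initialize the embedded GEM object, pick the allowed
 * placements, and let ttm_bo_init() place it.  The destroy callback
 * releases the GEM object and frees the wrapper when the last TTM
 * reference drops.
 */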
int hibmc_bo_create(struct drm_device *dev, int size, int align,
		    u32 flags, struct hibmc_bo **phibmcbo)
{
	struct hibmc_drm_private *hibmc = dev->dev_private;
	struct hibmc_bo *hibmcbo;
	size_t acc_size;
	int ret;

	hibmcbo = kzalloc(sizeof(*hibmcbo), GFP_KERNEL);
	if (!hibmcbo) {
		DRM_ERROR("failed to allocate hibmcbo\n");
		return -ENOMEM;
	}
	ret = drm_gem_object_init(dev, &hibmcbo->gem, size);
	if (ret) {
		DRM_ERROR("failed to initialize drm gem object: %d\n", ret);
		kfree(hibmcbo);
		return ret;
	}

	hibmcbo->bo.bdev = &hibmc->bdev;

	hibmc_ttm_placement(hibmcbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&hibmc->bdev, size,
				       sizeof(struct hibmc_bo));

	ret = ttm_bo_init(&hibmc->bdev, &hibmcbo->bo, size,
			  ttm_bo_type_device, &hibmcbo->placement,
			  align >> PAGE_SHIFT, false, acc_size,
			  NULL, NULL, hibmc_bo_ttm_destroy);
	if (ret) {
		hibmc_bo_unref(&hibmcbo);
		DRM_ERROR("failed to initialize ttm_bo: %d\n", ret);
		return ret;
	}

	*phibmcbo = hibmcbo;
	return 0;
}
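/*
 * Pinning is refcounted: the first pin marks every placement NO_EVICT
 * and revalidates the bo so it cannot move; nested pins only bump the
 * counter.
 */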
int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = bo->bo.offset;
		return 0;
	}

	hibmc_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = bo->bo.offset;
	return 0;
}
int hibmc_bo_unpin(struct hibmc_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
	if (ret) {
		DRM_ERROR("validate failed for unpin: %d\n", ret);
		return ret;
	}

	return 0;
}
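/*
 * Offsets below DRM_FILE_PAGE_OFFSET never belong to a TTM bo, so
 * reject them up front; everything else is resolved by ttm_bo_mmap()
 * against this device's bo address space.
 */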
int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct hibmc_drm_private *hibmc;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	hibmc = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &hibmc->bdev);
}
int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
		     struct drm_gem_object **obj)
{
	struct hibmc_bo *hibmcbo;
	int ret;

	*obj = NULL;

	size = PAGE_ALIGN(size);
	if (size == 0) {
		DRM_ERROR("error: zero size\n");
		return -EINVAL;
	}

	ret = hibmc_bo_create(dev, size, 0, 0, &hibmcbo);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("failed to allocate GEM object: %d\n", ret);
		return ret;
	}
	*obj = &hibmcbo->gem;
	return 0;
}
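/*
 * Dumb buffers: the pitch is the bytes-per-pixel row rounded up to a
 * 16-byte boundary.  For example, a 1024x768 buffer at 32 bpp gives
 * pitch = ALIGN(1024 * 4, 16) = 4096 and size = 4096 * 768 = 3 MiB.
 */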
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	u32 handle;
	int ret;

	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 16);
	args->size = args->pitch * args->height;

	ret = hibmc_gem_create(dev, args->size, false,
			       &gobj);
	if (ret) {
		DRM_ERROR("failed to create GEM object: %d\n", ret);
		return ret;
	}

	ret = drm_gem_handle_create(file, gobj, &handle);
	drm_gem_object_put_unlocked(gobj);
	if (ret) {
		DRM_ERROR("failed to create GEM handle: %d\n", ret);
		return ret;
	}

	args->handle = handle;
	return 0;
}
void hibmc_gem_free_object(struct drm_gem_object *obj)
{
	struct hibmc_bo *hibmcbo = gem_to_hibmc_bo(obj);

	hibmc_bo_unref(&hibmcbo);
}
static u64 hibmc_bo_mmap_offset(struct hibmc_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int hibmc_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
			   u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	struct hibmc_bo *bo;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = gem_to_hibmc_bo(obj);
	*offset = hibmc_bo_mmap_offset(bo);

	drm_gem_object_put_unlocked(obj);
	return 0;
}
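/*
 * A hibmc framebuffer wraps a single GEM object.  On success the
 * framebuffer owns the reference it was created with and drops it in
 * its destroy callback below.
 */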
static void hibmc_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct hibmc_framebuffer *hibmc_fb = to_hibmc_framebuffer(fb);

	drm_gem_object_put_unlocked(hibmc_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(hibmc_fb);
}
static const struct drm_framebuffer_funcs hibmc_fb_funcs = {
	.destroy = hibmc_user_framebuffer_destroy,
};
struct hibmc_framebuffer *
hibmc_framebuffer_init(struct drm_device *dev,
		       const struct drm_mode_fb_cmd2 *mode_cmd,
		       struct drm_gem_object *obj)
{
	struct hibmc_framebuffer *hibmc_fb;
	int ret;

	hibmc_fb = kzalloc(sizeof(*hibmc_fb), GFP_KERNEL);
	if (!hibmc_fb) {
		DRM_ERROR("failed to allocate hibmc_fb\n");
		return ERR_PTR(-ENOMEM);
	}

	drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd);
	hibmc_fb->obj = obj;
	ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs);
	if (ret) {
		DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
		kfree(hibmc_fb);
		return ERR_PTR(ret);
	}

	return hibmc_fb;
}
static struct drm_framebuffer *
hibmc_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct hibmc_framebuffer *hibmc_fb;

	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
			 mode_cmd->width, mode_cmd->height,
			 (mode_cmd->pixel_format) & 0xff,
			 (mode_cmd->pixel_format >> 8) & 0xff,
			 (mode_cmd->pixel_format >> 16) & 0xff,
			 (mode_cmd->pixel_format >> 24) & 0xff);

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
	if (IS_ERR(hibmc_fb)) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR((long)hibmc_fb);
	}
	return &hibmc_fb->fb;
}
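/*
 * Mode-setting uses the stock atomic helpers; the only driver-specific
 * hook is framebuffer creation.
 */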
const struct drm_mode_config_funcs hibmc_mode_funcs = {
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = hibmc_user_framebuffer_create,
};