/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/dma-buf.h>

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"

#include <drm/drm_fb_helper.h>

#define DL_DEFIO_WRITE_DELAY	(HZ/20) /* fb_deferred_io.delay in jiffies */

static int fb_defio = 0;  /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;

module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
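
/*
 * Both knobs appear under /sys/module/udl/parameters/ with the mode bits
 * above. fb_bpp is only consulted when the fbdev emulation is set up in
 * udl_fbdev_init(); fb_defio is re-checked on every udl_fb_open().
 */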
struct udl_fbdev {
	struct drm_fb_helper helper;
	struct udl_framebuffer ufb;
	struct list_head fbdev_list;
	int fb_count;
};

#define DL_ALIGN_UP(x, a) ALIGN(x, a)
#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
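
/*
 * Worked example with a == sizeof(unsigned long) == 8 on 64-bit:
 * DL_ALIGN_DOWN(13, 8) == 8 and DL_ALIGN_UP(13, 8) == 16. This is how
 * udl_handle_damage() widens a damage rectangle so each scanline copy
 * starts and ends on an unsigned long boundary.
 */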

/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)

/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)

/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)

/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)

/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)

/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
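
/*
 * rgb8()/rgb16() below pack a 32 bpp xRGB pixel into the device's 8 bpp
 * and 16 bpp (RGB565) wire formats using the macros above; they are kept
 * compiled out until a caller needs them.
 */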
#if 0
static uint8_t rgb8(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return DLO_RGB8(red, grn, blu);
}

static uint16_t rgb16(uint32_t col)
{
	uint8_t red = DLO_RGB_GETRED(col);
	uint8_t grn = DLO_RGB_GETGRN(col);
	uint8_t blu = DLO_RGB_GETBLU(col);

	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif

/*
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 *   Touching ANY framebuffer memory that triggers a page fault
 *   in fb_defio will cause a deadlock, when it also tries to
 *   grab the same mutex.
 */
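
/*
 * Deferred-I/O flush: runs DL_DEFIO_WRITE_DELAY jiffies after a client
 * dirties the mmap'ed framebuffer, and sends each dirtied page to the
 * device as one PAGE_SIZE span of the shadow buffer.
 */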
static void udlfb_dpy_deferred_io(struct fb_info *info,
				  struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct udl_fbdev *ufbdev = info->par;
	struct drm_device *dev = ufbdev->ufb.base.dev;
	struct udl_device *udl = dev->dev_private;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	if (!fb_defio)
		return;

	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
				     &urb, (char *) info->fix.smem_start,
				     &cmd, cur->index << PAGE_SHIFT,
				     cur->index << PAGE_SHIFT,
				     PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(bytes_rendered, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);
}
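
/*
 * Copy a damaged rectangle of the framebuffer out to the device. x and
 * width are first widened to unsigned long boundaries. In atomic context
 * the rectangle is only merged into the accumulated dirty region (under
 * dirty_lock) and left for a later call; otherwise any stored dirty
 * region is merged in and the whole area is rendered immediately.
 */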
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
		      int width, int height)
{
	struct drm_device *dev = fb->base.dev;
	struct udl_device *udl = dev->dev_private;
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;
	int bpp = (fb->base.bits_per_pixel / 8);
	int x2, y2;
	bool store_for_later = false;
	unsigned long flags;

	if (!fb->active_16)
		return 0;

	if (!fb->obj->vmapping) {
		ret = udl_gem_vmap(fb->obj);
		if (ret == -ENOMEM) {
			DRM_ERROR("failed to vmap fb\n");
			return 0;
		}
		if (!fb->obj->vmapping) {
			DRM_ERROR("failed to vmapping\n");
			return 0;
		}
	}

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > fb->base.width) ||
	    (y + height > fb->base.height))
		return -EINVAL;

	/* if we are in atomic just store the info
	   can't test inside spin lock */
	if (in_atomic())
		store_for_later = true;

	x2 = x + width - 1;
	y2 = y + height - 1;

	spin_lock_irqsave(&fb->dirty_lock, flags);

	if (fb->y1 < y)
		y = fb->y1;
	if (fb->y2 > y2)
		y2 = fb->y2;
	if (fb->x1 < x)
		x = fb->x1;
	if (fb->x2 > x2)
		x2 = fb->x2;

	if (store_for_later) {
		fb->x1 = x;
		fb->x2 = x2;
		fb->y1 = y;
		fb->y2 = y2;
		spin_unlock_irqrestore(&fb->dirty_lock, flags);
		return 0;
	}

	fb->x1 = fb->y1 = INT_MAX;
	fb->x2 = fb->y2 = 0;

	spin_unlock_irqrestore(&fb->dirty_lock, flags);
	start_cycles = get_cycles();

	urb = udl_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i <= y2; i++) {
		const int line_offset = fb->base.pitches[0] * i;
		const int byte_offset = line_offset + (x * bpp);
		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
		if (udl_render_hline(dev, bpp, &urb,
				     (char *) fb->obj->vmapping,
				     &cmd, byte_offset, dev_byte_offset,
				     (x2 - x + 1) * bpp,
				     &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = udl_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		udl_urb_completion(urb);

error:
	atomic_add(bytes_sent, &udl->bytes_sent);
	atomic_add(bytes_identical, &udl->bytes_identical);
	atomic_add(width*height*bpp, &udl->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &udl->cpu_kcycles_used);

	return 0;
}
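
/*
 * The shadow framebuffer lives in vmalloc space, so the mapping is built
 * page by page: vmalloc_to_pfn() resolves each page and remap_pfn_range()
 * inserts it into the client's VMA.
 */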
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	if (offset + size > info->fix.smem_len)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
		  pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
	return 0;
}
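
/*
 * The drawing hooks below render through the sys_*() helpers into the
 * shadow buffer and then immediately push the touched rectangle to the
 * device via udl_handle_damage().
 */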
static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct udl_fbdev *ufbdev = info->par;

	sys_fillrect(info, rect);

	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
			  rect->height);
}

static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	struct udl_fbdev *ufbdev = info->par;

	sys_copyarea(info, region);

	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
			  region->height);
}

static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct udl_fbdev *ufbdev = info->par;

	sys_imageblit(info, image);

	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
			  image->height);
}

/*
 * It's common for several clients to have framebuffer open simultaneously.
 * e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least)
 */
static int udl_fb_open(struct fb_info *info, int user)
{
	struct udl_fbdev *ufbdev = info->par;
	struct drm_device *dev = ufbdev->ufb.base.dev;
	struct udl_device *udl = dev->dev_private;

	/* If the USB device is gone, we don't accept new opens */
	if (drm_device_is_unplugged(udl->ddev))
		return -ENODEV;

	ufbdev->fb_count++;

	if (fb_defio && (info->fbdefio == NULL)) {
		/* enable defio at last moment if not disabled by client */

		struct fb_deferred_io *fbdefio;

		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

		if (fbdefio) {
			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
			fbdefio->deferred_io = udlfb_dpy_deferred_io;
		}

		info->fbdefio = fbdefio;
		fb_deferred_io_init(info);
	}

	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
		  info->node, user, info, ufbdev->fb_count);

	return 0;
}

/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int udl_fb_release(struct fb_info *info, int user)
{
	struct udl_fbdev *ufbdev = info->par;

	ufbdev->fb_count--;

	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = udl_fb_mmap;
	}

	pr_warn("released /dev/fb%d user=%d count=%d\n",
		info->node, user, ufbdev->fb_count);

	return 0;
}

static struct fb_ops udlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = udl_fb_fillrect,
	.fb_copyarea = udl_fb_copyarea,
	.fb_imageblit = udl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
	.fb_mmap = udl_fb_mmap,
	.fb_open = udl_fb_open,
	.fb_release = udl_fb_release,
};
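
/*
 * DRM dirty-FB ioctl handler. For framebuffers imported via PRIME, CPU
 * access to the dma-buf is bracketed with begin/end_cpu_access so the
 * exporter can flush caches before the pixels are read.
 */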
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
				      struct drm_file *file,
				      unsigned flags, unsigned color,
				      struct drm_clip_rect *clips,
				      unsigned num_clips)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);
	int i;
	int ret = 0;

	drm_modeset_lock_all(fb->dev);

	if (!ufb->active_16)
		goto unlock;

	if (ufb->obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
					       0, ufb->obj->base.size,
					       DMA_FROM_DEVICE);
		if (ret)
			goto unlock;
	}

	for (i = 0; i < num_clips; i++) {
		ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
					clips[i].x2 - clips[i].x1,
					clips[i].y2 - clips[i].y1);
		if (ret)
			break;
	}

	if (ufb->obj->base.import_attach) {
		dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
				       0, ufb->obj->base.size,
				       DMA_FROM_DEVICE);
	}

 unlock:
	drm_modeset_unlock_all(fb->dev);

	return ret;
}

static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct udl_framebuffer *ufb = to_udl_fb(fb);

	if (ufb->obj)
		drm_gem_object_unreference_unlocked(&ufb->obj->base);

	drm_framebuffer_cleanup(fb);
	kfree(ufb);
}

static const struct drm_framebuffer_funcs udlfb_funcs = {
	.destroy = udl_user_framebuffer_destroy,
	.dirty = udl_user_framebuffer_dirty,
};
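
/* Common framebuffer setup shared by the fbdev path and user fb creation. */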
static int
udl_framebuffer_init(struct drm_device *dev,
		     struct udl_framebuffer *ufb,
		     struct drm_mode_fb_cmd2 *mode_cmd,
		     struct udl_gem_object *obj)
{
	int ret;

	spin_lock_init(&ufb->dirty_lock);
	ufb->obj = obj;
	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
	return ret;
}
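
/*
 * .fb_probe callback: allocates a GEM object large enough for the surface
 * (24 bpp requests are bumped to 32 bpp first), vmaps it for CPU
 * rendering, and registers the fb_info that fbdev clients such as fbcon
 * draw into.
 */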
static int udlfb_create(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	struct udl_fbdev *ufbdev =
		container_of(helper, struct udl_fbdev, helper);
	struct drm_device *dev = ufbdev->helper.dev;
	struct fb_info *info;
	struct device *device = dev->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct udl_gem_object *obj;
	uint32_t size;
	int ret = 0;

	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (!obj)
		goto out;

	ret = udl_gem_vmap(obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb\n");
		goto out_gfree;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_gfree;
	}
	info->par = ufbdev;

	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
	if (ret)
		goto out_gfree;

	fb = &ufbdev->ufb.base;

	ufbdev->helper.fb = fb;
	ufbdev->helper.fbdev = info;

	strcpy(info->fix.id, "udldrmfb");

	info->screen_base = ufbdev->ufb.obj->vmapping;
	info->fix.smem_len = size;
	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &udlfb_ops;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_gfree;
	}

	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
		      fb->width, fb->height,
		      ufbdev->ufb.obj->vmapping);

	return ret;
out_gfree:
	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
out:
	return ret;
}

static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
	.fb_probe = udlfb_create,
};
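
/*
 * Teardown mirrors udlfb_create(): unregister the fbdev device first,
 * then release the helper, the framebuffer state and finally the
 * reference on the backing GEM object.
 */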
static void udl_fbdev_destroy(struct drm_device *dev,
			      struct udl_fbdev *ufbdev)
{
	struct fb_info *info;
	if (ufbdev->helper.fbdev) {
		info = ufbdev->helper.fbdev;
		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}
	drm_fb_helper_fini(&ufbdev->helper);
	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
	drm_framebuffer_cleanup(&ufbdev->ufb.base);
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}

int udl_fbdev_init(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	int bpp_sel = fb_bpp;
	struct udl_fbdev *ufbdev;
	int ret;

	ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
	if (!ufbdev)
		return -ENOMEM;

	udl->fbdev = ufbdev;

	drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, &ufbdev->helper,
				 1, 1);
	if (ret)
		goto free;

	ret = drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
	if (ret)
		goto fini;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
	if (ret)
		goto fini;

	return 0;

fini:
	drm_fb_helper_fini(&ufbdev->helper);
free:
	kfree(ufbdev);
	return ret;
}

void udl_fbdev_cleanup(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	if (!udl->fbdev)
		return;

	udl_fbdev_destroy(dev, udl->fbdev);
	kfree(udl->fbdev);
	udl->fbdev = NULL;
}
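
/*
 * Called on USB disconnect: unlink_framebuffer() detaches the fb_info so
 * existing fbdev clients fail gracefully rather than touching a device
 * that is gone; new opens are already refused in udl_fb_open().
 */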
void udl_fbdev_unplug(struct drm_device *dev)
{
	struct udl_device *udl = dev->dev_private;
	struct udl_fbdev *ufbdev;
	if (!udl->fbdev)
		return;

	ufbdev = udl->fbdev;
	if (ufbdev->helper.fbdev) {
		struct fb_info *info;
		info = ufbdev->helper.fbdev;
		unlink_framebuffer(info);
	}
}
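
/*
 * .fb_create hook for userspace framebuffers (e.g. drmModeAddFB): the GEM
 * object named by the first handle must be large enough for the requested
 * pitch * height, rounded up to a page.
 */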
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
		      struct drm_file *file,
		      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct udl_framebuffer *ufb;
	int ret;
	uint32_t size;

	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	size = mode_cmd->pitches[0] * mode_cmd->height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > obj->size) {
		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n",
			  size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
		return ERR_PTR(-ENOMEM);
	}

	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
	if (ufb == NULL)
		return ERR_PTR(-ENOMEM);

	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
	if (ret) {
		kfree(ufb);
		return ERR_PTR(-EINVAL);
	}
	return &ufb->base;
}