/*
 * Copyright (C) 2017 Samsung Electronics Co.Ltd
 * Authors:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * Exynos DRM Image Post Processing (IPP) related functions
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
/* List of registered IPP modules; modification serialized by component framework */
static LIST_HEAD(ipp_list);
/* Number of registered IPP modules, also used to assign consecutive ids */
static unsigned int num_ipp;
34 * exynos_drm_ipp_register - Register a new picture processor hardware module
36 * @ipp: ipp module to init
37 * @funcs: callbacks for the new ipp object
38 * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
39 * @formats: array of supported formats
40 * @num_formats: size of the supported formats array
41 * @name: name (for debugging purposes)
43 * Initializes a ipp module.
46 * Zero on success, error code on failure.
48 int exynos_drm_ipp_register(struct device
*dev
, struct exynos_drm_ipp
*ipp
,
49 const struct exynos_drm_ipp_funcs
*funcs
, unsigned int caps
,
50 const struct exynos_drm_ipp_formats
*formats
,
51 unsigned int num_formats
, const char *name
)
56 WARN_ON(!num_formats
);
58 spin_lock_init(&ipp
->lock
);
59 INIT_LIST_HEAD(&ipp
->todo_list
);
60 init_waitqueue_head(&ipp
->done_wq
);
63 ipp
->capabilities
= caps
;
65 ipp
->formats
= formats
;
66 ipp
->num_formats
= num_formats
;
68 /* ipp_list modification is serialized by component framework */
69 list_add_tail(&ipp
->head
, &ipp_list
);
72 DRM_DEV_DEBUG_DRIVER(dev
, "Registered ipp %d\n", ipp
->id
);
78 * exynos_drm_ipp_unregister - Unregister the picture processor module
82 void exynos_drm_ipp_unregister(struct device
*dev
,
83 struct exynos_drm_ipp
*ipp
)
86 WARN_ON(!list_empty(&ipp
->todo_list
));
91 * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules
94 * @file_priv: DRM file info
96 * Construct a list of ipp ids.
98 * Called by the user via ioctl.
101 * Zero on success, negative errno on failure.
103 int exynos_drm_ipp_get_res_ioctl(struct drm_device
*dev
, void *data
,
104 struct drm_file
*file_priv
)
106 struct drm_exynos_ioctl_ipp_get_res
*resp
= data
;
107 struct exynos_drm_ipp
*ipp
;
108 uint32_t __user
*ipp_ptr
= (uint32_t __user
*)
109 (unsigned long)resp
->ipp_id_ptr
;
110 unsigned int count
= num_ipp
, copied
= 0;
113 * This ioctl is called twice, once to determine how much space is
114 * needed, and the 2nd time to fill it.
116 if (count
&& resp
->count_ipps
>= count
) {
117 list_for_each_entry(ipp
, &ipp_list
, head
) {
118 if (put_user(ipp
->id
, ipp_ptr
+ copied
))
123 resp
->count_ipps
= count
;
128 static inline struct exynos_drm_ipp
*__ipp_get(uint32_t id
)
130 struct exynos_drm_ipp
*ipp
;
132 list_for_each_entry(ipp
, &ipp_list
, head
)
139 * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats
142 * @file_priv: DRM file info
144 * Construct a structure describing ipp module capabilities.
146 * Called by the user via ioctl.
149 * Zero on success, negative errno on failure.
151 int exynos_drm_ipp_get_caps_ioctl(struct drm_device
*dev
, void *data
,
152 struct drm_file
*file_priv
)
154 struct drm_exynos_ioctl_ipp_get_caps
*resp
= data
;
155 void __user
*ptr
= (void __user
*)(unsigned long)resp
->formats_ptr
;
156 struct exynos_drm_ipp
*ipp
;
159 ipp
= __ipp_get(resp
->ipp_id
);
163 resp
->ipp_id
= ipp
->id
;
164 resp
->capabilities
= ipp
->capabilities
;
167 * This ioctl is called twice, once to determine how much space is
168 * needed, and the 2nd time to fill it.
170 if (resp
->formats_count
>= ipp
->num_formats
) {
171 for (i
= 0; i
< ipp
->num_formats
; i
++) {
172 struct drm_exynos_ipp_format tmp
= {
173 .fourcc
= ipp
->formats
[i
].fourcc
,
174 .type
= ipp
->formats
[i
].type
,
175 .modifier
= ipp
->formats
[i
].modifier
,
178 if (copy_to_user(ptr
, &tmp
, sizeof(tmp
)))
183 resp
->formats_count
= ipp
->num_formats
;
188 static inline const struct exynos_drm_ipp_formats
*__ipp_format_get(
189 struct exynos_drm_ipp
*ipp
, uint32_t fourcc
,
190 uint64_t mod
, unsigned int type
)
194 for (i
= 0; i
< ipp
->num_formats
; i
++) {
195 if ((ipp
->formats
[i
].type
& type
) &&
196 ipp
->formats
[i
].fourcc
== fourcc
&&
197 ipp
->formats
[i
].modifier
== mod
)
198 return &ipp
->formats
[i
];
204 * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
207 * @file_priv: DRM file info
209 * Construct a structure describing ipp module limitations for provided
212 * Called by the user via ioctl.
215 * Zero on success, negative errno on failure.
217 int exynos_drm_ipp_get_limits_ioctl(struct drm_device
*dev
, void *data
,
218 struct drm_file
*file_priv
)
220 struct drm_exynos_ioctl_ipp_get_limits
*resp
= data
;
221 void __user
*ptr
= (void __user
*)(unsigned long)resp
->limits_ptr
;
222 const struct exynos_drm_ipp_formats
*format
;
223 struct exynos_drm_ipp
*ipp
;
225 if (resp
->type
!= DRM_EXYNOS_IPP_FORMAT_SOURCE
&&
226 resp
->type
!= DRM_EXYNOS_IPP_FORMAT_DESTINATION
)
229 ipp
= __ipp_get(resp
->ipp_id
);
233 format
= __ipp_format_get(ipp
, resp
->fourcc
, resp
->modifier
,
239 * This ioctl is called twice, once to determine how much space is
240 * needed, and the 2nd time to fill it.
242 if (format
->num_limits
&& resp
->limits_count
>= format
->num_limits
)
243 if (copy_to_user((void __user
*)ptr
, format
->limits
,
244 sizeof(*format
->limits
) * format
->num_limits
))
246 resp
->limits_count
= format
->num_limits
;
251 struct drm_pending_exynos_ipp_event
{
252 struct drm_pending_event base
;
253 struct drm_exynos_ipp_event event
;
256 static inline struct exynos_drm_ipp_task
*
257 exynos_drm_ipp_task_alloc(struct exynos_drm_ipp
*ipp
)
259 struct exynos_drm_ipp_task
*task
;
261 task
= kzalloc(sizeof(*task
), GFP_KERNEL
);
265 task
->dev
= ipp
->dev
;
269 task
->src
.rect
.w
= task
->dst
.rect
.w
= UINT_MAX
;
270 task
->src
.rect
.h
= task
->dst
.rect
.h
= UINT_MAX
;
271 task
->transform
.rotation
= DRM_MODE_ROTATE_0
;
273 DRM_DEV_DEBUG_DRIVER(task
->dev
, "Allocated task %pK\n", task
);
278 static const struct exynos_drm_param_map
{
282 } exynos_drm_ipp_params_maps
[] = {
284 DRM_EXYNOS_IPP_TASK_BUFFER
| DRM_EXYNOS_IPP_TASK_TYPE_SOURCE
,
285 sizeof(struct drm_exynos_ipp_task_buffer
),
286 offsetof(struct exynos_drm_ipp_task
, src
.buf
),
288 DRM_EXYNOS_IPP_TASK_BUFFER
|
289 DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION
,
290 sizeof(struct drm_exynos_ipp_task_buffer
),
291 offsetof(struct exynos_drm_ipp_task
, dst
.buf
),
293 DRM_EXYNOS_IPP_TASK_RECTANGLE
| DRM_EXYNOS_IPP_TASK_TYPE_SOURCE
,
294 sizeof(struct drm_exynos_ipp_task_rect
),
295 offsetof(struct exynos_drm_ipp_task
, src
.rect
),
297 DRM_EXYNOS_IPP_TASK_RECTANGLE
|
298 DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION
,
299 sizeof(struct drm_exynos_ipp_task_rect
),
300 offsetof(struct exynos_drm_ipp_task
, dst
.rect
),
302 DRM_EXYNOS_IPP_TASK_TRANSFORM
,
303 sizeof(struct drm_exynos_ipp_task_transform
),
304 offsetof(struct exynos_drm_ipp_task
, transform
),
306 DRM_EXYNOS_IPP_TASK_ALPHA
,
307 sizeof(struct drm_exynos_ipp_task_alpha
),
308 offsetof(struct exynos_drm_ipp_task
, alpha
),
312 static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task
*task
,
313 struct drm_exynos_ioctl_ipp_commit
*arg
)
315 const struct exynos_drm_param_map
*map
= exynos_drm_ipp_params_maps
;
316 void __user
*params
= (void __user
*)(unsigned long)arg
->params_ptr
;
317 unsigned int size
= arg
->params_size
;
322 if (get_user(id
, (uint32_t __user
*)params
))
325 for (i
= 0; i
< ARRAY_SIZE(exynos_drm_ipp_params_maps
); i
++)
328 if (i
== ARRAY_SIZE(exynos_drm_ipp_params_maps
) ||
332 if (copy_from_user((void *)task
+ map
[i
].offset
, params
,
336 params
+= map
[i
].size
;
340 DRM_DEV_DEBUG_DRIVER(task
->dev
,
341 "Got task %pK configuration from userspace\n",
346 static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer
*buf
,
347 struct drm_file
*filp
)
352 /* get GEM buffers and check their size */
353 for (i
= 0; i
< buf
->format
->num_planes
; i
++) {
354 unsigned int height
= (i
== 0) ? buf
->buf
.height
:
355 DIV_ROUND_UP(buf
->buf
.height
, buf
->format
->vsub
);
356 unsigned long size
= height
* buf
->buf
.pitch
[i
];
357 struct exynos_drm_gem
*gem
= exynos_drm_gem_get(filp
,
363 buf
->exynos_gem
[i
] = gem
;
365 if (size
+ buf
->buf
.offset
[i
] > buf
->exynos_gem
[i
]->size
) {
370 buf
->dma_addr
[i
] = buf
->exynos_gem
[i
]->dma_addr
+
377 exynos_drm_gem_put(buf
->exynos_gem
[i
]);
378 buf
->exynos_gem
[i
] = NULL
;
383 static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer
*buf
)
387 if (!buf
->exynos_gem
[0])
389 for (i
= 0; i
< buf
->format
->num_planes
; i
++)
390 exynos_drm_gem_put(buf
->exynos_gem
[i
]);
393 static void exynos_drm_ipp_task_free(struct exynos_drm_ipp
*ipp
,
394 struct exynos_drm_ipp_task
*task
)
396 DRM_DEV_DEBUG_DRIVER(task
->dev
, "Freeing task %pK\n", task
);
398 exynos_drm_ipp_task_release_buf(&task
->src
);
399 exynos_drm_ipp_task_release_buf(&task
->dst
);
401 drm_event_cancel_free(ipp
->drm_dev
, &task
->event
->base
);
405 struct drm_ipp_limit
{
406 struct drm_exynos_ipp_limit_val h
;
407 struct drm_exynos_ipp_limit_val v
;
/* Which size-limit class to query; IPP_LIMIT_MAX is the table bound. */
enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
414 static const enum drm_exynos_ipp_limit_type limit_id_fallback
[IPP_LIMIT_MAX
][4] = {
415 [IPP_LIMIT_BUFFER
] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER
},
416 [IPP_LIMIT_AREA
] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA
,
417 DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER
},
418 [IPP_LIMIT_ROTATED
] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED
,
419 DRM_EXYNOS_IPP_LIMIT_SIZE_AREA
,
420 DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER
},
/* Set *ptr to val only if it has not been set yet (is still zero). */
static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
{
	if (!*ptr)
		*ptr = val;
}
429 static void __get_size_limit(const struct drm_exynos_ipp_limit
*limits
,
430 unsigned int num_limits
, enum drm_ipp_size_id id
,
431 struct drm_ipp_limit
*res
)
433 const struct drm_exynos_ipp_limit
*l
= limits
;
436 memset(res
, 0, sizeof(*res
));
437 for (i
= 0; limit_id_fallback
[id
][i
]; i
++)
438 for (l
= limits
; l
- limits
< num_limits
; l
++) {
439 if (((l
->type
& DRM_EXYNOS_IPP_LIMIT_TYPE_MASK
) !=
440 DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE
) ||
441 ((l
->type
& DRM_EXYNOS_IPP_LIMIT_SIZE_MASK
) !=
442 limit_id_fallback
[id
][i
]))
444 __limit_set_val(&res
->h
.min
, l
->h
.min
);
445 __limit_set_val(&res
->h
.max
, l
->h
.max
);
446 __limit_set_val(&res
->h
.align
, l
->h
.align
);
447 __limit_set_val(&res
->v
.min
, l
->v
.min
);
448 __limit_set_val(&res
->v
.max
, l
->v
.max
);
449 __limit_set_val(&res
->v
.align
, l
->v
.align
);
453 static inline bool __align_check(unsigned int val
, unsigned int align
)
455 if (align
&& (val
& (align
- 1))) {
456 DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
463 static inline bool __size_limit_check(unsigned int val
,
464 struct drm_exynos_ipp_limit_val
*l
)
466 if ((l
->min
&& val
< l
->min
) || (l
->max
&& val
> l
->max
)) {
467 DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
468 val
, l
->min
, l
->max
);
471 return __align_check(val
, l
->align
);
474 static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer
*buf
,
475 const struct drm_exynos_ipp_limit
*limits
, unsigned int num_limits
,
476 bool rotate
, bool swap
)
478 enum drm_ipp_size_id id
= rotate
? IPP_LIMIT_ROTATED
: IPP_LIMIT_AREA
;
479 struct drm_ipp_limit l
;
480 struct drm_exynos_ipp_limit_val
*lh
= &l
.h
, *lv
= &l
.v
;
481 int real_width
= buf
->buf
.pitch
[0] / buf
->format
->cpp
[0];
486 __get_size_limit(limits
, num_limits
, IPP_LIMIT_BUFFER
, &l
);
487 if (!__size_limit_check(real_width
, &l
.h
) ||
488 !__size_limit_check(buf
->buf
.height
, &l
.v
))
495 __get_size_limit(limits
, num_limits
, id
, &l
);
496 if (!__size_limit_check(buf
->rect
.w
, lh
) ||
497 !__align_check(buf
->rect
.x
, lh
->align
) ||
498 !__size_limit_check(buf
->rect
.h
, lv
) ||
499 !__align_check(buf
->rect
.y
, lv
->align
))
505 static inline bool __scale_limit_check(unsigned int src
, unsigned int dst
,
506 unsigned int min
, unsigned int max
)
508 if ((max
&& (dst
<< 16) > src
* max
) ||
509 (min
&& (dst
<< 16) < src
* min
)) {
510 DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
512 min
>> 16, 100000 * (min
& 0xffff) / (1 << 16),
513 max
>> 16, 100000 * (max
& 0xffff) / (1 << 16));
519 static int exynos_drm_ipp_check_scale_limits(
520 struct drm_exynos_ipp_task_rect
*src
,
521 struct drm_exynos_ipp_task_rect
*dst
,
522 const struct drm_exynos_ipp_limit
*limits
,
523 unsigned int num_limits
, bool swap
)
525 const struct drm_exynos_ipp_limit_val
*lh
, *lv
;
528 for (; num_limits
; limits
++, num_limits
--)
529 if ((limits
->type
& DRM_EXYNOS_IPP_LIMIT_TYPE_MASK
) ==
530 DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE
)
535 lh
= (!swap
) ? &limits
->h
: &limits
->v
;
536 lv
= (!swap
) ? &limits
->v
: &limits
->h
;
537 dw
= (!swap
) ? dst
->w
: dst
->h
;
538 dh
= (!swap
) ? dst
->h
: dst
->w
;
540 if (!__scale_limit_check(src
->w
, dw
, lh
->min
, lh
->max
) ||
541 !__scale_limit_check(src
->h
, dh
, lv
->min
, lv
->max
))
547 static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task
*task
,
548 struct exynos_drm_ipp_buffer
*buf
,
549 struct exynos_drm_ipp_buffer
*src
,
550 struct exynos_drm_ipp_buffer
*dst
,
551 bool rotate
, bool swap
)
553 const struct exynos_drm_ipp_formats
*fmt
;
556 fmt
= __ipp_format_get(task
->ipp
, buf
->buf
.fourcc
, buf
->buf
.modifier
,
557 buf
== src
? DRM_EXYNOS_IPP_FORMAT_SOURCE
:
558 DRM_EXYNOS_IPP_FORMAT_DESTINATION
);
560 DRM_DEV_DEBUG_DRIVER(task
->dev
,
561 "Task %pK: %s format not supported\n",
562 task
, buf
== src
? "src" : "dst");
567 if (buf
->buf
.width
== 0 || buf
->buf
.height
== 0)
570 buf
->format
= drm_format_info(buf
->buf
.fourcc
);
571 for (i
= 0; i
< buf
->format
->num_planes
; i
++) {
572 unsigned int width
= (i
== 0) ? buf
->buf
.width
:
573 DIV_ROUND_UP(buf
->buf
.width
, buf
->format
->hsub
);
575 if (buf
->buf
.pitch
[i
] == 0)
576 buf
->buf
.pitch
[i
] = width
* buf
->format
->cpp
[i
];
577 if (buf
->buf
.pitch
[i
] < width
* buf
->format
->cpp
[i
])
579 if (!buf
->buf
.gem_id
[i
])
583 /* pitch for additional planes must match */
584 if (buf
->format
->num_planes
> 2 &&
585 buf
->buf
.pitch
[1] != buf
->buf
.pitch
[2])
588 /* check driver limits */
589 ret
= exynos_drm_ipp_check_size_limits(buf
, fmt
->limits
,
592 buf
== dst
? swap
: false);
595 ret
= exynos_drm_ipp_check_scale_limits(&src
->rect
, &dst
->rect
,
597 fmt
->num_limits
, swap
);
601 static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task
*task
)
603 struct exynos_drm_ipp
*ipp
= task
->ipp
;
604 struct exynos_drm_ipp_buffer
*src
= &task
->src
, *dst
= &task
->dst
;
605 unsigned int rotation
= task
->transform
.rotation
;
607 bool swap
= drm_rotation_90_or_270(rotation
);
608 bool rotate
= (rotation
!= DRM_MODE_ROTATE_0
);
611 DRM_DEV_DEBUG_DRIVER(task
->dev
, "Checking task %pK\n", task
);
613 if (src
->rect
.w
== UINT_MAX
)
614 src
->rect
.w
= src
->buf
.width
;
615 if (src
->rect
.h
== UINT_MAX
)
616 src
->rect
.h
= src
->buf
.height
;
617 if (dst
->rect
.w
== UINT_MAX
)
618 dst
->rect
.w
= dst
->buf
.width
;
619 if (dst
->rect
.h
== UINT_MAX
)
620 dst
->rect
.h
= dst
->buf
.height
;
622 if (src
->rect
.x
+ src
->rect
.w
> (src
->buf
.width
) ||
623 src
->rect
.y
+ src
->rect
.h
> (src
->buf
.height
) ||
624 dst
->rect
.x
+ dst
->rect
.w
> (dst
->buf
.width
) ||
625 dst
->rect
.y
+ dst
->rect
.h
> (dst
->buf
.height
)) {
626 DRM_DEV_DEBUG_DRIVER(task
->dev
,
627 "Task %pK: defined area is outside provided buffers\n",
632 if ((!swap
&& (src
->rect
.w
!= dst
->rect
.w
||
633 src
->rect
.h
!= dst
->rect
.h
)) ||
634 (swap
&& (src
->rect
.w
!= dst
->rect
.h
||
635 src
->rect
.h
!= dst
->rect
.w
)))
638 if ((!(ipp
->capabilities
& DRM_EXYNOS_IPP_CAP_CROP
) &&
639 (src
->rect
.x
|| src
->rect
.y
|| dst
->rect
.x
|| dst
->rect
.y
)) ||
640 (!(ipp
->capabilities
& DRM_EXYNOS_IPP_CAP_ROTATE
) && rotate
) ||
641 (!(ipp
->capabilities
& DRM_EXYNOS_IPP_CAP_SCALE
) && scale
) ||
642 (!(ipp
->capabilities
& DRM_EXYNOS_IPP_CAP_CONVERT
) &&
643 src
->buf
.fourcc
!= dst
->buf
.fourcc
)) {
644 DRM_DEV_DEBUG_DRIVER(task
->dev
, "Task %pK: hw capabilities exceeded\n",
649 ret
= exynos_drm_ipp_check_format(task
, src
, src
, dst
, rotate
, swap
);
653 ret
= exynos_drm_ipp_check_format(task
, dst
, src
, dst
, false, swap
);
657 DRM_DEV_DEBUG_DRIVER(ipp
->dev
, "Task %pK: all checks done.\n",
663 static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task
*task
,
664 struct drm_file
*filp
)
666 struct exynos_drm_ipp_buffer
*src
= &task
->src
, *dst
= &task
->dst
;
669 DRM_DEV_DEBUG_DRIVER(task
->dev
, "Setting buffer for task %pK\n",
672 ret
= exynos_drm_ipp_task_setup_buffer(src
, filp
);
674 DRM_DEV_DEBUG_DRIVER(task
->dev
,
675 "Task %pK: src buffer setup failed\n",
679 ret
= exynos_drm_ipp_task_setup_buffer(dst
, filp
);
681 DRM_DEV_DEBUG_DRIVER(task
->dev
,
682 "Task %pK: dst buffer setup failed\n",
687 DRM_DEV_DEBUG_DRIVER(task
->dev
, "Task %pK: buffers prepared.\n",
694 static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task
*task
,
695 struct drm_file
*file_priv
, uint64_t user_data
)
697 struct drm_pending_exynos_ipp_event
*e
= NULL
;
700 e
= kzalloc(sizeof(*e
), GFP_KERNEL
);
704 e
->event
.base
.type
= DRM_EXYNOS_IPP_EVENT
;
705 e
->event
.base
.length
= sizeof(e
->event
);
706 e
->event
.user_data
= user_data
;
708 ret
= drm_event_reserve_init(task
->ipp
->drm_dev
, file_priv
, &e
->base
,
720 static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task
*task
)
722 struct timespec64 now
;
724 ktime_get_ts64(&now
);
725 task
->event
->event
.tv_sec
= now
.tv_sec
;
726 task
->event
->event
.tv_usec
= now
.tv_nsec
/ NSEC_PER_USEC
;
727 task
->event
->event
.sequence
= atomic_inc_return(&task
->ipp
->sequence
);
729 drm_send_event(task
->ipp
->drm_dev
, &task
->event
->base
);
732 static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task
*task
)
736 if (ret
== 0 && task
->event
) {
737 exynos_drm_ipp_event_send(task
);
738 /* ensure event won't be canceled on task free */
742 exynos_drm_ipp_task_free(task
->ipp
, task
);
746 static void exynos_drm_ipp_cleanup_work(struct work_struct
*work
)
748 struct exynos_drm_ipp_task
*task
= container_of(work
,
749 struct exynos_drm_ipp_task
, cleanup_work
);
751 exynos_drm_ipp_task_cleanup(task
);
/* Forward declaration: defined below, needed by exynos_drm_ipp_task_done(). */
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);
757 * exynos_drm_ipp_task_done - finish given task and set return code
758 * @task: ipp task to finish
759 * @ret: error code or 0 if operation has been performed successfully
761 void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task
*task
, int ret
)
763 struct exynos_drm_ipp
*ipp
= task
->ipp
;
766 DRM_DEV_DEBUG_DRIVER(task
->dev
, "ipp: %d, task %pK done: %d\n",
769 spin_lock_irqsave(&ipp
->lock
, flags
);
770 if (ipp
->task
== task
)
772 task
->flags
|= DRM_EXYNOS_IPP_TASK_DONE
;
774 spin_unlock_irqrestore(&ipp
->lock
, flags
);
776 exynos_drm_ipp_next_task(ipp
);
777 wake_up(&ipp
->done_wq
);
779 if (task
->flags
& DRM_EXYNOS_IPP_TASK_ASYNC
) {
780 INIT_WORK(&task
->cleanup_work
, exynos_drm_ipp_cleanup_work
);
781 schedule_work(&task
->cleanup_work
);
785 static void exynos_drm_ipp_next_task(struct exynos_drm_ipp
*ipp
)
787 struct exynos_drm_ipp_task
*task
;
791 DRM_DEV_DEBUG_DRIVER(ipp
->dev
, "ipp: %d, try to run new task\n",
794 spin_lock_irqsave(&ipp
->lock
, flags
);
796 if (ipp
->task
|| list_empty(&ipp
->todo_list
)) {
797 spin_unlock_irqrestore(&ipp
->lock
, flags
);
801 task
= list_first_entry(&ipp
->todo_list
, struct exynos_drm_ipp_task
,
803 list_del_init(&task
->head
);
806 spin_unlock_irqrestore(&ipp
->lock
, flags
);
808 DRM_DEV_DEBUG_DRIVER(ipp
->dev
,
809 "ipp: %d, selected task %pK to run\n", ipp
->id
,
812 ret
= ipp
->funcs
->commit(ipp
, task
);
814 exynos_drm_ipp_task_done(task
, ret
);
817 static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp
*ipp
,
818 struct exynos_drm_ipp_task
*task
)
822 spin_lock_irqsave(&ipp
->lock
, flags
);
823 list_add(&task
->head
, &ipp
->todo_list
);
824 spin_unlock_irqrestore(&ipp
->lock
, flags
);
826 exynos_drm_ipp_next_task(ipp
);
829 static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp
*ipp
,
830 struct exynos_drm_ipp_task
*task
)
834 spin_lock_irqsave(&ipp
->lock
, flags
);
835 if (task
->flags
& DRM_EXYNOS_IPP_TASK_DONE
) {
836 /* already completed task */
837 exynos_drm_ipp_task_cleanup(task
);
838 } else if (ipp
->task
!= task
) {
839 /* task has not been scheduled for execution yet */
840 list_del_init(&task
->head
);
841 exynos_drm_ipp_task_cleanup(task
);
844 * currently processed task, call abort() and perform
845 * cleanup with async worker
847 task
->flags
|= DRM_EXYNOS_IPP_TASK_ASYNC
;
848 spin_unlock_irqrestore(&ipp
->lock
, flags
);
849 if (ipp
->funcs
->abort
)
850 ipp
->funcs
->abort(ipp
, task
);
853 spin_unlock_irqrestore(&ipp
->lock
, flags
);
857 * exynos_drm_ipp_commit_ioctl - perform image processing operation
860 * @file_priv: DRM file info
862 * Construct a ipp task from the set of properties provided from the user
863 * and try to schedule it to framebuffer processor hardware.
865 * Called by the user via ioctl.
868 * Zero on success, negative errno on failure.
870 int exynos_drm_ipp_commit_ioctl(struct drm_device
*dev
, void *data
,
871 struct drm_file
*file_priv
)
873 struct drm_exynos_ioctl_ipp_commit
*arg
= data
;
874 struct exynos_drm_ipp
*ipp
;
875 struct exynos_drm_ipp_task
*task
;
878 if ((arg
->flags
& ~DRM_EXYNOS_IPP_FLAGS
) || arg
->reserved
)
881 /* can't test and expect an event at the same time */
882 if ((arg
->flags
& DRM_EXYNOS_IPP_FLAG_TEST_ONLY
) &&
883 (arg
->flags
& DRM_EXYNOS_IPP_FLAG_EVENT
))
886 ipp
= __ipp_get(arg
->ipp_id
);
890 task
= exynos_drm_ipp_task_alloc(ipp
);
894 ret
= exynos_drm_ipp_task_set(task
, arg
);
898 ret
= exynos_drm_ipp_task_check(task
);
902 ret
= exynos_drm_ipp_task_setup_buffers(task
, file_priv
);
903 if (ret
|| arg
->flags
& DRM_EXYNOS_IPP_FLAG_TEST_ONLY
)
906 if (arg
->flags
& DRM_EXYNOS_IPP_FLAG_EVENT
) {
907 ret
= exynos_drm_ipp_event_create(task
, file_priv
,
914 * Queue task for processing on the hardware. task object will be
915 * then freed after exynos_drm_ipp_task_done()
917 if (arg
->flags
& DRM_EXYNOS_IPP_FLAG_NONBLOCK
) {
918 DRM_DEV_DEBUG_DRIVER(ipp
->dev
,
919 "ipp: %d, nonblocking processing task %pK\n",
922 task
->flags
|= DRM_EXYNOS_IPP_TASK_ASYNC
;
923 exynos_drm_ipp_schedule_task(task
->ipp
, task
);
926 DRM_DEV_DEBUG_DRIVER(ipp
->dev
, "ipp: %d, processing task %pK\n",
928 exynos_drm_ipp_schedule_task(ipp
, task
);
929 ret
= wait_event_interruptible(ipp
->done_wq
,
930 task
->flags
& DRM_EXYNOS_IPP_TASK_DONE
);
932 exynos_drm_ipp_task_abort(ipp
, task
);
934 ret
= exynos_drm_ipp_task_cleanup(task
);
938 exynos_drm_ipp_task_free(ipp
, task
);