drivers/gpu/drm/exynos/exynos_drm_ipp.c
/*
 * Copyright (C) 2017 Samsung Electronics Co.Ltd
 * Authors:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * Exynos DRM Image Post Processing (IPP) related functions
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 */
#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include <uapi/drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
static int num_ipp;
static LIST_HEAD(ipp_list);
/**
 * exynos_drm_ipp_register - Register a new picture processor hardware module
 * @dev: DRM device
 * @ipp: ipp module to init
 * @funcs: callbacks for the new ipp object
 * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*)
 * @formats: array of supported formats
 * @num_formats: size of the supported formats array
 * @name: name (for debugging purposes)
 *
 * Initializes an ipp module.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
		const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
		const struct exynos_drm_ipp_formats *formats,
		unsigned int num_formats, const char *name)
{
	WARN_ON(!ipp);
	WARN_ON(!funcs);
	WARN_ON(!formats);
	WARN_ON(!num_formats);

	spin_lock_init(&ipp->lock);
	INIT_LIST_HEAD(&ipp->todo_list);
	init_waitqueue_head(&ipp->done_wq);
	ipp->dev = dev;
	ipp->funcs = funcs;
	ipp->capabilities = caps;
	ipp->name = name;
	ipp->formats = formats;
	ipp->num_formats = num_formats;

	/* ipp_list modification is serialized by component framework */
	list_add_tail(&ipp->head, &ipp_list);
	ipp->id = num_ipp++;

	DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);

	return 0;
}
/**
 * exynos_drm_ipp_unregister - Unregister the picture processor module
 * @dev: DRM device
 * @ipp: ipp module
 */
void exynos_drm_ipp_unregister(struct drm_device *dev,
			       struct exynos_drm_ipp *ipp)
{
	WARN_ON(ipp->task);
	WARN_ON(!list_empty(&ipp->todo_list));
	list_del(&ipp->head);
}
/**
 * exynos_drm_ipp_get_res_ioctl - enumerate all ipp modules
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a list of ipp ids.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_res *resp = data;
	struct exynos_drm_ipp *ipp;
	uint32_t __user *ipp_ptr = (uint32_t __user *)
						(unsigned long)resp->ipp_id_ptr;
	unsigned int count = num_ipp, copied = 0;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (count && resp->count_ipps >= count) {
		list_for_each_entry(ipp, &ipp_list, head) {
			if (put_user(ipp->id, ipp_ptr + copied))
				return -EFAULT;
			copied++;
		}
	}
	resp->count_ipps = count;

	return 0;
}
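
/*
 * Example (sketch): userspace discovers the available ipp modules with the
 * two-call pattern described above. The ioctl number and struct come from
 * uapi/drm/exynos_drm.h; the fd, variable names and (omitted) error
 * handling are illustrative only:
 *
 *	struct drm_exynos_ioctl_ipp_get_res res = { 0 };
 *	uint32_t *ids;
 *
 *	ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);
 *	ids = calloc(res.count_ipps, sizeof(*ids));
 *	res.ipp_id_ptr = (uint64_t)(unsigned long)ids;
 *	ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res);
 */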
static inline struct exynos_drm_ipp *__ipp_get(uint32_t id)
{
	struct exynos_drm_ipp *ipp;

	list_for_each_entry(ipp, &ipp_list, head)
		if (ipp->id == id)
			return ipp;
	return NULL;
}
/**
 * exynos_drm_ipp_get_caps_ioctl - get ipp module capabilities and formats
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a structure describing ipp module capabilities.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_caps *resp = data;
	void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr;
	struct exynos_drm_ipp *ipp;
	int i;

	ipp = __ipp_get(resp->ipp_id);
	if (!ipp)
		return -ENOENT;

	resp->ipp_id = ipp->id;
	resp->capabilities = ipp->capabilities;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (resp->formats_count >= ipp->num_formats) {
		for (i = 0; i < ipp->num_formats; i++) {
			struct drm_exynos_ipp_format tmp = {
				.fourcc = ipp->formats[i].fourcc,
				.type = ipp->formats[i].type,
				.modifier = ipp->formats[i].modifier,
			};

			if (copy_to_user(ptr, &tmp, sizeof(tmp)))
				return -EFAULT;
			ptr += sizeof(tmp);
		}
	}
	resp->formats_count = ipp->num_formats;

	return 0;
}
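
/*
 * Userspace queries a specific module the same way (a sketch under the same
 * assumptions as the example above): a first DRM_IOCTL_EXYNOS_IPP_GET_CAPS
 * call with formats_count == 0 just reports the number of
 * drm_exynos_ipp_format entries, a second call with formats_ptr pointing at
 * an adequately sized array fills them in.
 */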
static inline const struct exynos_drm_ipp_formats *__ipp_format_get(
				struct exynos_drm_ipp *ipp, uint32_t fourcc,
				uint64_t mod, unsigned int type)
{
	int i;

	for (i = 0; i < ipp->num_formats; i++) {
		if ((ipp->formats[i].type & type) &&
		    ipp->formats[i].fourcc == fourcc &&
		    ipp->formats[i].modifier == mod)
			return &ipp->formats[i];
	}
	return NULL;
}
/**
 * exynos_drm_ipp_get_limits_ioctl - get ipp module limits
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct a structure describing ipp module limitations for the provided
 * picture format.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_get_limits *resp = data;
	void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr;
	const struct exynos_drm_ipp_formats *format;
	struct exynos_drm_ipp *ipp;

	if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE &&
	    resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION)
		return -EINVAL;

	ipp = __ipp_get(resp->ipp_id);
	if (!ipp)
		return -ENOENT;

	format = __ipp_format_get(ipp, resp->fourcc, resp->modifier,
				  resp->type);
	if (!format)
		return -EINVAL;

	/*
	 * This ioctl is called twice, once to determine how much space is
	 * needed, and the 2nd time to fill it.
	 */
	if (format->num_limits && resp->limits_count >= format->num_limits)
		if (copy_to_user((void __user *)ptr, format->limits,
				 sizeof(*format->limits) * format->num_limits))
			return -EFAULT;
	resp->limits_count = format->num_limits;

	return 0;
}
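
/*
 * Example (sketch): querying the limits for one format follows the same
 * two-call pattern, but the caller must also pass the fourcc, modifier and
 * DRM_EXYNOS_IPP_FORMAT_* type it is interested in. Field names assume the
 * uapi layout from uapi/drm/exynos_drm.h; ipp_ids[] is hypothetical:
 *
 *	struct drm_exynos_ioctl_ipp_get_limits lim = {
 *		.ipp_id = ipp_ids[0],
 *		.fourcc = DRM_FORMAT_XRGB8888,
 *		.type = DRM_EXYNOS_IPP_FORMAT_SOURCE,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_GET_LIMITS, &lim);
 */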
struct drm_pending_exynos_ipp_event {
	struct drm_pending_event base;
	struct drm_exynos_ipp_event event;
};
static inline struct exynos_drm_ipp_task *
			exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return NULL;

	task->dev = ipp->dev;
	task->ipp = ipp;

	/* some defaults */
	task->src.rect.w = task->dst.rect.w = UINT_MAX;
	task->src.rect.h = task->dst.rect.h = UINT_MAX;
	task->transform.rotation = DRM_MODE_ROTATE_0;

	DRM_DEBUG_DRIVER("Allocated task %pK\n", task);

	return task;
}
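
/*
 * The UINT_MAX rectangle defaults set in exynos_drm_ipp_task_alloc() act as
 * "not provided" sentinels: exynos_drm_ipp_task_check() later replaces them
 * with the full buffer width/height.
 */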
static const struct exynos_drm_param_map {
	unsigned int id;
	unsigned int size;
	unsigned int offset;
} exynos_drm_ipp_params_maps[] = {
	{
		DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, src.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_BUFFER |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_buffer),
		offsetof(struct exynos_drm_ipp_task, dst.buf),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, src.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_RECTANGLE |
			DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION,
		sizeof(struct drm_exynos_ipp_task_rect),
		offsetof(struct exynos_drm_ipp_task, dst.rect),
	}, {
		DRM_EXYNOS_IPP_TASK_TRANSFORM,
		sizeof(struct drm_exynos_ipp_task_transform),
		offsetof(struct exynos_drm_ipp_task, transform),
	}, {
		DRM_EXYNOS_IPP_TASK_ALPHA,
		sizeof(struct drm_exynos_ipp_task_alpha),
		offsetof(struct exynos_drm_ipp_task, alpha),
	},
};
static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
				   struct drm_exynos_ioctl_ipp_commit *arg)
{
	const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps;
	void __user *params = (void __user *)(unsigned long)arg->params_ptr;
	unsigned int size = arg->params_size;
	uint32_t id;
	int i;

	while (size) {
		if (get_user(id, (uint32_t __user *)params))
			return -EFAULT;

		for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++)
			if (map[i].id == id)
				break;
		if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) ||
		    map[i].size > size)
			return -EINVAL;

		if (copy_from_user((void *)task + map[i].offset, params,
				   map[i].size))
			return -EFAULT;

		params += map[i].size;
		size -= map[i].size;
	}

	DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
	return 0;
}
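
/*
 * Example (sketch): the params blob parsed above is a packed sequence of
 * the uapi task structures, each starting with its id word, which is how
 * the map lookup dispatches them. A hypothetical userspace fragment
 * passing a single transform parameter (assuming the struct layout from
 * uapi/drm/exynos_drm.h) could look like:
 *
 *	struct drm_exynos_ipp_task_transform t = {
 *		.id = DRM_EXYNOS_IPP_TASK_TRANSFORM,
 *		.rotation = DRM_MODE_ROTATE_90,
 *	};
 *
 *	arg.params_ptr = (uint64_t)(unsigned long)&t;
 *	arg.params_size = sizeof(t);
 */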
static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
					    struct drm_file *filp)
{
	int ret = 0;
	int i;

	/* get GEM buffers and check their size */
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int height = (i == 0) ? buf->buf.height :
			     DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
		unsigned long size = height * buf->buf.pitch[i];
		struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
							    buf->buf.gem_id[i]);
		if (!gem) {
			ret = -ENOENT;
			goto gem_free;
		}
		buf->exynos_gem[i] = gem;

		if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
			i++;
			ret = -EINVAL;
			goto gem_free;
		}
		buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr +
				   buf->buf.offset[i];
	}

	return 0;
gem_free:
	while (i--) {
		exynos_drm_gem_put(buf->exynos_gem[i]);
		buf->exynos_gem[i] = NULL;
	}
	return ret;
}
static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
{
	int i;

	if (!buf->exynos_gem[0])
		return;
	for (i = 0; i < buf->format->num_planes; i++)
		exynos_drm_gem_put(buf->exynos_gem[i]);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
				     struct exynos_drm_ipp_task *task)
{
	DRM_DEBUG_DRIVER("Freeing task %pK\n", task);

	exynos_drm_ipp_task_release_buf(&task->src);
	exynos_drm_ipp_task_release_buf(&task->dst);
	if (task->event)
		drm_event_cancel_free(ipp->dev, &task->event->base);
	kfree(task);
}
struct drm_ipp_limit {
	struct drm_exynos_ipp_limit_val h;
	struct drm_exynos_ipp_limit_val v;
};
enum drm_ipp_size_id {
	IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
	[IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
	[IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED,
				DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
				DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
};
static inline void __limit_set_val(unsigned int *ptr, unsigned int val)
{
	if (!*ptr)
		*ptr = val;
}
static void __get_size_limit(const struct drm_exynos_ipp_limit *limits,
			     unsigned int num_limits, enum drm_ipp_size_id id,
			     struct drm_ipp_limit *res)
{
	const struct drm_exynos_ipp_limit *l = limits;
	int i = 0;

	memset(res, 0, sizeof(*res));
	for (i = 0; limit_id_fallback[id][i]; i++)
		for (l = limits; l - limits < num_limits; l++) {
			if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) !=
			      DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) ||
			    ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) !=
						     limit_id_fallback[id][i]))
				continue;
			__limit_set_val(&res->h.min, l->h.min);
			__limit_set_val(&res->h.max, l->h.max);
			__limit_set_val(&res->h.align, l->h.align);
			__limit_set_val(&res->v.min, l->v.min);
			__limit_set_val(&res->v.max, l->v.max);
			__limit_set_val(&res->v.align, l->v.align);
		}
}
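
/*
 * __get_size_limit() walks limit_id_fallback[] from the most specific size
 * limit to the most generic one; since __limit_set_val() only fills fields
 * that are still zero, a specific limit (e.g. ROTATED) takes precedence
 * and the generic ones (AREA, BUFFER) merely fill in the remaining gaps.
 */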
static inline bool __align_check(unsigned int val, unsigned int align)
{
	if (align && (val & (align - 1))) {
		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n",
				 val, align);
		return false;
	}
	return true;
}
static inline bool __size_limit_check(unsigned int val,
				      struct drm_exynos_ipp_limit_val *l)
{
	if ((l->min && val < l->min) || (l->max && val > l->max)) {
		DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n",
				 val, l->min, l->max);
		return false;
	}
	return __align_check(val, l->align);
}
static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
	const struct drm_exynos_ipp_limit *limits, unsigned int num_limits,
	bool rotate, bool swap)
{
	enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
	struct drm_ipp_limit l;
	struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
	int real_width = buf->buf.pitch[0] / buf->format->cpp[0];

	if (!limits)
		return 0;

	__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
	if (!__size_limit_check(real_width, &l.h) ||
	    !__size_limit_check(buf->buf.height, &l.v))
		return -EINVAL;

	if (swap) {
		lv = &l.h;
		lh = &l.v;
	}
	__get_size_limit(limits, num_limits, id, &l);
	if (!__size_limit_check(buf->rect.w, lh) ||
	    !__align_check(buf->rect.x, lh->align) ||
	    !__size_limit_check(buf->rect.h, lv) ||
	    !__align_check(buf->rect.y, lv->align))
		return -EINVAL;

	return 0;
}
static inline bool __scale_limit_check(unsigned int src, unsigned int dst,
				       unsigned int min, unsigned int max)
{
	if ((max && (dst << 16) > src * max) ||
	    (min && (dst << 16) < src * min)) {
		DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n",
				 src, dst,
				 min >> 16, 100000 * (min & 0xffff) / (1 << 16),
				 max >> 16, 100000 * (max & 0xffff) / (1 << 16));
		return false;
	}
	return true;
}
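
/*
 * Scaling ratio limits are 16.16 fixed-point numbers: the check above
 * compares dst/src against min/max with both sides multiplied by 2^16,
 * and the debug message converts the fractional part to five decimal
 * digits for readability.
 */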
static int exynos_drm_ipp_check_scale_limits(
				struct drm_exynos_ipp_task_rect *src,
				struct drm_exynos_ipp_task_rect *dst,
				const struct drm_exynos_ipp_limit *limits,
				unsigned int num_limits, bool swap)
{
	const struct drm_exynos_ipp_limit_val *lh, *lv;
	int dw, dh;

	for (; num_limits; limits++, num_limits--)
		if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) ==
		    DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE)
			break;
	if (!num_limits)
		return 0;

	lh = (!swap) ? &limits->h : &limits->v;
	lv = (!swap) ? &limits->v : &limits->h;
	dw = (!swap) ? dst->w : dst->h;
	dh = (!swap) ? dst->h : dst->w;

	if (!__scale_limit_check(src->w, dw, lh->min, lh->max) ||
	    !__scale_limit_check(src->h, dh, lv->min, lv->max))
		return -EINVAL;

	return 0;
}
static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
				       struct exynos_drm_ipp_buffer *buf,
				       struct exynos_drm_ipp_buffer *src,
				       struct exynos_drm_ipp_buffer *dst,
				       bool rotate, bool swap)
{
	const struct exynos_drm_ipp_formats *fmt;
	int ret, i;

	fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
			       buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
					    DRM_EXYNOS_IPP_FORMAT_DESTINATION);
	if (!fmt) {
		DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
				 buf == src ? "src" : "dst");
		return -EINVAL;
	}

	/* basic checks */
	if (buf->buf.width == 0 || buf->buf.height == 0)
		return -EINVAL;

	buf->format = drm_format_info(buf->buf.fourcc);
	for (i = 0; i < buf->format->num_planes; i++) {
		unsigned int width = (i == 0) ? buf->buf.width :
			     DIV_ROUND_UP(buf->buf.width, buf->format->hsub);

		if (buf->buf.pitch[i] == 0)
			buf->buf.pitch[i] = width * buf->format->cpp[i];
		if (buf->buf.pitch[i] < width * buf->format->cpp[i])
			return -EINVAL;
		if (!buf->buf.gem_id[i])
			return -ENOENT;
	}

	/* pitch for additional planes must match */
	if (buf->format->num_planes > 2 &&
	    buf->buf.pitch[1] != buf->buf.pitch[2])
		return -EINVAL;

	/* check driver limits */
	ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
					       fmt->num_limits,
					       rotate,
					       buf == dst ? swap : false);
	if (ret)
		return ret;
	ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
						fmt->limits,
						fmt->num_limits, swap);
	return ret;
}
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	unsigned int rotation = task->transform.rotation;
	int ret = 0;
	bool swap = drm_rotation_90_or_270(rotation);
	bool rotate = (rotation != DRM_MODE_ROTATE_0);
	bool scale = false;

	DRM_DEBUG_DRIVER("Checking task %pK\n", task);

	if (src->rect.w == UINT_MAX)
		src->rect.w = src->buf.width;
	if (src->rect.h == UINT_MAX)
		src->rect.h = src->buf.height;
	if (dst->rect.w == UINT_MAX)
		dst->rect.w = dst->buf.width;
	if (dst->rect.h == UINT_MAX)
		dst->rect.h = dst->buf.height;

	if (src->rect.x + src->rect.w > (src->buf.width) ||
	    src->rect.y + src->rect.h > (src->buf.height) ||
	    dst->rect.x + dst->rect.w > (dst->buf.width) ||
	    dst->rect.y + dst->rect.h > (dst->buf.height)) {
		DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
				 task);
		return -EINVAL;
	}

	if ((!swap && (src->rect.w != dst->rect.w ||
		       src->rect.h != dst->rect.h)) ||
	    (swap && (src->rect.w != dst->rect.h ||
		      src->rect.h != dst->rect.w)))
		scale = true;

	if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) &&
	     (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
	    (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
	     src->buf.fourcc != dst->buf.fourcc)) {
		DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
		return -EINVAL;
	}

	ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
	if (ret)
		return ret;

	ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);

	return ret;
}
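
/*
 * Note on the two exynos_drm_ipp_check_format() calls above: the source
 * buffer is validated against the rotated-size limits when a rotation is
 * requested, while the destination is validated against the plain limits,
 * but with its horizontal/vertical limits swapped for 90/270 degree
 * rotations (the "buf == dst ? swap : false" argument).
 */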
static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
					     struct drm_file *filp)
{
	struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
	int ret = 0;

	DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);

	ret = exynos_drm_ipp_task_setup_buffer(src, filp);
	if (ret) {
		DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
		return ret;
	}
	ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
	if (ret) {
		DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
		return ret;
	}

	DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);

	return ret;
}
static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
				struct drm_file *file_priv, uint64_t user_data)
{
	struct drm_pending_exynos_ipp_event *e = NULL;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
				     &e->event.base);
	if (ret)
		goto free;

	task->event = e;
	return 0;
free:
	kfree(e);
	return ret;
}
static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
{
	struct timespec64 now;

	ktime_get_ts64(&now);
	task->event->event.tv_sec = now.tv_sec;
	task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);

	drm_send_event(task->dev, &task->event->base);
}
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
{
	int ret = task->ret;

	if (ret == 0 && task->event) {
		exynos_drm_ipp_event_send(task);
		/* ensure event won't be canceled on task free */
		task->event = NULL;
	}

	exynos_drm_ipp_task_free(task->ipp, task);
	return ret;
}
static void exynos_drm_ipp_cleanup_work(struct work_struct *work)
{
	struct exynos_drm_ipp_task *task = container_of(work,
				struct exynos_drm_ipp_task, cleanup_work);

	exynos_drm_ipp_task_cleanup(task);
}
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp);

/**
 * exynos_drm_ipp_task_done - finish given task and set return code
 * @task: ipp task to finish
 * @ret: error code or 0 if operation has been performed successfully
 */
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
{
	struct exynos_drm_ipp *ipp = task->ipp;
	unsigned long flags;

	DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);

	spin_lock_irqsave(&ipp->lock, flags);
	if (ipp->task == task)
		ipp->task = NULL;
	task->flags |= DRM_EXYNOS_IPP_TASK_DONE;
	task->ret = ret;
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
	wake_up(&ipp->done_wq);

	if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) {
		INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work);
		schedule_work(&task->cleanup_work);
	}
}
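
/*
 * exynos_drm_ipp_task_done() is written to be callable from the driver's
 * completion interrupt (note the irqsave locking); for asynchronous tasks
 * the actual cleanup, which sends the completion event and releases the
 * GEM buffers, is therefore deferred to the system workqueue via
 * cleanup_work rather than done in that context.
 */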
static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
{
	struct exynos_drm_ipp_task *task;
	unsigned long flags;
	int ret;

	DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);

	spin_lock_irqsave(&ipp->lock, flags);

	if (ipp->task || list_empty(&ipp->todo_list)) {
		spin_unlock_irqrestore(&ipp->lock, flags);
		return;
	}

	task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task,
				head);
	list_del_init(&task->head);
	ipp->task = task;

	spin_unlock_irqrestore(&ipp->lock, flags);

	DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);

	ret = ipp->funcs->commit(ipp, task);
	if (ret)
		exynos_drm_ipp_task_done(task, ret);
}
static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp,
					 struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	list_add(&task->head, &ipp->todo_list);
	spin_unlock_irqrestore(&ipp->lock, flags);

	exynos_drm_ipp_next_task(ipp);
}
static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp,
				      struct exynos_drm_ipp_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&ipp->lock, flags);
	if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) {
		/* already completed task */
		exynos_drm_ipp_task_cleanup(task);
	} else if (ipp->task != task) {
		/* task has not been scheduled for execution yet */
		list_del_init(&task->head);
		exynos_drm_ipp_task_cleanup(task);
	} else {
		/*
		 * currently processed task, call abort() and perform
		 * cleanup with async worker
		 */
		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		spin_unlock_irqrestore(&ipp->lock, flags);
		if (ipp->funcs->abort)
			ipp->funcs->abort(ipp, task);
		return;
	}
	spin_unlock_irqrestore(&ipp->lock, flags);
}
/**
 * exynos_drm_ipp_commit_ioctl - perform image processing operation
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Construct an ipp task from the set of properties provided by the user
 * and try to schedule it to framebuffer processor hardware.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_ioctl_ipp_commit *arg = data;
	struct exynos_drm_ipp *ipp;
	struct exynos_drm_ipp_task *task;
	int ret = 0;

	if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved)
		return -EINVAL;

	/* can't test and expect an event at the same time */
	if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) &&
	    (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT))
		return -EINVAL;

	ipp = __ipp_get(arg->ipp_id);
	if (!ipp)
		return -ENOENT;

	task = exynos_drm_ipp_task_alloc(ipp);
	if (!task)
		return -ENOMEM;

	ret = exynos_drm_ipp_task_set(task, arg);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_check(task);
	if (ret)
		goto free;

	ret = exynos_drm_ipp_task_setup_buffers(task, file_priv);
	if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY)
		goto free;

	if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) {
		ret = exynos_drm_ipp_event_create(task, file_priv,
						  arg->user_data);
		if (ret)
			goto free;
	}

	/*
	 * Queue the task for processing on the hardware. The task object
	 * will then be freed after exynos_drm_ipp_task_done().
	 */
	if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
		DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
				 ipp->id, task);

		task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
		exynos_drm_ipp_schedule_task(task->ipp, task);
		ret = 0;
	} else {
		DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
				 task);
		exynos_drm_ipp_schedule_task(ipp, task);
		ret = wait_event_interruptible(ipp->done_wq,
					task->flags & DRM_EXYNOS_IPP_TASK_DONE);
		if (ret)
			exynos_drm_ipp_task_abort(ipp, task);
		else
			ret = exynos_drm_ipp_task_cleanup(task);
	}
	return ret;
free:
	exynos_drm_ipp_task_free(ipp, task);

	return ret;
}
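
/*
 * Example (sketch): a minimal, hypothetical userspace commit of a task to
 * the first ipp module, assuming the uapi definitions from
 * uapi/drm/exynos_drm.h, ipp_ids[] from the enumeration example above and
 * a params blob packed as described at exynos_drm_ipp_task_set(); error
 * handling is omitted:
 *
 *	struct drm_exynos_ioctl_ipp_commit arg = {
 *		.ipp_id = ipp_ids[0],
 *		.params_size = sizeof(params),
 *		.params_ptr = (uint64_t)(unsigned long)&params,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_COMMIT, &arg);
 */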