drivers/gpu/drm/exynos/exynos_drm_ipp.c

/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image
 * scaler/rotator and input/output DMA operations using FIMC, GSC,
 * Rotator, and so on. IPP is an integration device driver for
 * hardware blocks that share these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open against multi-open.
 * 7. implement power and sysmmu control for power_on.
 */

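/*
 * Rough userspace flow, as implied by the ioctl handlers below (a
 * sketch, not a normative API description): the application first
 * queries capabilities with the get_property ioctl, then creates a
 * command node with set_property, enqueues source/destination buffers
 * with queue_buf, and finally drives the operation with cmd_ctrl
 * (PLAY/STOP/PAUSE/RESUME).
 */
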
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 * @filp: a pointer to drm_file.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
	struct drm_file		*filp;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: sub driver registered with the exynos drm core.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	if (!ippdrv)
		return -EINVAL;

	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

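/*
 * Allocate an idr slot for @obj (ids start at 1; 0 is treated as
 * "invalid" by the callers) and return the new id through @idp.
 */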
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
		u32 *idp)
{
	int ret;

	/* do the allocation under our mutex lock */
	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	*idp = ret;
	return 0;
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	DRM_DEBUG_KMS("id[%d]\n", id);

	mutex_lock(lock);

	/* find object using handle */
	obj = idr_find(id_idr, id);
	if (!obj) {
		DRM_ERROR("failed to find object.\n");
		mutex_unlock(lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_unlock(lock);

	return obj;
}

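/*
 * Returns true when the driver cannot take a new operation: either it
 * is already dedicated to a caller, or it is running a WB/OUTPUT
 * (non-M2M) command and has therefore not been runtime-suspended.
 */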
static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
		enum drm_exynos_ipp_cmd cmd)
{
	/*
	 * check dedicated flag and WB, OUTPUT operation with
	 * power on state.
	 */
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
	    !pm_runtime_suspended(ippdrv->dev)))
		return true;

	return false;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;

	DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);

	if (ipp_id) {
		/* find ipp driver using idr */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
			ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
			return ippdrv;
		}

		/*
		 * WB and OUTPUT operations do not support multi-operation,
		 * so the driver is marked dedicated in the set property
		 * ioctl and the flag is cleared once it finishes.
		 */
		if (ipp_check_dedicated(ippdrv, property->cmd)) {
			DRM_ERROR("already dedicated to another caller.\n");
			return ERR_PTR(-EBUSY);
		}

		/*
		 * This is necessary to find the correct device among the
		 * ipp drivers: they have different abilities, so the
		 * property has to be checked.
		 */
		if (ippdrv->check_property &&
		    ippdrv->check_property(ippdrv->dev, property)) {
			DRM_ERROR("property not supported.\n");
			return ERR_PTR(-EINVAL);
		}

		return ippdrv;
	} else {
		/*
		 * The user application did not set an ipp_id, so search
		 * the whole driver list for a driver that can handle the
		 * property.
		 */
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			if (ipp_check_dedicated(ippdrv, property->cmd)) {
				DRM_DEBUG_KMS("device already in use.\n");
				continue;
			}

			if (ippdrv->check_property &&
			    ippdrv->check_property(ippdrv->dev, property)) {
				DRM_DEBUG_KMS("property not supported.\n");
				continue;
			}

			return ippdrv;
		}

		DRM_ERROR("no ipp driver supports these operations.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Search the ipp driver by the prop_id handle. The ipp
	 * subsystem needs this lookup in several paths, e.g. the PAUSE
	 * state, queue buf and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		if (!list_empty(&ippdrv->cmd_list)) {
			list_for_each_entry(c_node, &ippdrv->cmd_list, list)
				if (c_node->property.prop_id == prop_id)
					return ippdrv;
		}
	}

	return ERR_PTR(-ENODEV);
}

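/*
 * get_property ioctl: with ipp_id == 0 it reports how many ipp
 * drivers are registered; with a specific ipp_id it looks up that
 * driver's capability list.
 */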
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;
		/*
		 * Report the ippdrv count to the user application: in the
		 * first step it gets the count, in the second step it
		 * queries each driver's capability using the ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get the ippdrv capability by ipp_id. Some devices do
		 * not support the wb or output interface, so the user
		 * application detects the correct ipp driver using this
		 * ioctl.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (IS_ERR(ippdrv)) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return PTR_ERR(ippdrv);
		}

		prop_list = ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id = property->prop_id;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	ippdrv = ipp_find_drv_by_handle(prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/*
	 * Find the command node in the ippdrv command list using
	 * prop_id; when found, store the new property information in
	 * that command node.
	 */
	list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
		if ((c_node->property.prop_id == prop_id) &&
		    (c_node->state == IPP_STATE_STOP)) {
			DRM_DEBUG_KMS("found cmd[%d]ippdrv[0x%x]\n",
				property->cmd, (int)ippdrv);

			c_node->property = *property;
			return 0;
		}
	}

	DRM_ERROR("failed to search property.\n");

	return -EINVAL;
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)event_work, ipp_sched_event);

	return event_work;
}

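/*
 * set_property ioctl: allocates a command node for the property (or,
 * if prop_id is already set, e.g. in the PAUSE case, updates the
 * existing node), binds it to a matching ipp driver and prepares the
 * start/stop/event works.
 */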
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	/*
	 * This is a log print for the property the user application
	 * set; applications set various properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * The set property ioctl normally generates a new prop_id, but
	 * in some cases a prop_id was already assigned by an earlier
	 * set property call, e.g. the PAUSE state. In that case find
	 * the current prop_id and use it instead of allocating one.
	 */
	if (property->prop_id) {
		DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
		return ipp_find_and_set_property(property);
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	/* create property id */
	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
		&property->prop_id);
	if (ret) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->priv = priv;
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_clear;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->cmd_lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	list_splice_init(&priv->event_list, &c_node->event_list);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_clear:
	kfree(c_node);
	return ret;
}

static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
{
	/* delete list */
	list_del(&c_node->list);

	/* destroy mutex */
	mutex_destroy(&c_node->cmd_lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };

	mutex_lock(&c_node->mem_lock);

	for_each_ipp_ops(i) {
		/* source/destination memory list */
		head = &c_node->mem_list[i];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("%s memory empty.\n", i ? "dst" : "src");
			continue;
		}

		/* find memory node entry */
		list_for_each_entry(m_node, head, list) {
			DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
				i ? "dst" : "src", count[i], (int)m_node);
			count[i]++;
		}
	}

	DRM_DEBUG_KMS("min[%d]max[%d]\n",
		min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
		max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));

	/*
	 * M2M operations need paired src/dst memory addresses, so check
	 * the minimum of the src and dst counts. The other cases do not
	 * use paired memory, so use the maximum count.
	 */
	if (ipp_is_m2m_cmd(property->cmd))
		ret = min(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);
	else
		ret = max(count[EXYNOS_DRM_OPS_SRC],
			count[EXYNOS_DRM_OPS_DST]);

	mutex_unlock(&c_node->mem_lock);

	return ret;
}

static struct drm_exynos_ipp_mem_node
	*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		ret = -EFAULT;
		goto err_unlock;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			goto err_unlock;
		}
	}

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ret;
}

static struct drm_exynos_ipp_mem_node
	*ipp_get_mem_node(struct drm_device *drm_dev,
	struct drm_file *file,
	struct drm_exynos_ipp_cmd_node *c_node,
	struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info buf_info;
	void *addr;
	int i;

	mutex_lock(&c_node->mem_lock);

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		goto err_unlock;

	/* clear base address for error handling */
	memset(&buf_info, 0x0, sizeof(buf_info));

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], file);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				goto err_clear;
			}

			buf_info.handles[i] = qbuf->handle[i];
			buf_info.base[i] = *(dma_addr_t *) addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
				i, buf_info.base[i], (int)buf_info.handles[i]);
		}
	}

	m_node->filp = file;
	m_node->buf_info = buf_info;
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);

	mutex_unlock(&c_node->mem_lock);
	return m_node;

err_clear:
	kfree(m_node);
err_unlock:
	mutex_unlock(&c_node->mem_lock);
	return ERR_PTR(-EFAULT);
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	if (list_empty(&m_node->list)) {
		DRM_ERROR("empty memory node.\n");
		return -ENOMEM;
	}

	mutex_lock(&c_node->mem_lock);

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
				m_node->filp);
	}

	/* delete list in queue */
	list_del(&m_node->list);
	kfree(m_node);

	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event_list is empty.\n");
		return;
	}

	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * qbuf == NULL means delete all events: the stop
		 * operation wants to flush the whole event list. In the
		 * other case only the event with the same buf id is
		 * deleted.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			return;
		}
	}
}

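/* fill in the command work and queue it on the context's command workqueue */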
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started from queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
		/* delete list */
		list_for_each_entry_safe(m_node, tm_node,
			&c_node->mem_list[qbuf->ops_id], list) {
			if (m_node->buf_id == qbuf->buf_id &&
			    m_node->ops_id == qbuf->ops_id)
				ipp_put_mem_node(drm_dev, c_node, m_node);
		}
	}
}

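/*
 * queue_buf ioctl: enqueues or dequeues a source/destination buffer
 * for a command node. Enqueuing a destination buffer also allocates
 * the completion event that is later sent back to userspace.
 */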
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("failed to get command node.\n");
		return PTR_ERR(c_node);
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * The first step gets an event for the destination
		 * buffer; the second step, in the M2M case, runs with
		 * the destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs the play control for the
			 * streaming feature; the other cases just set
			 * the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

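/*
 * Validate a requested control transition against the current node
 * state (e.g. PLAY is only allowed from IDLE, RESUME only from STOP).
 */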
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

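/*
 * cmd_ctrl ioctl: drives a command node through its life cycle. PLAY
 * and RESUME queue the start work; STOP and PAUSE queue the stop work
 * and wait (with a timeout) for the command thread to complete it.
 */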
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (IS_ERR(c_node)) {
		DRM_ERROR("invalid command node list.\n");
		return PTR_ERR(c_node);
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);
		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		c_node->state = IPP_STATE_START;
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		ipp_clean_cmd_node(c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		DRM_ERROR("failed to reset.\n");
		return -EINVAL;
	}

	/* set source/destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret) {
				DRM_ERROR("not support format.\n");
				return ret;
			}
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret) {
				DRM_ERROR("not support transform.\n");
				return -EINVAL;
			}
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret) {
				DRM_ERROR("not support size.\n");
				return ret;
			}
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret = 0, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* put event */
	ipp_put_event(c_node, NULL);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			if (list_empty(head)) {
				DRM_DEBUG_KMS("mem_list is empty.\n");
				break;
			}

			list_for_each_entry_safe(m_node, tm_node,
				head, list) {
				ret = ipp_put_mem_node(drm_dev, c_node,
					m_node);
				if (ret) {
					DRM_ERROR("failed to put m_node.\n");
					goto err_clear;
				}
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		if (list_empty(head)) {
			DRM_DEBUG_KMS("mem_list is empty.\n");
			break;
		}

		list_for_each_entry_safe(m_node, tm_node, head, list) {
			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to put m_node.\n");
				goto err_clear;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_clear;
	}

err_clear:
	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	return ret;
}

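/*
 * Command worker: runs on the ipp_cmd workqueue and performs the
 * actual start/stop of a property under cmd_lock, signalling the
 * stop_complete completion for STOP/PAUSE requests.
 */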
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case is a single-unit operation with multiple
		 * queues, so it has to wait for completion of the data
		 * transfer here.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}

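/*
 * Dequeue the memory nodes that the hardware just finished with, fill
 * in the matching pending event and move it onto the file's event
 * list so that userspace can read it.
 */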
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps the event list for destination
	 * buffers: whenever a destination buffer is enqueued to the mem
	 * list, an event is created and linked to the tail of the event
	 * list. So take the first event for the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}

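/*
 * Event worker: runs on the ipp_event workqueue and forwards a
 * completed buffer to userspace via ipp_send_event, completing the
 * start_complete completion for M2M commands.
 */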
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		(struct drm_exynos_ipp_event_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	mutex_lock(&c_node->event_lock);

	/*
	 * IPP synchronizes the command and event threads. If userland
	 * closes the device immediately, the event thread still has to
	 * signal the completion so that the command thread can finish,
	 * instead of carrying on with the operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);

	mutex_unlock(&c_node->event_lock);
}

static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
			&ippdrv->ipp_id);
		if (ret) {
			DRM_ERROR("failed to create id.\n");
			goto err_idr;
		}

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ippdrv->ipp_id);

		if (ippdrv->ipp_id == 0) {
			DRM_ERROR("failed to get ipp_id[%d]\n",
				ippdrv->ipp_id);
			ret = -EINVAL;
			goto err_idr;
		}

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err_iommu;
			}
		}
	}

	return 0;

err_iommu:
	/* get ipp driver entry */
	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

err_idr:
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);
	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;

	/* get ipp driver entry; use the safe variant since unregister unlinks */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->dev = dev;
	file_priv->ipp_priv = priv;

	INIT_LIST_HEAD(&priv->event_list);

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("ippdrv_list is empty.\n");
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * Userland went into an abnormal state:
				 * the process was killed and the file
				 * closed, so IPP never received the stop
				 * cmd ctrl. Perform the stop operation
				 * here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events. IPP provides an event
	 * thread for the IPP drivers: a driver sends its event_work to
	 * this thread, and the event thread sends the event on to the
	 * user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands. IPP provides a
	 * command thread for the user process: the process creates a
	 * command node with the set property ioctl, builds a start_work
	 * and sends it to the command thread, which then starts the
	 * property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove, destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	/* power and sysmmu control is not implemented yet; see TODO item 7 */
	DRM_DEBUG_KMS("enable[%d]\n", enable);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipp_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (pm_runtime_suspended(dev))
		return 0;

	return ipp_power_ctrl(ctx, false);
}

static int ipp_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	if (!pm_runtime_suspended(dev))
		return ipp_power_ctrl(ctx, true);

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int ipp_runtime_suspend(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, false);
}

static int ipp_runtime_resume(struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	return ipp_power_ctrl(ctx, true);
}
#endif

static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};