/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using FIMC, GSC, Rotator, and so on.
 * IPP is an integration device driver for hardware blocks that share
 * these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open.
 * 7. implement power and sysmmu control in power_on.
 */
#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)
/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};
/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
};
/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return ret;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}
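/*
 * Note: idr_alloc(..., 1, 0, ...) hands out ids starting from 1, so an
 * id of 0 never names a stored object; exynos_drm_ipp_get_property()
 * relies on this by treating ipp_id == 0 as a request for the driver
 * count rather than a lookup.
 */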
static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				  !pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}
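/*
 * Driver selection policy above: a non-zero ipp_id selects exactly that
 * driver (subject to ipp_check_driver()), while ipp_id == 0 means "pick
 * the first registered driver that passes the check".
 */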
static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search for the ipp driver that owns the prop_id handle.
	 * The ipp subsystem sometimes needs to find a driver by prop_id,
	 * e.g. for the PAUSE state, queue buf, or command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}
int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the ippdrv count to the user application:
		 * in the first step the application reads the ippdrv count,
		 * and in the second step it queries each driver's
		 * capabilities by ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Get the ippdrv capabilities by ipp_id.
		 * Some devices do not support the writeback or output
		 * interfaces, so the user application uses ipp_id to
		 * detect the correct ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("not found ipp%d driver.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}
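/*
 * Illustrative userspace sketch of the two-step query described above
 * (an assumption-laden example, not code from this driver: it presumes
 * a valid DRM fd and that the ids assigned at probe time, starting at 1,
 * are all still registered):
 *
 *	struct drm_exynos_ipp_prop_list plist = { .ipp_id = 0 };
 *	__u32 i, count;
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	count = plist.count;
 *	for (i = 1; i <= count; i++) {
 *		plist.ipp_id = i;
 *		ioctl(fd, DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY, &plist);
 *	}
 */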
static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}
static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&event_work->work, ipp_sched_event);

	return event_work;
}
int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	prop_id = property->prop_id;

	/*
	 * Log the property that the user application is setting;
	 * applications set a variety of properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * If prop_id is not zero, try to update an existing property.
	 */
	if (prop_id) {
		c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);

		if (!c_node || c_node->filp != file) {
			DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
			return -EINVAL;
		}

		if (c_node->state != IPP_STATE_STOP) {
			DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
			return -EINVAL;
		}

		c_node->property = *property;

		return 0;
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
	if (ret < 0) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}
	property->prop_id = ret;

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* stored property information and ippdrv in private data */
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;
	c_node->filp = file;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* make dedicated state without m2m */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}
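/*
 * Illustrative userspace sketch of creating a new M2M property (the
 * field values here are placeholders, not a validated configuration):
 *
 *	struct drm_exynos_ipp_property prop = {
 *		.cmd = IPP_CMD_M2M,
 *		.ipp_id = 0,	(0 lets the kernel pick a driver)
 *		.prop_id = 0,	(0 requests a new command node)
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
 *
 * On success, prop.prop_id holds the handle that the queue_buf and
 * cmd_ctrl ioctls below take.
 */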
static int ipp_validate_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_config *ipp_cfg;
	unsigned int num_plane;
	unsigned long size, buf_size = 0, plane_size, img_size = 0;
	unsigned int bpp, width, height;
	int i;

	ipp_cfg = &c_node->property.config[m_node->ops_id];
	num_plane = drm_format_num_planes(ipp_cfg->fmt);

	/*
	 * This is a rather simplified validation of a memory node.
	 * It basically verifies the provided gem object handles and the
	 * buffer sizes against the current configuration. This is not
	 * the best that can be done, but it seems more than enough for
	 * now.
	 */
	for (i = 0; i < num_plane; ++i) {
		width = ipp_cfg->sz.hsize;
		height = ipp_cfg->sz.vsize;
		bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);

		/*
		 * The result of drm_format_plane_cpp() for chroma planes
		 * must be combined with drm_format_xxxx_chroma_subsampling()
		 * for a correct result.
		 */
		if (i > 0) {
			width /= drm_format_horz_chroma_subsampling(
								ipp_cfg->fmt);
			height /= drm_format_vert_chroma_subsampling(
								ipp_cfg->fmt);
		}
		plane_size = width * height * bpp;
		img_size += plane_size;

		if (m_node->buf_info.handles[i]) {
			size = exynos_drm_gem_get_size(drm_dev,
						m_node->buf_info.handles[i],
						c_node->filp);
			if (plane_size > size) {
				DRM_ERROR(
					"buffer %d is smaller than required\n",
					i);
				return -EINVAL;
			}

			buf_size += size;
		}
	}

	if (buf_size < img_size) {
		DRM_ERROR("size of buffers(%lu) is smaller than image(%lu)\n",
			buf_size, img_size);
		return -EINVAL;
	}

	return 0;
}
static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];

		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							c_node->filp);
	}

	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}
static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;
	INIT_LIST_HEAD(&m_node->list);

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], c_node->filp);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				ipp_put_mem_node(drm_dev, c_node, m_node);
				return ERR_PTR(-EFAULT);
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
				buf_info->base[i], buf_info->handles[i]);
		}
	}

	mutex_lock(&c_node->mem_lock);
	if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
		ipp_put_mem_node(drm_dev, c_node, m_node);
		mutex_unlock(&c_node->mem_lock);
		return ERR_PTR(-EFAULT);
	}
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;
}
static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct list_head *head = &c_node->mem_list[ops];

	mutex_lock(&c_node->mem_lock);

	list_for_each_entry_safe(m_node, tm_node, head, list) {
		int ret;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
	}

	mutex_unlock(&c_node->mem_lock);
}
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		c_node->filp->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = c_node->filp;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}
static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * A NULL qbuf means all events should be deleted: stop
		 * operations want to flush the whole event list. Otherwise
		 * delete only the event with the matching buf id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
}
static void ipp_clean_cmd_node(struct ipp_context *ctx,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	int i;

	/* cancel works */
	cancel_work_sync(&c_node->start_work->work);
	cancel_work_sync(&c_node->stop_work->work);
	cancel_work_sync(&c_node->event_work->work);

	/* put event */
	ipp_put_event(c_node, NULL);

	for_each_ipp_ops(i)
		ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);

	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}
static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
			!list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}
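/*
 * Summary of the check above: IPP_CMD_WB needs only a destination queue,
 * IPP_CMD_OUTPUT only a source queue, and the default (M2M) case needs
 * both source and destination buffers queued before a run can start.
 */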
static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}
static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("not support ops.\n");
		return -EFAULT;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret)
			DRM_ERROR("failed to set addr.\n");
	}

	return ret;
}
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, &cmd_work->work);
}
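/*
 * Note: ctx->cmd_workq is created single threaded in ipp_probe(), so
 * queueing start_work and stop_work through this helper serializes all
 * command handling on one kernel thread.
 */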
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * If the destination buffer is set and the clock is enabled,
	 * m2m operations need to be started here at queue_buf.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}
static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * First get an event for the destination buffer; then,
		 * in the M2M case, run with the destination buffer if
		 * needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs play control for streaming;
			 * the other cases just set the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");
	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
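/*
 * Illustrative userspace sketch of enqueueing one destination buffer on
 * an existing property (gem_handle is a placeholder GEM handle owned by
 * the same file):
 *
 *	struct drm_exynos_ipp_queue_buf qb = {
 *		.prop_id = prop.prop_id,
 *		.ops_id = EXYNOS_DRM_OPS_DST,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.buf_id = 0,
 *		.handle[EXYNOS_DRM_PLANAR_Y] = gem_handle,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qb);
 */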
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}
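/*
 * Transitions allowed by the check above: PLAY only from IDLE, STOP from
 * anything but STOP, PAUSE only from START, and RESUME only from STOP.
 * Everything except PLAY additionally requires the device not to be
 * runtime suspended.
 */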
int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("could not support this state currently.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}
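/*
 * Illustrative userspace sketch of starting and stopping a property:
 *
 *	struct drm_exynos_ipp_cmd_ctrl cc = {
 *		.prop_id = prop.prop_id,
 *		.ctrl = IPP_CTRL_PLAY,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &cc);
 *	(queue buffers, read events, ...)
 *	cc.ctrl = IPP_CTRL_STOP;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &cc);
 */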
int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
			&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
			&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
			&exynos_drm_ippnb_list, val, v);
}
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		return -EINVAL;
	}

	/* set source, destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("not support ops and config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret)
				return ret;
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret)
				return ret;
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}
static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	int i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i)
			ipp_clean_mem_nodes(drm_dev, c_node, i);
		break;
	case IPP_CMD_WB:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
		break;
	case IPP_CMD_OUTPUT:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	return 0;
}
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		container_of(work, struct drm_exynos_ipp_cmd_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case waits for completion of the transfer:
		 * M2M performs a single unit of operation over the queued
		 * buffers, so it must wait until the data transfer is done.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * A command node keeps an event list for destination buffers.
	 * When a destination buffer is enqueued to the mem list, an event
	 * is created and linked to the tail of the event list, so the
	 * first event always matches the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}
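/*
 * Userspace receives these events by read()ing the DRM fd, e.g. (an
 * illustrative sketch; error handling and short reads are omitted):
 *
 *	char buf[128];
 *	struct drm_event *ev = (struct drm_event *)buf;
 *
 *	read(fd, buf, sizeof(buf));
 *	if (ev->type == DRM_EXYNOS_IPP_EVENT) {
 *		struct drm_exynos_ipp_event *ipp = (void *)ev;
 *		(use ipp->prop_id and ipp->buf_id[EXYNOS_DRM_OPS_DST])
 *	}
 */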
void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		container_of(work, struct drm_exynos_ipp_event_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP synchronizes the command and event threads. If userland
	 * closes the device immediately, synchronize with the command
	 * thread by completing the event anyway instead of carrying out
	 * the operations.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* get ipp driver entry */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}
static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);

	return 0;
}
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->filp == file) {
				/*
				 * Userland may be in an abnormal state:
				 * the process was killed and the file
				 * closed, so the stop cmd ctrl was never
				 * issued. Perform the stop operation here.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}
}
static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread workqueue for ipp events.
	 * IPP drivers send event_work to this thread, and the event
	 * thread then delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread workqueue for ipp commands.
	 * The user process creates a command node with the set property
	 * ioctl and submits start_work to this thread, which then starts
	 * the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove, destroy ipp idr */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queues */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
	},
};