/*
 * TI OMAP3 ISP - Statistics core
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *           Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#define ISP_STAT_USES_DMAENGINE(stat)	((stat)->dma_ch != NULL)
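/*
 * Note (added for clarity): per the DMA mapping comment further below,
 * dma_ch is only set for the module that transfers its data through the
 * system DMA engine (the histogram engine); the AEWB and AF engines use
 * ISP DMA and keep dma_ch NULL.
 */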
/*
 * MAGIC_SIZE must always be the greatest common divisor of
 * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
 */
#define MAGIC_SIZE		16
#define MAGIC_NUM		0x55
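/*
 * Illustrative magic-word layout used by the helpers below (informational
 * comment, not from the original source). isp_stat_buf_insert_magic()
 * writes MAGIC_NUM over the first MAGIC_SIZE bytes of the buffer and over
 * MAGIC_SIZE bytes starting at offset buf_size:
 *
 *	offset 0       MAGIC_SIZE        buf_size    buf_size + MAGIC_SIZE
 *	|<-- MAGIC_NUM -->|... payload ...|<----- MAGIC_NUM ----->|
 *
 * The hardware is expected to overwrite the initial magic word when it
 * writes statistics and to leave the trailing one untouched; both
 * conditions are checked by isp_stat_buf_check_magic().
 */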
/* HACK: The AF module seems to write one more paxel of data than it should. */
#define AF_EXTRA_DATA		OMAP3ISP_AF_PAXEL_SIZE
/*
 * HACK: H3A modules go to an invalid state after an SBL overflow. The next
 * buffer then starts to be written at the point where the overflow occurred
 * instead of at the configured address. The only known way to return to a
 * valid state is to process a valid buffer, which requires at least a doubled
 * buffer size to avoid accessing an invalid memory region. That alone does
 * not fix everything: more than one consecutive SBL overflow may happen, in
 * which case it is unpredictable how many buffers the allocated memory must
 * fit. For that case a recover configuration was created. It produces the
 * minimum buffer size for each H3A module and decreases the chance of
 * further SBL overflows. This recover state is enabled every time an SBL
 * overflow occurs. As the output buffer size isn't big, it's possible to
 * reserve extra space able to fit many recover buffers, making an access to
 * an invalid memory region extremely unlikely.
 */
#define NUM_H3A_RECOVER_BUFS	10
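/*
 * Summary of the recovery flow described above, as implemented later in
 * this file (informational comment):
 *
 *	SBL overflow IRQ -> omap3isp_stat_sbl_overflow()
 *		-> atomic_set(&stat->buf_err, 2)   ignore the next 2 buffers
 *		-> stat->sbl_ovl_recover = 1       if a recover config exists
 *	next __stat_isr() -> setup_regs(stat, stat->recover_priv)
 *		-> stat->update = 1                switch back to the regular
 *						   configuration afterwards
 */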
/*
 * HACK: Because of HW issues the generic layer sometimes needs to have
 * different behaviour for different statistic modules.
 */
#define IS_H3A_AF(stat)		((stat) == &(stat)->isp->isp_af)
#define IS_H3A_AEWB(stat)	((stat) == &(stat)->isp->isp_aewb)
#define IS_H3A(stat)		(IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
static void __isp_stat_buf_sync_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf,
				      u32 buf_size, enum dma_data_direction dir,
				      void (*dma_sync)(struct device *,
						       dma_addr_t, unsigned long,
						       size_t,
						       enum dma_data_direction))
{
	/* Sync the initial and final magic words. */
	dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
	dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
		 buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
}
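/*
 * Note on the second dma_sync call above (added for clarity): the final
 * magic word starts at byte offset buf_size, which
 * dma_sync_single_range_for_{cpu,device}() expects split into a
 * page-aligned base (buf_size & PAGE_MASK) added to the DMA address, plus
 * an intra-page offset (buf_size & ~PAGE_MASK).
 */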
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
					       struct ispstat_buffer *buf,
					       u32 buf_size,
					       enum dma_data_direction dir)
{
	if (ISP_STAT_USES_DMAENGINE(stat))
		return;

	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
				  dma_sync_single_range_for_device);
}
static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
					    struct ispstat_buffer *buf,
					    u32 buf_size,
					    enum dma_data_direction dir)
{
	if (ISP_STAT_USES_DMAENGINE(stat))
		return;

	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
				  dma_sync_single_range_for_cpu);
}
static int isp_stat_buf_check_magic(struct ispstat *stat,
				    struct ispstat_buffer *buf)
{
	const u32 buf_size = IS_H3A_AF(stat) ?
			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
	u8 *w;
	u8 *end;
	int ret = -EINVAL;

	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);

	/* Checking initial magic numbers. They shouldn't be here anymore. */
	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
		if (likely(*w != MAGIC_NUM))
			ret = 0;

	if (ret) {
		dev_dbg(stat->isp->dev,
			"%s: beginning magic check does not match.\n",
			stat->subdev.name);
		return ret;
	}

	/* Checking magic numbers at the end. They must still be here. */
	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
	     w < end; w++) {
		if (unlikely(*w != MAGIC_NUM)) {
			dev_dbg(stat->isp->dev,
				"%s: ending magic check does not match.\n",
				stat->subdev.name);
			return -EINVAL;
		}
	}

	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_FROM_DEVICE);

	return 0;
}
static void isp_stat_buf_insert_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	const u32 buf_size = IS_H3A_AF(stat) ?
			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;

	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);

	/*
	 * Inserting MAGIC_NUM at the beginning and end of the buffer.
	 * buf->buf_size is set only after the buffer is queued. For now the
	 * right buf_size for the current configuration is pointed to by
	 * stat->buf_size.
	 */
	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);

	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
					   DMA_FROM_DEVICE);
}
static void isp_stat_buf_sync_for_device(struct ispstat *stat,
					 struct ispstat_buffer *buf)
{
	if (ISP_STAT_USES_DMAENGINE(stat))
		return;

	dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
			       buf->sgt.nents, DMA_FROM_DEVICE);
}
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
				      struct ispstat_buffer *buf)
{
	if (ISP_STAT_USES_DMAENGINE(stat))
		return;

	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
			    buf->sgt.nents, DMA_FROM_DEVICE);
}
static void isp_stat_buf_clear(struct ispstat *stat)
{
	int i;

	for (i = 0; i < STAT_MAX_BUFS; i++)
		stat->buf[i].empty = 1;
}
static struct ispstat_buffer *
__isp_stat_buf_find(struct ispstat *stat, int look_empty)
{
	struct ispstat_buffer *found = NULL;
	int i;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *curr = &stat->buf[i];

		/*
		 * Don't select the buffer which is being copied to
		 * userspace or used by the module.
		 */
		if (curr == stat->locked_buf || curr == stat->active_buf)
			continue;

		/* Don't select uninitialised buffers if it's not required */
		if (!look_empty && curr->empty)
			continue;

		/* Pick uninitialised buffer over anything else if look_empty */
		if (curr->empty) {
			found = curr;
			break;
		}

		/* Choose the oldest buffer */
		if (!found ||
		    (s32)curr->frame_number - (s32)found->frame_number < 0)
			found = curr;
	}

	return found;
}
static inline struct ispstat_buffer *
isp_stat_buf_find_oldest(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 0);
}
static inline struct ispstat_buffer *
isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
{
	return __isp_stat_buf_find(stat, 1);
}
static int isp_stat_buf_queue(struct ispstat *stat)
{
	if (!stat->active_buf)
		return STAT_NO_BUF;

	v4l2_get_timestamp(&stat->active_buf->ts);

	stat->active_buf->buf_size = stat->buf_size;
	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
			stat->subdev.name);
		return STAT_NO_BUF;
	}
	stat->active_buf->config_counter = stat->config_counter;
	stat->active_buf->frame_number = stat->frame_number;
	stat->active_buf->empty = 0;
	stat->active_buf = NULL;

	return STAT_BUF_DONE;
}
/* Get next free buffer to write the statistics to and mark it active. */
static void isp_stat_buf_next(struct ispstat *stat)
{
	if (unlikely(stat->active_buf))
		/* Overwriting unused active buffer */
		dev_dbg(stat->isp->dev,
			"%s: new buffer requested without queuing active one.\n",
			stat->subdev.name);
	else
		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
}
static void isp_stat_buf_release(struct ispstat *stat)
{
	unsigned long flags;

	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
	spin_lock_irqsave(&stat->isp->stat_lock, flags);
	stat->locked_buf = NULL;
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}
/* Get buffer to userspace. */
static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
					       struct omap3isp_stat_data *data)
{
	int rval = 0;
	unsigned long flags;
	struct ispstat_buffer *buf;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	while (1) {
		buf = isp_stat_buf_find_oldest(stat);
		if (!buf) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
				stat->subdev.name);
			return ERR_PTR(-EBUSY);
		}
		if (isp_stat_buf_check_magic(stat, buf)) {
			dev_dbg(stat->isp->dev,
				"%s: current buffer has corrupted data.\n",
				stat->subdev.name);
			/* Mark empty because it doesn't have valid data. */
			buf->empty = 1;
		} else {
			/* Buffer isn't corrupted. */
			break;
		}
	}

	stat->locked_buf = buf;

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

	if (buf->buf_size > data->buf_size) {
		dev_warn(stat->isp->dev,
			 "%s: userspace's buffer size is not enough.\n",
			 stat->subdev.name);
		isp_stat_buf_release(stat);
		return ERR_PTR(-EINVAL);
	}

	isp_stat_buf_sync_for_cpu(stat, buf);

	rval = copy_to_user(data->buf,
			    buf->virt_addr,
			    buf->buf_size);

	if (rval) {
		dev_info(stat->isp->dev,
			 "%s: failed copying %d bytes of stat data\n",
			 stat->subdev.name, rval);
		buf = ERR_PTR(-EFAULT);
		isp_stat_buf_release(stat);
	}

	return buf;
}
static void isp_stat_bufs_free(struct ispstat *stat)
{
	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
			   ? NULL : stat->isp->dev;
	unsigned int i;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];

		if (!buf->virt_addr)
			continue;

		sg_free_table(&buf->sgt);

		dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
				  buf->dma_addr);

		buf->dma_addr = 0;
		buf->virt_addr = NULL;
		buf->empty = 1;
	}

	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
		stat->subdev.name);

	stat->buf_alloc_size = 0;
	stat->active_buf = NULL;
}
static int isp_stat_bufs_alloc_one(struct device *dev,
				   struct ispstat_buffer *buf,
				   unsigned int size)
{
	int ret;

	buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
					    GFP_KERNEL | GFP_DMA);
	if (!buf->virt_addr)
		return -ENOMEM;

	ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
			      size);
	if (ret < 0) {
		dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
		buf->virt_addr = NULL;
		buf->dma_addr = 0;
		return ret;
	}

	return 0;
}
/*
 * The device passed to the DMA API depends on whether the statistics block uses
 * ISP DMA, external DMA or PIO to transfer data.
 *
 * The first case (for the AEWB and AF engines) passes the ISP device, resulting
 * in the DMA buffers being mapped through the ISP IOMMU.
 *
 * The second case (for the histogram engine) should pass the DMA engine device.
 * As that device isn't accessible through the OMAP DMA engine API the driver
 * passes NULL instead, resulting in the buffers being mapped directly as
 * coherent buffers.
 *
 * The third case (for the histogram engine) doesn't require any mapping. The
 * buffers could be allocated with kmalloc/vmalloc, but we still use
 * dma_alloc_coherent() for consistency purposes.
 */
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
			   ? NULL : stat->isp->dev;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	BUG_ON(stat->locked_buf != NULL);

	/* Are the old buffers big enough? */
	if (stat->buf_alloc_size >= size) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return 0;
	}

	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
		dev_info(stat->isp->dev,
			 "%s: trying to allocate memory when busy\n",
			 stat->subdev.name);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return -EBUSY;
	}

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

	isp_stat_bufs_free(stat);

	stat->buf_alloc_size = size;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		int ret;

		ret = isp_stat_bufs_alloc_one(dev, buf, size);
		if (ret < 0) {
			dev_err(stat->isp->dev,
				"%s: Failed to allocate DMA buffer %u\n",
				stat->subdev.name, i);
			isp_stat_bufs_free(stat);
			return ret;
		}

		buf->empty = 1;

		dev_dbg(stat->isp->dev,
			"%s: buffer[%u] allocated. dma=0x%08lx virt=0x%08lx",
			stat->subdev.name, i,
			(unsigned long)buf->dma_addr,
			(unsigned long)buf->virt_addr);
	}

	return 0;
}
static void isp_stat_queue_event(struct ispstat *stat, int err)
{
	struct video_device *vdev = stat->subdev.devnode;
	struct v4l2_event event;
	struct omap3isp_stat_event_status *status = (void *)event.u.data;

	memset(&event, 0, sizeof(event));
	if (!err) {
		status->frame_number = stat->frame_number;
		status->config_counter = stat->config_counter;
	} else {
		status->buf_err = 1;
	}
	event.type = stat->event_type;
	v4l2_event_queue(vdev, &event);
}
/*
 * omap3isp_stat_request_statistics - Request statistics.
 * @data: Pointer to return statistics data.
 *
 * Returns 0 if successful.
 */
int omap3isp_stat_request_statistics(struct ispstat *stat,
				     struct omap3isp_stat_data *data)
{
	struct ispstat_buffer *buf;

	if (stat->state != ISPSTAT_ENABLED) {
		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
			stat->subdev.name);
		return -EINVAL;
	}

	mutex_lock(&stat->ioctl_lock);
	buf = isp_stat_buf_get(stat, data);
	if (IS_ERR(buf)) {
		mutex_unlock(&stat->ioctl_lock);
		return PTR_ERR(buf);
	}

	data->config_counter = buf->config_counter;
	data->frame_number = buf->frame_number;
	data->buf_size = buf->buf_size;

	buf->empty = 1;
	isp_stat_buf_release(stat);
	mutex_unlock(&stat->ioctl_lock);

	return 0;
}
/*
 * omap3isp_stat_config - Receives new statistic engine configuration.
 * @new_conf: Pointer to config structure.
 *
 * Returns 0 if successful, -EINVAL if the new_conf pointer is NULL, -ENOMEM
 * if it was unable to allocate memory for the buffer, or other errors if
 * parameters are invalid.
 */
int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
{
	int ret;
	unsigned long irqflags;
	struct ispstat_generic_config *user_cfg = new_conf;
	u32 buf_size;

	if (!new_conf) {
		dev_dbg(stat->isp->dev, "%s: configuration is NULL\n",
			stat->subdev.name);
		return -EINVAL;
	}

	buf_size = user_cfg->buf_size;

	mutex_lock(&stat->ioctl_lock);

	dev_dbg(stat->isp->dev,
		"%s: configuring module with buffer size=0x%08lx\n",
		stat->subdev.name, (unsigned long)buf_size);

	ret = stat->ops->validate_params(stat, new_conf);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
			stat->subdev.name);
		return ret;
	}

	if (buf_size != user_cfg->buf_size)
		dev_dbg(stat->isp->dev,
			"%s: driver has corrected buffer size request to 0x%08lx\n",
			stat->subdev.name,
			(unsigned long)user_cfg->buf_size);

	/*
	 * Hack: H3A modules may need a doubled buffer size to avoid access
	 * to an invalid memory address after an SBL overflow.
	 * The buffer size is always PAGE_ALIGNED.
	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
	 * inserted at the end for data integrity check purposes.
	 * Hack 3: The AF module writes one more paxel of data than it
	 * should, so the buffer allocation must take that into account to
	 * avoid invalid memory access.
	 * Hack 4: H3A modules need to allocate extra space for the recover
	 * state.
	 */
	if (IS_H3A(stat)) {
		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
		if (IS_H3A_AF(stat))
			/*
			 * Adding one extra paxel data size for each recover
			 * buffer + 2 regular ones.
			 */
			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
		if (stat->recover_priv) {
			struct ispstat_generic_config *recover_cfg =
				stat->recover_priv;

			buf_size += recover_cfg->buf_size *
				    NUM_H3A_RECOVER_BUFS;
		}
		buf_size = PAGE_ALIGN(buf_size);
	} else { /* Histogram */
		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
	}

	ret = isp_stat_bufs_alloc(stat, buf_size);
	if (ret) {
		mutex_unlock(&stat->ioctl_lock);
		return ret;
	}

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	stat->ops->set_params(stat, new_conf);
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	/*
	 * Returning the right future config_counter for this setup, so
	 * userspace can *know* when it has been applied.
	 */
	user_cfg->config_counter = stat->config_counter + stat->inc_config;

	/* Module has a valid configuration. */
	stat->configured = 1;
	dev_dbg(stat->isp->dev,
		"%s: module has been successfully configured.\n",
		stat->subdev.name);

	mutex_unlock(&stat->ioctl_lock);

	return 0;
}
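/*
 * Worked example of the sizing above (illustrative, with assumed numbers:
 * an AF request of 4096 bytes, a 1024-byte recover config, 4 KiB pages and
 * OMAP3ISP_AF_PAXEL_SIZE of 48 bytes):
 *
 *	buf_size = 4096 * 2 + 16	-> 8208		Hacks 1 + 2
 *	buf_size += 48 * (10 + 2)	-> 8784		Hack 3
 *	buf_size += 1024 * 10		-> 19024	Hack 4
 *	buf_size = PAGE_ALIGN(19024)	-> 20480
 *
 * so each of the STAT_MAX_BUFS buffers ends up several times larger than
 * the size userspace asked for.
 */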
/*
 * isp_stat_buf_process - Process statistic buffers.
 * @buf_state: points out if buffer is ready to be processed. It's necessary
 *	       because histogram needs to copy the data from internal memory
 *	       before being able to process the buffer.
 */
static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
{
	int ret = STAT_NO_BUF;

	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
		ret = isp_stat_buf_queue(stat);
		isp_stat_buf_next(stat);
	}

	return ret;
}
int omap3isp_stat_pcr_busy(struct ispstat *stat)
{
	return stat->ops->busy(stat);
}
int omap3isp_stat_busy(struct ispstat *stat)
{
	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
		(stat->state != ISPSTAT_DISABLED);
}
/*
 * isp_stat_pcr_enable - Disables/Enables statistic engines.
 * @pcr_enable: 0/1 - Disables/Enables the engine.
 *
 * Must be called from ISP driver when the module is idle and synchronized
 * with CCDC.
 */
static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
{
	if ((stat->state != ISPSTAT_ENABLING &&
	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
		/* Userspace has disabled the module. Aborting. */
		return;

	stat->ops->enable(stat, pcr_enable);
	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
		stat->state = ISPSTAT_DISABLED;
	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
		stat->state = ISPSTAT_ENABLED;
}
void omap3isp_stat_suspend(struct ispstat *stat)
{
	unsigned long flags;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	if (stat->state != ISPSTAT_DISABLED)
		stat->ops->enable(stat, 0);
	if (stat->state == ISPSTAT_ENABLED)
		stat->state = ISPSTAT_SUSPENDED;

	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
}
void omap3isp_stat_resume(struct ispstat *stat)
{
	/* Module will be re-enabled with its pipeline */
	if (stat->state == ISPSTAT_SUSPENDED)
		stat->state = ISPSTAT_ENABLING;
}
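/*
 * Summary of the state transitions implemented in this file (informational
 * comment reconstructed from the code above and below):
 *
 *	DISABLED --ioctl enable--> ENABLING --frame sync ISR--> ENABLED
 *	ENABLED --ioctl disable--> DISABLING --ISR--> DISABLED
 *	ENABLED --suspend--> SUSPENDED --resume--> ENABLING
 */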
static void isp_stat_try_enable(struct ispstat *stat)
{
	unsigned long irqflags;

	if (stat->priv == NULL)
		/* driver wasn't initialised */
		return;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
	    stat->buf_alloc_size) {
		/*
		 * Userspace requested to enable the engine, but it hasn't
		 * been enabled yet. Do it now.
		 */
		stat->update = 1;
		isp_stat_buf_next(stat);
		stat->ops->setup_regs(stat, stat->priv);
		isp_stat_buf_insert_magic(stat, stat->active_buf);

		/*
		 * The H3A modules have hw issues which force the driver to
		 * ignore the next buffers even if the module was disabled in
		 * the meantime. On the other hand, the Histogram module
		 * shouldn't ignore buffers anymore if it's being enabled.
		 */
		if (!IS_H3A(stat))
			atomic_set(&stat->buf_err, 0);

		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
			stat->subdev.name);
	} else {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	}
}
void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
{
	isp_stat_try_enable(stat);
}
void omap3isp_stat_sbl_overflow(struct ispstat *stat)
{
	unsigned long irqflags;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	/*
	 * Due to an H3A hw issue which prevents the next buffer from starting
	 * at the correct memory address, 2 buffers must be ignored.
	 */
	atomic_set(&stat->buf_err, 2);

	/*
	 * If more than one SBL overflow happens in a row, the H3A module may
	 * access an invalid memory region.
	 * stat->sbl_ovl_recover is set to tell the driver to temporarily use
	 * a soft configuration which helps to avoid consecutive overflows.
	 */
	if (stat->recover_priv)
		stat->sbl_ovl_recover = 1;
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
}
/*
 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
 * @enable: 0/1 - Disables/Enables the engine.
 *
 * Client should configure all the module registers before this.
 * This function can be called from a userspace request.
 */
int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
{
	unsigned long irqflags;

	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
		stat->subdev.name, enable ? "enable" : "disable");

	/* Prevent enabling while configuring */
	mutex_lock(&stat->ioctl_lock);

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

	if (!stat->configured && enable) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		mutex_unlock(&stat->ioctl_lock);
		dev_dbg(stat->isp->dev,
			"%s: cannot enable module as it's never been successfully configured so far.\n",
			stat->subdev.name);
		return -EINVAL;
	}

	if (enable) {
		if (stat->state == ISPSTAT_DISABLING)
			/* Previous disabling request wasn't done yet */
			stat->state = ISPSTAT_ENABLED;
		else if (stat->state == ISPSTAT_DISABLED)
			/* Module is now being enabled */
			stat->state = ISPSTAT_ENABLING;
	} else {
		if (stat->state == ISPSTAT_ENABLING) {
			/* Previous enabling request wasn't done yet */
			stat->state = ISPSTAT_DISABLED;
		} else if (stat->state == ISPSTAT_ENABLED) {
			/* Module is now being disabled */
			stat->state = ISPSTAT_DISABLING;
			isp_stat_buf_clear(stat);
		}
	}

	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	mutex_unlock(&stat->ioctl_lock);

	return 0;
}
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	if (enable) {
		/*
		 * Only set the PCR enable bit if the module was previously
		 * enabled through an ioctl.
		 */
		isp_stat_try_enable(stat);
	} else {
		unsigned long flags;

		/* Disable PCR bit and config enable field */
		omap3isp_stat_enable(stat, 0);
		spin_lock_irqsave(&stat->isp->stat_lock, flags);
		stat->ops->enable(stat, 0);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

		/*
		 * If the module isn't busy, the interrupt that would set the
		 * state to DISABLED may or may not come. As the Histogram
		 * module needs to read its internal memory to clear it, let
		 * the interrupt handler be responsible for changing the state
		 * to DISABLED. If a last interrupt does arrive, it's still
		 * safe, as the handler will ignore the second call when the
		 * state is already set to DISABLED. It's necessary to
		 * synchronize the Histogram module with streamoff, since the
		 * module may be considered idle before the last SDMA transfer
		 * starts if we returned here.
		 */
		if (!omap3isp_stat_pcr_busy(stat))
			omap3isp_stat_isr(stat);

		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
			stat->subdev.name);
	}

	return 0;
}
/*
 * __stat_isr - Interrupt handler for statistic drivers
 */
static void __stat_isr(struct ispstat *stat, int from_dma)
{
	int ret = STAT_BUF_DONE;
	int buf_processing;
	unsigned long irqflags;
	struct isp_pipeline *pipe;

	/*
	 * stat->buf_processing must be set before disabling the module. It's
	 * necessary so the buffers aren't reported as non-busy too early, in
	 * case SDMA is going to be used.
	 */
	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
	if (stat->state == ISPSTAT_DISABLED) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
		return;
	}
	buf_processing = stat->buf_processing;
	stat->buf_processing = 1;
	stat->ops->enable(stat, 0);

	if (buf_processing && !from_dma) {
		if (stat->state == ISPSTAT_ENABLED) {
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			dev_err(stat->isp->dev,
				"%s: interrupt occurred when module was still processing a buffer.\n",
				stat->subdev.name);
			ret = STAT_NO_BUF;
			goto out;
		} else {
			/*
			 * The interrupt handler was called from streamoff,
			 * when the module wasn't busy anymore, to ensure it
			 * is disabled after processing the last buffer. If
			 * such buffer processing has already started, there's
			 * no need to do anything.
			 */
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			return;
		}
	}
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	/* If it's busy we can't process this buffer anymore */
	if (!omap3isp_stat_pcr_busy(stat)) {
		if (!from_dma && stat->ops->buf_process)
			/* Module still needs to copy data to the buffer. */
			ret = stat->ops->buf_process(stat);
		if (ret == STAT_BUF_WAITING_DMA)
			/* Buffer is not ready yet */
			return;

		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

		/*
		 * The Histogram module needs to read its internal memory to
		 * clear it before being disabled. For that reason, the common
		 * statistics layer can return only after calling the module's
		 * buf_process() operator.
		 */
		if (stat->state == ISPSTAT_DISABLING) {
			stat->state = ISPSTAT_DISABLED;
			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
			stat->buf_processing = 0;
			return;
		}
		pipe = to_isp_pipeline(&stat->subdev.entity);
		stat->frame_number = atomic_read(&pipe->frame_number);

		/*
		 * Before this point, 'ret' stores the buffer's status if it's
		 * ready to be processed. Afterwards, it holds the status if
		 * it was processed successfully.
		 */
		ret = isp_stat_buf_process(stat, ret);

		if (likely(!stat->sbl_ovl_recover)) {
			stat->ops->setup_regs(stat, stat->priv);
		} else {
			/*
			 * Using the recover config to increase the chance of
			 * a good buffer processing run and to bring the H3A
			 * module back to a valid state.
			 */
			stat->update = 1;
			stat->ops->setup_regs(stat, stat->recover_priv);
			stat->sbl_ovl_recover = 0;

			/*
			 * Set 'update' in case the module needs to switch
			 * back to the regular configuration after the next
			 * buffer.
			 */
			stat->update = 1;
		}

		isp_stat_buf_insert_magic(stat, stat->active_buf);

		/*
		 * Hack: H3A modules may access an invalid memory address or
		 * send corrupted data to userspace if more than one SBL
		 * overflow happens in a row without re-writing the buffer's
		 * start memory address in the meantime. Such a situation is
		 * avoided if the module is not immediately re-enabled when
		 * the ISR misses the timing to process the buffer and to set
		 * up the registers. Because of that, pcr_enable(1) was moved
		 * to inside this 'if' block. But the next interrupt will
		 * still arrive, as the module was busy during pcr_enable(0).
		 */
		isp_stat_pcr_enable(stat, 1);
		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	} else {
		/*
		 * If an SBL overflow occurs and the H3A driver misses the
		 * timing to process the buffer, stat->buf_err is set and
		 * won't be cleared now. So the next buffer will be correctly
		 * ignored. This is necessary due to a hw issue which makes
		 * the next H3A buffer start at the memory address where the
		 * previous one stopped, instead of at the configured address.
		 * Do not "stat->buf_err = 0" here.
		 */

		if (stat->ops->buf_process)
			/*
			 * The driver may need to erase the current data prior
			 * to processing a new buffer. If it misses the
			 * timing, the next buffer might be wrong and so
			 * should be ignored. This happens only for the
			 * Histogram module.
			 */
			atomic_set(&stat->buf_err, 1);

		ret = STAT_NO_BUF;
		dev_dbg(stat->isp->dev,
			"%s: cannot process buffer, device is busy.\n",
			stat->subdev.name);
	}

out:
	stat->buf_processing = 0;
	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
}
void omap3isp_stat_isr(struct ispstat *stat)
{
	__stat_isr(stat, 0);
}

void omap3isp_stat_dma_isr(struct ispstat *stat)
{
	__stat_isr(stat, 1);
}
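/*
 * Note (added for clarity): omap3isp_stat_isr() is called from the module
 * interrupt handler, while omap3isp_stat_dma_isr() is called on DMA
 * completion; the from_dma flag makes __stat_isr() skip the buf_process()
 * call in the latter case, as the data has already been copied.
 */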
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
				  struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	struct ispstat *stat = v4l2_get_subdevdata(subdev);

	if (sub->type != stat->event_type)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
}
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
				    struct v4l2_fh *fh,
				    struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
void omap3isp_stat_unregister_entities(struct ispstat *stat)
{
	v4l2_device_unregister_subdev(&stat->subdev);
}
int omap3isp_stat_register_entities(struct ispstat *stat,
				    struct v4l2_device *vdev)
{
	return v4l2_device_register_subdev(vdev, &stat->subdev);
}
static int isp_stat_init_entities(struct ispstat *stat, const char *name,
				  const struct v4l2_subdev_ops *sd_ops)
{
	struct v4l2_subdev *subdev = &stat->subdev;
	struct media_entity *me = &subdev->entity;

	v4l2_subdev_init(subdev, sd_ops);
	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
	subdev->grp_id = 1 << 16;	/* group ID for isp subdevs */
	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
	v4l2_set_subdevdata(subdev, stat);

	stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	me->ops = NULL;

	return media_entity_pads_init(me, 1, &stat->pad);
}
int omap3isp_stat_init(struct ispstat *stat, const char *name,
		       const struct v4l2_subdev_ops *sd_ops)
{
	int ret;

	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
	if (!stat->buf)
		return -ENOMEM;

	isp_stat_buf_clear(stat);
	mutex_init(&stat->ioctl_lock);
	atomic_set(&stat->buf_err, 0);

	ret = isp_stat_init_entities(stat, name, sd_ops);
	if (ret < 0) {
		mutex_destroy(&stat->ioctl_lock);
		kfree(stat->buf);
	}

	return ret;
}
void omap3isp_stat_cleanup(struct ispstat *stat)
{
	media_entity_cleanup(&stat->subdev.entity);
	mutex_destroy(&stat->ioctl_lock);
	isp_stat_bufs_free(stat);
	kfree(stat->buf);
}