// SPDX-License-Identifier: GPL-2.0
/*
 * camss-vfe-gen1.c
 *
 * Qualcomm MSM Camera Subsystem - VFE Common functionality for Gen 1 versions of hw (4.1, 4.7..)
 *
 * Copyright (C) 2020 Linaro Ltd.
 */

#include "camss.h"
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"

/* Max number of frame drop updates per frame */
#define VFE_FRAME_DROP_UPDATES 2
#define VFE_NEXT_SOF_MS 500

int vfe_gen1_halt(struct vfe_device *vfe)
{
	unsigned long time;

	reinit_completion(&vfe->halt_complete);

	vfe->ops_gen1->halt_request(vfe);

	time = wait_for_completion_timeout(&vfe->halt_complete,
					   msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
	if (!time) {
		dev_err(vfe->camss->dev, "VFE halt timeout\n");
		return -EIO;
	}

	return 0;
}

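/*
 * vfe_disable_output - Stop the write masters of a line and tear down its
 * output path. Waits for the next SOF and the following reg update so the
 * hardware has committed the disabled configuration before returning.
 */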
static int vfe_disable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->res->hw_ops;
	unsigned long flags;
	unsigned long time;
	unsigned int i;

	spin_lock_irqsave(&vfe->output_lock, flags);

	output->gen1.wait_sof = 1;
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	time = wait_for_completion_timeout(&output->sof, msecs_to_jiffies(VFE_NEXT_SOF_MS));
	if (!time)
		dev_err(vfe->camss->dev, "VFE sof timeout\n");

	spin_lock_irqsave(&vfe->output_lock, flags);
	for (i = 0; i < output->wm_num; i++)
		vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 0);

	ops->reg_update(vfe, line->id);
	output->wait_reg_update = 1;
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	time = wait_for_completion_timeout(&output->reg_update, msecs_to_jiffies(VFE_NEXT_SOF_MS));
	if (!time)
		dev_err(vfe->camss->dev, "VFE reg update timeout\n");

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (line->id != VFE_LINE_PIX) {
		vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 0);
		vfe->ops_gen1->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
		vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
		vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 0);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
	} else {
		for (i = 0; i < output->wm_num; i++) {
			vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
			vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 0);
		}

		vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 0);
		vfe->ops_gen1->set_module_cfg(vfe, 0);
		vfe->ops_gen1->set_realign_cfg(vfe, line, 0);
		vfe->ops_gen1->set_xbar_cfg(vfe, output, 0);
		vfe->ops_gen1->set_camif_cmd(vfe, 0);

		spin_unlock_irqrestore(&vfe->output_lock, flags);

		vfe->ops_gen1->camif_wait_for_stop(vfe, vfe->camss->dev);
	}

	return 0;
}

/*
 * vfe_gen1_disable - Disable streaming on VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */
int vfe_gen1_disable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);

	vfe_disable_output(line);

	vfe_put_output(line);

	mutex_lock(&vfe->stream_lock);

	if (vfe->stream_count == 1)
		vfe->ops_gen1->bus_enable_wr_if(vfe, 0);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return 0;
}

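/*
 * vfe_output_init_addrs - Program the initial ping/pong buffer addresses for
 * every write master of an output. If only one buffer is available the pong
 * address mirrors the ping address; with @sync set the write masters are
 * reloaded so the new addresses take effect.
 */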
static void vfe_output_init_addrs(struct vfe_device *vfe,
				  struct vfe_output *output, u8 sync,
				  struct vfe_line *line)
{
	u32 ping_addr;
	u32 pong_addr;
	unsigned int i;

	output->gen1.active_buf = 0;

	for (i = 0; i < output->wm_num; i++) {
		if (output->buf[0])
			ping_addr = output->buf[0]->addr[i];
		else
			ping_addr = 0;

		if (output->buf[1])
			pong_addr = output->buf[1]->addr[i];
		else
			pong_addr = ping_addr;

		vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
		vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
	}
}

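/*
 * vfe_output_frame_drop - Program the frame drop pattern on every write
 * master of an output, toggling the drop period so the update is taken into
 * account on the next frame, then trigger a reg update.
 */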
static void vfe_output_frame_drop(struct vfe_device *vfe,
				  struct vfe_output *output,
				  u32 drop_pattern)
{
	u8 drop_period;
	unsigned int i;

	/* We need to toggle update period to be valid on next frame */
	output->drop_update_idx++;
	output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
	drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;

	for (i = 0; i < output->wm_num; i++) {
		vfe->ops_gen1->wm_set_framedrop_period(vfe, output->wm_idx[i], drop_period);
		vfe->ops_gen1->wm_set_framedrop_pattern(vfe, output->wm_idx[i], drop_pattern);
	}

	vfe->res->hw_ops->reg_update(vfe, container_of(output, struct vfe_line, output)->id);
}

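/*
 * vfe_enable_output - Configure and start an output: query the sensor for
 * the number of frames to skip, pick the initial state (IDLE, SINGLE or
 * CONTINUOUS) from the pending buffers, then program the write masters and,
 * for the PIX line, the full pipeline (camif, demux, scaler, crop, ...).
 */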
static int vfe_enable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->res->hw_ops;
	struct media_entity *sensor;
	unsigned long flags;
	unsigned int frame_skip = 0;
	unsigned int i;
	u16 ub_size;

	ub_size = vfe->ops_gen1->get_ub_size(vfe->id);
	if (!ub_size)
		return -EINVAL;

	sensor = camss_find_sensor(&line->subdev.entity);
	if (sensor) {
		struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);

		v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
		/* Max frame skip is 29 frames */
		if (frame_skip > VFE_FRAME_DROP_VAL - 1)
			frame_skip = VFE_FRAME_DROP_VAL - 1;
	}

	spin_lock_irqsave(&vfe->output_lock, flags);

	ops->reg_update_clear(vfe, line->id);

	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", output->state);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return -EINVAL;
	}
	output->state = VFE_OUTPUT_IDLE;

	output->buf[0] = vfe_buf_get_pending(output);
	output->buf[1] = vfe_buf_get_pending(output);

	if (!output->buf[0] && output->buf[1]) {
		output->buf[0] = output->buf[1];
		output->buf[1] = NULL;
	}

	if (output->buf[0])
		output->state = VFE_OUTPUT_SINGLE;

	if (output->buf[1])
		output->state = VFE_OUTPUT_CONTINUOUS;

	switch (output->state) {
	case VFE_OUTPUT_SINGLE:
		vfe_output_frame_drop(vfe, output, 1 << frame_skip);
		break;
	case VFE_OUTPUT_CONTINUOUS:
		vfe_output_frame_drop(vfe, output, 3 << frame_skip);
		break;
	default:
		vfe_output_frame_drop(vfe, output, 0);
		break;
	}

	output->sequence = 0;
	output->gen1.wait_sof = 0;
	output->wait_reg_update = 0;
	reinit_completion(&output->sof);
	reinit_completion(&output->reg_update);

	vfe_output_init_addrs(vfe, output, 0, line);

	if (line->id != VFE_LINE_PIX) {
		vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
		vfe->ops_gen1->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
		vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[0]);
		vfe->ops_gen1->set_rdi_cid(vfe, line->id, 0);
		vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[0],
					     (ub_size + 1) * output->wm_idx[0], ub_size);
		vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->wm_enable(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[0]);
	} else {
		ub_size /= output->wm_num;
		for (i = 0; i < output->wm_num; i++) {
			vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 1);
			vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[i]);
			vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[i],
						     (ub_size + 1) * output->wm_idx[i], ub_size);
			vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i],
						     &line->video_out.active_fmt.fmt.pix_mp, i, 1);
			vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 1);
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
		}
		vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 1);
		vfe->ops_gen1->set_module_cfg(vfe, 1);
		vfe->ops_gen1->set_camif_cfg(vfe, line);
		vfe->ops_gen1->set_realign_cfg(vfe, line, 1);
		vfe->ops_gen1->set_xbar_cfg(vfe, output, 1);
		vfe->ops_gen1->set_demux_cfg(vfe, line);
		vfe->ops_gen1->set_scale_cfg(vfe, line);
		vfe->ops_gen1->set_crop_cfg(vfe, line);
		vfe->ops_gen1->set_clamp_cfg(vfe);
		vfe->ops_gen1->set_camif_cmd(vfe, 1);
	}

	ops->reg_update(vfe, line->id);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

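/*
 * vfe_get_output - Reserve the write masters needed by a line: two for the
 * two-plane NV12/NV21/NV16/NV61 formats, one otherwise.
 */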
static int vfe_get_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output;
	struct v4l2_format *f = &line->video_out.active_fmt;
	unsigned long flags;
	int i;
	int wm_idx;

	spin_lock_irqsave(&vfe->output_lock, flags);

	output = &line->output;
	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is running\n");
		goto error;
	}
	output->state = VFE_OUTPUT_RESERVED;

	output->gen1.active_buf = 0;

	switch (f->fmt.pix_mp.pixelformat) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		output->wm_num = 2;
		break;
	default:
		output->wm_num = 1;
		break;
	}

	for (i = 0; i < output->wm_num; i++) {
		wm_idx = vfe_reserve_wm(vfe, line->id);
		if (wm_idx < 0) {
			dev_err(vfe->camss->dev, "Can not reserve wm\n");
			goto error_get_wm;
		}
		output->wm_idx[i] = wm_idx;
	}

	output->drop_update_idx = 0;

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;

error_get_wm:
	for (i--; i >= 0; i--)
		vfe_release_wm(vfe, output->wm_idx[i]);
	output->state = VFE_OUTPUT_OFF;
error:
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return -EINVAL;
}

int vfe_gen1_enable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	int ret;

	mutex_lock(&vfe->stream_lock);

	if (!vfe->stream_count) {
		vfe->ops_gen1->enable_irq_common(vfe);
		vfe->ops_gen1->bus_enable_wr_if(vfe, 1);
		vfe->ops_gen1->set_qos(vfe);
		vfe->ops_gen1->set_ds(vfe);
	}

	vfe->stream_count++;

	mutex_unlock(&vfe->stream_lock);

	ret = vfe_get_output(line);
	if (ret < 0)
		goto error_get_output;

	ret = vfe_enable_output(line);
	if (ret < 0)
		goto error_enable_output;

	vfe->was_streaming = 1;

	return 0;

error_enable_output:
	vfe_put_output(line);

error_get_output:
	mutex_lock(&vfe->stream_lock);

	if (vfe->stream_count == 1)
		vfe->ops_gen1->bus_enable_wr_if(vfe, 0);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return ret;
}

static void vfe_output_update_ping_addr(struct vfe_device *vfe,
					struct vfe_output *output, u8 sync,
					struct vfe_line *line)
{
	u32 addr;
	unsigned int i;

	for (i = 0; i < output->wm_num; i++) {
		if (output->buf[0])
			addr = output->buf[0]->addr[i];
		else
			addr = 0;

		vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
	}
}

static void vfe_output_update_pong_addr(struct vfe_device *vfe,
					struct vfe_output *output, u8 sync,
					struct vfe_line *line)
{
	u32 addr;
	unsigned int i;

	for (i = 0; i < output->wm_num; i++) {
		if (output->buf[1])
			addr = output->buf[1]->addr[i];
		else
			addr = 0;

		vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
	}
}

static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
				      struct vfe_output *output)
{
	switch (output->state) {
	case VFE_OUTPUT_CONTINUOUS:
		vfe_output_frame_drop(vfe, output, 3);
		break;
	case VFE_OUTPUT_SINGLE:
	default:
		dev_err_ratelimited(vfe->camss->dev,
				    "Next buf in wrong state! %d\n",
				    output->state);
		break;
	}
}

static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
				      struct vfe_output *output)
{
	switch (output->state) {
	case VFE_OUTPUT_CONTINUOUS:
		output->state = VFE_OUTPUT_SINGLE;
		vfe_output_frame_drop(vfe, output, 1);
		break;
	case VFE_OUTPUT_SINGLE:
		output->state = VFE_OUTPUT_STOPPING;
		vfe_output_frame_drop(vfe, output, 0);
		break;
	default:
		dev_err_ratelimited(vfe->camss->dev,
				    "Last buf in wrong state! %d\n",
				    output->state);
		break;
	}
}

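/*
 * vfe_buf_update_wm_on_new - Handle a newly queued buffer according to the
 * output state: in IDLE or SINGLE state the buffer is handed to the free
 * ping/pong slot and the state advances, otherwise it is added to the
 * pending queue.
 */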
static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
				     struct vfe_output *output,
				     struct camss_buffer *new_buf,
				     struct vfe_line *line)
{
	int inactive_idx;

	switch (output->state) {
	case VFE_OUTPUT_SINGLE:
		inactive_idx = !output->gen1.active_buf;

		if (!output->buf[inactive_idx]) {
			output->buf[inactive_idx] = new_buf;

			if (inactive_idx)
				vfe_output_update_pong_addr(vfe, output, 0, line);
			else
				vfe_output_update_ping_addr(vfe, output, 0, line);

			vfe_output_frame_drop(vfe, output, 3);
			output->state = VFE_OUTPUT_CONTINUOUS;
		} else {
			vfe_buf_add_pending(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Inactive buffer is busy\n");
		}
		break;

	case VFE_OUTPUT_IDLE:
		if (!output->buf[0]) {
			output->buf[0] = new_buf;

			vfe_output_init_addrs(vfe, output, 1, line);
			vfe_output_frame_drop(vfe, output, 1);

			output->state = VFE_OUTPUT_SINGLE;
		} else {
			vfe_buf_add_pending(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Output idle with buffer set!\n");
		}
		break;

	case VFE_OUTPUT_CONTINUOUS:
	default:
		vfe_buf_add_pending(output, new_buf);
		break;
	}
}

/*
 * vfe_isr_halt_ack - Process halt ack
 * @vfe: VFE Device
 */
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
	complete(&vfe->halt_complete);
	vfe->ops_gen1->halt_clear(vfe);
}

/*
 * vfe_isr_sof - Process start of frame interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	output = &vfe->line[line_id].output;
	if (output->gen1.wait_sof) {
		output->gen1.wait_sof = 0;
		complete(&output->sof);
	}
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	struct vfe_line *line = &vfe->line[line_id];
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	vfe->res->hw_ops->reg_update_clear(vfe, line_id);

	output = &line->output;

	if (output->wait_reg_update) {
		output->wait_reg_update = 0;
		complete(&output->reg_update);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return;
	}

	if (output->state == VFE_OUTPUT_STOPPING) {
		/* Release last buffer when hw is idle */
		if (output->last_buffer) {
			vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
					VB2_BUF_STATE_DONE);
			output->last_buffer = NULL;
		}
		output->state = VFE_OUTPUT_IDLE;

		/*
		 * Buffers received in stopping state are queued in
		 * dma pending queue, start next capture here
		 */
		output->buf[0] = vfe_buf_get_pending(output);
		output->buf[1] = vfe_buf_get_pending(output);

		if (!output->buf[0] && output->buf[1]) {
			output->buf[0] = output->buf[1];
			output->buf[1] = NULL;
		}

		if (output->buf[0])
			output->state = VFE_OUTPUT_SINGLE;

		if (output->buf[1])
			output->state = VFE_OUTPUT_CONTINUOUS;

		switch (output->state) {
		case VFE_OUTPUT_SINGLE:
			vfe_output_frame_drop(vfe, output, 2);
			break;
		case VFE_OUTPUT_CONTINUOUS:
			vfe_output_frame_drop(vfe, output, 3);
			break;
		default:
			vfe_output_frame_drop(vfe, output, 0);
			break;
		}

		vfe_output_init_addrs(vfe, output, 1, &vfe->line[line_id]);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_isr_wm_done - Process write master done interrupt
 * @vfe: VFE Device
 * @wm: Write master id
 */
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
	struct camss_buffer *ready_buf;
	struct vfe_output *output;
	dma_addr_t *new_addr;
	unsigned long flags;
	u32 active_index;
	u64 ts = ktime_get_ns();
	unsigned int i;

	active_index = vfe->ops_gen1->wm_get_ping_pong_status(vfe, wm);

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Received wm done for unmapped index\n");
		goto out_unlock;
	}
	output = &vfe->line[vfe->wm_output_map[wm]].output;

	/* The "&& 0" keeps the active buffer mismatch check disabled */
	if (output->gen1.active_buf == active_index && 0) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Active buffer mismatch!\n");
		goto out_unlock;
	}
	output->gen1.active_buf = active_index;

	ready_buf = output->buf[!active_index];
	if (!ready_buf) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Missing ready buf %d %d!\n",
				    !active_index, output->state);
		goto out_unlock;
	}

	ready_buf->vb.vb2_buf.timestamp = ts;
	ready_buf->vb.sequence = output->sequence++;

	/* Get next buffer */
	output->buf[!active_index] = vfe_buf_get_pending(output);
	if (!output->buf[!active_index]) {
		/* No next buffer - set same address */
		new_addr = ready_buf->addr;
		vfe_buf_update_wm_on_last(vfe, output);
	} else {
		new_addr = output->buf[!active_index]->addr;
		vfe_buf_update_wm_on_next(vfe, output);
	}

	if (active_index)
		for (i = 0; i < output->wm_num; i++)
			vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], new_addr[i]);
	else
		for (i = 0; i < output->wm_num; i++)
			vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], new_addr[i]);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	if (output->state == VFE_OUTPUT_STOPPING)
		output->last_buffer = ready_buf;
	else
		vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);

	return;

out_unlock:
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_queue_buffer - Add empty buffer
 * @vid: Video device structure
 * @buf: Buffer to be enqueued
 *
 * Add an empty buffer - depending on the current number of buffers it will be
 * put in pending buffer queue or directly given to the hardware to be filled.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_queue_buffer(struct camss_video *vid, struct camss_buffer *buf)
{
	struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output;
	unsigned long flags;

	output = &line->output;

	spin_lock_irqsave(&vfe->output_lock, flags);

	vfe_buf_update_wm_on_new(vfe, output, buf, line);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

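/*
 * CALC_WORD(width, M, N) rounds width * M / N up to the next integer; it is
 * used by vfe_word_per_line() below to convert a line width in pixels into
 * words. For example, CALC_WORD(1920, 2, 8) = (1920 * 2 + 7) / 8 = 480.
 */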
#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))

int vfe_word_per_line(u32 format, u32 width)
{
	int val = 0;

	switch (format) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		val = CALC_WORD(width, 1, 8);
		break;
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		val = CALC_WORD(width, 2, 8);
		break;
	}

	return val;
}

const struct vfe_isr_ops vfe_isr_ops_gen1 = {
	.reset_ack = vfe_isr_reset_ack,
	.halt_ack = vfe_isr_halt_ack,
	.reg_update = vfe_isr_reg_update,
	.sof = vfe_isr_sof,
	.comp_done = vfe_isr_comp_done,
	.wm_done = vfe_isr_wm_done,
};

const struct camss_video_ops vfe_video_ops_gen1 = {
	.queue_buffer = vfe_queue_buffer,
	.flush_buffers = vfe_flush_buffers,
};