drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
6 */
8 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
9 #include <linux/debugfs.h>
10 #include <linux/kthread.h>
11 #include <linux/seq_file.h>
13 #include <drm/drm_crtc.h>
14 #include <drm/drm_file.h>
15 #include <drm/drm_probe_helper.h>
17 #include "msm_drv.h"
18 #include "dpu_kms.h"
19 #include "dpu_hwio.h"
20 #include "dpu_hw_catalog.h"
21 #include "dpu_hw_intf.h"
22 #include "dpu_hw_ctl.h"
23 #include "dpu_hw_dspp.h"
24 #include "dpu_formats.h"
25 #include "dpu_encoder_phys.h"
26 #include "dpu_crtc.h"
27 #include "dpu_trace.h"
28 #include "dpu_core_irq.h"
30 #define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
31 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
33 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
34 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
36 #define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
37 (p) ? (p)->parent->base.id : -1, \
38 (p) ? (p)->intf_idx - INTF_0 : -1, \
39 (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
40 ##__VA_ARGS__)
42 #define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
43 (p) ? (p)->parent->base.id : -1, \
44 (p) ? (p)->intf_idx - INTF_0 : -1, \
45 (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
46 ##__VA_ARGS__)
48 /*
49 * Two to anticipate panels that can do cmd/vid dynamic switching
50 * plan is to create all possible physical encoder types, and switch between
51 * them at runtime
52 */
53 #define NUM_PHYS_ENCODER_TYPES 2
55 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
56 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
58 #define MAX_CHANNELS_PER_ENC 2
60 #define IDLE_SHORT_TIMEOUT 1
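/* hdisplay width above which dpu_encoder_get_topology() picks two LMs (when the catalog supports 3D merge) */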
62 #define MAX_HDISPLAY_SPLIT 1080
64 /* timeout in frames waiting for frame done */
65 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
67 /**
68 * enum dpu_enc_rc_events - events for resource control state machine
69 * @DPU_ENC_RC_EVENT_KICKOFF:
70 * This event happens at NORMAL priority.
71 * Event that signals the start of the transfer. When this event is
72 * received, enable MDP/DSI core clocks. Regardless of the previous
73 * state, the resource should be in ON state at the end of this event.
74 * @DPU_ENC_RC_EVENT_FRAME_DONE:
75 * This event happens at INTERRUPT level.
76 * Event signals the end of the data transfer after the PP FRAME_DONE
77 * event. At the end of this event, a delayed work is scheduled to go to
78 * IDLE_PC state after IDLE_TIMEOUT time.
79 * @DPU_ENC_RC_EVENT_PRE_STOP:
80 * This event happens at NORMAL priority.
81 * This event, when received during the ON state, leaves the RC STATE
82 * in the PRE_OFF state. It should be followed by the STOP event as
83 * part of encoder disable.
84 * If received during IDLE or OFF states, it will do nothing.
85 * @DPU_ENC_RC_EVENT_STOP:
86 * This event happens at NORMAL priority.
87 * When this event is received, disable all the MDP/DSI core clocks, and
88 * disable IRQs. It should be called from the PRE_OFF or IDLE states.
89 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
90 * PRE_OFF is expected when PRE_STOP was executed during the ON state.
91 * Resource state should be in OFF at the end of the event.
92 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
93 * This event happens at NORMAL priority from a work item.
94 * Event signals that there were no frame updates for IDLE_TIMEOUT time.
95 * This would disable MDP/DSI core clocks and change the resource state
96 * to IDLE.
97 */
98 enum dpu_enc_rc_events {
99 DPU_ENC_RC_EVENT_KICKOFF = 1,
100 DPU_ENC_RC_EVENT_FRAME_DONE,
101 DPU_ENC_RC_EVENT_PRE_STOP,
102 DPU_ENC_RC_EVENT_STOP,
103 DPU_ENC_RC_EVENT_ENTER_IDLE
104 };
106 /**
107 * enum dpu_enc_rc_states - states that the resource control maintains
108 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
109 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
110 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
111 * @DPU_ENC_RC_STATE_MODESET: Resource is in modeset state
112 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
113 */
114 enum dpu_enc_rc_states {
115 DPU_ENC_RC_STATE_OFF,
116 DPU_ENC_RC_STATE_PRE_OFF,
117 DPU_ENC_RC_STATE_ON,
118 DPU_ENC_RC_STATE_IDLE
119 };
121 /**
122 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
123 * encoders. Virtual encoder manages one "logical" display. Physical
124 * encoders manage one intf block, tied to a specific panel/sub-panel.
125 * Virtual encoder defers as much as possible to the physical encoders.
126 * Virtual encoder registers itself with the DRM Framework as the encoder.
127 * @base: drm_encoder base class for registration with DRM
128 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
129 * @bus_scaling_client: Client handle to the bus scaling interface
130 * @enabled: True if the encoder is active, protected by enc_lock
131 * @num_phys_encs: Actual number of physical encoders contained.
132 * @phys_encs: Container of physical encoders managed.
133 * @cur_master: Pointer to the current master in this mode; an optimization.
134 * Only valid after enable. Cleared at disable.
135 * @cur_slave: As above but for the slave encoder.
136 * @hw_pp: Handle to the pingpong blocks used for the display. The number of
137 * pingpong blocks can differ from num_phys_encs.
138 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
139 * for partial update right-only cases, such as pingpong
140 * split where virtual pingpong does not generate IRQs
141 * @crtc: Pointer to the currently assigned crtc. Normally you
142 * would use crtc->state->encoder_mask to determine the
143 * link between encoder/crtc. However in this case we need
144 * to track crtc in the disable() hook which is called
145 * _after_ encoder_mask is cleared.
146 * @crtc_kickoff_cb: Callback into CRTC that will flush & start
147 * all CTL paths
148 * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
149 * @debugfs_root: Debug file system root file node
150 * @enc_lock: Lock around physical encoder
151 * create/destroy/enable/disable
152 * @frame_busy_mask: Bitmask tracking which phys_enc we are still
153 * busy processing current command.
154 * Bit0 = phys_encs[0] etc.
155 * @crtc_frame_event_cb: callback handler for frame event
156 * @crtc_frame_event_cb_data: callback handler private data
157 * @frame_done_timeout_ms: frame done timeout in ms
158 * @frame_done_timer: watchdog timer for frame done event
159 * @vsync_event_timer: vsync timer
160 * @disp_info: local copy of msm_display_info struct
161 * @idle_pc_supported: indicate if idle power collapse is supported
162 * @rc_lock: resource control mutex lock to protect
163 * virt encoder over various state changes
164 * @rc_state: resource controller state
165 * @delayed_off_work: delayed worker to schedule disabling of
166 * clks and resources after IDLE_TIMEOUT time.
167 * @vsync_event_work: worker to handle vsync event for autorefresh
168 * @topology: topology of the display
169 * @idle_timeout: idle timeout duration in milliseconds
170 */
171 struct dpu_encoder_virt {
172 struct drm_encoder base;
173 spinlock_t enc_spinlock;
174 uint32_t bus_scaling_client;
176 bool enabled;
178 unsigned int num_phys_encs;
179 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
180 struct dpu_encoder_phys *cur_master;
181 struct dpu_encoder_phys *cur_slave;
182 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
184 bool intfs_swapped;
186 struct drm_crtc *crtc;
188 struct dentry *debugfs_root;
189 struct mutex enc_lock;
190 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
191 void (*crtc_frame_event_cb)(void *, u32 event);
192 void *crtc_frame_event_cb_data;
194 atomic_t frame_done_timeout_ms;
195 struct timer_list frame_done_timer;
196 struct timer_list vsync_event_timer;
198 struct msm_display_info disp_info;
200 bool idle_pc_supported;
201 struct mutex rc_lock;
202 enum dpu_enc_rc_states rc_state;
203 struct delayed_work delayed_off_work;
204 struct kthread_work vsync_event_work;
205 struct msm_display_topology topology;
207 u32 idle_timeout;
208 };
210 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
212 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
213 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
214 };
216 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
218 struct dpu_hw_dither_cfg dither_cfg = { 0 };
220 if (!hw_pp->ops.setup_dither)
221 return;
223 switch (bpc) {
224 case 6:
225 dither_cfg.c0_bitdepth = 6;
226 dither_cfg.c1_bitdepth = 6;
227 dither_cfg.c2_bitdepth = 6;
228 dither_cfg.c3_bitdepth = 6;
229 dither_cfg.temporal_en = 0;
230 break;
231 default:
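/* for other bit depths, pass a NULL config to disable dithering */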
232 hw_pp->ops.setup_dither(hw_pp, NULL);
233 return;
236 memcpy(&dither_cfg.matrix, dither_matrix,
237 sizeof(u32) * DITHER_MATRIX_SZ);
239 hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
242 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
243 enum dpu_intr_idx intr_idx)
245 DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
246 DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
247 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
249 if (phys_enc->parent_ops->handle_frame_done)
250 phys_enc->parent_ops->handle_frame_done(
251 phys_enc->parent, phys_enc,
252 DPU_ENCODER_FRAME_EVENT_ERROR);
255 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
256 int32_t hw_id, struct dpu_encoder_wait_info *info);
258 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
259 enum dpu_intr_idx intr_idx,
260 struct dpu_encoder_wait_info *wait_info)
262 struct dpu_encoder_irq *irq;
263 u32 irq_status;
264 int ret;
266 if (!wait_info || intr_idx >= INTR_IDX_MAX) {
267 DPU_ERROR("invalid params\n");
268 return -EINVAL;
270 irq = &phys_enc->irq[intr_idx];
272 /* note: do master / slave checking outside */
274 /* return EWOULDBLOCK since we know the wait isn't necessary */
275 if (phys_enc->enable_state == DPU_ENC_DISABLED) {
276 DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
277 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
278 irq->irq_idx);
279 return -EWOULDBLOCK;
282 if (irq->irq_idx < 0) {
283 DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
284 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
285 irq->name);
286 return 0;
289 DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
290 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
291 irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
292 atomic_read(wait_info->atomic_cnt));
294 ret = dpu_encoder_helper_wait_event_timeout(
295 DRMID(phys_enc->parent),
296 irq->hw_idx,
297 wait_info);
299 if (ret <= 0) {
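/* wait timed out: check whether the interrupt actually fired and, if so, run the handler by hand */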
300 irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
301 irq->irq_idx, true);
302 if (irq_status) {
303 unsigned long flags;
305 DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
306 "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
307 DRMID(phys_enc->parent), intr_idx,
308 irq->hw_idx, irq->irq_idx,
309 phys_enc->hw_pp->idx - PINGPONG_0,
310 atomic_read(wait_info->atomic_cnt));
311 local_irq_save(flags);
312 irq->cb.func(phys_enc, irq->irq_idx);
313 local_irq_restore(flags);
314 ret = 0;
315 } else {
316 ret = -ETIMEDOUT;
317 DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
318 "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
319 DRMID(phys_enc->parent), intr_idx,
320 irq->hw_idx, irq->irq_idx,
321 phys_enc->hw_pp->idx - PINGPONG_0,
322 atomic_read(wait_info->atomic_cnt));
324 } else {
325 ret = 0;
326 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
327 intr_idx, irq->hw_idx, irq->irq_idx,
328 phys_enc->hw_pp->idx - PINGPONG_0,
329 atomic_read(wait_info->atomic_cnt));
332 return ret;
335 int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
336 enum dpu_intr_idx intr_idx)
338 struct dpu_encoder_irq *irq;
339 int ret = 0;
341 if (intr_idx >= INTR_IDX_MAX) {
342 DPU_ERROR("invalid params\n");
343 return -EINVAL;
345 irq = &phys_enc->irq[intr_idx];
347 if (irq->irq_idx >= 0) {
348 DPU_DEBUG_PHYS(phys_enc,
349 "skipping already registered irq %s type %d\n",
350 irq->name, irq->intr_type);
351 return 0;
354 irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
355 irq->intr_type, irq->hw_idx);
356 if (irq->irq_idx < 0) {
357 DPU_ERROR_PHYS(phys_enc,
358 "failed to lookup IRQ index for %s type:%d\n",
359 irq->name, irq->intr_type);
360 return -EINVAL;
363 ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
364 &irq->cb);
365 if (ret) {
366 DPU_ERROR_PHYS(phys_enc,
367 "failed to register IRQ callback for %s\n",
368 irq->name);
369 irq->irq_idx = -EINVAL;
370 return ret;
373 ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
374 if (ret) {
375 DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
376 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
377 irq->irq_idx);
378 dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
379 irq->irq_idx, &irq->cb);
380 irq->irq_idx = -EINVAL;
381 return ret;
384 trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
385 irq->hw_idx, irq->irq_idx);
387 return ret;
390 int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
391 enum dpu_intr_idx intr_idx)
393 struct dpu_encoder_irq *irq;
394 int ret;
396 irq = &phys_enc->irq[intr_idx];
398 /* silently skip irqs that weren't registered */
399 if (irq->irq_idx < 0) {
400 DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
401 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
402 irq->irq_idx);
403 return 0;
406 ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
407 if (ret) {
408 DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
409 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
410 irq->irq_idx, ret);
413 ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
414 &irq->cb);
415 if (ret) {
416 DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
417 DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
418 irq->irq_idx, ret);
421 trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
422 irq->hw_idx, irq->irq_idx);
424 irq->irq_idx = -EINVAL;
426 return 0;
429 void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
430 struct dpu_encoder_hw_resources *hw_res)
432 struct dpu_encoder_virt *dpu_enc = NULL;
433 int i = 0;
435 dpu_enc = to_dpu_encoder_virt(drm_enc);
436 DPU_DEBUG_ENC(dpu_enc, "\n");
438 /* Query resources used by phys encs, expected to be without overlap */
439 memset(hw_res, 0, sizeof(*hw_res));
441 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
442 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
444 if (phys->ops.get_hw_resources)
445 phys->ops.get_hw_resources(phys, hw_res);
449 static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
451 struct dpu_encoder_virt *dpu_enc = NULL;
452 int i = 0;
454 if (!drm_enc) {
455 DPU_ERROR("invalid encoder\n");
456 return;
459 dpu_enc = to_dpu_encoder_virt(drm_enc);
460 DPU_DEBUG_ENC(dpu_enc, "\n");
462 mutex_lock(&dpu_enc->enc_lock);
464 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
465 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
467 if (phys->ops.destroy) {
468 phys->ops.destroy(phys);
469 --dpu_enc->num_phys_encs;
470 dpu_enc->phys_encs[i] = NULL;
474 if (dpu_enc->num_phys_encs)
475 DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
476 dpu_enc->num_phys_encs);
477 dpu_enc->num_phys_encs = 0;
478 mutex_unlock(&dpu_enc->enc_lock);
480 drm_encoder_cleanup(drm_enc);
481 mutex_destroy(&dpu_enc->enc_lock);
484 void dpu_encoder_helper_split_config(
485 struct dpu_encoder_phys *phys_enc,
486 enum dpu_intf interface)
488 struct dpu_encoder_virt *dpu_enc;
489 struct split_pipe_cfg cfg = { 0 };
490 struct dpu_hw_mdp *hw_mdptop;
491 struct msm_display_info *disp_info;
493 if (!phys_enc->hw_mdptop || !phys_enc->parent) {
494 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
495 return;
498 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
499 hw_mdptop = phys_enc->hw_mdptop;
500 disp_info = &dpu_enc->disp_info;
502 if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
503 return;
505 /*
506 * disable split modes since encoder will be operating as the only
507 * encoder, either for the entire use case in the case of, for example,
508 * single DSI, or for this frame in the case of left/right only partial
509 * update.
510 */
511 if (phys_enc->split_role == ENC_ROLE_SOLO) {
512 if (hw_mdptop->ops.setup_split_pipe)
513 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
514 return;
517 cfg.en = true;
518 cfg.mode = phys_enc->intf_mode;
519 cfg.intf = interface;
521 if (cfg.en && phys_enc->ops.needs_single_flush &&
522 phys_enc->ops.needs_single_flush(phys_enc))
523 cfg.split_flush_en = true;
525 if (phys_enc->split_role == ENC_ROLE_MASTER) {
526 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
528 if (hw_mdptop->ops.setup_split_pipe)
529 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
533 static struct msm_display_topology dpu_encoder_get_topology(
534 struct dpu_encoder_virt *dpu_enc,
535 struct dpu_kms *dpu_kms,
536 struct drm_display_mode *mode)
538 struct msm_display_topology topology = {0};
539 int i, intf_count = 0;
541 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
542 if (dpu_enc->phys_encs[i])
543 intf_count++;
545 /* Datapath topology selection
547 * Dual display
548 * 2 LM, 2 INTF ( Split display using 2 interfaces)
550 * Single display
551 * 1 LM, 1 INTF
552 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
554 * Adding color blocks only to primary interface if available in
555 * sufficient number
556 */
557 if (intf_count == 2)
558 topology.num_lm = 2;
559 else if (!dpu_kms->catalog->caps->has_3d_merge)
560 topology.num_lm = 1;
561 else
562 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
564 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
565 if (dpu_kms->catalog->dspp &&
566 (dpu_kms->catalog->dspp_count >= topology.num_lm))
567 topology.num_dspp = topology.num_lm;
570 topology.num_enc = 0;
571 topology.num_intf = intf_count;
573 return topology;
575 static int dpu_encoder_virt_atomic_check(
576 struct drm_encoder *drm_enc,
577 struct drm_crtc_state *crtc_state,
578 struct drm_connector_state *conn_state)
580 struct dpu_encoder_virt *dpu_enc;
581 struct msm_drm_private *priv;
582 struct dpu_kms *dpu_kms;
583 const struct drm_display_mode *mode;
584 struct drm_display_mode *adj_mode;
585 struct msm_display_topology topology;
586 struct dpu_global_state *global_state;
587 int i = 0;
588 int ret = 0;
590 if (!drm_enc || !crtc_state || !conn_state) {
591 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
592 drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
593 return -EINVAL;
596 dpu_enc = to_dpu_encoder_virt(drm_enc);
597 DPU_DEBUG_ENC(dpu_enc, "\n");
599 priv = drm_enc->dev->dev_private;
600 dpu_kms = to_dpu_kms(priv->kms);
601 mode = &crtc_state->mode;
602 adj_mode = &crtc_state->adjusted_mode;
603 global_state = dpu_kms_get_global_state(crtc_state->state);
604 if (IS_ERR(global_state))
605 return PTR_ERR(global_state);
607 trace_dpu_enc_atomic_check(DRMID(drm_enc));
609 /* perform atomic check on the first physical encoder (master) */
610 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
611 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
613 if (phys->ops.atomic_check)
614 ret = phys->ops.atomic_check(phys, crtc_state,
615 conn_state);
616 else if (phys->ops.mode_fixup)
617 if (!phys->ops.mode_fixup(phys, mode, adj_mode))
618 ret = -EINVAL;
620 if (ret) {
621 DPU_ERROR_ENC(dpu_enc,
622 "mode unsupported, phys idx %d\n", i);
623 break;
627 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
629 /* Reserve dynamic resources now. */
630 if (!ret) {
631 /*
632 * Release and Allocate resources on every modeset
633 * Don't allocate when active is false.
634 */
635 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
636 dpu_rm_release(global_state, drm_enc);
638 if (!crtc_state->active_changed || crtc_state->active)
639 ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
640 drm_enc, crtc_state, topology);
644 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
646 return ret;
649 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
650 struct msm_display_info *disp_info)
652 struct dpu_vsync_source_cfg vsync_cfg = { 0 };
653 struct msm_drm_private *priv;
654 struct dpu_kms *dpu_kms;
655 struct dpu_hw_mdp *hw_mdptop;
656 struct drm_encoder *drm_enc;
657 int i;
659 if (!dpu_enc || !disp_info) {
660 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
661 dpu_enc != NULL, disp_info != NULL);
662 return;
663 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
664 DPU_ERROR("invalid num phys enc %d/%d\n",
665 dpu_enc->num_phys_encs,
666 (int) ARRAY_SIZE(dpu_enc->hw_pp));
667 return;
670 drm_enc = &dpu_enc->base;
671 /* these pointers are checked in virt_enable_helper */
672 priv = drm_enc->dev->dev_private;
674 dpu_kms = to_dpu_kms(priv->kms);
675 hw_mdptop = dpu_kms->hw_mdp;
676 if (!hw_mdptop) {
677 DPU_ERROR("invalid mdptop\n");
678 return;
681 if (hw_mdptop->ops.setup_vsync_source &&
682 disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
683 for (i = 0; i < dpu_enc->num_phys_encs; i++)
684 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
686 vsync_cfg.pp_count = dpu_enc->num_phys_encs;
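/* use the watchdog timer as the TE source when the panel does not provide one, otherwise the TE GPIO */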
687 if (disp_info->is_te_using_watchdog_timer)
688 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
689 else
690 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
692 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
696 static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
698 struct dpu_encoder_virt *dpu_enc;
699 int i;
701 if (!drm_enc) {
702 DPU_ERROR("invalid encoder\n");
703 return;
706 dpu_enc = to_dpu_encoder_virt(drm_enc);
708 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
709 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
710 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
712 if (phys->ops.irq_control)
713 phys->ops.irq_control(phys, enable);
718 static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
719 bool enable)
721 struct msm_drm_private *priv;
722 struct dpu_kms *dpu_kms;
723 struct dpu_encoder_virt *dpu_enc;
725 dpu_enc = to_dpu_encoder_virt(drm_enc);
726 priv = drm_enc->dev->dev_private;
727 dpu_kms = to_dpu_kms(priv->kms);
729 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
731 if (!dpu_enc->cur_master) {
732 DPU_ERROR("encoder master not set\n");
733 return;
736 if (enable) {
737 /* enable DPU core clks */
738 pm_runtime_get_sync(&dpu_kms->pdev->dev);
740 /* enable all the irq */
741 _dpu_encoder_irq_control(drm_enc, true);
743 } else {
744 /* disable all the irq */
745 _dpu_encoder_irq_control(drm_enc, false);
747 /* disable DPU core clks */
748 pm_runtime_put_sync(&dpu_kms->pdev->dev);
753 static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
754 u32 sw_event)
756 struct dpu_encoder_virt *dpu_enc;
757 struct msm_drm_private *priv;
758 bool is_vid_mode = false;
760 if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
761 DPU_ERROR("invalid parameters\n");
762 return -EINVAL;
764 dpu_enc = to_dpu_encoder_virt(drm_enc);
765 priv = drm_enc->dev->dev_private;
766 is_vid_mode = dpu_enc->disp_info.capabilities &
767 MSM_DISPLAY_CAP_VID_MODE;
769 /*
770 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
771 * events and return early for other events (i.e. wb display).
772 */
773 if (!dpu_enc->idle_pc_supported &&
774 (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
775 sw_event != DPU_ENC_RC_EVENT_STOP &&
776 sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
777 return 0;
779 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
780 dpu_enc->rc_state, "begin");
782 switch (sw_event) {
783 case DPU_ENC_RC_EVENT_KICKOFF:
784 /* cancel delayed off work, if any */
785 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
786 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
787 sw_event);
789 mutex_lock(&dpu_enc->rc_lock);
791 /* return if the resource control is already in ON state */
792 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
793 DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
794 DRMID(drm_enc), sw_event);
795 mutex_unlock(&dpu_enc->rc_lock);
796 return 0;
797 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
798 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
799 DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
800 DRMID(drm_enc), sw_event,
801 dpu_enc->rc_state);
802 mutex_unlock(&dpu_enc->rc_lock);
803 return -EINVAL;
806 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
807 _dpu_encoder_irq_control(drm_enc, true);
808 else
809 _dpu_encoder_resource_control_helper(drm_enc, true);
811 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
813 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
814 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
815 "kickoff");
817 mutex_unlock(&dpu_enc->rc_lock);
818 break;
820 case DPU_ENC_RC_EVENT_FRAME_DONE:
821 /*
822 * mutex lock is not used as this event happens at interrupt
823 * context. Locking is not required as the other events
824 * like KICKOFF and STOP do a wait-for-idle before executing
825 * the resource_control
826 */
827 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
828 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
829 DRMID(drm_enc), sw_event,
830 dpu_enc->rc_state);
831 return -EINVAL;
834 /*
835 * schedule off work item only when there are no
836 * frames pending
837 */
838 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
839 DRM_DEBUG_KMS("id:%d skip schedule work\n",
840 DRMID(drm_enc));
841 return 0;
844 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
845 msecs_to_jiffies(dpu_enc->idle_timeout));
847 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
848 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
849 "frame done");
850 break;
852 case DPU_ENC_RC_EVENT_PRE_STOP:
853 /* cancel delayed off work, if any */
854 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
855 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
856 sw_event);
858 mutex_lock(&dpu_enc->rc_lock);
860 if (is_vid_mode &&
861 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
862 _dpu_encoder_irq_control(drm_enc, true);
864 /* skip if already in OFF or IDLE state, resources are off already */
865 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
866 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
867 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
868 DRMID(drm_enc), sw_event,
869 dpu_enc->rc_state);
870 mutex_unlock(&dpu_enc->rc_lock);
871 return 0;
874 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
876 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
877 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
878 "pre stop");
880 mutex_unlock(&dpu_enc->rc_lock);
881 break;
883 case DPU_ENC_RC_EVENT_STOP:
884 mutex_lock(&dpu_enc->rc_lock);
886 /* return if the resource control is already in OFF state */
887 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
888 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
889 DRMID(drm_enc), sw_event);
890 mutex_unlock(&dpu_enc->rc_lock);
891 return 0;
892 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
893 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
894 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
895 mutex_unlock(&dpu_enc->rc_lock);
896 return -EINVAL;
899 /*
900 * expect to arrive here only if in either idle state or pre-off
901 * and in IDLE state the resources are already disabled
902 */
903 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
904 _dpu_encoder_resource_control_helper(drm_enc, false);
906 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
908 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
909 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
910 "stop");
912 mutex_unlock(&dpu_enc->rc_lock);
913 break;
915 case DPU_ENC_RC_EVENT_ENTER_IDLE:
916 mutex_lock(&dpu_enc->rc_lock);
918 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
919 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
920 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
921 mutex_unlock(&dpu_enc->rc_lock);
922 return 0;
925 /*
926 * if we are in ON but a frame was just kicked off,
927 * ignore the IDLE event, it's probably a stale timer event
928 */
929 if (dpu_enc->frame_busy_mask[0]) {
930 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
931 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
932 mutex_unlock(&dpu_enc->rc_lock);
933 return 0;
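/* in video mode only the IRQs are toggled for idle; command mode also drops the core clocks */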
936 if (is_vid_mode)
937 _dpu_encoder_irq_control(drm_enc, false);
938 else
939 _dpu_encoder_resource_control_helper(drm_enc, false);
941 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
943 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
944 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
945 "idle");
947 mutex_unlock(&dpu_enc->rc_lock);
948 break;
950 default:
951 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
952 sw_event);
953 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
954 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
955 "error");
956 break;
959 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
960 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
961 "end");
962 return 0;
965 static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
966 struct drm_display_mode *mode,
967 struct drm_display_mode *adj_mode)
969 struct dpu_encoder_virt *dpu_enc;
970 struct msm_drm_private *priv;
971 struct dpu_kms *dpu_kms;
972 struct list_head *connector_list;
973 struct drm_connector *conn = NULL, *conn_iter;
974 struct drm_crtc *drm_crtc;
975 struct dpu_crtc_state *cstate;
976 struct dpu_global_state *global_state;
977 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
978 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
979 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
980 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
981 int num_lm, num_ctl, num_pp;
982 int i, j;
984 if (!drm_enc) {
985 DPU_ERROR("invalid encoder\n");
986 return;
989 dpu_enc = to_dpu_encoder_virt(drm_enc);
990 DPU_DEBUG_ENC(dpu_enc, "\n");
992 priv = drm_enc->dev->dev_private;
993 dpu_kms = to_dpu_kms(priv->kms);
994 connector_list = &dpu_kms->dev->mode_config.connector_list;
996 global_state = dpu_kms_get_existing_global_state(dpu_kms);
997 if (IS_ERR_OR_NULL(global_state)) {
998 DPU_ERROR("Failed to get global state");
999 return;
1002 trace_dpu_enc_mode_set(DRMID(drm_enc));
1004 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
1005 msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
1007 list_for_each_entry(conn_iter, connector_list, head)
1008 if (conn_iter->encoder == drm_enc)
1009 conn = conn_iter;
1011 if (!conn) {
1012 DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
1013 return;
1014 } else if (!conn->state) {
1015 DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
1016 return;
1019 drm_for_each_crtc(drm_crtc, drm_enc->dev)
1020 if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
1021 break;
1023 /* Query resources that have been reserved in atomic check step. */
1024 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1025 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
1026 ARRAY_SIZE(hw_pp));
1027 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1028 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
1029 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1030 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1031 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1032 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1033 ARRAY_SIZE(hw_dspp));
1035 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1036 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1037 : NULL;
1039 cstate = to_dpu_crtc_state(drm_crtc->state);
1041 for (i = 0; i < num_lm; i++) {
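/* if fewer CTLs than LMs were reserved, the last CTL drives the remaining mixers */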
1042 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1044 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1045 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1046 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1049 cstate->num_mixers = num_lm;
1051 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1052 int num_blk;
1053 struct dpu_hw_blk *hw_blk[MAX_CHANNELS_PER_ENC];
1054 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1056 if (!dpu_enc->hw_pp[i]) {
1057 DPU_ERROR_ENC(dpu_enc,
1058 "no pp block assigned at idx: %d\n", i);
1059 return;
1062 if (!hw_ctl[i]) {
1063 DPU_ERROR_ENC(dpu_enc,
1064 "no ctl block assigned at idx: %d\n", i);
1065 return;
1068 phys->hw_pp = dpu_enc->hw_pp[i];
1069 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1071 num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
1072 global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
1073 hw_blk, ARRAY_SIZE(hw_blk));
1074 for (j = 0; j < num_blk; j++) {
1075 struct dpu_hw_intf *hw_intf;
1077 hw_intf = to_dpu_hw_intf(hw_blk[j]);
1078 if (hw_intf->idx == phys->intf_idx)
1079 phys->hw_intf = hw_intf;
1082 if (!phys->hw_intf) {
1083 DPU_ERROR_ENC(dpu_enc,
1084 "no intf block assigned at idx: %d\n", i);
1085 return;
1088 phys->connector = conn->state->connector;
1089 if (phys->ops.mode_set)
1090 phys->ops.mode_set(phys, mode, adj_mode);
1094 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1096 struct dpu_encoder_virt *dpu_enc = NULL;
1097 int i;
1099 if (!drm_enc || !drm_enc->dev) {
1100 DPU_ERROR("invalid parameters\n");
1101 return;
1104 dpu_enc = to_dpu_encoder_virt(drm_enc);
1105 if (!dpu_enc || !dpu_enc->cur_master) {
1106 DPU_ERROR("invalid dpu encoder/master\n");
1107 return;
1111 if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
1112 dpu_enc->cur_master->hw_mdptop &&
1113 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1114 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1115 dpu_enc->cur_master->hw_mdptop);
1117 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1119 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1120 !WARN_ON(dpu_enc->num_phys_encs == 0)) {
1121 unsigned bpc = dpu_enc->phys_encs[0]->connector->display_info.bpc;
1122 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1123 if (!dpu_enc->hw_pp[i])
1124 continue;
1125 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1130 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1132 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1134 mutex_lock(&dpu_enc->enc_lock);
1136 if (!dpu_enc->enabled)
1137 goto out;
1139 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1140 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1141 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1142 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1144 _dpu_encoder_virt_enable_helper(drm_enc);
1146 out:
1147 mutex_unlock(&dpu_enc->enc_lock);
1150 static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1152 struct dpu_encoder_virt *dpu_enc = NULL;
1153 int ret = 0;
1154 struct msm_drm_private *priv;
1155 struct drm_display_mode *cur_mode = NULL;
1157 if (!drm_enc) {
1158 DPU_ERROR("invalid encoder\n");
1159 return;
1161 dpu_enc = to_dpu_encoder_virt(drm_enc);
1163 mutex_lock(&dpu_enc->enc_lock);
1164 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1165 priv = drm_enc->dev->dev_private;
1167 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1168 cur_mode->vdisplay);
1170 /* always enable slave encoder before master */
1171 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1172 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1174 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1175 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1177 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1178 if (ret) {
1179 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1180 ret);
1181 goto out;
1184 _dpu_encoder_virt_enable_helper(drm_enc);
1186 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
1187 ret = msm_dp_display_enable(priv->dp,
1188 drm_enc);
1189 if (ret) {
1190 DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
1191 ret);
1192 goto out;
1195 dpu_enc->enabled = true;
1197 out:
1198 mutex_unlock(&dpu_enc->enc_lock);
1201 static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1203 struct dpu_encoder_virt *dpu_enc = NULL;
1204 struct msm_drm_private *priv;
1205 int i = 0;
1207 if (!drm_enc) {
1208 DPU_ERROR("invalid encoder\n");
1209 return;
1210 } else if (!drm_enc->dev) {
1211 DPU_ERROR("invalid dev\n");
1212 return;
1215 dpu_enc = to_dpu_encoder_virt(drm_enc);
1216 DPU_DEBUG_ENC(dpu_enc, "\n");
1218 mutex_lock(&dpu_enc->enc_lock);
1219 dpu_enc->enabled = false;
1221 priv = drm_enc->dev->dev_private;
1223 trace_dpu_enc_disable(DRMID(drm_enc));
1225 /* wait for idle */
1226 dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1228 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
1229 if (msm_dp_display_pre_disable(priv->dp, drm_enc))
1230 DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
1233 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1235 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1236 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1238 if (phys->ops.disable)
1239 phys->ops.disable(phys);
1243 /* after phys waits for frame-done, should be no more frames pending */
1244 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1245 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1246 del_timer_sync(&dpu_enc->frame_done_timer);
1249 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1251 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1252 dpu_enc->phys_encs[i]->connector = NULL;
1255 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1257 if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
1258 if (msm_dp_display_disable(priv->dp, drm_enc))
1259 DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
1262 mutex_unlock(&dpu_enc->enc_lock);
1265 static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
1266 enum dpu_intf_type type, u32 controller_id)
1268 int i = 0;
1270 for (i = 0; i < catalog->intf_count; i++) {
1271 if (catalog->intf[i].type == type
1272 && catalog->intf[i].controller_id == controller_id) {
1273 return catalog->intf[i].id;
1277 return INTF_MAX;
1280 static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1281 struct dpu_encoder_phys *phy_enc)
1283 struct dpu_encoder_virt *dpu_enc = NULL;
1284 unsigned long lock_flags;
1286 if (!drm_enc || !phy_enc)
1287 return;
1289 DPU_ATRACE_BEGIN("encoder_vblank_callback");
1290 dpu_enc = to_dpu_encoder_virt(drm_enc);
1292 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1293 if (dpu_enc->crtc)
1294 dpu_crtc_vblank_callback(dpu_enc->crtc);
1295 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1297 atomic_inc(&phy_enc->vsync_cnt);
1298 DPU_ATRACE_END("encoder_vblank_callback");
1301 static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1302 struct dpu_encoder_phys *phy_enc)
1304 if (!phy_enc)
1305 return;
1307 DPU_ATRACE_BEGIN("encoder_underrun_callback");
1308 atomic_inc(&phy_enc->underrun_cnt);
1309 trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1310 atomic_read(&phy_enc->underrun_cnt));
1311 DPU_ATRACE_END("encoder_underrun_callback");
1314 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1316 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1317 unsigned long lock_flags;
1319 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1320 /* crtc should always be cleared before re-assigning */
1321 WARN_ON(crtc && dpu_enc->crtc);
1322 dpu_enc->crtc = crtc;
1323 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1326 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1327 struct drm_crtc *crtc, bool enable)
1329 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1330 unsigned long lock_flags;
1331 int i;
1333 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1335 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1336 if (dpu_enc->crtc != crtc) {
1337 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1338 return;
1340 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1342 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1343 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1345 if (phys->ops.control_vblank_irq)
1346 phys->ops.control_vblank_irq(phys, enable);
1350 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1351 void (*frame_event_cb)(void *, u32 event),
1352 void *frame_event_cb_data)
1354 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1355 unsigned long lock_flags;
1356 bool enable;
1358 enable = frame_event_cb ? true : false;
1360 if (!drm_enc) {
1361 DPU_ERROR("invalid encoder\n");
1362 return;
1364 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1366 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1367 dpu_enc->crtc_frame_event_cb = frame_event_cb;
1368 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1369 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1372 static void dpu_encoder_frame_done_callback(
1373 struct drm_encoder *drm_enc,
1374 struct dpu_encoder_phys *ready_phys, u32 event)
1376 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1377 unsigned int i;
1379 if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1380 | DPU_ENCODER_FRAME_EVENT_ERROR
1381 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1383 if (!dpu_enc->frame_busy_mask[0]) {
1384 /*
1385 * suppress frame_done without waiter,
1386 * likely autorefresh
1387 */
1388 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
1389 event, ready_phys->intf_idx);
1390 return;
1393 /* One of the physical encoders has become idle */
1394 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1395 if (dpu_enc->phys_encs[i] == ready_phys) {
1396 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1397 dpu_enc->frame_busy_mask[0]);
1398 clear_bit(i, dpu_enc->frame_busy_mask);
1402 if (!dpu_enc->frame_busy_mask[0]) {
1403 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1404 del_timer(&dpu_enc->frame_done_timer);
1406 dpu_encoder_resource_control(drm_enc,
1407 DPU_ENC_RC_EVENT_FRAME_DONE);
1409 if (dpu_enc->crtc_frame_event_cb)
1410 dpu_enc->crtc_frame_event_cb(
1411 dpu_enc->crtc_frame_event_cb_data,
1412 event);
1414 } else {
1415 if (dpu_enc->crtc_frame_event_cb)
1416 dpu_enc->crtc_frame_event_cb(
1417 dpu_enc->crtc_frame_event_cb_data, event);
1421 static void dpu_encoder_off_work(struct work_struct *work)
1423 struct dpu_encoder_virt *dpu_enc = container_of(work,
1424 struct dpu_encoder_virt, delayed_off_work.work);
1426 if (!dpu_enc) {
1427 DPU_ERROR("invalid dpu encoder\n");
1428 return;
1431 dpu_encoder_resource_control(&dpu_enc->base,
1432 DPU_ENC_RC_EVENT_ENTER_IDLE);
1434 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1435 DPU_ENCODER_FRAME_EVENT_IDLE);
1438 /**
1439 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1440 * @drm_enc: Pointer to drm encoder structure
1441 * @phys: Pointer to physical encoder structure
1442 * @extra_flush_bits: Additional bit mask to include in flush trigger
1443 */
1444 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1445 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1447 struct dpu_hw_ctl *ctl;
1448 int pending_kickoff_cnt;
1449 u32 ret = UINT_MAX;
1451 if (!phys->hw_pp) {
1452 DPU_ERROR("invalid pingpong hw\n");
1453 return;
1456 ctl = phys->hw_ctl;
1457 if (!ctl->ops.trigger_flush) {
1458 DPU_ERROR("missing trigger cb\n");
1459 return;
1462 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1464 if (extra_flush_bits && ctl->ops.update_pending_flush)
1465 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1467 ctl->ops.trigger_flush(ctl);
1469 if (ctl->ops.get_pending_flush)
1470 ret = ctl->ops.get_pending_flush(ctl);
1472 trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
1473 pending_kickoff_cnt, ctl->idx,
1474 extra_flush_bits, ret);
1477 /**
1478 * _dpu_encoder_trigger_start - trigger start for a physical encoder
1479 * @phys: Pointer to physical encoder structure
1480 */
1481 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1483 if (!phys) {
1484 DPU_ERROR("invalid argument(s)\n");
1485 return;
1488 if (!phys->hw_pp) {
1489 DPU_ERROR("invalid pingpong hw\n");
1490 return;
1493 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1494 phys->ops.trigger_start(phys);
1497 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1499 struct dpu_hw_ctl *ctl;
1501 ctl = phys_enc->hw_ctl;
1502 if (ctl->ops.trigger_start) {
1503 ctl->ops.trigger_start(ctl);
1504 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1508 static int dpu_encoder_helper_wait_event_timeout(
1509 int32_t drm_id,
1510 int32_t hw_id,
1511 struct dpu_encoder_wait_info *info)
1513 int rc = 0;
1514 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1515 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1516 s64 time;
1518 do {
1519 rc = wait_event_timeout(*(info->wq),
1520 atomic_read(info->atomic_cnt) == 0, jiffies);
1521 time = ktime_to_ms(ktime_get());
1523 trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
1524 expected_time,
1525 atomic_read(info->atomic_cnt));
1526 /* If we timed out, counter is valid and time is less, wait again */
1527 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1528 (time < expected_time));
1530 return rc;
1533 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1535 struct dpu_encoder_virt *dpu_enc;
1536 struct dpu_hw_ctl *ctl;
1537 int rc;
1539 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1540 ctl = phys_enc->hw_ctl;
1542 if (!ctl->ops.reset)
1543 return;
1545 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
1546 ctl->idx);
1548 rc = ctl->ops.reset(ctl);
1549 if (rc)
1550 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1552 phys_enc->enable_state = DPU_ENC_ENABLED;
1555 /**
1556 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1557 * Iterate through the physical encoders and perform consolidated flush
1558 * and/or control start triggering as needed. This is done in the virtual
1559 * encoder rather than the individual physical ones in order to handle
1560 * use cases that require visibility into multiple physical encoders at
1561 * a time.
1562 * @dpu_enc: Pointer to virtual encoder structure
1563 */
1564 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1566 struct dpu_hw_ctl *ctl;
1567 uint32_t i, pending_flush;
1568 unsigned long lock_flags;
1570 pending_flush = 0x0;
1572 /* update pending counts and trigger kickoff ctl flush atomically */
1573 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1575 /* don't perform flush/start operations for slave encoders */
1576 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1577 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1579 if (phys->enable_state == DPU_ENC_DISABLED)
1580 continue;
1582 ctl = phys->hw_ctl;
1584 /*
1585 * This is cleared in frame_done worker, which isn't invoked
1586 * for async commits. So don't set this for async, since it'll
1587 * roll over to the next commit.
1588 */
1589 if (phys->split_role != ENC_ROLE_SLAVE)
1590 set_bit(i, dpu_enc->frame_busy_mask);
1592 if (!phys->ops.needs_single_flush ||
1593 !phys->ops.needs_single_flush(phys))
1594 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1595 else if (ctl->ops.get_pending_flush)
1596 pending_flush |= ctl->ops.get_pending_flush(ctl);
1599 /* for split flush, combine pending flush masks and send to master */
1600 if (pending_flush && dpu_enc->cur_master) {
1601 _dpu_encoder_trigger_flush(
1602 &dpu_enc->base,
1603 dpu_enc->cur_master,
1604 pending_flush);
1607 _dpu_encoder_trigger_start(dpu_enc->cur_master);
1609 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1612 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1614 struct dpu_encoder_virt *dpu_enc;
1615 struct dpu_encoder_phys *phys;
1616 unsigned int i;
1617 struct dpu_hw_ctl *ctl;
1618 struct msm_display_info *disp_info;
1620 if (!drm_enc) {
1621 DPU_ERROR("invalid encoder\n");
1622 return;
1624 dpu_enc = to_dpu_encoder_virt(drm_enc);
1625 disp_info = &dpu_enc->disp_info;
1627 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1628 phys = dpu_enc->phys_encs[i];
1630 ctl = phys->hw_ctl;
1631 if (ctl->ops.clear_pending_flush)
1632 ctl->ops.clear_pending_flush(ctl);
1634 /* update only for command mode primary ctl */
1635 if ((phys == dpu_enc->cur_master) &&
1636 (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
1637 && ctl->ops.trigger_pending)
1638 ctl->ops.trigger_pending(ctl);
1642 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1643 struct drm_display_mode *mode)
1645 u64 pclk_rate;
1646 u32 pclk_period;
1647 u32 line_time;
1649 /*
1650 * For linetime calculation, only operate on master encoder.
1651 */
1652 if (!dpu_enc->cur_master)
1653 return 0;
1655 if (!dpu_enc->cur_master->ops.get_line_count) {
1656 DPU_ERROR("get_line_count function not defined\n");
1657 return 0;
1660 pclk_rate = mode->clock; /* pixel clock in kHz */
1661 if (pclk_rate == 0) {
1662 DPU_ERROR("pclk is 0, cannot calculate line time\n");
1663 return 0;
1666 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1667 if (pclk_period == 0) {
1668 DPU_ERROR("pclk period is 0\n");
1669 return 0;
1672 /*
1673 * Line time calculation based on Pixel clock and HTOTAL.
1674 * Final unit is in ns.
1675 */
1676 line_time = (pclk_period * mode->htotal) / 1000;
1677 if (line_time == 0) {
1678 DPU_ERROR("line time calculation is 0\n");
1679 return 0;
1682 DPU_DEBUG_ENC(dpu_enc,
1683 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1684 pclk_rate, pclk_period, line_time);
1686 return line_time;
1689 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1691 struct drm_display_mode *mode;
1692 struct dpu_encoder_virt *dpu_enc;
1693 u32 cur_line;
1694 u32 line_time;
1695 u32 vtotal, time_to_vsync;
1696 ktime_t cur_time;
1698 dpu_enc = to_dpu_encoder_virt(drm_enc);
1700 if (!drm_enc->crtc || !drm_enc->crtc->state) {
1701 DPU_ERROR("crtc/crtc state object is NULL\n");
1702 return -EINVAL;
1704 mode = &drm_enc->crtc->state->adjusted_mode;
1706 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1707 if (!line_time)
1708 return -EINVAL;
1710 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1712 vtotal = mode->vtotal;
1713 if (cur_line >= vtotal)
1714 time_to_vsync = line_time * vtotal;
1715 else
1716 time_to_vsync = line_time * (vtotal - cur_line);
1718 if (time_to_vsync == 0) {
1719 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1720 vtotal);
1721 return -EINVAL;
1724 cur_time = ktime_get();
1725 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1727 DPU_DEBUG_ENC(dpu_enc,
1728 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1729 cur_line, vtotal, time_to_vsync,
1730 ktime_to_ms(cur_time),
1731 ktime_to_ms(*wakeup_time));
1732 return 0;
1735 static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1737 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1738 vsync_event_timer);
1739 struct drm_encoder *drm_enc = &dpu_enc->base;
1740 struct msm_drm_private *priv;
1741 struct msm_drm_thread *event_thread;
1743 if (!drm_enc->dev || !drm_enc->crtc) {
1744 DPU_ERROR("invalid parameters\n");
1745 return;
1748 priv = drm_enc->dev->dev_private;
1750 if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1751 DPU_ERROR("invalid crtc index\n");
1752 return;
1754 event_thread = &priv->event_thread[drm_enc->crtc->index];
1755 if (!event_thread) {
1756 DPU_ERROR("event_thread not found for crtc:%d\n",
1757 drm_enc->crtc->index);
1758 return;
1761 del_timer(&dpu_enc->vsync_event_timer);
1764 static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1766 struct dpu_encoder_virt *dpu_enc = container_of(work,
1767 struct dpu_encoder_virt, vsync_event_work);
1768 ktime_t wakeup_time;
1770 if (!dpu_enc) {
1771 DPU_ERROR("invalid dpu encoder\n");
1772 return;
1775 if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1776 return;
1778 trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1779 mod_timer(&dpu_enc->vsync_event_timer,
1780 nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1783 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1785 struct dpu_encoder_virt *dpu_enc;
1786 struct dpu_encoder_phys *phys;
1787 bool needs_hw_reset = false;
1788 unsigned int i;
1790 dpu_enc = to_dpu_encoder_virt(drm_enc);
1792 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1794 /* prepare for next kickoff, may include waiting on previous kickoff */
1795 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1796 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1797 phys = dpu_enc->phys_encs[i];
1798 if (phys->ops.prepare_for_kickoff)
1799 phys->ops.prepare_for_kickoff(phys);
1800 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1801 needs_hw_reset = true;
1803 DPU_ATRACE_END("enc_prepare_for_kickoff");
1805 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1807 /* if any phys needs reset, reset all phys, in-order */
1808 if (needs_hw_reset) {
1809 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1810 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1811 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1816 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1818 struct dpu_encoder_virt *dpu_enc;
1819 struct dpu_encoder_phys *phys;
1820 ktime_t wakeup_time;
1821 unsigned long timeout_ms;
1822 unsigned int i;
1824 DPU_ATRACE_BEGIN("encoder_kickoff");
1825 dpu_enc = to_dpu_encoder_virt(drm_enc);
1827 trace_dpu_enc_kickoff(DRMID(drm_enc));
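/* arm the frame-done watchdog: it fires if no FRAME_DONE arrives within DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES frame periods */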
1829 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1830 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1832 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1833 mod_timer(&dpu_enc->frame_done_timer,
1834 jiffies + msecs_to_jiffies(timeout_ms));
1836 /* All phys encs are ready to go, trigger the kickoff */
1837 _dpu_encoder_kickoff_phys(dpu_enc);
1839 /* allow phys encs to handle any post-kickoff business */
1840 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1841 phys = dpu_enc->phys_encs[i];
1842 if (phys->ops.handle_post_kickoff)
1843 phys->ops.handle_post_kickoff(phys);
1846 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1847 !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
1848 trace_dpu_enc_early_kickoff(DRMID(drm_enc),
1849 ktime_to_ms(wakeup_time));
1850 mod_timer(&dpu_enc->vsync_event_timer,
1851 nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1854 DPU_ATRACE_END("encoder_kickoff");
1857 void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
1859 struct dpu_encoder_virt *dpu_enc;
1860 struct dpu_encoder_phys *phys;
1861 int i;
1863 if (!drm_enc) {
1864 DPU_ERROR("invalid encoder\n");
1865 return;
1867 dpu_enc = to_dpu_encoder_virt(drm_enc);
1869 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1870 phys = dpu_enc->phys_encs[i];
1871 if (phys->ops.prepare_commit)
1872 phys->ops.prepare_commit(phys);
1876 #ifdef CONFIG_DEBUG_FS
1877 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
1879 struct dpu_encoder_virt *dpu_enc = s->private;
1880 int i;
1882 mutex_lock(&dpu_enc->enc_lock);
1883 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1884 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1886 seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
1887 phys->intf_idx - INTF_0,
1888 atomic_read(&phys->vsync_cnt),
1889 atomic_read(&phys->underrun_cnt));
1891 switch (phys->intf_mode) {
1892 case INTF_MODE_VIDEO:
1893 seq_puts(s, "mode: video\n");
1894 break;
1895 case INTF_MODE_CMD:
1896 seq_puts(s, "mode: command\n");
1897 break;
1898 default:
1899 seq_puts(s, "mode: ???\n");
1900 break;
1903 mutex_unlock(&dpu_enc->enc_lock);
1905 return 0;
1908 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;

	char name[DPU_NAME_SIZE];

	if (!drm_enc->dev) {
		DPU_ERROR("invalid encoder or kms\n");
		return -EINVAL;
	}

	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);

	/* create overall sub-directory for the encoder */
	dpu_enc->debugfs_root = debugfs_create_dir(name,
			drm_enc->dev->primary->debugfs_root);

	/* don't error check these */
	debugfs_create_file("status", 0600,
			dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);

	for (i = 0; i < dpu_enc->num_phys_encs; i++)
		if (dpu_enc->phys_encs[i]->ops.late_register)
			dpu_enc->phys_encs[i]->ops.late_register(
					dpu_enc->phys_encs[i],
					dpu_enc->debugfs_root);

	return 0;
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	return 0;
}
#endif

static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
	return _dpu_encoder_init_debugfs(encoder);
}

static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	debugfs_remove_recursive(dpu_enc->debugfs_root);
}

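/*
 * Create the physical encoder(s) backing one h-tile: a video-mode and/or a
 * command-mode physical encoder is created depending on the display
 * capabilities, and the last one created is recorded as the current master
 * (or as the current slave when this tile has the slave split role).
 */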
static int dpu_encoder_virt_add_phys_encs(
		u32 display_caps,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
	 * in this function, check up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			  dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
		enc = dpu_encoder_phys_vid_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
		enc = dpu_encoder_phys_cmd_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
	.handle_vblank_virt = dpu_encoder_vblank_callback,
	.handle_underrun_virt = dpu_encoder_underrun_callback,
	.handle_frame_done = dpu_encoder_frame_done_callback,
};

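/*
 * Map the msm_display_info onto DPU hardware: pick the interface type,
 * assign a master/slave/solo split role to each h-tile, look up the intf
 * block for each controller id and instantiate its physical encoders.
 */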
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
				 struct dpu_kms *dpu_kms,
				 struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	enum dpu_intf_type intf_type = INTF_NONE;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.parent_ops = &dpu_encoder_parent_ops;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	DPU_DEBUG("\n");

	switch (disp_info->intf_type) {
	case DRM_MODE_ENCODER_DSI:
		intf_type = INTF_DSI;
		break;
	case DRM_MODE_ENCODER_TMDS:
		intf_type = INTF_DP;
		break;
	}

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
				i, controller_id, phys_params.split_role);

		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
				intf_type, controller_id);
		if (phys_params.intf_idx == INTF_MAX) {
			DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
					intf_type, controller_id);
			ret = -EINVAL;
		}

		if (!ret) {
			ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
					dpu_enc, &phys_params);
			if (ret)
				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
		}
	}

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		atomic_set(&phys->vsync_cnt, 0);
		atomic_set(&phys->underrun_cnt, 0);
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}

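/*
 * The frame-done watchdog armed in dpu_encoder_kickoff() expired before the
 * hardware signalled frame done: report DPU_ENCODER_FRAME_EVENT_ERROR to the
 * registered frame-event callback so the caller can recover.
 */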
static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.mode_set = dpu_encoder_virt_mode_set,
	.disable = dpu_encoder_virt_disable,
	.enable = dpu_kms_encoder_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,

	/* This is called by dpu_kms_encoder_enable */
	.commit = dpu_encoder_virt_enable,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};

int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *drm_enc = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;

	dpu_enc = to_dpu_encoder_virt(enc);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret)
		goto fail;

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
		timer_setup(&dpu_enc->vsync_event_timer,
				dpu_encoder_vsync_event_handler,
				0);

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	kthread_init_work(&dpu_enc->vsync_event_work,
			dpu_encoder_vsync_event_work_handler);

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return ret;

fail:
	DPU_ERROR("failed to create encoder\n");
	if (drm_enc)
		dpu_encoder_destroy(drm_enc);

	return ret;
}

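/*
 * First-stage init: allocate the virtual encoder, register it with the DRM
 * core and attach the helper funcs. Display binding happens later, in
 * dpu_encoder_setup().
 */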
struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int rc = 0;

	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
	if (!dpu_enc)
		return ERR_PTR(-ENOMEM);

	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			drm_enc_mode, NULL);
	if (rc) {
		devm_kfree(dev->dev, dpu_enc);
		return ERR_PTR(rc);
	}

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	return &dpu_enc->base;
}

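/*
 * Block until every physical encoder reports the requested event (commit
 * done, tx complete or vblank), returning the first error encountered.
 */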
int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
	enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		case MSM_ENC_VBLANK:
			fn_wait = phys->ops.wait_for_vblank;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

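/*
 * Report whether this encoder currently drives a video-mode or command-mode
 * interface, preferring the current master's view when one is set.
 */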
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}