1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2020 Intel Corporation
4 */
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
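/* Resolve the owning i915 device from a debugfs drm_info_node. */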
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
24 return to_i915(node->minor->dev);
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
29 struct drm_i915_private *dev_priv = node_to_i915(m->private);
31 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 dev_priv->fb_tracking.busy_bits);
34 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 dev_priv->fb_tracking.flip_bits);
37 return 0;
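/*
 * i915_fbc_status: report whether FBC is enabled (or the reason it is not)
 * and, when active, whether the hardware is currently compressing, read
 * from the per-generation FBC status register.
 */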
40 static int i915_fbc_status(struct seq_file *m, void *unused)
42 struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 struct intel_fbc *fbc = &dev_priv->fbc;
44 intel_wakeref_t wakeref;
46 if (!HAS_FBC(dev_priv))
47 return -ENODEV;
49 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 mutex_lock(&fbc->lock);
52 if (intel_fbc_is_active(dev_priv))
53 seq_puts(m, "FBC enabled\n");
54 else
55 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
57 if (intel_fbc_is_active(dev_priv)) {
58 u32 mask;
60 if (INTEL_GEN(dev_priv) >= 8)
61 mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 else if (INTEL_GEN(dev_priv) >= 7)
63 mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 else if (INTEL_GEN(dev_priv) >= 5)
65 mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 else if (IS_G4X(dev_priv))
67 mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 else
69 mask = intel_de_read(dev_priv, FBC_STATUS) &
70 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
72 seq_printf(m, "Compressing: %s\n", yesno(mask));
75 mutex_unlock(&fbc->lock);
76 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
78 return 0;
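/*
 * i915_fbc_false_color: debugfs knob toggling FBC false-color mode via
 * FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL; only available on gen7+ with FBC.
 */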
81 static int i915_fbc_false_color_get(void *data, u64 *val)
83 struct drm_i915_private *dev_priv = data;
85 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 return -ENODEV;
88 *val = dev_priv->fbc.false_color;
90 return 0;
93 static int i915_fbc_false_color_set(void *data, u64 val)
95 struct drm_i915_private *dev_priv = data;
96 u32 reg;
98 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 return -ENODEV;
101 mutex_lock(&dev_priv->fbc.lock);
103 reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 dev_priv->fbc.false_color = val;
106 intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
109 mutex_unlock(&dev_priv->fbc.lock);
110 return 0;
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 i915_fbc_false_color_get, i915_fbc_false_color_set,
115 "%llu\n");
117 static int i915_ips_status(struct seq_file *m, void *unused)
119 struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 intel_wakeref_t wakeref;
122 if (!HAS_IPS(dev_priv))
123 return -ENODEV;
125 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
127 seq_printf(m, "Enabled by kernel parameter: %s\n",
128 yesno(dev_priv->params.enable_ips));
130 if (INTEL_GEN(dev_priv) >= 8) {
131 seq_puts(m, "Currently: unknown\n");
132 } else {
133 if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 seq_puts(m, "Currently: enabled\n");
135 else
136 seq_puts(m, "Currently: disabled\n");
139 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
141 return 0;
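/*
 * i915_sr_status: report whether panel self-refresh is enabled, read from
 * the platform-specific register; gen9+ has no single global SR bit.
 */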
144 static int i915_sr_status(struct seq_file *m, void *unused)
146 struct drm_i915_private *dev_priv = node_to_i915(m->private);
147 intel_wakeref_t wakeref;
148 bool sr_enabled = false;
150 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
152 if (INTEL_GEN(dev_priv) >= 9)
153 /* no global SR status; inspect per-plane WM */;
154 else if (HAS_PCH_SPLIT(dev_priv))
155 sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
156 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
157 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
158 sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 else if (IS_I915GM(dev_priv))
160 sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
161 else if (IS_PINEVIEW(dev_priv))
162 sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
163 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
164 sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
166 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
168 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
170 return 0;
173 static int i915_opregion(struct seq_file *m, void *unused)
175 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
177 if (opregion->header)
178 seq_write(m, opregion->header, OPREGION_SIZE);
180 return 0;
183 static int i915_vbt(struct seq_file *m, void *unused)
185 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
187 if (opregion->vbt)
188 seq_write(m, opregion->vbt, opregion->vbt_size);
190 return 0;
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
195 struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 struct drm_device *dev = &dev_priv->drm;
197 struct intel_framebuffer *fbdev_fb = NULL;
198 struct drm_framebuffer *drm_fb;
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
204 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 fbdev_fb->base.width,
206 fbdev_fb->base.height,
207 fbdev_fb->base.format->depth,
208 fbdev_fb->base.format->cpp[0] * 8,
209 fbdev_fb->base.modifier,
210 drm_framebuffer_read_refcount(&fbdev_fb->base));
211 i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 seq_putc(m, '\n');
214 #endif
216 mutex_lock(&dev->mode_config.fb_lock);
217 drm_for_each_fb(drm_fb, dev) {
218 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 if (fb == fbdev_fb)
220 continue;
222 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 fb->base.width,
224 fb->base.height,
225 fb->base.format->depth,
226 fb->base.format->cpp[0] * 8,
227 fb->base.modifier,
228 drm_framebuffer_read_refcount(&fb->base));
229 i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 seq_putc(m, '\n');
232 mutex_unlock(&dev->mode_config.fb_lock);
234 return 0;
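/*
 * i915_psr_sink_status: decode the sink-side PSR state machine from the
 * DP_PSR_STATUS DPCD register of the attached eDP/DP sink.
 */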
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
239 u8 val;
240 static const char * const sink_status[] = {
241 "inactive",
242 "transition to active, capture and display",
243 "active, display from RFB",
244 "active, capture and display on sink device timings",
245 "transition to inactive, capture and display, timing re-sync",
246 "reserved",
247 "reserved",
248 "sink internal error",
250 struct drm_connector *connector = m->private;
251 struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 struct intel_dp *intel_dp =
253 intel_attached_dp(to_intel_connector(connector));
254 int ret;
256 if (!CAN_PSR(dev_priv)) {
257 seq_puts(m, "PSR Unsupported\n");
258 return -ENODEV;
261 if (connector->status != connector_status_connected)
262 return -ENODEV;
264 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
266 if (ret == 1) {
267 const char *str = "unknown";
269 val &= DP_PSR_SINK_STATE_MASK;
270 if (val < ARRAY_SIZE(sink_status))
271 str = sink_status[val];
272 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
273 } else {
274 return ret;
277 return 0;
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
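/*
 * Decode the source-side PSR hardware state from EDP_PSR_STATUS or
 * EDP_PSR2_STATUS on the transcoder currently driving PSR.
 */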
281 static void
282 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
284 u32 val, status_val;
285 const char *status = "unknown";
287 if (dev_priv->psr.psr2_enabled) {
288 static const char * const live_status[] = {
289 "IDLE",
290 "CAPTURE",
291 "CAPTURE_FS",
292 "SLEEP",
293 "BUFON_FW",
294 "ML_UP",
295 "SU_STANDBY",
296 "FAST_SLEEP",
297 "DEEP_SLEEP",
298 "BUF_ON",
299 "TG_ON"
301 val = intel_de_read(dev_priv,
302 EDP_PSR2_STATUS(dev_priv->psr.transcoder));
303 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
304 EDP_PSR2_STATUS_STATE_SHIFT;
305 if (status_val < ARRAY_SIZE(live_status))
306 status = live_status[status_val];
307 } else {
308 static const char * const live_status[] = {
309 "IDLE",
310 "SRDONACK",
311 "SRDENT",
312 "BUFOFF",
313 "BUFON",
314 "AUXACK",
315 "SRDOFFACK",
316 "SRDENT_ON",
318 val = intel_de_read(dev_priv,
319 EDP_PSR_STATUS(dev_priv->psr.transcoder));
320 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
321 EDP_PSR_STATUS_STATE_SHIFT;
322 if (status_val < ARRAY_SIZE(live_status))
323 status = live_status[status_val];
326 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
329 static int i915_edp_psr_status(struct seq_file *m, void *data)
331 struct drm_i915_private *dev_priv = node_to_i915(m->private);
332 struct i915_psr *psr = &dev_priv->psr;
333 intel_wakeref_t wakeref;
334 const char *status;
335 bool enabled;
336 u32 val;
338 if (!HAS_PSR(dev_priv))
339 return -ENODEV;
341 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 if (psr->dp)
343 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
344 seq_puts(m, "\n");
346 if (!psr->sink_support)
347 return 0;
349 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 mutex_lock(&psr->lock);
352 if (psr->enabled)
353 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 else
355 status = "disabled";
356 seq_printf(m, "PSR mode: %s\n", status);
358 if (!psr->enabled) {
359 seq_printf(m, "PSR sink not reliable: %s\n",
360 yesno(psr->sink_not_reliable));
362 goto unlock;
365 if (psr->psr2_enabled) {
366 val = intel_de_read(dev_priv,
367 EDP_PSR2_CTL(dev_priv->psr.transcoder));
368 enabled = val & EDP_PSR2_ENABLE;
369 } else {
370 val = intel_de_read(dev_priv,
371 EDP_PSR_CTL(dev_priv->psr.transcoder));
372 enabled = val & EDP_PSR_ENABLE;
374 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 enableddisabled(enabled), val);
376 psr_source_status(dev_priv, m);
377 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 psr->busy_frontbuffer_bits);
/* SKL+ Perf counter is reset to 0 every time DC state is entered */
383 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 val = intel_de_read(dev_priv,
385 EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
386 val &= EDP_PSR_PERF_CNT_MASK;
387 seq_printf(m, "Performance counter: %u\n", val);
390 if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 seq_printf(m, "Last attempted entry at: %lld\n",
392 psr->last_entry_attempt);
393 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
396 if (psr->psr2_enabled) {
397 u32 su_frames_val[3];
398 int frame;
/*
 * Read all 3 registers beforehand to minimize crossing a
 * frame boundary between register reads
 */
404 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 val = intel_de_read(dev_priv,
406 PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
407 su_frames_val[frame / 3] = val;
410 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
412 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 u32 su_blocks;
415 su_blocks = su_frames_val[frame / 3] &
416 PSR2_SU_STATUS_MASK(frame);
417 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 seq_printf(m, "%d\t%d\n", frame, su_blocks);
421 seq_printf(m, "PSR2 selective fetch: %s\n",
422 enableddisabled(psr->psr2_sel_fetch_enabled));
425 unlock:
426 mutex_unlock(&psr->lock);
427 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
429 return 0;
432 static int
433 i915_edp_psr_debug_set(void *data, u64 val)
435 struct drm_i915_private *dev_priv = data;
436 intel_wakeref_t wakeref;
437 int ret;
439 if (!CAN_PSR(dev_priv))
440 return -ENODEV;
442 drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
444 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
446 ret = intel_psr_debug_set(dev_priv, val);
448 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
450 return ret;
453 static int
454 i915_edp_psr_debug_get(void *data, u64 *val)
456 struct drm_i915_private *dev_priv = data;
458 if (!CAN_PSR(dev_priv))
459 return -ENODEV;
461 *val = READ_ONCE(dev_priv->psr.debug);
462 return 0;
465 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
466 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
467 "%llu\n");
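/*
 * i915_power_domain_info: list every power well with its reference count
 * and the use counts of the display power domains it serves.
 */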
469 static int i915_power_domain_info(struct seq_file *m, void *unused)
471 struct drm_i915_private *dev_priv = node_to_i915(m->private);
472 struct i915_power_domains *power_domains = &dev_priv->power_domains;
473 int i;
475 mutex_lock(&power_domains->lock);
477 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
478 for (i = 0; i < power_domains->power_well_count; i++) {
479 struct i915_power_well *power_well;
480 enum intel_display_power_domain power_domain;
482 power_well = &power_domains->power_wells[i];
483 seq_printf(m, "%-25s %d\n", power_well->desc->name,
484 power_well->count);
486 for_each_power_domain(power_domain, power_well->desc->domains)
487 seq_printf(m, " %-23s %d\n",
488 intel_display_power_domain_str(power_domain),
489 power_domains->domain_use_count[power_domain]);
492 mutex_unlock(&power_domains->lock);
494 return 0;
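/*
 * i915_dmc_info: report DMC (CSR) firmware load status and version, plus
 * the DC-state transition counters, which live in different registers
 * depending on the platform.
 */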
497 static int i915_dmc_info(struct seq_file *m, void *unused)
499 struct drm_i915_private *dev_priv = node_to_i915(m->private);
500 intel_wakeref_t wakeref;
501 struct intel_csr *csr;
502 i915_reg_t dc5_reg, dc6_reg = {};
504 if (!HAS_CSR(dev_priv))
505 return -ENODEV;
507 csr = &dev_priv->csr;
509 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
511 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
512 seq_printf(m, "path: %s\n", csr->fw_path);
514 if (!csr->dmc_payload)
515 goto out;
517 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
518 CSR_VERSION_MINOR(csr->version));
520 if (INTEL_GEN(dev_priv) >= 12) {
521 if (IS_DGFX(dev_priv)) {
522 dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
523 } else {
524 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
525 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
/*
 * NOTE: DMC_DEBUG3 is a general purpose reg.
 * According to Bspec 49196, the DMC firmware reuses the DC5/6 counter
 * register for DC3CO debugging and validation, but the TGL DMC firmware
 * uses the DMC_DEBUG3 register as the DC3CO counter.
 */
534 seq_printf(m, "DC3CO count: %d\n",
535 intel_de_read(dev_priv, DMC_DEBUG3));
536 } else {
537 dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
538 SKL_CSR_DC3_DC5_COUNT;
539 if (!IS_GEN9_LP(dev_priv))
540 dc6_reg = SKL_CSR_DC5_DC6_COUNT;
543 seq_printf(m, "DC3 -> DC5 count: %d\n",
544 intel_de_read(dev_priv, dc5_reg));
545 if (dc6_reg.reg)
546 seq_printf(m, "DC5 -> DC6 count: %d\n",
547 intel_de_read(dev_priv, dc6_reg));
549 out:
550 seq_printf(m, "program base: 0x%08x\n",
551 intel_de_read(dev_priv, CSR_PROGRAM(0)));
552 seq_printf(m, "ssp base: 0x%08x\n",
553 intel_de_read(dev_priv, CSR_SSP_BASE));
554 seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
556 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
558 return 0;
561 static void intel_seq_print_mode(struct seq_file *m, int tabs,
562 const struct drm_display_mode *mode)
564 int i;
566 for (i = 0; i < tabs; i++)
567 seq_putc(m, '\t');
569 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
572 static void intel_encoder_info(struct seq_file *m,
573 struct intel_crtc *crtc,
574 struct intel_encoder *encoder)
576 struct drm_i915_private *dev_priv = node_to_i915(m->private);
577 struct drm_connector_list_iter conn_iter;
578 struct drm_connector *connector;
580 seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
581 encoder->base.base.id, encoder->base.name);
583 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
584 drm_for_each_connector_iter(connector, &conn_iter) {
585 const struct drm_connector_state *conn_state =
586 connector->state;
588 if (conn_state->best_encoder != &encoder->base)
589 continue;
591 seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
592 connector->base.id, connector->name);
594 drm_connector_list_iter_end(&conn_iter);
597 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
599 const struct drm_display_mode *mode = panel->fixed_mode;
601 seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
604 static void intel_hdcp_info(struct seq_file *m,
605 struct intel_connector *intel_connector)
607 bool hdcp_cap, hdcp2_cap;
609 if (!intel_connector->hdcp.shim) {
610 seq_puts(m, "No Connector Support");
611 goto out;
614 hdcp_cap = intel_hdcp_capable(intel_connector);
615 hdcp2_cap = intel_hdcp2_capable(intel_connector);
617 if (hdcp_cap)
618 seq_puts(m, "HDCP1.4 ");
619 if (hdcp2_cap)
620 seq_puts(m, "HDCP2.2 ");
622 if (!hdcp_cap && !hdcp2_cap)
623 seq_puts(m, "None");
625 out:
626 seq_puts(m, "\n");
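/*
 * intel_dp_info: dump DPCD revision, audio capability, the fixed panel
 * mode for eDP and any DP downstream-port details.
 */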
629 static void intel_dp_info(struct seq_file *m,
630 struct intel_connector *intel_connector)
632 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
633 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
634 const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
636 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
637 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
638 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
639 intel_panel_info(m, &intel_connector->panel);
641 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
642 edid ? edid->data : NULL, &intel_dp->aux);
645 static void intel_dp_mst_info(struct seq_file *m,
646 struct intel_connector *intel_connector)
648 bool has_audio = intel_connector->port->has_audio;
650 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
653 static void intel_hdmi_info(struct seq_file *m,
654 struct intel_connector *intel_connector)
656 struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
657 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
659 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
662 static void intel_lvds_info(struct seq_file *m,
663 struct intel_connector *intel_connector)
665 intel_panel_info(m, &intel_connector->panel);
668 static void intel_connector_info(struct seq_file *m,
669 struct drm_connector *connector)
671 struct intel_connector *intel_connector = to_intel_connector(connector);
672 const struct drm_connector_state *conn_state = connector->state;
673 struct intel_encoder *encoder =
674 to_intel_encoder(conn_state->best_encoder);
675 const struct drm_display_mode *mode;
677 seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
678 connector->base.id, connector->name,
679 drm_get_connector_status_name(connector->status));
681 if (connector->status == connector_status_disconnected)
682 return;
684 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
685 connector->display_info.width_mm,
686 connector->display_info.height_mm);
687 seq_printf(m, "\tsubpixel order: %s\n",
688 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
689 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
691 if (!encoder)
692 return;
694 switch (connector->connector_type) {
695 case DRM_MODE_CONNECTOR_DisplayPort:
696 case DRM_MODE_CONNECTOR_eDP:
697 if (encoder->type == INTEL_OUTPUT_DP_MST)
698 intel_dp_mst_info(m, intel_connector);
699 else
700 intel_dp_info(m, intel_connector);
701 break;
702 case DRM_MODE_CONNECTOR_LVDS:
703 if (encoder->type == INTEL_OUTPUT_LVDS)
704 intel_lvds_info(m, intel_connector);
705 break;
706 case DRM_MODE_CONNECTOR_HDMIA:
707 if (encoder->type == INTEL_OUTPUT_HDMI ||
708 encoder->type == INTEL_OUTPUT_DDI)
709 intel_hdmi_info(m, intel_connector);
710 break;
711 default:
712 break;
715 seq_puts(m, "\tHDCP version: ");
716 intel_hdcp_info(m, intel_connector);
718 seq_printf(m, "\tmodes:\n");
719 list_for_each_entry(mode, &connector->modes, head)
720 intel_seq_print_mode(m, 2, mode);
723 static const char *plane_type(enum drm_plane_type type)
725 switch (type) {
726 case DRM_PLANE_TYPE_OVERLAY:
727 return "OVL";
728 case DRM_PLANE_TYPE_PRIMARY:
729 return "PRI";
730 case DRM_PLANE_TYPE_CURSOR:
731 return "CUR";
/*
 * Deliberately omitting default: to generate compiler warnings
 * when a new drm_plane_type gets added.
 */
738 return "unknown";
741 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
/*
 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
 * but print them all so that misused values are easy to spot.
 */
747 snprintf(buf, bufsize,
748 "%s%s%s%s%s%s(0x%08x)",
749 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
750 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
751 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
752 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
753 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
754 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
755 rotation);
758 static const char *plane_visibility(const struct intel_plane_state *plane_state)
760 if (plane_state->uapi.visible)
761 return "visible";
763 if (plane_state->planar_slave)
764 return "planar-slave";
766 return "hidden";
769 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
771 const struct intel_plane_state *plane_state =
772 to_intel_plane_state(plane->base.state);
773 const struct drm_framebuffer *fb = plane_state->uapi.fb;
774 struct drm_format_name_buf format_name;
775 struct drm_rect src, dst;
776 char rot_str[48];
778 src = drm_plane_state_src(&plane_state->uapi);
779 dst = drm_plane_state_dest(&plane_state->uapi);
781 if (fb)
782 drm_get_format_name(fb->format->format, &format_name);
784 plane_rotation(rot_str, sizeof(rot_str),
785 plane_state->uapi.rotation);
787 seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
788 fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
789 fb ? fb->modifier : 0,
790 fb ? fb->width : 0, fb ? fb->height : 0,
791 plane_visibility(plane_state),
792 DRM_RECT_FP_ARG(&src),
793 DRM_RECT_ARG(&dst),
794 rot_str);
796 if (plane_state->planar_linked_plane)
797 seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
798 plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
799 plane_state->planar_slave ? "slave" : "master");
802 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
804 const struct intel_plane_state *plane_state =
805 to_intel_plane_state(plane->base.state);
806 const struct drm_framebuffer *fb = plane_state->hw.fb;
807 struct drm_format_name_buf format_name;
808 char rot_str[48];
810 if (!fb)
811 return;
813 drm_get_format_name(fb->format->format, &format_name);
815 plane_rotation(rot_str, sizeof(rot_str),
816 plane_state->hw.rotation);
818 seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
819 fb->base.id, format_name.str,
820 fb->modifier, fb->width, fb->height,
821 yesno(plane_state->uapi.visible),
822 DRM_RECT_FP_ARG(&plane_state->uapi.src),
823 DRM_RECT_ARG(&plane_state->uapi.dst),
824 rot_str);
827 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
829 struct drm_i915_private *dev_priv = node_to_i915(m->private);
830 struct intel_plane *plane;
832 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
833 seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
834 plane->base.base.id, plane->base.name,
835 plane_type(plane->base.type));
836 intel_plane_uapi_info(m, plane);
837 intel_plane_hw_info(m, plane);
841 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
843 const struct intel_crtc_state *crtc_state =
844 to_intel_crtc_state(crtc->base.state);
845 int num_scalers = crtc->num_scalers;
846 int i;
/* Not all platforms have a scaler */
849 if (num_scalers) {
850 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
851 num_scalers,
852 crtc_state->scaler_state.scaler_users,
853 crtc_state->scaler_state.scaler_id);
855 for (i = 0; i < num_scalers; i++) {
856 const struct intel_scaler *sc =
857 &crtc_state->scaler_state.scalers[i];
859 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
860 i, yesno(sc->in_use), sc->mode);
862 seq_puts(m, "\n");
863 } else {
864 seq_puts(m, "\tNo scalers available on this platform\n");
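/*
 * intel_crtc_info: print the uapi and hw state of one CRTC, followed by
 * its scalers, attached encoders/connectors, planes and the FIFO underrun
 * reporting state.
 */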
868 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
870 struct drm_i915_private *dev_priv = node_to_i915(m->private);
871 const struct intel_crtc_state *crtc_state =
872 to_intel_crtc_state(crtc->base.state);
873 struct intel_encoder *encoder;
875 seq_printf(m, "[CRTC:%d:%s]:\n",
876 crtc->base.base.id, crtc->base.name);
878 seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
879 yesno(crtc_state->uapi.enable),
880 yesno(crtc_state->uapi.active),
881 DRM_MODE_ARG(&crtc_state->uapi.mode));
883 if (crtc_state->hw.enable) {
884 seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
885 yesno(crtc_state->hw.active),
886 DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
888 seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
889 crtc_state->pipe_src_w, crtc_state->pipe_src_h,
890 yesno(crtc_state->dither), crtc_state->pipe_bpp);
892 intel_scaler_info(m, crtc);
895 if (crtc_state->bigjoiner)
896 seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
897 crtc_state->bigjoiner_linked_crtc->base.base.id,
898 crtc_state->bigjoiner_linked_crtc->base.name,
899 crtc_state->bigjoiner_slave ? "slave" : "master");
901 for_each_intel_encoder_mask(&dev_priv->drm, encoder,
902 crtc_state->uapi.encoder_mask)
903 intel_encoder_info(m, crtc, encoder);
905 intel_plane_info(m, crtc);
907 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
908 yesno(!crtc->cpu_fifo_underrun_disabled),
909 yesno(!crtc->pch_fifo_underrun_disabled));
912 static int i915_display_info(struct seq_file *m, void *unused)
914 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915 struct drm_device *dev = &dev_priv->drm;
916 struct intel_crtc *crtc;
917 struct drm_connector *connector;
918 struct drm_connector_list_iter conn_iter;
919 intel_wakeref_t wakeref;
921 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
923 drm_modeset_lock_all(dev);
925 seq_printf(m, "CRTC info\n");
926 seq_printf(m, "---------\n");
927 for_each_intel_crtc(dev, crtc)
928 intel_crtc_info(m, crtc);
930 seq_printf(m, "\n");
931 seq_printf(m, "Connector info\n");
932 seq_printf(m, "--------------\n");
933 drm_connector_list_iter_begin(dev, &conn_iter);
934 drm_for_each_connector_iter(connector, &conn_iter)
935 intel_connector_info(m, connector);
936 drm_connector_list_iter_end(&conn_iter);
938 drm_modeset_unlock_all(dev);
940 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
942 return 0;
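/*
 * i915_shared_dplls_info: dump the software-tracked state of every shared
 * DPLL, including the MG PLL registers where the platform has them.
 */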
945 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
947 struct drm_i915_private *dev_priv = node_to_i915(m->private);
948 struct drm_device *dev = &dev_priv->drm;
949 int i;
951 drm_modeset_lock_all(dev);
953 seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
954 dev_priv->dpll.ref_clks.nssc,
955 dev_priv->dpll.ref_clks.ssc);
957 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
958 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
960 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
961 pll->info->id);
962 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
963 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
964 seq_printf(m, " tracked hardware state:\n");
965 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
966 seq_printf(m, " dpll_md: 0x%08x\n",
967 pll->state.hw_state.dpll_md);
968 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
969 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
970 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
971 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
972 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
973 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
974 pll->state.hw_state.mg_refclkin_ctl);
975 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
976 pll->state.hw_state.mg_clktop2_coreclkctl1);
977 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
978 pll->state.hw_state.mg_clktop2_hsclkctl);
979 seq_printf(m, " mg_pll_div0: 0x%08x\n",
980 pll->state.hw_state.mg_pll_div0);
981 seq_printf(m, " mg_pll_div1: 0x%08x\n",
982 pll->state.hw_state.mg_pll_div1);
983 seq_printf(m, " mg_pll_lf: 0x%08x\n",
984 pll->state.hw_state.mg_pll_lf);
985 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
986 pll->state.hw_state.mg_pll_frac_lock);
987 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
988 pll->state.hw_state.mg_pll_ssc);
989 seq_printf(m, " mg_pll_bias: 0x%08x\n",
990 pll->state.hw_state.mg_pll_bias);
991 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
992 pll->state.hw_state.mg_pll_tdc_coldst_bias);
994 drm_modeset_unlock_all(dev);
996 return 0;
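/*
 * i915_ipc_status: read or toggle Isochronous Priority Control; writing
 * also marks the BIOS watermarks as untrusted so they get recomputed on
 * the next commit.
 */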
999 static int i915_ipc_status_show(struct seq_file *m, void *data)
1001 struct drm_i915_private *dev_priv = m->private;
1003 seq_printf(m, "Isochronous Priority Control: %s\n",
1004 yesno(dev_priv->ipc_enabled));
1005 return 0;
1008 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1010 struct drm_i915_private *dev_priv = inode->i_private;
1012 if (!HAS_IPC(dev_priv))
1013 return -ENODEV;
1015 return single_open(file, i915_ipc_status_show, dev_priv);
1018 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1019 size_t len, loff_t *offp)
1021 struct seq_file *m = file->private_data;
1022 struct drm_i915_private *dev_priv = m->private;
1023 intel_wakeref_t wakeref;
1024 bool enable;
1025 int ret;
1027 ret = kstrtobool_from_user(ubuf, len, &enable);
1028 if (ret < 0)
1029 return ret;
1031 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1032 if (!dev_priv->ipc_enabled && enable)
1033 drm_info(&dev_priv->drm,
1034 "Enabling IPC: WM will be proper only after next commit\n");
1035 dev_priv->wm.distrust_bios_wm = true;
1036 dev_priv->ipc_enabled = enable;
1037 intel_enable_ipc(dev_priv);
1040 return len;
1043 static const struct file_operations i915_ipc_status_fops = {
1044 .owner = THIS_MODULE,
1045 .open = i915_ipc_status_open,
1046 .read = seq_read,
1047 .llseek = seq_lseek,
1048 .release = single_release,
1049 .write = i915_ipc_status_write
1052 static int i915_ddb_info(struct seq_file *m, void *unused)
1054 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1055 struct drm_device *dev = &dev_priv->drm;
1056 struct skl_ddb_entry *entry;
1057 struct intel_crtc *crtc;
1059 if (INTEL_GEN(dev_priv) < 9)
1060 return -ENODEV;
1062 drm_modeset_lock_all(dev);
1064 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1066 for_each_intel_crtc(&dev_priv->drm, crtc) {
1067 struct intel_crtc_state *crtc_state =
1068 to_intel_crtc_state(crtc->base.state);
1069 enum pipe pipe = crtc->pipe;
1070 enum plane_id plane_id;
1072 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1074 for_each_plane_id_on_crtc(crtc, plane_id) {
1075 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1076 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
1077 entry->start, entry->end,
1078 skl_ddb_entry_size(entry));
1081 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1082 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
1083 entry->end, skl_ddb_entry_size(entry));
1086 drm_modeset_unlock_all(dev);
1088 return 0;
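/*
 * Per-CRTC DRRS report: whether the connector supports seamless DRRS and,
 * when DRRS is enabled, the current refresh-rate state, vrefresh and busy
 * frontbuffer bits.
 */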
1091 static void drrs_status_per_crtc(struct seq_file *m,
1092 struct drm_device *dev,
1093 struct intel_crtc *intel_crtc)
1095 struct drm_i915_private *dev_priv = to_i915(dev);
1096 struct i915_drrs *drrs = &dev_priv->drrs;
1097 int vrefresh = 0;
1098 struct drm_connector *connector;
1099 struct drm_connector_list_iter conn_iter;
1101 drm_connector_list_iter_begin(dev, &conn_iter);
1102 drm_for_each_connector_iter(connector, &conn_iter) {
1103 bool supported = false;
1105 if (connector->state->crtc != &intel_crtc->base)
1106 continue;
1108 seq_printf(m, "%s:\n", connector->name);
1110 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1111 drrs->type == SEAMLESS_DRRS_SUPPORT)
1112 supported = true;
1114 seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1116 drm_connector_list_iter_end(&conn_iter);
1118 seq_puts(m, "\n");
1120 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1121 struct intel_panel *panel;
1123 mutex_lock(&drrs->mutex);
1124 /* DRRS Supported */
1125 seq_puts(m, "\tDRRS Enabled: Yes\n");
1127 /* disable_drrs() will make drrs->dp NULL */
1128 if (!drrs->dp) {
1129 seq_puts(m, "Idleness DRRS: Disabled\n");
1130 if (dev_priv->psr.enabled)
1131 seq_puts(m,
1132 "\tAs PSR is enabled, DRRS is not enabled\n");
1133 mutex_unlock(&drrs->mutex);
1134 return;
1137 panel = &drrs->dp->attached_connector->panel;
1138 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1139 drrs->busy_frontbuffer_bits);
1141 seq_puts(m, "\n\t\t");
1142 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1143 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1144 vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1145 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1146 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1147 vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1148 } else {
1149 seq_printf(m, "DRRS_State: Unknown(%d)\n",
1150 drrs->refresh_rate_type);
1151 mutex_unlock(&drrs->mutex);
1152 return;
1154 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1156 seq_puts(m, "\n\t\t");
1157 mutex_unlock(&drrs->mutex);
1158 } else {
/* DRRS not supported. Print the VBT parameter */
1160 seq_puts(m, "\tDRRS Enabled : No");
1162 seq_puts(m, "\n");
1165 static int i915_drrs_status(struct seq_file *m, void *unused)
1167 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1168 struct drm_device *dev = &dev_priv->drm;
1169 struct intel_crtc *intel_crtc;
1170 int active_crtc_cnt = 0;
1172 drm_modeset_lock_all(dev);
1173 for_each_intel_crtc(dev, intel_crtc) {
1174 if (intel_crtc->base.state->active) {
1175 active_crtc_cnt++;
1176 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
1178 drrs_status_per_crtc(m, dev, intel_crtc);
1181 drm_modeset_unlock_all(dev);
1183 if (!active_crtc_cnt)
1184 seq_puts(m, "No active crtc found\n");
1186 return 0;
1189 #define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1190 seq_puts(m, "LPSP: disabled\n"))
1192 static bool
1193 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1194 enum i915_power_well_id power_well_id)
1196 intel_wakeref_t wakeref;
1197 bool is_enabled;
1199 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1200 is_enabled = intel_display_power_well_is_enabled(i915,
1201 power_well_id);
1202 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1204 return is_enabled;
1207 static int i915_lpsp_status(struct seq_file *m, void *unused)
1209 struct drm_i915_private *i915 = node_to_i915(m->private);
1211 switch (INTEL_GEN(i915)) {
1212 case 12:
1213 case 11:
1214 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1215 break;
1216 case 10:
1217 case 9:
1218 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1219 break;
1220 default:
/*
 * Apart from HASWELL/BROADWELL, no other legacy platform
 * supports LPSP.
 */
1225 if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1226 LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1227 else
1228 seq_puts(m, "LPSP: not supported\n");
1231 return 0;
1234 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1236 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1237 struct drm_device *dev = &dev_priv->drm;
1238 struct intel_encoder *intel_encoder;
1239 struct intel_digital_port *dig_port;
1240 struct drm_connector *connector;
1241 struct drm_connector_list_iter conn_iter;
1243 drm_connector_list_iter_begin(dev, &conn_iter);
1244 drm_for_each_connector_iter(connector, &conn_iter) {
1245 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1246 continue;
1248 intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1249 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1250 continue;
1252 dig_port = enc_to_dig_port(intel_encoder);
1253 if (!dig_port->dp.can_mst)
1254 continue;
1256 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1257 dig_port->base.base.base.id,
1258 dig_port->base.base.name);
1259 drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1261 drm_connector_list_iter_end(&conn_iter);
1263 return 0;
1266 static ssize_t i915_displayport_test_active_write(struct file *file,
1267 const char __user *ubuf,
1268 size_t len, loff_t *offp)
1270 char *input_buffer;
1271 int status = 0;
1272 struct drm_device *dev;
1273 struct drm_connector *connector;
1274 struct drm_connector_list_iter conn_iter;
1275 struct intel_dp *intel_dp;
1276 int val = 0;
1278 dev = ((struct seq_file *)file->private_data)->private;
1280 if (len == 0)
1281 return 0;
1283 input_buffer = memdup_user_nul(ubuf, len);
1284 if (IS_ERR(input_buffer))
1285 return PTR_ERR(input_buffer);
1287 drm_dbg(&to_i915(dev)->drm,
1288 "Copied %d bytes from user\n", (unsigned int)len);
1290 drm_connector_list_iter_begin(dev, &conn_iter);
1291 drm_for_each_connector_iter(connector, &conn_iter) {
1292 struct intel_encoder *encoder;
1294 if (connector->connector_type !=
1295 DRM_MODE_CONNECTOR_DisplayPort)
1296 continue;
1298 encoder = to_intel_encoder(connector->encoder);
1299 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1300 continue;
1302 if (encoder && connector->status == connector_status_connected) {
1303 intel_dp = enc_to_intel_dp(encoder);
1304 status = kstrtoint(input_buffer, 10, &val);
1305 if (status < 0)
1306 break;
1307 drm_dbg(&to_i915(dev)->drm,
1308 "Got %d for test active\n", val);
/*
 * To prevent erroneous activation of the compliance
 * testing code, only accept an actual value of 1 here
 */
1312 if (val == 1)
1313 intel_dp->compliance.test_active = true;
1314 else
1315 intel_dp->compliance.test_active = false;
1318 drm_connector_list_iter_end(&conn_iter);
1319 kfree(input_buffer);
1320 if (status < 0)
1321 return status;
1323 *offp += len;
1324 return len;
1327 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1329 struct drm_i915_private *dev_priv = m->private;
1330 struct drm_device *dev = &dev_priv->drm;
1331 struct drm_connector *connector;
1332 struct drm_connector_list_iter conn_iter;
1333 struct intel_dp *intel_dp;
1335 drm_connector_list_iter_begin(dev, &conn_iter);
1336 drm_for_each_connector_iter(connector, &conn_iter) {
1337 struct intel_encoder *encoder;
1339 if (connector->connector_type !=
1340 DRM_MODE_CONNECTOR_DisplayPort)
1341 continue;
1343 encoder = to_intel_encoder(connector->encoder);
1344 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1345 continue;
1347 if (encoder && connector->status == connector_status_connected) {
1348 intel_dp = enc_to_intel_dp(encoder);
1349 if (intel_dp->compliance.test_active)
1350 seq_puts(m, "1");
1351 else
1352 seq_puts(m, "0");
1353 } else
1354 seq_puts(m, "0");
1356 drm_connector_list_iter_end(&conn_iter);
1358 return 0;
1361 static int i915_displayport_test_active_open(struct inode *inode,
1362 struct file *file)
1364 return single_open(file, i915_displayport_test_active_show,
1365 inode->i_private);
1368 static const struct file_operations i915_displayport_test_active_fops = {
1369 .owner = THIS_MODULE,
1370 .open = i915_displayport_test_active_open,
1371 .read = seq_read,
1372 .llseek = seq_lseek,
1373 .release = single_release,
1374 .write = i915_displayport_test_active_write
1377 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1379 struct drm_i915_private *dev_priv = m->private;
1380 struct drm_device *dev = &dev_priv->drm;
1381 struct drm_connector *connector;
1382 struct drm_connector_list_iter conn_iter;
1383 struct intel_dp *intel_dp;
1385 drm_connector_list_iter_begin(dev, &conn_iter);
1386 drm_for_each_connector_iter(connector, &conn_iter) {
1387 struct intel_encoder *encoder;
1389 if (connector->connector_type !=
1390 DRM_MODE_CONNECTOR_DisplayPort)
1391 continue;
1393 encoder = to_intel_encoder(connector->encoder);
1394 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1395 continue;
1397 if (encoder && connector->status == connector_status_connected) {
1398 intel_dp = enc_to_intel_dp(encoder);
1399 if (intel_dp->compliance.test_type ==
1400 DP_TEST_LINK_EDID_READ)
1401 seq_printf(m, "%lx",
1402 intel_dp->compliance.test_data.edid);
1403 else if (intel_dp->compliance.test_type ==
1404 DP_TEST_LINK_VIDEO_PATTERN) {
1405 seq_printf(m, "hdisplay: %d\n",
1406 intel_dp->compliance.test_data.hdisplay);
1407 seq_printf(m, "vdisplay: %d\n",
1408 intel_dp->compliance.test_data.vdisplay);
1409 seq_printf(m, "bpc: %u\n",
1410 intel_dp->compliance.test_data.bpc);
1411 } else if (intel_dp->compliance.test_type ==
1412 DP_TEST_LINK_PHY_TEST_PATTERN) {
1413 seq_printf(m, "pattern: %d\n",
1414 intel_dp->compliance.test_data.phytest.phy_pattern);
1415 seq_printf(m, "Number of lanes: %d\n",
1416 intel_dp->compliance.test_data.phytest.num_lanes);
1417 seq_printf(m, "Link Rate: %d\n",
1418 intel_dp->compliance.test_data.phytest.link_rate);
1419 seq_printf(m, "level: %02x\n",
1420 intel_dp->train_set[0]);
1422 } else
1423 seq_puts(m, "0");
1425 drm_connector_list_iter_end(&conn_iter);
1427 return 0;
1429 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1431 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1433 struct drm_i915_private *dev_priv = m->private;
1434 struct drm_device *dev = &dev_priv->drm;
1435 struct drm_connector *connector;
1436 struct drm_connector_list_iter conn_iter;
1437 struct intel_dp *intel_dp;
1439 drm_connector_list_iter_begin(dev, &conn_iter);
1440 drm_for_each_connector_iter(connector, &conn_iter) {
1441 struct intel_encoder *encoder;
1443 if (connector->connector_type !=
1444 DRM_MODE_CONNECTOR_DisplayPort)
1445 continue;
1447 encoder = to_intel_encoder(connector->encoder);
1448 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1449 continue;
1451 if (encoder && connector->status == connector_status_connected) {
1452 intel_dp = enc_to_intel_dp(encoder);
1453 seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1454 } else
1455 seq_puts(m, "0");
1457 drm_connector_list_iter_end(&conn_iter);
1459 return 0;
1461 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
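/*
 * Watermark latencies are stored in hardware units; scale them to 0.1us
 * steps for printing (WM1+ values are in 0.5us units, while gen9/vlv/chv/
 * g4x report in us).
 */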
1463 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1465 struct drm_i915_private *dev_priv = m->private;
1466 struct drm_device *dev = &dev_priv->drm;
1467 int level;
1468 int num_levels;
1470 if (IS_CHERRYVIEW(dev_priv))
1471 num_levels = 3;
1472 else if (IS_VALLEYVIEW(dev_priv))
1473 num_levels = 1;
1474 else if (IS_G4X(dev_priv))
1475 num_levels = 3;
1476 else
1477 num_levels = ilk_wm_max_level(dev_priv) + 1;
1479 drm_modeset_lock_all(dev);
1481 for (level = 0; level < num_levels; level++) {
1482 unsigned int latency = wm[level];
/*
 * - WM1+ latency values in 0.5us units
 * - latencies are in us on gen9/vlv/chv
 */
1488 if (INTEL_GEN(dev_priv) >= 9 ||
1489 IS_VALLEYVIEW(dev_priv) ||
1490 IS_CHERRYVIEW(dev_priv) ||
1491 IS_G4X(dev_priv))
1492 latency *= 10;
1493 else if (level > 0)
1494 latency *= 5;
1496 seq_printf(m, "WM%d %u (%u.%u usec)\n",
1497 level, wm[level], latency / 10, latency % 10);
1500 drm_modeset_unlock_all(dev);
1503 static int pri_wm_latency_show(struct seq_file *m, void *data)
1505 struct drm_i915_private *dev_priv = m->private;
1506 const u16 *latencies;
1508 if (INTEL_GEN(dev_priv) >= 9)
1509 latencies = dev_priv->wm.skl_latency;
1510 else
1511 latencies = dev_priv->wm.pri_latency;
1513 wm_latency_show(m, latencies);
1515 return 0;
1518 static int spr_wm_latency_show(struct seq_file *m, void *data)
1520 struct drm_i915_private *dev_priv = m->private;
1521 const u16 *latencies;
1523 if (INTEL_GEN(dev_priv) >= 9)
1524 latencies = dev_priv->wm.skl_latency;
1525 else
1526 latencies = dev_priv->wm.spr_latency;
1528 wm_latency_show(m, latencies);
1530 return 0;
1533 static int cur_wm_latency_show(struct seq_file *m, void *data)
1535 struct drm_i915_private *dev_priv = m->private;
1536 const u16 *latencies;
1538 if (INTEL_GEN(dev_priv) >= 9)
1539 latencies = dev_priv->wm.skl_latency;
1540 else
1541 latencies = dev_priv->wm.cur_latency;
1543 wm_latency_show(m, latencies);
1545 return 0;
1548 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1550 struct drm_i915_private *dev_priv = inode->i_private;
1552 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1553 return -ENODEV;
1555 return single_open(file, pri_wm_latency_show, dev_priv);
1558 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1560 struct drm_i915_private *dev_priv = inode->i_private;
1562 if (HAS_GMCH(dev_priv))
1563 return -ENODEV;
1565 return single_open(file, spr_wm_latency_show, dev_priv);
1568 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1570 struct drm_i915_private *dev_priv = inode->i_private;
1572 if (HAS_GMCH(dev_priv))
1573 return -ENODEV;
1575 return single_open(file, cur_wm_latency_show, dev_priv);
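/*
 * wm_latency_write: parse up to eight space-separated latency values from
 * userspace; the number supplied must match the platform's number of
 * watermark levels.
 */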
1578 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1579 size_t len, loff_t *offp, u16 wm[8])
1581 struct seq_file *m = file->private_data;
1582 struct drm_i915_private *dev_priv = m->private;
1583 struct drm_device *dev = &dev_priv->drm;
1584 u16 new[8] = { 0 };
1585 int num_levels;
1586 int level;
1587 int ret;
1588 char tmp[32];
1590 if (IS_CHERRYVIEW(dev_priv))
1591 num_levels = 3;
1592 else if (IS_VALLEYVIEW(dev_priv))
1593 num_levels = 1;
1594 else if (IS_G4X(dev_priv))
1595 num_levels = 3;
1596 else
1597 num_levels = ilk_wm_max_level(dev_priv) + 1;
1599 if (len >= sizeof(tmp))
1600 return -EINVAL;
1602 if (copy_from_user(tmp, ubuf, len))
1603 return -EFAULT;
1605 tmp[len] = '\0';
1607 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1608 &new[0], &new[1], &new[2], &new[3],
1609 &new[4], &new[5], &new[6], &new[7]);
1610 if (ret != num_levels)
1611 return -EINVAL;
1613 drm_modeset_lock_all(dev);
1615 for (level = 0; level < num_levels; level++)
1616 wm[level] = new[level];
1618 drm_modeset_unlock_all(dev);
1620 return len;
1624 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1625 size_t len, loff_t *offp)
1627 struct seq_file *m = file->private_data;
1628 struct drm_i915_private *dev_priv = m->private;
1629 u16 *latencies;
1631 if (INTEL_GEN(dev_priv) >= 9)
1632 latencies = dev_priv->wm.skl_latency;
1633 else
1634 latencies = dev_priv->wm.pri_latency;
1636 return wm_latency_write(file, ubuf, len, offp, latencies);
1639 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1640 size_t len, loff_t *offp)
1642 struct seq_file *m = file->private_data;
1643 struct drm_i915_private *dev_priv = m->private;
1644 u16 *latencies;
1646 if (INTEL_GEN(dev_priv) >= 9)
1647 latencies = dev_priv->wm.skl_latency;
1648 else
1649 latencies = dev_priv->wm.spr_latency;
1651 return wm_latency_write(file, ubuf, len, offp, latencies);
1654 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1655 size_t len, loff_t *offp)
1657 struct seq_file *m = file->private_data;
1658 struct drm_i915_private *dev_priv = m->private;
1659 u16 *latencies;
1661 if (INTEL_GEN(dev_priv) >= 9)
1662 latencies = dev_priv->wm.skl_latency;
1663 else
1664 latencies = dev_priv->wm.cur_latency;
1666 return wm_latency_write(file, ubuf, len, offp, latencies);
1669 static const struct file_operations i915_pri_wm_latency_fops = {
1670 .owner = THIS_MODULE,
1671 .open = pri_wm_latency_open,
1672 .read = seq_read,
1673 .llseek = seq_lseek,
1674 .release = single_release,
1675 .write = pri_wm_latency_write
1678 static const struct file_operations i915_spr_wm_latency_fops = {
1679 .owner = THIS_MODULE,
1680 .open = spr_wm_latency_open,
1681 .read = seq_read,
1682 .llseek = seq_lseek,
1683 .release = single_release,
1684 .write = spr_wm_latency_write
1687 static const struct file_operations i915_cur_wm_latency_fops = {
1688 .owner = THIS_MODULE,
1689 .open = cur_wm_latency_open,
1690 .read = seq_read,
1691 .llseek = seq_lseek,
1692 .release = single_release,
1693 .write = cur_wm_latency_write
1696 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1698 struct drm_i915_private *dev_priv = m->private;
1699 struct i915_hotplug *hotplug = &dev_priv->hotplug;
/*
 * Synchronize with everything first in case there's been an HPD
 * storm, but we haven't finished handling it in the kernel yet
 */
1704 intel_synchronize_irq(dev_priv);
1705 flush_work(&dev_priv->hotplug.dig_port_work);
1706 flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1708 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1709 seq_printf(m, "Detected: %s\n",
1710 yesno(delayed_work_pending(&hotplug->reenable_work)));
1712 return 0;
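/*
 * Accepts an integer threshold or "reset" (which restores
 * HPD_STORM_DEFAULT_THRESHOLD) and clears the per-pin HPD statistics.
 */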
1715 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1716 const char __user *ubuf, size_t len,
1717 loff_t *offp)
1719 struct seq_file *m = file->private_data;
1720 struct drm_i915_private *dev_priv = m->private;
1721 struct i915_hotplug *hotplug = &dev_priv->hotplug;
1722 unsigned int new_threshold;
1723 int i;
1724 char *newline;
1725 char tmp[16];
1727 if (len >= sizeof(tmp))
1728 return -EINVAL;
1730 if (copy_from_user(tmp, ubuf, len))
1731 return -EFAULT;
1733 tmp[len] = '\0';
1735 /* Strip newline, if any */
1736 newline = strchr(tmp, '\n');
1737 if (newline)
1738 *newline = '\0';
1740 if (strcmp(tmp, "reset") == 0)
1741 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1742 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1743 return -EINVAL;
1745 if (new_threshold > 0)
1746 drm_dbg_kms(&dev_priv->drm,
1747 "Setting HPD storm detection threshold to %d\n",
1748 new_threshold);
1749 else
1750 drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1752 spin_lock_irq(&dev_priv->irq_lock);
1753 hotplug->hpd_storm_threshold = new_threshold;
1754 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
1755 for_each_hpd_pin(i)
1756 hotplug->stats[i].count = 0;
1757 spin_unlock_irq(&dev_priv->irq_lock);
1759 /* Re-enable hpd immediately if we were in an irq storm */
1760 flush_delayed_work(&dev_priv->hotplug.reenable_work);
1762 return len;
1765 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1767 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1770 static const struct file_operations i915_hpd_storm_ctl_fops = {
1771 .owner = THIS_MODULE,
1772 .open = i915_hpd_storm_ctl_open,
1773 .read = seq_read,
1774 .llseek = seq_lseek,
1775 .release = single_release,
1776 .write = i915_hpd_storm_ctl_write
1779 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1781 struct drm_i915_private *dev_priv = m->private;
1783 seq_printf(m, "Enabled: %s\n",
1784 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1786 return 0;
1789 static int
1790 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1792 return single_open(file, i915_hpd_short_storm_ctl_show,
1793 inode->i_private);
1796 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1797 const char __user *ubuf,
1798 size_t len, loff_t *offp)
1800 struct seq_file *m = file->private_data;
1801 struct drm_i915_private *dev_priv = m->private;
1802 struct i915_hotplug *hotplug = &dev_priv->hotplug;
1803 char *newline;
1804 char tmp[16];
1805 int i;
1806 bool new_state;
1808 if (len >= sizeof(tmp))
1809 return -EINVAL;
1811 if (copy_from_user(tmp, ubuf, len))
1812 return -EFAULT;
1814 tmp[len] = '\0';
1816 /* Strip newline, if any */
1817 newline = strchr(tmp, '\n');
1818 if (newline)
1819 *newline = '\0';
1821 /* Reset to the "default" state for this system */
1822 if (strcmp(tmp, "reset") == 0)
1823 new_state = !HAS_DP_MST(dev_priv);
1824 else if (kstrtobool(tmp, &new_state) != 0)
1825 return -EINVAL;
1827 drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1828 new_state ? "En" : "Dis");
1830 spin_lock_irq(&dev_priv->irq_lock);
1831 hotplug->hpd_short_storm_enabled = new_state;
1832 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
1833 for_each_hpd_pin(i)
1834 hotplug->stats[i].count = 0;
1835 spin_unlock_irq(&dev_priv->irq_lock);
1837 /* Re-enable hpd immediately if we were in an irq storm */
1838 flush_delayed_work(&dev_priv->hotplug.reenable_work);
1840 return len;
1843 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1844 .owner = THIS_MODULE,
1845 .open = i915_hpd_short_storm_ctl_open,
1846 .read = seq_read,
1847 .llseek = seq_lseek,
1848 .release = single_release,
1849 .write = i915_hpd_short_storm_ctl_write,
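/*
 * i915_drrs_ctl: manually enable or disable eDP DRRS on every active CRTC
 * that supports it, after waiting for any pending commit on that CRTC.
 */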
1852 static int i915_drrs_ctl_set(void *data, u64 val)
1854 struct drm_i915_private *dev_priv = data;
1855 struct drm_device *dev = &dev_priv->drm;
1856 struct intel_crtc *crtc;
1858 if (INTEL_GEN(dev_priv) < 7)
1859 return -ENODEV;
1861 for_each_intel_crtc(dev, crtc) {
1862 struct drm_connector_list_iter conn_iter;
1863 struct intel_crtc_state *crtc_state;
1864 struct drm_connector *connector;
1865 struct drm_crtc_commit *commit;
1866 int ret;
1868 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1869 if (ret)
1870 return ret;
1872 crtc_state = to_intel_crtc_state(crtc->base.state);
1874 if (!crtc_state->hw.active ||
1875 !crtc_state->has_drrs)
1876 goto out;
1878 commit = crtc_state->uapi.commit;
1879 if (commit) {
1880 ret = wait_for_completion_interruptible(&commit->hw_done);
1881 if (ret)
1882 goto out;
1885 drm_connector_list_iter_begin(dev, &conn_iter);
1886 drm_for_each_connector_iter(connector, &conn_iter) {
1887 struct intel_encoder *encoder;
1888 struct intel_dp *intel_dp;
1890 if (!(crtc_state->uapi.connector_mask &
1891 drm_connector_mask(connector)))
1892 continue;
1894 encoder = intel_attached_encoder(to_intel_connector(connector));
1895 if (encoder->type != INTEL_OUTPUT_EDP)
1896 continue;
1898 drm_dbg(&dev_priv->drm,
1899 "Manually %sabling DRRS. %llu\n",
1900 val ? "en" : "dis", val);
1902 intel_dp = enc_to_intel_dp(encoder);
1903 if (val)
1904 intel_edp_drrs_enable(intel_dp,
1905 crtc_state);
1906 else
1907 intel_edp_drrs_disable(intel_dp,
1908 crtc_state);
1910 drm_connector_list_iter_end(&conn_iter);
1912 out:
1913 drm_modeset_unlock(&crtc->base.mutex);
1914 if (ret)
1915 return ret;
1918 return 0;
1921 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
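/*
 * Writing a true value re-arms FIFO underrun reporting on all active
 * pipes and resets FBC underrun tracking.
 */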
1923 static ssize_t
1924 i915_fifo_underrun_reset_write(struct file *filp,
1925 const char __user *ubuf,
1926 size_t cnt, loff_t *ppos)
1928 struct drm_i915_private *dev_priv = filp->private_data;
1929 struct intel_crtc *intel_crtc;
1930 struct drm_device *dev = &dev_priv->drm;
1931 int ret;
1932 bool reset;
1934 ret = kstrtobool_from_user(ubuf, cnt, &reset);
1935 if (ret)
1936 return ret;
1938 if (!reset)
1939 return cnt;
1941 for_each_intel_crtc(dev, intel_crtc) {
1942 struct drm_crtc_commit *commit;
1943 struct intel_crtc_state *crtc_state;
1945 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1946 if (ret)
1947 return ret;
1949 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1950 commit = crtc_state->uapi.commit;
1951 if (commit) {
1952 ret = wait_for_completion_interruptible(&commit->hw_done);
1953 if (!ret)
1954 ret = wait_for_completion_interruptible(&commit->flip_done);
1957 if (!ret && crtc_state->hw.active) {
1958 drm_dbg_kms(&dev_priv->drm,
1959 "Re-arming FIFO underruns on pipe %c\n",
1960 pipe_name(intel_crtc->pipe));
1962 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1965 drm_modeset_unlock(&intel_crtc->base.mutex);
1967 if (ret)
1968 return ret;
1971 ret = intel_fbc_reset_underrun(dev_priv);
1972 if (ret)
1973 return ret;
1975 return cnt;
1978 static const struct file_operations i915_fifo_underrun_reset_ops = {
1979 .owner = THIS_MODULE,
1980 .open = simple_open,
1981 .write = i915_fifo_underrun_reset_write,
1982 .llseek = default_llseek,
1985 static const struct drm_info_list intel_display_debugfs_list[] = {
1986 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1987 {"i915_fbc_status", i915_fbc_status, 0},
1988 {"i915_ips_status", i915_ips_status, 0},
1989 {"i915_sr_status", i915_sr_status, 0},
1990 {"i915_opregion", i915_opregion, 0},
1991 {"i915_vbt", i915_vbt, 0},
1992 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1993 {"i915_edp_psr_status", i915_edp_psr_status, 0},
1994 {"i915_power_domain_info", i915_power_domain_info, 0},
1995 {"i915_dmc_info", i915_dmc_info, 0},
1996 {"i915_display_info", i915_display_info, 0},
1997 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1998 {"i915_dp_mst_info", i915_dp_mst_info, 0},
1999 {"i915_ddb_info", i915_ddb_info, 0},
2000 {"i915_drrs_status", i915_drrs_status, 0},
2001 {"i915_lpsp_status", i915_lpsp_status, 0},
2002 };
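/*
 * Writable control nodes; each one is created individually with
 * debugfs_create_file() and carries its own file_operations.
 */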
2004 static const struct {
2005 const char *name;
2006 const struct file_operations *fops;
2007 } intel_display_debugfs_files[] = {
2008 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
2009 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
2010 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
2011 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
2012 {"i915_fbc_false_color", &i915_fbc_false_color_fops},
2013 {"i915_dp_test_data", &i915_displayport_test_data_fops},
2014 {"i915_dp_test_type", &i915_displayport_test_type_fops},
2015 {"i915_dp_test_active", &i915_displayport_test_active_fops},
2016 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
2017 {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
2018 {"i915_ipc_status", &i915_ipc_status_fops},
2019 {"i915_drrs_ctl", &i915_drrs_ctl_fops},
2020 {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
2021 };
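/**
 * intel_display_debugfs_register - create the display debugfs entries
 * @i915: i915 device instance
 *
 * Registers the writable control files and the read-only info nodes listed
 * above on the primary DRM minor's debugfs root.
 */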
2023 void intel_display_debugfs_register(struct drm_i915_private *i915)
2024 {
2025 struct drm_minor *minor = i915->drm.primary;
2026 int i;
2028 for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2029 debugfs_create_file(intel_display_debugfs_files[i].name,
2030 S_IRUGO | S_IWUSR,
2031 minor->debugfs_root,
2032 to_i915(minor->dev),
2033 intel_display_debugfs_files[i].fops);
2034 }
2036 drm_debugfs_create_files(intel_display_debugfs_list,
2037 ARRAY_SIZE(intel_display_debugfs_list),
2038 minor->debugfs_root, minor);
2039 }
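/*
 * i915_panel_timings (registered below for eDP connectors only): dumps the
 * panel power sequencer and backlight on/off delays of the attached panel.
 */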
2041 static int i915_panel_show(struct seq_file *m, void *data)
2042 {
2043 struct drm_connector *connector = m->private;
2044 struct intel_dp *intel_dp =
2045 intel_attached_dp(to_intel_connector(connector));
2047 if (connector->status != connector_status_connected)
2048 return -ENODEV;
2050 seq_printf(m, "Panel power up delay: %d\n",
2051 intel_dp->panel_power_up_delay);
2052 seq_printf(m, "Panel power down delay: %d\n",
2053 intel_dp->panel_power_down_delay);
2054 seq_printf(m, "Backlight on delay: %d\n",
2055 intel_dp->backlight_on_delay);
2056 seq_printf(m, "Backlight off delay: %d\n",
2057 intel_dp->backlight_off_delay);
2059 return 0;
2060 }
2061 DEFINE_SHOW_ATTRIBUTE(i915_panel);
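/*
 * i915_hdcp_sink_capability: reports which HDCP versions the connected sink
 * advertises, as queried via intel_hdcp_info().
 */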
2063 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2064 {
2065 struct drm_connector *connector = m->private;
2066 struct intel_connector *intel_connector = to_intel_connector(connector);
2068 if (connector->status != connector_status_connected)
2069 return -ENODEV;
2071 seq_printf(m, "%s:%d HDCP version: ", connector->name,
2072 connector->base.id);
2073 intel_hdcp_info(m, intel_connector);
2075 return 0;
2076 }
2077 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
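/*
 * i915_lpsp_capability: reports whether this connector could drive the
 * display with LPSP (low power single pipe) active; which ports and
 * connector types qualify depends on the platform generation, see the
 * switch below.
 */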
2079 #define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
2080 seq_puts(m, "LPSP: incapable\n"))
2082 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2083 {
2084 struct drm_connector *connector = m->private;
2085 struct drm_i915_private *i915 = to_i915(connector->dev);
2086 struct intel_encoder *encoder;
2088 encoder = intel_attached_encoder(to_intel_connector(connector));
2089 if (!encoder)
2090 return -ENODEV;
2092 if (connector->status != connector_status_connected)
2093 return -ENODEV;
2095 switch (INTEL_GEN(i915)) {
2096 case 12:
2097 /*
2098 * TGL can actually drive LPSP on ports up to DDI_C, but there is no
2099 * physically connected DDI_C on any TGL SKU, and the driver does not
2100 * even initialize the DDI_C port on gen12.
2101 */
2102 LPSP_CAPABLE(encoder->port <= PORT_B);
2103 break;
2104 case 11:
2105 LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2106 connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2107 break;
2108 case 10:
2109 case 9:
2110 LPSP_CAPABLE(encoder->port == PORT_A &&
2111 (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2112 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2113 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2114 break;
2115 default:
2116 if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2117 LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2118 }
2120 return 0;
2121 }
2122 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
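/*
 * i915_dsc_fec_support, read side: reports the current DSC enable state and
 * the sink's DSC/FEC capabilities. The connection and CRTC locks are taken
 * with a full acquire context so the read can back off and retry instead of
 * deadlocking against a concurrent modeset.
 */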
2124 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2125 {
2126 struct drm_connector *connector = m->private;
2127 struct drm_device *dev = connector->dev;
2128 struct drm_crtc *crtc;
2129 struct intel_dp *intel_dp;
2130 struct drm_modeset_acquire_ctx ctx;
2131 struct intel_crtc_state *crtc_state = NULL;
2132 int ret = 0;
2133 bool try_again = false;
2135 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2137 do {
2138 try_again = false;
2139 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2140 &ctx);
2141 if (ret) {
2142 if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2143 try_again = true;
2144 continue;
2145 }
2146 break;
2147 }
2148 crtc = connector->state->crtc;
2149 if (connector->status != connector_status_connected || !crtc) {
2150 ret = -ENODEV;
2151 break;
2152 }
2153 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2154 if (ret == -EDEADLK) {
2155 ret = drm_modeset_backoff(&ctx);
2156 if (!ret) {
2157 try_again = true;
2158 continue;
2159 }
2160 break;
2161 } else if (ret) {
2162 break;
2163 }
2164 intel_dp = intel_attached_dp(to_intel_connector(connector));
2165 crtc_state = to_intel_crtc_state(crtc->state);
2166 seq_printf(m, "DSC_Enabled: %s\n",
2167 yesno(crtc_state->dsc.compression_enable));
2168 seq_printf(m, "DSC_Sink_Support: %s\n",
2169 yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2170 seq_printf(m, "Force_DSC_Enable: %s\n",
2171 yesno(intel_dp->force_dsc_en));
2172 if (!intel_dp_is_edp(intel_dp))
2173 seq_printf(m, "FEC_Sink_Support: %s\n",
2174 yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2175 } while (try_again);
2177 drm_modeset_drop_locks(&ctx);
2178 drm_modeset_acquire_fini(&ctx);
2180 return ret;
2181 }
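/*
 * i915_dsc_fec_support, write side: parses a boolean and latches it into
 * intel_dp->force_dsc_en so that DSC can be forced the next time the pipe
 * is (re)configured. Assuming the default debugfs mount, DRM minor 0 and a
 * connector named DP-1, a typical invocation is:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */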
2183 static ssize_t i915_dsc_fec_support_write(struct file *file,
2184 const char __user *ubuf,
2185 size_t len, loff_t *offp)
2186 {
2187 bool dsc_enable = false;
2188 int ret;
2189 struct drm_connector *connector =
2190 ((struct seq_file *)file->private_data)->private;
2191 struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2192 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2193 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2195 if (len == 0)
2196 return 0;
2198 drm_dbg(&i915->drm,
2199 "Copied %zu bytes from user to force DSC\n", len);
2201 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2202 if (ret < 0)
2203 return ret;
2205 drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2206 (dsc_enable) ? "true" : "false");
2207 intel_dp->force_dsc_en = dsc_enable;
2209 *offp += len;
2210 return len;
2211 }
2213 static int i915_dsc_fec_support_open(struct inode *inode,
2214 struct file *file)
2215 {
2216 return single_open(file, i915_dsc_fec_support_show,
2217 inode->i_private);
2218 }
2220 static const struct file_operations i915_dsc_fec_support_fops = {
2221 .owner = THIS_MODULE,
2222 .open = i915_dsc_fec_support_open,
2223 .read = seq_read,
2224 .llseek = seq_lseek,
2225 .release = single_release,
2226 .write = i915_dsc_fec_support_write
2227 };
2229 /**
2230 * intel_connector_debugfs_add - add i915 specific connector debugfs files
2231 * @connector: pointer to a registered drm_connector
2232 *
2233 * Cleanup will be done by drm_connector_unregister() through a call to
2234 * drm_debugfs_connector_remove().
2235 *
2236 * Returns 0 on success, negative error codes on error.
2237 */
2238 int intel_connector_debugfs_add(struct drm_connector *connector)
2239 {
2240 struct dentry *root = connector->debugfs_entry;
2241 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2243 /* The connector must have been registered beforehand. */
2244 if (!root)
2245 return -ENODEV;
2247 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2248 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2249 connector, &i915_panel_fops);
2250 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2251 connector, &i915_psr_sink_status_fops);
2252 }
2254 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2255 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2256 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2257 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2258 connector, &i915_hdcp_sink_capability_fops);
2259 }
2261 if (INTEL_GEN(dev_priv) >= 10 &&
2262 ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2263 !to_intel_connector(connector)->mst_port) ||
2264 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2265 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2266 connector, &i915_dsc_fec_support_fops);
2268 /* Legacy panels don't support LPSP on any platform */
2269 if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2270 IS_BROADWELL(dev_priv)) &&
2271 (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2272 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2273 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2274 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2275 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2276 debugfs_create_file("i915_lpsp_capability", 0444, root,
2277 connector, &i915_lpsp_capability_fops);
2279 return 0;