drivers/gpu/drm/i915/intel_dpll_mgr.c
1 /*
2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_drv.h"
26 /**
27 * DOC: Display PLLs
29 * Display PLLs used for driving outputs vary by platform. While some have
30 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
31 * from a pool. In the latter scenario, it is possible that multiple pipes
32 * share a PLL if their configurations match.
34 * This file provides an abstraction over display PLLs. The function
35 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
36 * users of a PLL are tracked and that tracking is integrated with the atomic
37 * modeset interface. During an atomic operation, a PLL can be requested for a
38 * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
39 * a previously used PLL can be released with intel_release_shared_dpll().
40 * Changes to the users are first staged in the atomic state, and then made
41 * effective by calling intel_shared_dpll_swap_state() during the atomic
42 * commit phase.
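 *
 * A rough sketch of that flow, from a hypothetical caller (the structure,
 * names such as old_pll and the error handling below are illustrative only,
 * not lifted from the actual modeset code):
 *
 *	// compute phase, with the modeset locks held
 *	pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
 *	if (!pll)
 *		return -EINVAL;
 *	// ...or, when the CRTC stops using its current PLL:
 *	intel_release_shared_dpll(old_pll, crtc, state);
 *
 *	// commit phase: make the staged changes effective
 *	intel_shared_dpll_swap_state(state);
 *	intel_prepare_shared_dpll(crtc);
 *	intel_enable_shared_dpll(crtc);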
45 static void
46 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
47 struct intel_shared_dpll_state *shared_dpll)
49 enum intel_dpll_id i;
51 /* Copy shared dpll state */
52 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
53 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
55 shared_dpll[i] = pll->state;
59 static struct intel_shared_dpll_state *
60 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
62 struct intel_atomic_state *state = to_intel_atomic_state(s);
64 WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
66 if (!state->dpll_set) {
67 state->dpll_set = true;
69 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
70 state->shared_dpll);
73 return state->shared_dpll;
76 /**
77 * intel_get_shared_dpll_by_id - get a DPLL given its id
78 * @dev_priv: i915 device instance
79 * @id: pll id
81 * Returns:
82 * A pointer to the DPLL with @id
84 struct intel_shared_dpll *
85 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
86 enum intel_dpll_id id)
88 return &dev_priv->shared_dplls[id];
91 /**
92 * intel_get_shared_dpll_id - get the id of a DPLL
93 * @dev_priv: i915 device instance
94 * @pll: the DPLL
96 * Returns:
97 * The id of @pll
99 enum intel_dpll_id
100 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
101 struct intel_shared_dpll *pll)
103 if (WARN_ON(pll < dev_priv->shared_dplls ||
104 pll >= &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
105 return -1;
107 return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
110 /* For ILK+ */
111 void assert_shared_dpll(struct drm_i915_private *dev_priv,
112 struct intel_shared_dpll *pll,
113 bool state)
115 bool cur_state;
116 struct intel_dpll_hw_state hw_state;
118 if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
119 return;
121 cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
122 I915_STATE_WARN(cur_state != state,
123 "%s assertion failure (expected %s, current %s)\n",
124 pll->name, onoff(state), onoff(cur_state));
128 * intel_prepare_shared_dpll - call a dpll's prepare hook
129 * @crtc: CRTC which has a shared dpll
131 * This calls the PLL's prepare hook if it has one and if the PLL is not
132 * already enabled. The prepare hook is platform specific.
134 void intel_prepare_shared_dpll(struct intel_crtc *crtc)
136 struct drm_device *dev = crtc->base.dev;
137 struct drm_i915_private *dev_priv = to_i915(dev);
138 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
140 if (WARN_ON(pll == NULL))
141 return;
143 mutex_lock(&dev_priv->dpll_lock);
144 WARN_ON(!pll->state.crtc_mask);
145 if (!pll->active_mask) {
146 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
147 WARN_ON(pll->on);
148 assert_shared_dpll_disabled(dev_priv, pll);
150 pll->funcs.prepare(dev_priv, pll);
152 mutex_unlock(&dev_priv->dpll_lock);
156 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
157 * @crtc: CRTC which has a shared DPLL
159 * Enable the shared DPLL used by @crtc.
161 void intel_enable_shared_dpll(struct intel_crtc *crtc)
163 struct drm_device *dev = crtc->base.dev;
164 struct drm_i915_private *dev_priv = to_i915(dev);
165 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
166 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
167 unsigned old_mask;
169 if (WARN_ON(pll == NULL))
170 return;
172 mutex_lock(&dev_priv->dpll_lock);
173 old_mask = pll->active_mask;
175 if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
176 WARN_ON(pll->active_mask & crtc_mask))
177 goto out;
179 pll->active_mask |= crtc_mask;
181 DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
182 pll->name, pll->active_mask, pll->on,
183 crtc->base.base.id);
185 if (old_mask) {
186 WARN_ON(!pll->on);
187 assert_shared_dpll_enabled(dev_priv, pll);
188 goto out;
190 WARN_ON(pll->on);
192 DRM_DEBUG_KMS("enabling %s\n", pll->name);
193 pll->funcs.enable(dev_priv, pll);
194 pll->on = true;
196 out:
197 mutex_unlock(&dev_priv->dpll_lock);
201 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
202 * @crtc: CRTC which has a shared DPLL
204 * Disable the shared DPLL used by @crtc.
206 void intel_disable_shared_dpll(struct intel_crtc *crtc)
208 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
209 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
210 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
212 /* PCH only available on ILK+ */
213 if (INTEL_GEN(dev_priv) < 5)
214 return;
216 if (pll == NULL)
217 return;
219 mutex_lock(&dev_priv->dpll_lock);
220 if (WARN_ON(!(pll->active_mask & crtc_mask)))
221 goto out;
223 DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
224 pll->name, pll->active_mask, pll->on,
225 crtc->base.base.id);
227 assert_shared_dpll_enabled(dev_priv, pll);
228 WARN_ON(!pll->on);
230 pll->active_mask &= ~crtc_mask;
231 if (pll->active_mask)
232 goto out;
234 DRM_DEBUG_KMS("disabling %s\n", pll->name);
235 pll->funcs.disable(dev_priv, pll);
236 pll->on = false;
238 out:
239 mutex_unlock(&dev_priv->dpll_lock);
242 static struct intel_shared_dpll *
243 intel_find_shared_dpll(struct intel_crtc *crtc,
244 struct intel_crtc_state *crtc_state,
245 enum intel_dpll_id range_min,
246 enum intel_dpll_id range_max)
248 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
249 struct intel_shared_dpll *pll;
250 struct intel_shared_dpll_state *shared_dpll;
251 enum intel_dpll_id i;
253 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
255 for (i = range_min; i <= range_max; i++) {
256 pll = &dev_priv->shared_dplls[i];
258 /* Only want to check enabled timings first */
259 if (shared_dpll[i].crtc_mask == 0)
260 continue;
262 if (memcmp(&crtc_state->dpll_hw_state,
263 &shared_dpll[i].hw_state,
264 sizeof(crtc_state->dpll_hw_state)) == 0) {
265 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
266 crtc->base.base.id, crtc->base.name, pll->name,
267 shared_dpll[i].crtc_mask,
268 pll->active_mask);
269 return pll;
273 /* Ok no matching timings, maybe there's a free one? */
274 for (i = range_min; i <= range_max; i++) {
275 pll = &dev_priv->shared_dplls[i];
276 if (shared_dpll[i].crtc_mask == 0) {
277 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
278 crtc->base.base.id, crtc->base.name, pll->name);
279 return pll;
283 return NULL;
286 static void
287 intel_reference_shared_dpll(struct intel_shared_dpll *pll,
288 struct intel_crtc_state *crtc_state)
290 struct intel_shared_dpll_state *shared_dpll;
291 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
292 enum intel_dpll_id i = pll->id;
294 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
296 if (shared_dpll[i].crtc_mask == 0)
297 shared_dpll[i].hw_state =
298 crtc_state->dpll_hw_state;
300 crtc_state->shared_dpll = pll;
301 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
302 pipe_name(crtc->pipe));
304 shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
308 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
309 * @state: atomic state
311 * This is the dpll version of drm_atomic_helper_swap_state() since the
312 * helper does not handle driver-specific global state.
314 * For consistency with atomic helpers this function does a complete swap,
315 * i.e. it also puts the current state into @state, even though there is no
316 * need for that at this moment.
318 void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
320 struct drm_i915_private *dev_priv = to_i915(state->dev);
321 struct intel_shared_dpll_state *shared_dpll;
322 struct intel_shared_dpll *pll;
323 enum intel_dpll_id i;
325 if (!to_intel_atomic_state(state)->dpll_set)
326 return;
328 shared_dpll = to_intel_atomic_state(state)->shared_dpll;
329 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
330 struct intel_shared_dpll_state tmp;
332 pll = &dev_priv->shared_dplls[i];
334 tmp = pll->state;
335 pll->state = shared_dpll[i];
336 shared_dpll[i] = tmp;
340 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
341 struct intel_shared_dpll *pll,
342 struct intel_dpll_hw_state *hw_state)
344 uint32_t val;
346 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
347 return false;
349 val = I915_READ(PCH_DPLL(pll->id));
350 hw_state->dpll = val;
351 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
352 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
354 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
356 return val & DPLL_VCO_ENABLE;
359 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
360 struct intel_shared_dpll *pll)
362 I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
363 I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
366 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
368 u32 val;
369 bool enabled;
371 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
373 val = I915_READ(PCH_DREF_CONTROL);
374 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
375 DREF_SUPERSPREAD_SOURCE_MASK));
376 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
379 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
380 struct intel_shared_dpll *pll)
382 /* PCH refclock must be enabled first */
383 ibx_assert_pch_refclk_enabled(dev_priv);
385 I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
387 /* Wait for the clocks to stabilize. */
388 POSTING_READ(PCH_DPLL(pll->id));
389 udelay(150);
391 /* The pixel multiplier can only be updated once the
392 * DPLL is enabled and the clocks are stable.
394 * So write it again.
396 I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
397 POSTING_READ(PCH_DPLL(pll->id));
398 udelay(200);
401 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
402 struct intel_shared_dpll *pll)
404 struct drm_device *dev = &dev_priv->drm;
405 struct intel_crtc *crtc;
407 /* Make sure no transcoder is still depending on us. */
408 for_each_intel_crtc(dev, crtc) {
409 if (crtc->config->shared_dpll == pll)
410 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
413 I915_WRITE(PCH_DPLL(pll->id), 0);
414 POSTING_READ(PCH_DPLL(pll->id));
415 udelay(200);
418 static struct intel_shared_dpll *
419 ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
420 struct intel_encoder *encoder)
422 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
423 struct intel_shared_dpll *pll;
424 enum intel_dpll_id i;
426 if (HAS_PCH_IBX(dev_priv)) {
427 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
428 i = (enum intel_dpll_id) crtc->pipe;
429 pll = &dev_priv->shared_dplls[i];
431 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
432 crtc->base.base.id, crtc->base.name, pll->name);
433 } else {
434 pll = intel_find_shared_dpll(crtc, crtc_state,
435 DPLL_ID_PCH_PLL_A,
436 DPLL_ID_PCH_PLL_B);
439 if (!pll)
440 return NULL;
442 /* reference the pll */
443 intel_reference_shared_dpll(pll, crtc_state);
445 return pll;
448 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
449 struct intel_dpll_hw_state *hw_state)
451 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
452 "fp0: 0x%x, fp1: 0x%x\n",
453 hw_state->dpll,
454 hw_state->dpll_md,
455 hw_state->fp0,
456 hw_state->fp1);
459 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
460 .prepare = ibx_pch_dpll_prepare,
461 .enable = ibx_pch_dpll_enable,
462 .disable = ibx_pch_dpll_disable,
463 .get_hw_state = ibx_pch_dpll_get_hw_state,
466 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
467 struct intel_shared_dpll *pll)
469 I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
470 POSTING_READ(WRPLL_CTL(pll->id));
471 udelay(20);
474 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
475 struct intel_shared_dpll *pll)
477 I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
478 POSTING_READ(SPLL_CTL);
479 udelay(20);
482 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
483 struct intel_shared_dpll *pll)
485 uint32_t val;
487 val = I915_READ(WRPLL_CTL(pll->id));
488 I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
489 POSTING_READ(WRPLL_CTL(pll->id));
492 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
493 struct intel_shared_dpll *pll)
495 uint32_t val;
497 val = I915_READ(SPLL_CTL);
498 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
499 POSTING_READ(SPLL_CTL);
502 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
503 struct intel_shared_dpll *pll,
504 struct intel_dpll_hw_state *hw_state)
506 uint32_t val;
508 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
509 return false;
511 val = I915_READ(WRPLL_CTL(pll->id));
512 hw_state->wrpll = val;
514 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
516 return val & WRPLL_PLL_ENABLE;
519 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
520 struct intel_shared_dpll *pll,
521 struct intel_dpll_hw_state *hw_state)
523 uint32_t val;
525 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
526 return false;
528 val = I915_READ(SPLL_CTL);
529 hw_state->spll = val;
531 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
533 return val & SPLL_PLL_ENABLE;
536 #define LC_FREQ 2700
537 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
539 #define P_MIN 2
540 #define P_MAX 64
541 #define P_INC 2
543 /* Constraints for good PLL behavior */
544 #define REF_MIN 48
545 #define REF_MAX 400
546 #define VCO_MIN 2400
547 #define VCO_MAX 4800
549 struct hsw_wrpll_rnp {
550 unsigned p, n2, r2;
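/*
 * Frequency error budget, in ppm, tolerated for a given pixel clock
 * (see the "PPM-based budget" discussion in hsw_wrpll_update_rnp() below).
 */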
553 static unsigned hsw_wrpll_get_budget_for_freq(int clock)
555 unsigned budget;
557 switch (clock) {
558 case 25175000:
559 case 25200000:
560 case 27000000:
561 case 27027000:
562 case 37762500:
563 case 37800000:
564 case 40500000:
565 case 40541000:
566 case 54000000:
567 case 54054000:
568 case 59341000:
569 case 59400000:
570 case 72000000:
571 case 74176000:
572 case 74250000:
573 case 81000000:
574 case 81081000:
575 case 89012000:
576 case 89100000:
577 case 108000000:
578 case 108108000:
579 case 111264000:
580 case 111375000:
581 case 148352000:
582 case 148500000:
583 case 162000000:
584 case 162162000:
585 case 222525000:
586 case 222750000:
587 case 296703000:
588 case 297000000:
589 budget = 0;
590 break;
591 case 233500000:
592 case 245250000:
593 case 247750000:
594 case 253250000:
595 case 298000000:
596 budget = 1500;
597 break;
598 case 169128000:
599 case 169500000:
600 case 179500000:
601 case 202000000:
602 budget = 2000;
603 break;
604 case 256250000:
605 case 262500000:
606 case 270000000:
607 case 272500000:
608 case 273750000:
609 case 280750000:
610 case 281250000:
611 case 286000000:
612 case 291750000:
613 budget = 4000;
614 break;
615 case 267250000:
616 case 268500000:
617 budget = 5000;
618 break;
619 default:
620 budget = 1000;
621 break;
624 return budget;
627 static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
628 unsigned r2, unsigned n2, unsigned p,
629 struct hsw_wrpll_rnp *best)
631 uint64_t a, b, c, d, diff, diff_best;
633 /* No best (r,n,p) yet */
634 if (best->p == 0) {
635 best->p = p;
636 best->n2 = n2;
637 best->r2 = r2;
638 return;
642 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
643 * freq2k.
645 * delta = 1e6 *
646 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
647 * freq2k;
649 * and we would like delta <= budget.
651 * If the discrepancy is above the PPM-based budget, always prefer to
652 * improve upon the previous solution. However, if you're within the
653 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
655 a = freq2k * budget * p * r2;
656 b = freq2k * budget * best->p * best->r2;
657 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
658 diff_best = abs_diff(freq2k * best->p * best->r2,
659 LC_FREQ_2K * best->n2);
660 c = 1000000 * diff;
661 d = 1000000 * diff_best;
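	/*
	 * Note that "a >= c" is simply the delta <= budget test from the
	 * comment above with both sides multiplied by freq2k * p * r2 (so no
	 * division is needed), and "b >= d" is the same test applied to the
	 * current best (r2, n2, p).
	 */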
663 if (a < c && b < d) {
664 /* If both are above the budget, pick the closer */
665 if (best->p * best->r2 * diff < p * r2 * diff_best) {
666 best->p = p;
667 best->n2 = n2;
668 best->r2 = r2;
670 } else if (a >= c && b < d) {
671 /* The new candidate is within the budget but the current best is not: update. */
672 best->p = p;
673 best->n2 = n2;
674 best->r2 = r2;
675 } else if (a >= c && b >= d) {
676 /* Both are below the limit, so pick the higher n2/(r2*r2) */
677 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
678 best->p = p;
679 best->n2 = n2;
680 best->r2 = r2;
683 /* Otherwise a < c && b >= d, do nothing */
686 static void
687 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
688 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
690 uint64_t freq2k;
691 unsigned p, n2, r2;
692 struct hsw_wrpll_rnp best = { 0, 0, 0 };
693 unsigned budget;
695 freq2k = clock / 100;
697 budget = hsw_wrpll_get_budget_for_freq(clock);
699 /* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
700 * entirely and pass the LC PLL output straight through. */
701 if (freq2k == 5400000) {
702 *n2_out = 2;
703 *p_out = 1;
704 *r2_out = 2;
705 return;
709 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
710 * the WR PLL.
712 * We want R so that REF_MIN <= Ref <= REF_MAX.
713 * Injecting R2 = 2 * R gives:
714 * REF_MAX * r2 > LC_FREQ * 2 and
715 * REF_MIN * r2 < LC_FREQ * 2
717 * Which means the desired boundaries for r2 are:
718 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
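 *
 * With LC_FREQ = 2700, REF_MIN = 48 and REF_MAX = 400 this works out to
 * r2 running from 14 to 112.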
721 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
722 r2 <= LC_FREQ * 2 / REF_MIN;
723 r2++) {
726 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
728 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
729 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
730 * VCO_MAX * r2 > n2 * LC_FREQ and
731 * VCO_MIN * r2 < n2 * LC_FREQ)
733 * Which means the desired boundaries for n2 are:
734 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
736 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
737 n2 <= VCO_MAX * r2 / LC_FREQ;
738 n2++) {
740 for (p = P_MIN; p <= P_MAX; p += P_INC)
741 hsw_wrpll_update_rnp(freq2k, budget,
742 r2, n2, p, &best);
746 *n2_out = best.n2;
747 *p_out = best.p;
748 *r2_out = best.r2;
751 static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
752 struct intel_crtc *crtc,
753 struct intel_crtc_state *crtc_state)
755 struct intel_shared_dpll *pll;
756 uint32_t val;
757 unsigned int p, n2, r2;
759 hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
761 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
762 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
763 WRPLL_DIVIDER_POST(p);
765 crtc_state->dpll_hw_state.wrpll = val;
767 pll = intel_find_shared_dpll(crtc, crtc_state,
768 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
770 if (!pll)
771 return NULL;
773 return pll;
776 static struct intel_shared_dpll *
777 hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
779 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
780 struct intel_shared_dpll *pll;
781 enum intel_dpll_id pll_id;
783 switch (clock / 2) {
784 case 81000:
785 pll_id = DPLL_ID_LCPLL_810;
786 break;
787 case 135000:
788 pll_id = DPLL_ID_LCPLL_1350;
789 break;
790 case 270000:
791 pll_id = DPLL_ID_LCPLL_2700;
792 break;
793 default:
794 DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
795 return NULL;
798 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
800 if (!pll)
801 return NULL;
803 return pll;
806 static struct intel_shared_dpll *
807 hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
808 struct intel_encoder *encoder)
810 struct intel_shared_dpll *pll;
811 int clock = crtc_state->port_clock;
813 memset(&crtc_state->dpll_hw_state, 0,
814 sizeof(crtc_state->dpll_hw_state));
816 if (encoder->type == INTEL_OUTPUT_HDMI) {
817 pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
819 } else if (encoder->type == INTEL_OUTPUT_DP ||
820 encoder->type == INTEL_OUTPUT_DP_MST ||
821 encoder->type == INTEL_OUTPUT_EDP) {
822 pll = hsw_ddi_dp_get_dpll(encoder, clock);
824 } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
825 if (WARN_ON(crtc_state->port_clock / 2 != 135000))
826 return NULL;
828 crtc_state->dpll_hw_state.spll =
829 SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
831 pll = intel_find_shared_dpll(crtc, crtc_state,
832 DPLL_ID_SPLL, DPLL_ID_SPLL);
833 } else {
834 return NULL;
837 if (!pll)
838 return NULL;
840 intel_reference_shared_dpll(pll, crtc_state);
842 return pll;
845 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
846 struct intel_dpll_hw_state *hw_state)
848 DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
849 hw_state->wrpll, hw_state->spll);
852 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
853 .enable = hsw_ddi_wrpll_enable,
854 .disable = hsw_ddi_wrpll_disable,
855 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
858 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
859 .enable = hsw_ddi_spll_enable,
860 .disable = hsw_ddi_spll_disable,
861 .get_hw_state = hsw_ddi_spll_get_hw_state,
864 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
865 struct intel_shared_dpll *pll)
869 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
870 struct intel_shared_dpll *pll)
874 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
875 struct intel_shared_dpll *pll,
876 struct intel_dpll_hw_state *hw_state)
878 return true;
881 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
882 .enable = hsw_ddi_lcpll_enable,
883 .disable = hsw_ddi_lcpll_disable,
884 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
887 struct skl_dpll_regs {
888 i915_reg_t ctl, cfgcr1, cfgcr2;
891 /* this array is indexed by the *shared* pll id */
892 static const struct skl_dpll_regs skl_dpll_regs[4] = {
894 /* DPLL 0 */
895 .ctl = LCPLL1_CTL,
896 /* DPLL 0 doesn't support HDMI mode */
899 /* DPLL 1 */
900 .ctl = LCPLL2_CTL,
901 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
902 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
905 /* DPLL 2 */
906 .ctl = WRPLL_CTL(0),
907 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
908 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
911 /* DPLL 3 */
912 .ctl = WRPLL_CTL(1),
913 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
914 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
918 static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
919 struct intel_shared_dpll *pll)
921 uint32_t val;
923 val = I915_READ(DPLL_CTRL1);
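	/*
	 * Each shared DPLL owns a 6-bit field in DPLL_CTRL1 (override, SSC,
	 * HDMI mode and link rate), hence the pll->id * 6 shift below and
	 * the 0x3f mask used when reading the state back out.
	 */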
925 val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
926 DPLL_CTRL1_LINK_RATE_MASK(pll->id));
927 val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
929 I915_WRITE(DPLL_CTRL1, val);
930 POSTING_READ(DPLL_CTRL1);
933 static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
934 struct intel_shared_dpll *pll)
936 const struct skl_dpll_regs *regs = skl_dpll_regs;
938 skl_ddi_pll_write_ctrl1(dev_priv, pll);
940 I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
941 I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
942 POSTING_READ(regs[pll->id].cfgcr1);
943 POSTING_READ(regs[pll->id].cfgcr2);
945 /* the enable bit is always bit 31 */
946 I915_WRITE(regs[pll->id].ctl,
947 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
949 if (intel_wait_for_register(dev_priv,
950 DPLL_STATUS,
951 DPLL_LOCK(pll->id),
952 DPLL_LOCK(pll->id),
954 DRM_ERROR("DPLL %d not locked\n", pll->id);
957 static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
958 struct intel_shared_dpll *pll)
960 skl_ddi_pll_write_ctrl1(dev_priv, pll);
963 static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
964 struct intel_shared_dpll *pll)
966 const struct skl_dpll_regs *regs = skl_dpll_regs;
968 /* the enable bit is always bit 31 */
969 I915_WRITE(regs[pll->id].ctl,
970 I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
971 POSTING_READ(regs[pll->id].ctl);
974 static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
975 struct intel_shared_dpll *pll)
979 static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
980 struct intel_shared_dpll *pll,
981 struct intel_dpll_hw_state *hw_state)
983 uint32_t val;
984 const struct skl_dpll_regs *regs = skl_dpll_regs;
985 bool ret;
987 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
988 return false;
990 ret = false;
992 val = I915_READ(regs[pll->id].ctl);
993 if (!(val & LCPLL_PLL_ENABLE))
994 goto out;
996 val = I915_READ(DPLL_CTRL1);
997 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
999 /* avoid reading back stale values if HDMI mode is not enabled */
1000 if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
1001 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
1002 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
1004 ret = true;
1006 out:
1007 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1009 return ret;
1012 static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
1013 struct intel_shared_dpll *pll,
1014 struct intel_dpll_hw_state *hw_state)
1016 uint32_t val;
1017 const struct skl_dpll_regs *regs = skl_dpll_regs;
1018 bool ret;
1020 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
1021 return false;
1023 ret = false;
1025 /* DPLL0 is always enabled since it drives CDCLK */
1026 val = I915_READ(regs[pll->id].ctl);
1027 if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
1028 goto out;
1030 val = I915_READ(DPLL_CTRL1);
1031 hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
1033 ret = true;
1035 out:
1036 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1038 return ret;
1041 struct skl_wrpll_context {
1042 uint64_t min_deviation; /* current minimal deviation */
1043 uint64_t central_freq; /* chosen central freq */
1044 uint64_t dco_freq; /* chosen dco freq */
1045 unsigned int p; /* chosen divider */
1048 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1050 memset(ctx, 0, sizeof(*ctx));
1052 ctx->min_deviation = U64_MAX;
1055 /* DCO freq must be within +1%/-6% of the DCO central freq */
1056 #define SKL_DCO_MAX_PDEVIATION 100
1057 #define SKL_DCO_MAX_NDEVIATION 600
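/* (deviation below is computed in 0.01% units, so 100 = 1% and 600 = 6%) */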
1059 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1060 uint64_t central_freq,
1061 uint64_t dco_freq,
1062 unsigned int divider)
1064 uint64_t deviation;
1066 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1067 central_freq);
1069 /* positive deviation */
1070 if (dco_freq >= central_freq) {
1071 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1072 deviation < ctx->min_deviation) {
1073 ctx->min_deviation = deviation;
1074 ctx->central_freq = central_freq;
1075 ctx->dco_freq = dco_freq;
1076 ctx->p = divider;
1078 /* negative deviation */
1079 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1080 deviation < ctx->min_deviation) {
1081 ctx->min_deviation = deviation;
1082 ctx->central_freq = central_freq;
1083 ctx->dco_freq = dco_freq;
1084 ctx->p = divider;
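/*
 * Split the overall divider p into the three hardware dividers p0 (pdiv),
 * p1 (qdiv) and p2 (kdiv) such that p = p0 * p1 * p2. For example p = 20
 * gives p0 = 2, p1 = 5, p2 = 2, and p = 35 gives p0 = 7, p1 = 1, p2 = 5.
 */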
1088 static void skl_wrpll_get_multipliers(unsigned int p,
1089 unsigned int *p0 /* out */,
1090 unsigned int *p1 /* out */,
1091 unsigned int *p2 /* out */)
1093 /* even dividers */
1094 if (p % 2 == 0) {
1095 unsigned int half = p / 2;
1097 if (half == 1 || half == 2 || half == 3 || half == 5) {
1098 *p0 = 2;
1099 *p1 = 1;
1100 *p2 = half;
1101 } else if (half % 2 == 0) {
1102 *p0 = 2;
1103 *p1 = half / 2;
1104 *p2 = 2;
1105 } else if (half % 3 == 0) {
1106 *p0 = 3;
1107 *p1 = half / 3;
1108 *p2 = 2;
1109 } else if (half % 7 == 0) {
1110 *p0 = 7;
1111 *p1 = half / 7;
1112 *p2 = 2;
1114 } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
1115 *p0 = 3;
1116 *p1 = 1;
1117 *p2 = p / 3;
1118 } else if (p == 5 || p == 7) {
1119 *p0 = p;
1120 *p1 = 1;
1121 *p2 = 1;
1122 } else if (p == 15) {
1123 *p0 = 3;
1124 *p1 = 1;
1125 *p2 = 5;
1126 } else if (p == 21) {
1127 *p0 = 7;
1128 *p1 = 1;
1129 *p2 = 3;
1130 } else if (p == 35) {
1131 *p0 = 7;
1132 *p1 = 1;
1133 *p2 = 5;
1137 struct skl_wrpll_params {
1138 uint32_t dco_fraction;
1139 uint32_t dco_integer;
1140 uint32_t qdiv_ratio;
1141 uint32_t qdiv_mode;
1142 uint32_t kdiv;
1143 uint32_t pdiv;
1144 uint32_t central_freq;
1147 static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
1148 uint64_t afe_clock,
1149 uint64_t central_freq,
1150 uint32_t p0, uint32_t p1, uint32_t p2)
1152 uint64_t dco_freq;
1154 switch (central_freq) {
1155 case 9600000000ULL:
1156 params->central_freq = 0;
1157 break;
1158 case 9000000000ULL:
1159 params->central_freq = 1;
1160 break;
1161 case 8400000000ULL:
1162 params->central_freq = 3;
1165 switch (p0) {
1166 case 1:
1167 params->pdiv = 0;
1168 break;
1169 case 2:
1170 params->pdiv = 1;
1171 break;
1172 case 3:
1173 params->pdiv = 2;
1174 break;
1175 case 7:
1176 params->pdiv = 4;
1177 break;
1178 default:
1179 WARN(1, "Incorrect PDiv\n");
1182 switch (p2) {
1183 case 5:
1184 params->kdiv = 0;
1185 break;
1186 case 2:
1187 params->kdiv = 1;
1188 break;
1189 case 3:
1190 params->kdiv = 2;
1191 break;
1192 case 1:
1193 params->kdiv = 3;
1194 break;
1195 default:
1196 WARN(1, "Incorrect KDiv\n");
1199 params->qdiv_ratio = p1;
1200 params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
1202 dco_freq = p0 * p1 * p2 * afe_clock;
1205 * Intermediate values are in Hz.
1206 * Divide by MHz to match the bspec
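 * (dco_integer ends up as the DCO frequency in whole multiples of the
 * 24 MHz reference clock, dco_fraction as the remaining fraction in
 * 1/0x8000 steps of one such multiple)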
1208 params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
1209 params->dco_fraction =
1210 div_u64((div_u64(dco_freq, 24) -
1211 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
1214 static bool
1215 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1216 struct skl_wrpll_params *wrpll_params)
1218 uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1219 uint64_t dco_central_freq[3] = {8400000000ULL,
1220 9000000000ULL,
1221 9600000000ULL};
1222 static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
1223 24, 28, 30, 32, 36, 40, 42, 44,
1224 48, 52, 54, 56, 60, 64, 66, 68,
1225 70, 72, 76, 78, 80, 84, 88, 90,
1226 92, 96, 98 };
1227 static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1228 static const struct {
1229 const int *list;
1230 int n_dividers;
1231 } dividers[] = {
1232 { even_dividers, ARRAY_SIZE(even_dividers) },
1233 { odd_dividers, ARRAY_SIZE(odd_dividers) },
1235 struct skl_wrpll_context ctx;
1236 unsigned int dco, d, i;
1237 unsigned int p0, p1, p2;
1239 skl_wrpll_context_init(&ctx);
1241 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1242 for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1243 for (i = 0; i < dividers[d].n_dividers; i++) {
1244 unsigned int p = dividers[d].list[i];
1245 uint64_t dco_freq = p * afe_clock;
1247 skl_wrpll_try_divider(&ctx,
1248 dco_central_freq[dco],
1249 dco_freq,
1252 * Skip the remaining dividers if we're sure to
1253 * have found the definitive divider; a deviation
1254 * of 0 cannot be improved upon.
1256 if (ctx.min_deviation == 0)
1257 goto skip_remaining_dividers;
1261 skip_remaining_dividers:
1263 * If a solution is found with an even divider, prefer
1264 * this one.
1266 if (d == 0 && ctx.p)
1267 break;
1270 if (!ctx.p) {
1271 DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1272 return false;
1276 * gcc incorrectly analyses that these can be used without being
1277 * initialized. To be fair, it's hard to guess.
1279 p0 = p1 = p2 = 0;
1280 skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1281 skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1282 p0, p1, p2);
1284 return true;
1287 static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
1288 struct intel_crtc_state *crtc_state,
1289 int clock)
1291 uint32_t ctrl1, cfgcr1, cfgcr2;
1292 struct skl_wrpll_params wrpll_params = { 0, };
1295 * See comment in intel_dpll_hw_state to understand why we always use 0
1296 * as the DPLL id in this function.
1298 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1300 ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
1302 if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
1303 return false;
1305 cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
1306 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
1307 wrpll_params.dco_integer;
1309 cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
1310 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
1311 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1312 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1313 wrpll_params.central_freq;
1315 memset(&crtc_state->dpll_hw_state, 0,
1316 sizeof(crtc_state->dpll_hw_state));
1318 crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1319 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
1320 crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
1321 return true;
1325 static bool
1326 skl_ddi_dp_set_dpll_hw_state(int clock,
1327 struct intel_dpll_hw_state *dpll_hw_state)
1329 uint32_t ctrl1;
1332 * See comment in intel_dpll_hw_state to understand why we always use 0
1333 * as the DPLL id in this function.
1335 ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1336 switch (clock / 2) {
1337 case 81000:
1338 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1339 break;
1340 case 135000:
1341 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1342 break;
1343 case 270000:
1344 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1345 break;
1346 /* eDP 1.4 rates */
1347 case 162000:
1348 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1349 break;
1350 case 108000:
1351 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1352 break;
1353 case 216000:
1354 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1355 break;
1358 dpll_hw_state->ctrl1 = ctrl1;
1359 return true;
1362 static struct intel_shared_dpll *
1363 skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1364 struct intel_encoder *encoder)
1366 struct intel_shared_dpll *pll;
1367 int clock = crtc_state->port_clock;
1368 bool bret;
1369 struct intel_dpll_hw_state dpll_hw_state;
1371 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
1373 if (encoder->type == INTEL_OUTPUT_HDMI) {
1374 bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
1375 if (!bret) {
1376 DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
1377 return NULL;
1379 } else if (encoder->type == INTEL_OUTPUT_DP ||
1380 encoder->type == INTEL_OUTPUT_DP_MST ||
1381 encoder->type == INTEL_OUTPUT_EDP) {
1382 bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
1383 if (!bret) {
1384 DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
1385 return NULL;
1387 crtc_state->dpll_hw_state = dpll_hw_state;
1388 } else {
1389 return NULL;
1392 if (encoder->type == INTEL_OUTPUT_EDP)
1393 pll = intel_find_shared_dpll(crtc, crtc_state,
1394 DPLL_ID_SKL_DPLL0,
1395 DPLL_ID_SKL_DPLL0);
1396 else
1397 pll = intel_find_shared_dpll(crtc, crtc_state,
1398 DPLL_ID_SKL_DPLL1,
1399 DPLL_ID_SKL_DPLL3);
1400 if (!pll)
1401 return NULL;
1403 intel_reference_shared_dpll(pll, crtc_state);
1405 return pll;
1408 static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
1409 struct intel_dpll_hw_state *hw_state)
1411 DRM_DEBUG_KMS("dpll_hw_state: "
1412 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
1413 hw_state->ctrl1,
1414 hw_state->cfgcr1,
1415 hw_state->cfgcr2);
1418 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
1419 .enable = skl_ddi_pll_enable,
1420 .disable = skl_ddi_pll_disable,
1421 .get_hw_state = skl_ddi_pll_get_hw_state,
1424 static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
1425 .enable = skl_ddi_dpll0_enable,
1426 .disable = skl_ddi_dpll0_disable,
1427 .get_hw_state = skl_ddi_dpll0_get_hw_state,
1430 static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1431 struct intel_shared_dpll *pll)
1433 uint32_t temp;
1434 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
1435 enum dpio_phy phy;
1436 enum dpio_channel ch;
1438 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1440 /* Non-SSC reference */
1441 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1442 temp |= PORT_PLL_REF_SEL;
1443 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1445 if (IS_GEMINILAKE(dev_priv)) {
1446 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1447 temp |= PORT_PLL_POWER_ENABLE;
1448 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1450 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1451 PORT_PLL_POWER_STATE), 200))
1452 DRM_ERROR("Power state not set for PLL:%d\n", port);
1455 /* Disable 10 bit clock */
1456 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1457 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1458 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1460 /* Write P1 & P2 */
1461 temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1462 temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
1463 temp |= pll->state.hw_state.ebb0;
1464 I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
1466 /* Write M2 integer */
1467 temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1468 temp &= ~PORT_PLL_M2_MASK;
1469 temp |= pll->state.hw_state.pll0;
1470 I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
1472 /* Write N */
1473 temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1474 temp &= ~PORT_PLL_N_MASK;
1475 temp |= pll->state.hw_state.pll1;
1476 I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
1478 /* Write M2 fraction */
1479 temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1480 temp &= ~PORT_PLL_M2_FRAC_MASK;
1481 temp |= pll->state.hw_state.pll2;
1482 I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
1484 /* Write M2 fraction enable */
1485 temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1486 temp &= ~PORT_PLL_M2_FRAC_ENABLE;
1487 temp |= pll->state.hw_state.pll3;
1488 I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
1490 /* Write coeff */
1491 temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1492 temp &= ~PORT_PLL_PROP_COEFF_MASK;
1493 temp &= ~PORT_PLL_INT_COEFF_MASK;
1494 temp &= ~PORT_PLL_GAIN_CTL_MASK;
1495 temp |= pll->state.hw_state.pll6;
1496 I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
1498 /* Write calibration val */
1499 temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1500 temp &= ~PORT_PLL_TARGET_CNT_MASK;
1501 temp |= pll->state.hw_state.pll8;
1502 I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
1504 temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1505 temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
1506 temp |= pll->state.hw_state.pll9;
1507 I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
1509 temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1510 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
1511 temp &= ~PORT_PLL_DCO_AMP_MASK;
1512 temp |= pll->state.hw_state.pll10;
1513 I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
1515 /* Recalibrate with new settings */
1516 temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1517 temp |= PORT_PLL_RECALIBRATE;
1518 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1519 temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
1520 temp |= pll->state.hw_state.ebb4;
1521 I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
1523 /* Enable PLL */
1524 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1525 temp |= PORT_PLL_ENABLE;
1526 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1527 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1529 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1530 200))
1531 DRM_ERROR("PLL %d not locked\n", port);
1533 if (IS_GEMINILAKE(dev_priv)) {
1534 temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
1535 temp |= DCC_DELAY_RANGE_2;
1536 I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
1540 * While we write to the group register to program all lanes at once we
1541 * can read only lane registers and we pick lanes 0/1 for that.
1543 temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1544 temp &= ~LANE_STAGGER_MASK;
1545 temp &= ~LANESTAGGER_STRAP_OVRD;
1546 temp |= pll->state.hw_state.pcsdw12;
1547 I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
1550 static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
1551 struct intel_shared_dpll *pll)
1553 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
1554 uint32_t temp;
1556 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1557 temp &= ~PORT_PLL_ENABLE;
1558 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1559 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1561 if (IS_GEMINILAKE(dev_priv)) {
1562 temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
1563 temp &= ~PORT_PLL_POWER_ENABLE;
1564 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1566 if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
1567 PORT_PLL_POWER_STATE), 200))
1568 DRM_ERROR("Power state not reset for PLL:%d\n", port);
1572 static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1573 struct intel_shared_dpll *pll,
1574 struct intel_dpll_hw_state *hw_state)
1576 enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
1577 uint32_t val;
1578 bool ret;
1579 enum dpio_phy phy;
1580 enum dpio_channel ch;
1582 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
1584 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
1585 return false;
1587 ret = false;
1589 val = I915_READ(BXT_PORT_PLL_ENABLE(port));
1590 if (!(val & PORT_PLL_ENABLE))
1591 goto out;
1593 hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
1594 hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
1596 hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
1597 hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
1599 hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
1600 hw_state->pll0 &= PORT_PLL_M2_MASK;
1602 hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
1603 hw_state->pll1 &= PORT_PLL_N_MASK;
1605 hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
1606 hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
1608 hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
1609 hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
1611 hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
1612 hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
1613 PORT_PLL_INT_COEFF_MASK |
1614 PORT_PLL_GAIN_CTL_MASK;
1616 hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
1617 hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
1619 hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
1620 hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
1622 hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
1623 hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
1624 PORT_PLL_DCO_AMP_MASK;
1627 * While we write to the group register to program all lanes at once we
1628 * can read only lane registers. We configure all lanes the same way, so
1629 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
1631 hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
1632 if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
1633 DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
1634 hw_state->pcsdw12,
1635 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
1636 hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
1638 ret = true;
1640 out:
1641 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1643 return ret;
1646 /* bxt clock parameters */
1647 struct bxt_clk_div {
1648 int clock;
1649 uint32_t p1;
1650 uint32_t p2;
1651 uint32_t m2_int;
1652 uint32_t m2_frac;
1653 bool m2_frac_en;
1654 uint32_t n;
1656 int vco;
1659 /* pre-calculated values for DP linkrates */
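/* fields: {clock, p1, p2, m2_int, m2_frac, m2_frac_en, n}; vco is filled in
 * by bxt_ddi_dp_pll_dividers() at lookup time */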
1660 static const struct bxt_clk_div bxt_dp_clk_val[] = {
1661 {162000, 4, 2, 32, 1677722, 1, 1},
1662 {270000, 4, 1, 27, 0, 0, 1},
1663 {540000, 2, 1, 27, 0, 0, 1},
1664 {216000, 3, 2, 32, 1677722, 1, 1},
1665 {243000, 4, 1, 24, 1258291, 1, 1},
1666 {324000, 4, 1, 32, 1677722, 1, 1},
1667 {432000, 3, 1, 32, 1677722, 1, 1}
1670 static bool
1671 bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
1672 struct intel_crtc_state *crtc_state, int clock,
1673 struct bxt_clk_div *clk_div)
1675 struct dpll best_clock;
1677 /* Calculate HDMI div */
1679 * FIXME: tie the following calculation into
1680 * i9xx_crtc_compute_clock
1682 if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
1683 DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
1684 clock, pipe_name(intel_crtc->pipe));
1685 return false;
1688 clk_div->p1 = best_clock.p1;
1689 clk_div->p2 = best_clock.p2;
1690 WARN_ON(best_clock.m1 != 2);
1691 clk_div->n = best_clock.n;
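	/* best_clock.m2 is a fixed-point value with 22 fractional bits */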
1692 clk_div->m2_int = best_clock.m2 >> 22;
1693 clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
1694 clk_div->m2_frac_en = clk_div->m2_frac != 0;
1696 clk_div->vco = best_clock.vco;
1698 return true;
1701 static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
1703 int i;
1705 *clk_div = bxt_dp_clk_val[0];
1706 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
1707 if (bxt_dp_clk_val[i].clock == clock) {
1708 *clk_div = bxt_dp_clk_val[i];
1709 break;
1713 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
1716 static bool bxt_ddi_set_dpll_hw_state(int clock,
1717 struct bxt_clk_div *clk_div,
1718 struct intel_dpll_hw_state *dpll_hw_state)
1720 int vco = clk_div->vco;
1721 uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
1722 uint32_t lanestagger;
1724 if (vco >= 6200000 && vco <= 6700000) {
1725 prop_coef = 4;
1726 int_coef = 9;
1727 gain_ctl = 3;
1728 targ_cnt = 8;
1729 } else if ((vco > 5400000 && vco < 6200000) ||
1730 (vco >= 4800000 && vco < 5400000)) {
1731 prop_coef = 5;
1732 int_coef = 11;
1733 gain_ctl = 3;
1734 targ_cnt = 9;
1735 } else if (vco == 5400000) {
1736 prop_coef = 3;
1737 int_coef = 8;
1738 gain_ctl = 1;
1739 targ_cnt = 9;
1740 } else {
1741 DRM_ERROR("Invalid VCO\n");
1742 return false;
1745 if (clock > 270000)
1746 lanestagger = 0x18;
1747 else if (clock > 135000)
1748 lanestagger = 0x0d;
1749 else if (clock > 67000)
1750 lanestagger = 0x07;
1751 else if (clock > 33000)
1752 lanestagger = 0x04;
1753 else
1754 lanestagger = 0x02;
1756 dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
1757 dpll_hw_state->pll0 = clk_div->m2_int;
1758 dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
1759 dpll_hw_state->pll2 = clk_div->m2_frac;
1761 if (clk_div->m2_frac_en)
1762 dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;
1764 dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
1765 dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);
1767 dpll_hw_state->pll8 = targ_cnt;
1769 dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
1771 dpll_hw_state->pll10 =
1772 PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
1773 | PORT_PLL_DCO_AMP_OVR_EN_H;
1775 dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
1777 dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
1779 return true;
1782 static bool
1783 bxt_ddi_dp_set_dpll_hw_state(int clock,
1784 struct intel_dpll_hw_state *dpll_hw_state)
1786 struct bxt_clk_div clk_div = {0};
1788 bxt_ddi_dp_pll_dividers(clock, &clk_div);
1790 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1793 static bool
1794 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
1795 struct intel_crtc_state *crtc_state, int clock,
1796 struct intel_dpll_hw_state *dpll_hw_state)
1798 struct bxt_clk_div clk_div = { };
1800 bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
1802 return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
1805 static struct intel_shared_dpll *
1806 bxt_get_dpll(struct intel_crtc *crtc,
1807 struct intel_crtc_state *crtc_state,
1808 struct intel_encoder *encoder)
1810 struct intel_dpll_hw_state dpll_hw_state = { };
1811 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1812 struct intel_digital_port *intel_dig_port;
1813 struct intel_shared_dpll *pll;
1814 int i, clock = crtc_state->port_clock;
1816 if (encoder->type == INTEL_OUTPUT_HDMI &&
1817 !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
1818 &dpll_hw_state))
1819 return NULL;
1821 if ((encoder->type == INTEL_OUTPUT_DP ||
1822 encoder->type == INTEL_OUTPUT_EDP ||
1823 encoder->type == INTEL_OUTPUT_DP_MST) &&
1824 !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
1825 return NULL;
1827 memset(&crtc_state->dpll_hw_state, 0,
1828 sizeof(crtc_state->dpll_hw_state));
1830 crtc_state->dpll_hw_state = dpll_hw_state;
1832 if (encoder->type == INTEL_OUTPUT_DP_MST) {
1833 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
1835 intel_dig_port = intel_mst->primary;
1836 } else
1837 intel_dig_port = enc_to_dig_port(&encoder->base);
1839 /* 1:1 mapping between ports and PLLs */
1840 i = (enum intel_dpll_id) intel_dig_port->port;
1841 pll = intel_get_shared_dpll_by_id(dev_priv, i);
1843 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1844 crtc->base.base.id, crtc->base.name, pll->name);
1846 intel_reference_shared_dpll(pll, crtc_state);
1848 return pll;
1851 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1852 struct intel_dpll_hw_state *hw_state)
1854 DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
1855 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1856 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1857 hw_state->ebb0,
1858 hw_state->ebb4,
1859 hw_state->pll0,
1860 hw_state->pll1,
1861 hw_state->pll2,
1862 hw_state->pll3,
1863 hw_state->pll6,
1864 hw_state->pll8,
1865 hw_state->pll9,
1866 hw_state->pll10,
1867 hw_state->pcsdw12);
1870 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1871 .enable = bxt_ddi_pll_enable,
1872 .disable = bxt_ddi_pll_disable,
1873 .get_hw_state = bxt_ddi_pll_get_hw_state,
1876 static void intel_ddi_pll_init(struct drm_device *dev)
1878 struct drm_i915_private *dev_priv = to_i915(dev);
1880 if (INTEL_GEN(dev_priv) < 9) {
1881 uint32_t val = I915_READ(LCPLL_CTL);
1884 * The LCPLL should already be enabled by the BIOS. For now
1885 * let's just check its state and print errors in case
1886 * something is wrong. Don't even try to turn it on.
1889 if (val & LCPLL_CD_SOURCE_FCLK)
1890 DRM_ERROR("CDCLK source is not LCPLL\n");
1892 if (val & LCPLL_PLL_DISABLE)
1893 DRM_ERROR("LCPLL is disabled\n");
1897 struct dpll_info {
1898 const char *name;
1899 const int id;
1900 const struct intel_shared_dpll_funcs *funcs;
1901 uint32_t flags;
1904 struct intel_dpll_mgr {
1905 const struct dpll_info *dpll_info;
1907 struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
1908 struct intel_crtc_state *crtc_state,
1909 struct intel_encoder *encoder);
1911 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
1912 struct intel_dpll_hw_state *hw_state);
1915 static const struct dpll_info pch_plls[] = {
1916 { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
1917 { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
1918 { NULL, -1, NULL, 0 },
1921 static const struct intel_dpll_mgr pch_pll_mgr = {
1922 .dpll_info = pch_plls,
1923 .get_dpll = ibx_get_dpll,
1924 .dump_hw_state = ibx_dump_hw_state,
1927 static const struct dpll_info hsw_plls[] = {
1928 { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
1929 { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
1930 { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
1931 { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1932 { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1933 { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
1934 { NULL, -1, NULL, },
1937 static const struct intel_dpll_mgr hsw_pll_mgr = {
1938 .dpll_info = hsw_plls,
1939 .get_dpll = hsw_get_dpll,
1940 .dump_hw_state = hsw_dump_hw_state,
1943 static const struct dpll_info skl_plls[] = {
1944 { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
1945 { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
1946 { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
1947 { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
1948 { NULL, -1, NULL, },
1951 static const struct intel_dpll_mgr skl_pll_mgr = {
1952 .dpll_info = skl_plls,
1953 .get_dpll = skl_get_dpll,
1954 .dump_hw_state = skl_dump_hw_state,
1957 static const struct dpll_info bxt_plls[] = {
1958 { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
1959 { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
1960 { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
1961 { NULL, -1, NULL, },
1964 static const struct intel_dpll_mgr bxt_pll_mgr = {
1965 .dpll_info = bxt_plls,
1966 .get_dpll = bxt_get_dpll,
1967 .dump_hw_state = bxt_dump_hw_state,
1971 * intel_shared_dpll_init - Initialize shared DPLLs
1972 * @dev: drm device
1974 * Initialize shared DPLLs for @dev.
1976 void intel_shared_dpll_init(struct drm_device *dev)
1978 struct drm_i915_private *dev_priv = to_i915(dev);
1979 const struct intel_dpll_mgr *dpll_mgr = NULL;
1980 const struct dpll_info *dpll_info;
1981 int i;
1983 if (IS_GEN9_BC(dev_priv))
1984 dpll_mgr = &skl_pll_mgr;
1985 else if (IS_GEN9_LP(dev_priv))
1986 dpll_mgr = &bxt_pll_mgr;
1987 else if (HAS_DDI(dev_priv))
1988 dpll_mgr = &hsw_pll_mgr;
1989 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
1990 dpll_mgr = &pch_pll_mgr;
1992 if (!dpll_mgr) {
1993 dev_priv->num_shared_dpll = 0;
1994 return;
1997 dpll_info = dpll_mgr->dpll_info;
1999 for (i = 0; dpll_info[i].id >= 0; i++) {
2000 WARN_ON(i != dpll_info[i].id);
2002 dev_priv->shared_dplls[i].id = dpll_info[i].id;
2003 dev_priv->shared_dplls[i].name = dpll_info[i].name;
2004 dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
2005 dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
2008 dev_priv->dpll_mgr = dpll_mgr;
2009 dev_priv->num_shared_dpll = i;
2010 mutex_init(&dev_priv->dpll_lock);
2012 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
2014 /* FIXME: Move this to a more suitable place */
2015 if (HAS_DDI(dev_priv))
2016 intel_ddi_pll_init(dev);
2020 * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
2021 * @crtc: CRTC
2022 * @crtc_state: atomic state for @crtc
2023 * @encoder: encoder
2025 * Find an appropriate DPLL for the given CRTC and encoder combination. A
2026 * reference from the @crtc to the returned pll is registered in the atomic
2027 * state. That configuration is made effective by calling
2028 * intel_shared_dpll_swap_state(). The reference should be released by calling
2029 * intel_release_shared_dpll().
2031 * Returns:
2032 * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
2034 struct intel_shared_dpll *
2035 intel_get_shared_dpll(struct intel_crtc *crtc,
2036 struct intel_crtc_state *crtc_state,
2037 struct intel_encoder *encoder)
2039 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2040 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
2042 if (WARN_ON(!dpll_mgr))
2043 return NULL;
2045 return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
2049 * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
2050 * @dpll: dpll in use by @crtc
2051 * @crtc: crtc
2052 * @state: atomic state
2054 * This function releases the reference from @crtc to @dpll from the
2055 * atomic @state. The new configuration is made effective by calling
2056 * intel_shared_dpll_swap_state().
2058 void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
2059 struct intel_crtc *crtc,
2060 struct drm_atomic_state *state)
2062 struct intel_shared_dpll_state *shared_dpll_state;
2064 shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
2065 shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
2069 * intel_dpll_dump_hw_state - write hw_state to dmesg
2070 * @dev_priv: i915 drm device
2071 * @hw_state: hw state to be written to the log
2073 * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
2075 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
2076 struct intel_dpll_hw_state *hw_state)
2078 if (dev_priv->dpll_mgr) {
2079 dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
2080 } else {
2081 /* fallback for platforms that don't use the shared dpll
2082 * infrastructure
2084 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
2085 "fp0: 0x%x, fp1: 0x%x\n",
2086 hw_state->dpll,
2087 hw_state->dpll_md,
2088 hw_state->fp0,
2089 hw_state->fp1);