WIP FPC-III support
[linux/fpc-iii.git] / drivers / gpu / drm / i915 / display / intel_dpll_mgr.c
blobf6ad257a260e48b8ec01362dd6b5974269a8e258
1 /*
2 * Copyright © 2006-2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
24 #include "intel_display_types.h"
25 #include "intel_dpio_phy.h"
26 #include "intel_dpll_mgr.h"
28 /**
29 * DOC: Display PLLs
31 * Display PLLs used for driving outputs vary by platform. While some have
32 * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
33 * from a pool. In the latter scenario, it is possible that multiple pipes
34 * share a PLL if their configurations match.
36 * This file provides an abstraction over display PLLs. The function
37 * intel_shared_dpll_init() initializes the PLLs for the given platform. The
38 * users of a PLL are tracked and that tracking is integrated with the atomic
39 * modeset interface. During an atomic operation, required PLLs can be reserved
40 * for a given CRTC and encoder configuration by calling
41 * intel_reserve_shared_dplls() and previously reserved PLLs can be released
42 * with intel_release_shared_dplls().
43 * Changes to the users are first staged in the atomic state, and then made
44 * effective by calling intel_shared_dpll_swap_state() during the atomic
45 * commit phase.
/*
 * Per-platform PLL manager: a table describing the platform's PLLs plus
 * hooks for reserving/releasing them during an atomic modeset.  Not every
 * platform fills in every hook (e.g. pch_pll_mgr below has no
 * update_ref_clks or update_active_dpll).
 */
48 struct intel_dpll_mgr {
49 const struct dpll_info *dpll_info; /* terminated by an empty sentinel entry */
51 bool (*get_dplls)(struct intel_atomic_state *state,
52 struct intel_crtc *crtc,
53 struct intel_encoder *encoder);
54 void (*put_dplls)(struct intel_atomic_state *state,
55 struct intel_crtc *crtc);
56 void (*update_active_dpll)(struct intel_atomic_state *state,
57 struct intel_crtc *crtc,
58 struct intel_encoder *encoder);
59 void (*update_ref_clks)(struct drm_i915_private *i915);
60 void (*dump_hw_state)(struct drm_i915_private *dev_priv,
61 const struct intel_dpll_hw_state *hw_state);
/*
 * Snapshot the current state of every shared DPLL into the @shared_dpll
 * array (one entry per PLL, indexed like dev_priv->dpll.shared_dplls).
 */
64 static void
65 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
66 struct intel_shared_dpll_state *shared_dpll)
68 enum intel_dpll_id i;
70 /* Copy shared dpll state */
71 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
72 struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
74 shared_dpll[i] = pll->state;
/*
 * Return the atomic state's private copy of the shared DPLL state array,
 * lazily duplicating it from the device on first use within this state.
 * Caller must hold connection_mutex (warned on below).
 */
78 static struct intel_shared_dpll_state *
79 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
81 struct intel_atomic_state *state = to_intel_atomic_state(s);
83 drm_WARN_ON(s->dev, !drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
85 if (!state->dpll_set) {
86 state->dpll_set = true;
88 intel_atomic_duplicate_dpll_state(to_i915(s->dev),
89 state->shared_dpll);
92 return state->shared_dpll;
95 /**
96 * intel_get_shared_dpll_by_id - get a DPLL given its id
97 * @dev_priv: i915 device instance
98 * @id: pll id
100 * Returns:
101 * A pointer to the DPLL with @id
103 struct intel_shared_dpll *
104 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
105 enum intel_dpll_id id)
/* NOTE(review): no range check on @id here — callers must pass a valid index. */
107 return &dev_priv->dpll.shared_dplls[id];
111 * intel_get_shared_dpll_id - get the id of a DPLL
112 * @dev_priv: i915 device instance
113 * @pll: the DPLL
115 * Returns:
116 * The id of @pll
118 enum intel_dpll_id
119 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
120 struct intel_shared_dpll *pll)
/* A PLL's id is its index into the device's shared_dplls array. */
122 long pll_idx = pll - dev_priv->dpll.shared_dplls;
124 if (drm_WARN_ON(&dev_priv->drm,
125 pll_idx < 0 ||
126 pll_idx >= dev_priv->dpll.num_shared_dpll))
127 return -1; /* @pll does not point into our array */
129 return pll_idx;
132 /* For ILK+ */
/*
 * Read the PLL's hardware enable state and warn unless it matches the
 * expected @state (true = should be enabled).
 */
133 void assert_shared_dpll(struct drm_i915_private *dev_priv,
134 struct intel_shared_dpll *pll,
135 bool state)
137 bool cur_state;
138 struct intel_dpll_hw_state hw_state;
140 if (drm_WARN(&dev_priv->drm, !pll,
141 "asserting DPLL %s with no DPLL\n", onoff(state)))
142 return;
144 cur_state = intel_dpll_get_hw_state(dev_priv, pll, &hw_state);
145 I915_STATE_WARN(cur_state != state,
146 "%s assertion failure (expected %s, current %s)\n",
147 pll->info->name, onoff(state), onoff(cur_state));
/*
 * Select the platform-specific enable register for a combo PLL.
 * On JSL/EHL, DPLL4 is enabled via the MG PLL 0 enable register.
 */
150 static i915_reg_t
151 intel_combo_pll_enable_reg(struct drm_i915_private *i915,
152 struct intel_shared_dpll *pll)
154 if (IS_DG1(i915))
155 return DG1_DPLL_ENABLE(pll->info->id);
156 else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
157 return MG_PLL_ENABLE(0);
159 return CNL_DPLL_ENABLE(pll->info->id);
163 * intel_prepare_shared_dpll - call a dpll's prepare hook
164 * @crtc_state: CRTC, and its state, which has a shared dpll
166 * This calls the PLL's prepare hook if it has one and if the PLL is not
167 * already enabled. The prepare hook is platform specific.
169 void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
171 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
172 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
173 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
175 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
176 return;
/* Serialized against enable/disable via dpll.lock. */
178 mutex_lock(&dev_priv->dpll.lock);
179 drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
/* Only program the PLL while it is still fully off (no active CRTCs). */
180 if (!pll->active_mask) {
181 drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
182 drm_WARN_ON(&dev_priv->drm, pll->on);
183 assert_shared_dpll_disabled(dev_priv, pll);
185 pll->info->funcs->prepare(dev_priv, pll);
187 mutex_unlock(&dev_priv->dpll.lock);
191 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
192 * @crtc_state: CRTC, and its state, which has a shared DPLL
194 * Enable the shared DPLL used by @crtc.
196 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
198 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
199 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
200 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
201 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
202 unsigned int old_mask;
204 if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
205 return;
206 mutex_lock(&dev_priv->dpll.lock);
208 old_mask = pll->active_mask;
/* The CRTC must have reserved this PLL and must not already be counted. */
210 if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
211 drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
212 goto out;
214 pll->active_mask |= crtc_mask;
216 drm_dbg_kms(&dev_priv->drm,
217 "enable %s (active %x, on? %d) for crtc %d\n",
218 pll->info->name, pll->active_mask, pll->on,
219 crtc->base.base.id);
/* If some other CRTC already activated this PLL, hw is already on. */
221 if (old_mask) {
222 drm_WARN_ON(&dev_priv->drm, !pll->on);
223 assert_shared_dpll_enabled(dev_priv, pll);
224 goto out;
226 drm_WARN_ON(&dev_priv->drm, pll->on);
/* First active CRTC: actually turn the hardware on. */
228 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
229 pll->info->funcs->enable(dev_priv, pll);
230 pll->on = true;
232 out:
233 mutex_unlock(&dev_priv->dpll.lock);
237 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
238 * @crtc_state: CRTC, and its state, which has a shared DPLL
240 * Disable the shared DPLL used by @crtc.
242 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
244 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
245 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
246 struct intel_shared_dpll *pll = crtc_state->shared_dpll;
247 unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
249 /* PCH only available on ILK+ */
250 if (INTEL_GEN(dev_priv) < 5)
251 return;
253 if (pll == NULL)
254 return;
256 mutex_lock(&dev_priv->dpll.lock);
257 if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
258 goto out;
260 drm_dbg_kms(&dev_priv->drm,
261 "disable %s (active %x, on? %d) for crtc %d\n",
262 pll->info->name, pll->active_mask, pll->on,
263 crtc->base.base.id);
265 assert_shared_dpll_enabled(dev_priv, pll);
266 drm_WARN_ON(&dev_priv->drm, !pll->on);
/* Drop this CRTC's reference; only the last user turns the hw off. */
268 pll->active_mask &= ~crtc_mask;
269 if (pll->active_mask)
270 goto out;
272 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
273 pll->info->funcs->disable(dev_priv, pll);
274 pll->on = false;
276 out:
277 mutex_unlock(&dev_priv->dpll.lock);
/*
 * Pick a PLL from @dpll_mask for @crtc: prefer an in-use PLL whose staged
 * hw_state matches @pll_state exactly (sharing), otherwise fall back to
 * the first unused PLL.  Returns NULL if neither exists.
 */
280 static struct intel_shared_dpll *
281 intel_find_shared_dpll(struct intel_atomic_state *state,
282 const struct intel_crtc *crtc,
283 const struct intel_dpll_hw_state *pll_state,
284 unsigned long dpll_mask)
286 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
287 struct intel_shared_dpll *pll, *unused_pll = NULL;
288 struct intel_shared_dpll_state *shared_dpll;
289 enum intel_dpll_id i;
291 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
293 drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
295 for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
296 pll = &dev_priv->dpll.shared_dplls[i];
298 /* Only want to check enabled timings first */
299 if (shared_dpll[i].crtc_mask == 0) {
300 if (!unused_pll)
301 unused_pll = pll;
302 continue;
/* Exact hw_state match means the PLL can be shared as-is. */
305 if (memcmp(pll_state,
306 &shared_dpll[i].hw_state,
307 sizeof(*pll_state)) == 0) {
308 drm_dbg_kms(&dev_priv->drm,
309 "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
310 crtc->base.base.id, crtc->base.name,
311 pll->info->name,
312 shared_dpll[i].crtc_mask,
313 pll->active_mask);
314 return pll;
318 /* Ok no matching timings, maybe there's a free one? */
319 if (unused_pll) {
320 drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
321 crtc->base.base.id, crtc->base.name,
322 unused_pll->info->name);
323 return unused_pll;
326 return NULL;
/*
 * Record @crtc as a user of @pll in the atomic state.  The first user
 * seeds the staged hw_state; later users must already match it (enforced
 * by intel_find_shared_dpll()'s memcmp).
 */
329 static void
330 intel_reference_shared_dpll(struct intel_atomic_state *state,
331 const struct intel_crtc *crtc,
332 const struct intel_shared_dpll *pll,
333 const struct intel_dpll_hw_state *pll_state)
335 struct drm_i915_private *i915 = to_i915(state->base.dev);
336 struct intel_shared_dpll_state *shared_dpll;
337 const enum intel_dpll_id id = pll->info->id;
339 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
341 if (shared_dpll[id].crtc_mask == 0)
342 shared_dpll[id].hw_state = *pll_state;
344 drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
345 pipe_name(crtc->pipe));
347 shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
/* Drop @crtc's staged reference on @pll (inverse of the above). */
350 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
351 const struct intel_crtc *crtc,
352 const struct intel_shared_dpll *pll)
354 struct intel_shared_dpll_state *shared_dpll;
356 shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
357 shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
/*
 * Generic .put_dplls hook: clear the new state's PLL and unreference
 * whatever PLL the old state was using (if any).
 */
360 static void intel_put_dpll(struct intel_atomic_state *state,
361 struct intel_crtc *crtc)
363 const struct intel_crtc_state *old_crtc_state =
364 intel_atomic_get_old_crtc_state(state, crtc);
365 struct intel_crtc_state *new_crtc_state =
366 intel_atomic_get_new_crtc_state(state, crtc);
368 new_crtc_state->shared_dpll = NULL;
370 if (!old_crtc_state->shared_dpll)
371 return;
373 intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
377 * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
378 * @state: atomic state
380 * This is the dpll version of drm_atomic_helper_swap_state() since the
381 * helper does not handle driver-specific global state.
383 * For consistency with atomic helpers this function does a complete swap,
384 * i.e. it also puts the current state into @state, even though there is no
385 * need for that at this moment.
387 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
389 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
390 struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
391 enum intel_dpll_id i;
/* Nothing staged in this atomic state — nothing to swap. */
393 if (!state->dpll_set)
394 return;
396 for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
397 struct intel_shared_dpll *pll =
398 &dev_priv->dpll.shared_dplls[i];
400 swap(pll->state, shared_dpll[i]);
/*
 * Read back the PCH DPLL registers into @hw_state under a display power
 * reference.  Returns true iff the PLL's VCO enable bit is set; false if
 * the power domain is off (registers unreadable).
 */
404 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
405 struct intel_shared_dpll *pll,
406 struct intel_dpll_hw_state *hw_state)
408 const enum intel_dpll_id id = pll->info->id;
409 intel_wakeref_t wakeref;
410 u32 val;
412 wakeref = intel_display_power_get_if_enabled(dev_priv,
413 POWER_DOMAIN_DISPLAY_CORE);
414 if (!wakeref)
415 return false;
417 val = intel_de_read(dev_priv, PCH_DPLL(id));
418 hw_state->dpll = val;
419 hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
420 hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
422 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
424 return val & DPLL_VCO_ENABLE;
/* Program the FP0/FP1 divider registers before the PLL is enabled. */
427 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
428 struct intel_shared_dpll *pll)
430 const enum intel_dpll_id id = pll->info->id;
432 intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
433 intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
/*
 * Warn unless at least one PCH reference clock source (SSC, non-spread or
 * superspread) is enabled in PCH_DREF_CONTROL.  IBX/CPT only.
 */
436 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
438 u32 val;
439 bool enabled;
441 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
443 val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
444 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
445 DREF_SUPERSPREAD_SOURCE_MASK));
446 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
/*
 * Enable a PCH DPLL: write the control register, wait for lock, then
 * rewrite it so the pixel multiplier takes effect (see inline comment).
 * The write/delay ordering is hardware-mandated — do not reorder.
 */
449 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
450 struct intel_shared_dpll *pll)
452 const enum intel_dpll_id id = pll->info->id;
454 /* PCH refclock must be enabled first */
455 ibx_assert_pch_refclk_enabled(dev_priv);
457 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll)
459 /* Wait for the clocks to stabilize. */
460 intel_de_posting_read(dev_priv, PCH_DPLL(id));
461 udelay(150);
463 /* The pixel multiplier can only be updated once the
464 * DPLL is enabled and the clocks are stable.
466 * So write it again.
468 intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
469 intel_de_posting_read(dev_priv, PCH_DPLL(id));
470 udelay(200);
/* Disable a PCH DPLL by clearing its control register entirely. */
473 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
474 struct intel_shared_dpll *pll)
476 const enum intel_dpll_id id = pll->info->id;
478 intel_de_write(dev_priv, PCH_DPLL(id), 0);
479 intel_de_posting_read(dev_priv, PCH_DPLL(id));
480 udelay(200);
/*
 * .get_dplls for PCH platforms.  Ironlake (IBX) has a fixed pipe→PLL
 * mapping; later PCH (CPT) can pick either PCH PLL via the shared search.
 */
483 static bool ibx_get_dpll(struct intel_atomic_state *state,
484 struct intel_crtc *crtc,
485 struct intel_encoder *encoder)
487 struct intel_crtc_state *crtc_state =
488 intel_atomic_get_new_crtc_state(state, crtc);
489 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
490 struct intel_shared_dpll *pll;
491 enum intel_dpll_id i;
493 if (HAS_PCH_IBX(dev_priv)) {
494 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
495 i = (enum intel_dpll_id) crtc->pipe;
496 pll = &dev_priv->dpll.shared_dplls[i];
498 drm_dbg_kms(&dev_priv->drm,
499 "[CRTC:%d:%s] using pre-allocated %s\n",
500 crtc->base.base.id, crtc->base.name,
501 pll->info->name);
502 } else {
503 pll = intel_find_shared_dpll(state, crtc,
504 &crtc_state->dpll_hw_state,
505 BIT(DPLL_ID_PCH_PLL_B) |
506 BIT(DPLL_ID_PCH_PLL_A));
509 if (!pll)
510 return false;
512 /* reference the pll */
513 intel_reference_shared_dpll(state, crtc,
514 pll, &crtc_state->dpll_hw_state);
516 crtc_state->shared_dpll = pll;
518 return true;
/* Debug helper: log the PCH DPLL register values captured in @hw_state. */
521 static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
522 const struct intel_dpll_hw_state *hw_state)
524 drm_dbg_kms(&dev_priv->drm,
525 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
526 "fp0: 0x%x, fp1: 0x%x\n",
527 hw_state->dpll,
528 hw_state->dpll_md,
529 hw_state->fp0,
530 hw_state->fp1);
/* PCH DPLL vtable, PLL table and manager wiring for IBX/CPT. */
533 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
534 .prepare = ibx_pch_dpll_prepare,
535 .enable = ibx_pch_dpll_enable,
536 .disable = ibx_pch_dpll_disable,
537 .get_hw_state = ibx_pch_dpll_get_hw_state,
/* Two PCH PLLs; empty entry terminates the table. */
540 static const struct dpll_info pch_plls[] = {
541 { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
542 { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
543 { },
546 static const struct intel_dpll_mgr pch_pll_mgr = {
547 .dpll_info = pch_plls,
548 .get_dplls = ibx_get_dpll,
549 .put_dplls = intel_put_dpll,
550 .dump_hw_state = ibx_dump_hw_state,
/* Enable a HSW/BDW WRPLL by writing its control register and waiting. */
553 static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
554 struct intel_shared_dpll *pll)
556 const enum intel_dpll_id id = pll->info->id;
558 intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
559 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
560 udelay(20);
/* Enable the single SPLL (there is only one, so no id indexing). */
563 static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
564 struct intel_shared_dpll *pll)
566 intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
567 intel_de_posting_read(dev_priv, SPLL_CTL);
568 udelay(20);
/*
 * Disable a WRPLL.  If this PLL was a consumer of the PCH SSC reference
 * (tracked in pch_ssc_use), reprogram the PCH refclk now that it's free.
 */
571 static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
572 struct intel_shared_dpll *pll)
574 const enum intel_dpll_id id = pll->info->id;
575 u32 val;
577 val = intel_de_read(dev_priv, WRPLL_CTL(id));
578 intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
579 intel_de_posting_read(dev_priv, WRPLL_CTL(id));
582 * Try to set up the PCH reference clock once all DPLLs
583 * that depend on it have been shut down.
585 if (dev_priv->pch_ssc_use & BIT(id))
586 intel_init_pch_refclk(dev_priv);
/* Disable the SPLL; same PCH SSC refclk cleanup as the WRPLL path. */
589 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
590 struct intel_shared_dpll *pll)
592 enum intel_dpll_id id = pll->info->id;
593 u32 val;
595 val = intel_de_read(dev_priv, SPLL_CTL);
596 intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
597 intel_de_posting_read(dev_priv, SPLL_CTL);
600 * Try to set up the PCH reference clock once all DPLLs
601 * that depend on it have been shut down.
603 if (dev_priv->pch_ssc_use & BIT(id))
604 intel_init_pch_refclk(dev_priv);
/*
 * Read back WRPLL_CTL under a power reference; true iff the PLL is
 * enabled, false if the power domain is off.
 */
607 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
608 struct intel_shared_dpll *pll,
609 struct intel_dpll_hw_state *hw_state)
611 const enum intel_dpll_id id = pll->info->id;
612 intel_wakeref_t wakeref;
613 u32 val;
615 wakeref = intel_display_power_get_if_enabled(dev_priv,
616 POWER_DOMAIN_DISPLAY_CORE);
617 if (!wakeref)
618 return false;
620 val = intel_de_read(dev_priv, WRPLL_CTL(id));
621 hw_state->wrpll = val;
623 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
625 return val & WRPLL_PLL_ENABLE;
/* SPLL counterpart of hsw_ddi_wrpll_get_hw_state(). */
628 static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
629 struct intel_shared_dpll *pll,
630 struct intel_dpll_hw_state *hw_state)
632 intel_wakeref_t wakeref;
633 u32 val;
635 wakeref = intel_display_power_get_if_enabled(dev_priv,
636 POWER_DOMAIN_DISPLAY_CORE);
637 if (!wakeref)
638 return false;
640 val = intel_de_read(dev_priv, SPLL_CTL);
641 hw_state->spll = val;
643 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
645 return val & SPLL_PLL_ENABLE;
/* LC PLL reference: 2700 MHz, also expressed in units of 2 kHz below. */
648 #define LC_FREQ 2700
649 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
/* Post divider search range (even values only, via P_INC). */
651 #define P_MIN 2
652 #define P_MAX 64
653 #define P_INC 2
655 /* Constraints for PLL good behavior */
656 #define REF_MIN 48
657 #define REF_MAX 400
658 #define VCO_MIN 2400
659 #define VCO_MAX 4800
/* Best (p, n2, r2) divider triple found so far; p == 0 means "none yet". */
661 struct hsw_wrpll_rnp {
662 unsigned p, n2, r2;
/*
 * hsw_wrpll_get_budget_for_freq - PPM error budget for a WRPLL target clock
 * @clock: target port clock in Hz
 *
 * The WRPLL divider search scores candidate (r2, n2, p) triples against a
 * per-frequency error budget.  Common display clocks must be hit exactly
 * (budget 0), a handful of known frequencies tolerate a looser budget,
 * and everything else gets the default of 1000 PPM.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	/* Clocks that must be matched exactly. */
	static const int exact[] = {
		25175000, 25200000, 27000000, 27027000, 37762500, 37800000,
		40500000, 40541000, 54000000, 54054000, 59341000, 59400000,
		72000000, 74176000, 74250000, 81000000, 81081000, 89012000,
		89100000, 108000000, 108108000, 111264000, 111375000,
		148352000, 148500000, 162000000, 162162000, 222525000,
		222750000, 296703000, 297000000,
	};
	static const int loose_1500[] = {
		233500000, 245250000, 247750000, 253250000, 298000000,
	};
	static const int loose_2000[] = {
		169128000, 169500000, 179500000, 202000000,
	};
	static const int loose_4000[] = {
		256250000, 262500000, 270000000, 272500000, 273750000,
		280750000, 281250000, 286000000, 291750000,
	};
	static const int loose_5000[] = {
		267250000, 268500000,
	};
	static const struct {
		const int *clocks;
		size_t count;
		unsigned budget;
	} groups[] = {
		{ exact, sizeof(exact) / sizeof(exact[0]), 0 },
		{ loose_1500, sizeof(loose_1500) / sizeof(loose_1500[0]), 1500 },
		{ loose_2000, sizeof(loose_2000) / sizeof(loose_2000[0]), 2000 },
		{ loose_4000, sizeof(loose_4000) / sizeof(loose_4000[0]), 4000 },
		{ loose_5000, sizeof(loose_5000) / sizeof(loose_5000[0]), 5000 },
	};
	size_t g, i;

	for (g = 0; g < sizeof(groups) / sizeof(groups[0]); g++) {
		for (i = 0; i < groups[g].count; i++) {
			if (clock == groups[g].clocks[i])
				return groups[g].budget;
		}
	}

	/* Any other clock: default 1000 PPM budget. */
	return 1000;
}
/*
 * Compare a candidate (r2, n2, p) divider triple against the current best
 * and keep whichever is better.  The scoring (see the long comment below)
 * is done in integer arithmetic: a/b are the PPM budgets scaled by the
 * candidate/best denominators, c/d the corresponding scaled errors.
 */
739 static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
740 unsigned int r2, unsigned int n2,
741 unsigned int p,
742 struct hsw_wrpll_rnp *best)
744 u64 a, b, c, d, diff, diff_best;
746 /* No best (r,n,p) yet */
747 if (best->p == 0) {
748 best->p = p;
749 best->n2 = n2;
750 best->r2 = r2;
751 return;
755 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
756 * freq2k.
758 * delta = 1e6 *
759 * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
760 * freq2k;
762 * and we would like delta <= budget.
764 * If the discrepancy is above the PPM-based budget, always prefer to
765 * improve upon the previous solution. However, if you're within the
766 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
768 a = freq2k * budget * p * r2;
769 b = freq2k * budget * best->p * best->r2;
770 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
771 diff_best = abs_diff(freq2k * best->p * best->r2,
772 LC_FREQ_2K * best->n2);
773 c = 1000000 * diff;
774 d = 1000000 * diff_best;
776 if (a < c && b < d) {
777 /* If both are above the budget, pick the closer */
778 if (best->p * best->r2 * diff < p * r2 * diff_best) {
779 best->p = p;
780 best->n2 = n2;
781 best->r2 = r2;
783 } else if (a >= c && b < d) {
784 /* If A is below the threshold but B is above it? Update. */
785 best->p = p;
786 best->n2 = n2;
787 best->r2 = r2;
788 } else if (a >= c && b >= d) {
789 /* Both are below the limit, so pick the higher n2/(r2*r2) */
790 if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
791 best->p = p;
792 best->n2 = n2;
793 best->r2 = r2;
796 /* Otherwise a < c && b >= d, do nothing */
/*
 * Exhaustively search (r2, n2, p) divider triples for the WRPLL that best
 * hit @clock, subject to the REF_MIN/MAX and VCO_MIN/MAX constraints.
 * Results are returned via the three out-parameters.
 */
799 static void
800 hsw_ddi_calculate_wrpll(int clock /* in Hz */,
801 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
803 u64 freq2k;
804 unsigned p, n2, r2;
805 struct hsw_wrpll_rnp best = { 0, 0, 0 };
806 unsigned budget;
/* Work in units of 2 kHz to keep the arithmetic integral. */
808 freq2k = clock / 100;
810 budget = hsw_wrpll_get_budget_for_freq(clock);
812 /* Special case handling for 540 pixel clock: bypass WR PLL entirely
813 * and directly pass the LC PLL to it. */
814 if (freq2k == 5400000) {
815 *n2_out = 2;
816 *p_out = 1;
817 *r2_out = 2;
818 return;
822 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
823 * the WR PLL.
825 * We want R so that REF_MIN <= Ref <= REF_MAX.
826 * Injecting R2 = 2 * R gives:
827 * REF_MAX * r2 > LC_FREQ * 2 and
828 * REF_MIN * r2 < LC_FREQ * 2
830 * Which means the desired boundaries for r2 are:
831 * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
834 for (r2 = LC_FREQ * 2 / REF_MAX + 1;
835 r2 <= LC_FREQ * 2 / REF_MIN;
836 r2++) {
839 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
841 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
842 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
843 * VCO_MAX * r2 > n2 * LC_FREQ and
844 * VCO_MIN * r2 < n2 * LC_FREQ)
846 * Which means the desired boundaries for n2 are:
847 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
849 for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
850 n2 <= VCO_MAX * r2 / LC_FREQ;
851 n2++) {
853 for (p = P_MIN; p <= P_MAX; p += P_INC)
854 hsw_wrpll_update_rnp(freq2k, budget,
855 r2, n2, p, &best);
859 *n2_out = best.n2;
860 *p_out = best.p;
861 *r2_out = best.r2;
/*
 * Compute the WRPLL dividers for the CRTC's port clock, stage the register
 * value in dpll_hw_state, and reserve one of the two WRPLLs for it.
 */
864 static struct intel_shared_dpll *
865 hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
866 struct intel_crtc *crtc)
868 struct intel_crtc_state *crtc_state =
869 intel_atomic_get_new_crtc_state(state, crtc);
870 struct intel_shared_dpll *pll;
871 u32 val;
872 unsigned int p, n2, r2;
/* port_clock is in kHz; the calculator wants Hz. */
874 hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
876 val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
877 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
878 WRPLL_DIVIDER_POST(p);
880 crtc_state->dpll_hw_state.wrpll = val;
882 pll = intel_find_shared_dpll(state, crtc,
883 &crtc_state->dpll_hw_state,
884 BIT(DPLL_ID_WRPLL2) |
885 BIT(DPLL_ID_WRPLL1));
887 if (!pll)
888 return NULL;
890 return pll;
/*
 * Decode a WRPLL control value back into an output clock (in kHz):
 * pick the reference from the REF field, then apply the r/p/n dividers.
 */
893 static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
894 const struct intel_shared_dpll *pll,
895 const struct intel_dpll_hw_state *pll_state)
897 int refclk;
898 int n, p, r;
899 u32 wrpll = pll_state->wrpll;
901 switch (wrpll & WRPLL_REF_MASK) {
902 case WRPLL_REF_SPECIAL_HSW:
903 /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
904 if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
905 refclk = dev_priv->dpll.ref_clks.nssc;
906 break;
908 fallthrough;
909 case WRPLL_REF_PCH_SSC:
911 * We could calculate spread here, but our checking
912 * code only cares about 5% accuracy, and spread is a max of
913 * 0.5% downspread.
915 refclk = dev_priv->dpll.ref_clks.ssc;
916 break;
917 case WRPLL_REF_LCPLL:
918 refclk = 2700000;
919 break;
920 default:
921 MISSING_CASE(wrpll);
922 return 0;
925 r = wrpll & WRPLL_DIVIDER_REF_MASK;
926 p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
927 n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
929 /* Convert to KHz, p & r have a fixed point portion */
930 return (refclk * n / 10) / (p * r) * 2;
/*
 * DP on HSW uses the fixed-frequency LCPLLs: map the link clock to the
 * matching always-on LCPLL, or NULL for an unsupported clock.
 */
933 static struct intel_shared_dpll *
934 hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
936 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
937 struct intel_shared_dpll *pll;
938 enum intel_dpll_id pll_id;
939 int clock = crtc_state->port_clock;
941 switch (clock / 2) {
942 case 81000:
943 pll_id = DPLL_ID_LCPLL_810;
944 break;
945 case 135000:
946 pll_id = DPLL_ID_LCPLL_1350;
947 break;
948 case 270000:
949 pll_id = DPLL_ID_LCPLL_2700;
950 break;
951 default:
952 drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
953 clock);
954 return NULL;
957 pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
959 if (!pll)
960 return NULL;
962 return pll;
/* Link clock (kHz) of a fixed LCPLL is implied by its id; result is *2. */
965 static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
966 const struct intel_shared_dpll *pll,
967 const struct intel_dpll_hw_state *pll_state)
969 int link_clock = 0;
971 switch (pll->info->id) {
972 case DPLL_ID_LCPLL_810:
973 link_clock = 81000;
974 break;
975 case DPLL_ID_LCPLL_1350:
976 link_clock = 135000;
977 break;
978 case DPLL_ID_LCPLL_2700:
979 link_clock = 270000;
980 break;
981 default:
982 drm_WARN(&i915->drm, 1, "bad port clock sel\n");
983 break;
986 return link_clock * 2;
/*
 * Analog (CRT) output uses the SPLL at a fixed 1350 MHz; any other port
 * clock is rejected with a warning.
 */
989 static struct intel_shared_dpll *
990 hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
991 struct intel_crtc *crtc)
993 struct intel_crtc_state *crtc_state =
994 intel_atomic_get_new_crtc_state(state, crtc);
996 if (drm_WARN_ON(crtc->base.dev, crtc_state->port_clock / 2 != 135000))
997 return NULL;
999 crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
1000 SPLL_REF_MUXED_SSC;
1002 return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
1003 BIT(DPLL_ID_SPLL));
/* Decode the SPLL frequency select field into a link clock; result is *2. */
1006 static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
1007 const struct intel_shared_dpll *pll,
1008 const struct intel_dpll_hw_state *pll_state)
1010 int link_clock = 0;
1012 switch (pll_state->spll & SPLL_FREQ_MASK) {
1013 case SPLL_FREQ_810MHz:
1014 link_clock = 81000;
1015 break;
1016 case SPLL_FREQ_1350MHz:
1017 link_clock = 135000;
1018 break;
1019 case SPLL_FREQ_2700MHz:
1020 link_clock = 270000;
1021 break;
1022 default:
1023 drm_WARN(&i915->drm, 1, "bad spll freq\n");
1024 break;
1027 return link_clock * 2;
/*
 * .get_dplls for HSW/BDW: dispatch on output type — HDMI uses a WRPLL,
 * DP a fixed LCPLL, analog the SPLL — then reference the chosen PLL.
 */
1030 static bool hsw_get_dpll(struct intel_atomic_state *state,
1031 struct intel_crtc *crtc,
1032 struct intel_encoder *encoder)
1034 struct intel_crtc_state *crtc_state =
1035 intel_atomic_get_new_crtc_state(state, crtc);
1036 struct intel_shared_dpll *pll;
1038 memset(&crtc_state->dpll_hw_state, 0,
1039 sizeof(crtc_state->dpll_hw_state));
1041 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
1042 pll = hsw_ddi_wrpll_get_dpll(state, crtc);
1043 else if (intel_crtc_has_dp_encoder(crtc_state))
1044 pll = hsw_ddi_lcpll_get_dpll(crtc_state);
1045 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
1046 pll = hsw_ddi_spll_get_dpll(state, crtc);
1047 else
1048 return false;
1050 if (!pll)
1051 return false;
1053 intel_reference_shared_dpll(state, crtc,
1054 pll, &crtc_state->dpll_hw_state);
1056 crtc_state->shared_dpll = pll;
1058 return true;
/* Cache the SSC/non-SSC reference clocks (kHz) read from fuse straps. */
1061 static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
1063 i915->dpll.ref_clks.ssc = 135000;
1064 /* Non-SSC is only used on non-ULT HSW. */
1065 if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
1066 i915->dpll.ref_clks.nssc = 24000;
1067 else
1068 i915->dpll.ref_clks.nssc = 135000;
/* Debug helper: log the HSW WRPLL/SPLL register values in @hw_state. */
1071 static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
1072 const struct intel_dpll_hw_state *hw_state)
1074 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
1075 hw_state->wrpll, hw_state->spll);
/* Hook tables for the HSW WRPLLs and SPLL (no .prepare needed). */
1078 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
1079 .enable = hsw_ddi_wrpll_enable,
1080 .disable = hsw_ddi_wrpll_disable,
1081 .get_hw_state = hsw_ddi_wrpll_get_hw_state,
1082 .get_freq = hsw_ddi_wrpll_get_freq,
1085 static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
1086 .enable = hsw_ddi_spll_enable,
1087 .disable = hsw_ddi_spll_disable,
1088 .get_hw_state = hsw_ddi_spll_get_hw_state,
1089 .get_freq = hsw_ddi_spll_get_freq,
/*
 * The LCPLLs are fixed, always-on PLLs (see INTEL_DPLL_ALWAYS_ON in
 * hsw_plls), so enable/disable are deliberate no-ops and get_hw_state
 * always reports enabled.
 */
1092 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
1093 struct intel_shared_dpll *pll)
1097 static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
1098 struct intel_shared_dpll *pll)
1102 static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
1103 struct intel_shared_dpll *pll,
1104 struct intel_dpll_hw_state *hw_state)
1106 return true;
1109 static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
1110 .enable = hsw_ddi_lcpll_enable,
1111 .disable = hsw_ddi_lcpll_disable,
1112 .get_hw_state = hsw_ddi_lcpll_get_hw_state,
1113 .get_freq = hsw_ddi_lcpll_get_freq,
/* HSW/BDW PLL table (sentinel-terminated) and manager wiring. */
1116 static const struct dpll_info hsw_plls[] = {
1117 { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
1118 { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
1119 { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
1120 { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
1121 { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
1122 { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
1123 { },
1126 static const struct intel_dpll_mgr hsw_pll_mgr = {
1127 .dpll_info = hsw_plls,
1128 .get_dplls = hsw_get_dpll,
1129 .put_dplls = intel_put_dpll,
1130 .update_ref_clks = hsw_update_dpll_ref_clks,
1131 .dump_hw_state = hsw_dump_hw_state,
/* Per-PLL register set for SKL: control plus the two config registers. */
1134 struct skl_dpll_regs {
1135 i915_reg_t ctl, cfgcr1, cfgcr2;
1138 /* this array is indexed by the *shared* pll id */
1139 static const struct skl_dpll_regs skl_dpll_regs[4] = {
1141 /* DPLL 0 */
1142 .ctl = LCPLL1_CTL,
1143 /* DPLL 0 doesn't support HDMI mode */
1146 /* DPLL 1 */
1147 .ctl = LCPLL2_CTL,
1148 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
1149 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
1152 /* DPLL 2 */
1153 .ctl = WRPLL_CTL(0),
1154 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
1155 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
1158 /* DPLL 3 */
1159 .ctl = WRPLL_CTL(1),
1160 .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
1161 .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
/*
 * Update this PLL's fields in the shared DPLL_CTRL1 register from the
 * cached hw state, leaving the other PLLs' fields untouched.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = intel_de_read(dev_priv, DPLL_CTRL1);

	/* Each PLL owns a 6-bit field: HDMI mode, SSC and link-rate bits. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	intel_de_write(dev_priv, DPLL_CTRL1, val);
	intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
/*
 * Program CTRL1 and the per-PLL cfgcr registers, set the enable bit and
 * wait (up to 5 ms) for the lock indication in DPLL_STATUS.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	intel_de_posting_read(dev_priv, regs[id].cfgcr1);
	intel_de_posting_read(dev_priv, regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
/*
 * DPLL0 is always enabled (it drives CDCLK, see skl_ddi_dpll0_get_hw_state),
 * so only its CTRL1 fields are programmed here.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
/* Clear the PLL enable bit; cfgcr/CTRL1 contents are left as-is. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	intel_de_write(dev_priv, regs[id].ctl,
		       intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	intel_de_posting_read(dev_priv, regs[id].ctl);
}
/* DPLL0 must stay on (it drives CDCLK); disabling is a deliberate no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
/*
 * Read back the PLL's current hardware state into @hw_state.
 * Returns false if display power is off or the PLL is not enabled.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	/* Extract this PLL's 6-bit field from the shared CTRL1 register. */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
		hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
/*
 * Read back DPLL0's state: only the CTRL1 field is relevant since DPLL0
 * doesn't support HDMI mode. Warns if the PLL is unexpectedly off.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = intel_de_read(dev_priv, regs[id].ctl);
	if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = intel_de_read(dev_priv, DPLL_CTRL1);
	/* Extract this PLL's 6-bit field from the shared CTRL1 register. */
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
/* Running best-candidate state for the WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1303 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1305 memset(ctx, 0, sizeof(*ctx));
1307 ctx->min_deviation = U64_MAX;
1310 /* DCO freq must be within +1%/-6% of the DCO central freq */
1311 #define SKL_DCO_MAX_PDEVIATION 100
1312 #define SKL_DCO_MAX_NDEVIATION 600
1314 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1315 u64 central_freq,
1316 u64 dco_freq,
1317 unsigned int divider)
1319 u64 deviation;
1321 deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1322 central_freq);
1324 /* positive deviation */
1325 if (dco_freq >= central_freq) {
1326 if (deviation < SKL_DCO_MAX_PDEVIATION &&
1327 deviation < ctx->min_deviation) {
1328 ctx->min_deviation = deviation;
1329 ctx->central_freq = central_freq;
1330 ctx->dco_freq = dco_freq;
1331 ctx->p = divider;
1333 /* negative deviation */
1334 } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1335 deviation < ctx->min_deviation) {
1336 ctx->min_deviation = deviation;
1337 ctx->central_freq = central_freq;
1338 ctx->dco_freq = dco_freq;
1339 ctx->p = divider;
/*
 * Decompose an overall WRPLL divider @p into the three hardware divider
 * stages P0/P1/P2 (with p == p0 * p1 * p2).
 *
 * For a @p outside the supported set none of the outputs are written, so
 * callers must pre-initialize them (skl_ddi_calculate_wrpll() zeroes them).
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	/* even dividers */
	if (p % 2 == 0) {
		static const unsigned int divs[] = { 2, 3, 7 };
		unsigned int half = p / 2;
		unsigned int k;

		if (half == 1 || half == 2 || half == 3 || half == 5) {
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			return;
		}

		/* First divisor of half among 2, 3, 7 becomes P0. */
		for (k = 0; k < 3; k++) {
			if (half % divs[k] == 0) {
				*p0 = divs[k];
				*p1 = half / divs[k];
				*p2 = 2;
				return;
			}
		}
		return;
	}

	/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
	switch (p) {
	case 3:
	case 9:
		*p0 = 3;
		*p1 = 1;
		*p2 = p / 3;
		break;
	case 5:
	case 7:
		*p0 = p;
		*p1 = 1;
		*p2 = 1;
		break;
	case 15:
		*p0 = 3;
		*p1 = 1;
		*p2 = 5;
		break;
	case 21:
		*p0 = 7;
		*p1 = 1;
		*p2 = 3;
		break;
	case 35:
		*p0 = 7;
		*p1 = 1;
		*p2 = 5;
		break;
	}
}
/* Encoded WRPLL settings ready to be packed into CFGCR1/CFGCR2. */
struct skl_wrpll_params {
	u32 dco_fraction;
	u32 dco_integer;
	u32 qdiv_ratio;
	u32 qdiv_mode;
	u32 kdiv;
	u32 pdiv;
	u32 central_freq;
};
/*
 * Translate the chosen central frequency and P0/P1/P2 dividers into the
 * register field encodings, and compute the integer/fractional DCO ratio
 * relative to @ref_clock.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      int ref_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	params->qdiv_ratio = p1;
	/* QDiv is only engaged when the ratio is not 1. */
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match bspec
	 */
	params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
/*
 * Search all (divider, DCO central frequency) combinations for the one
 * whose DCO frequency deviates least from a central frequency, preferring
 * even dividers, then encode the result into @wrpll_params.
 * Returns false if no divider satisfies the deviation limits.
 */
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
			int ref_clock,
			struct skl_wrpll_params *wrpll_params)
{
	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
	u64 dco_central_freq[3] = { 8400000000ULL,
				    9000000000ULL,
				    9600000000ULL };
	static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
					     24, 28, 30, 32, 36, 40, 42, 44,
					     48, 52, 54, 56, 60, 64, 66, 68,
					     70, 72, 76, 78, 80, 84, 88, 90,
					     92, 96, 98 };
	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
	static const struct {
		const int *list;
		int n_dividers;
	} dividers[] = {
		{ even_dividers, ARRAY_SIZE(even_dividers) },
		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
	};
	struct skl_wrpll_context ctx;
	unsigned int dco, d, i;
	unsigned int p0, p1, p2;

	skl_wrpll_context_init(&ctx);

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
			for (i = 0; i < dividers[d].n_dividers; i++) {
				unsigned int p = dividers[d].list[i];
				u64 dco_freq = p * afe_clock;

				skl_wrpll_try_divider(&ctx,
						      dco_central_freq[dco],
						      dco_freq,
						      p);
				/*
				 * Skip the remaining dividers if we're sure to
				 * have found the definitive divider, we can't
				 * improve a 0 deviation.
				 */
				if (ctx.min_deviation == 0)
					goto skip_remaining_dividers;
			}
		}

skip_remaining_dividers:
		/*
		 * If a solution is found with an even divider, prefer
		 * this one.
		 */
		if (d == 0 && ctx.p)
			break;
	}

	if (!ctx.p) {
		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
		return false;
	}

	/*
	 * gcc incorrectly analyses that these can be used without being
	 * initialized. To be fair, it's hard to guess.
	 */
	p0 = p1 = p2 = 0;
	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
	skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
				  ctx.central_freq, p0, p1, p2);

	return true;
}
/*
 * Compute and store the WRPLL (HDMI mode) configuration for the crtc's
 * port clock into crtc_state->dpll_hw_state. Returns false if no valid
 * WRPLL dividers exist for the requested clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the search works in Hz. */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     i915->dpll.ref_clks.nssc,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
/*
 * Decode the cfgcr1/cfgcr2 state back into the output frequency of a
 * WRPLL (HDMI-mode PLL). Returns 0 on an undecodable divider encoding.
 */
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int ref_clock = i915->dpll.ref_clks.nssc;
	u32 p0, p1, p2, dco_freq;

	p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
	p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;

	/* QDiv ratio only applies when QDiv mode is enabled. */
	if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
		p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
	else
		p1 = 1;

	switch (p0) {
	case DPLL_CFGCR2_PDIV_1:
		p0 = 1;
		break;
	case DPLL_CFGCR2_PDIV_2:
		p0 = 2;
		break;
	case DPLL_CFGCR2_PDIV_3:
		p0 = 3;
		break;
	case DPLL_CFGCR2_PDIV_7_INVALID:
		/*
		 * Incorrect ASUS-Z170M BIOS setting, the HW seems to ignore bit#0,
		 * handling it the same way as PDIV_7.
		 */
		drm_dbg_kms(&i915->drm, "Invalid WRPLL PDIV divider value, fixing it.\n");
		fallthrough;
	case DPLL_CFGCR2_PDIV_7:
		p0 = 7;
		break;
	default:
		MISSING_CASE(p0);
		return 0;
	}

	switch (p2) {
	case DPLL_CFGCR2_KDIV_5:
		p2 = 5;
		break;
	case DPLL_CFGCR2_KDIV_2:
		p2 = 2;
		break;
	case DPLL_CFGCR2_KDIV_3:
		p2 = 3;
		break;
	case DPLL_CFGCR2_KDIV_1:
		p2 = 1;
		break;
	default:
		MISSING_CASE(p2);
		return 0;
	}

	dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
		   ref_clock;

	/* Fraction field is a 15-bit value scaled by 1/0x8000. */
	dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
		    ref_clock / 0x8000;

	if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
		return 0;

	/* Pixel clock is DCO / (p0 * p1 * p2) / 5 (AFE clock is 5x pixel). */
	return dco_freq / (p0 * p1 * p2 * 5);
}
/*
 * Store the CTRL1 link-rate configuration for a DP/eDP output into
 * crtc_state->dpll_hw_state. Unknown rates leave only the override bit
 * set; this function always reports success.
 */
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1;

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
	switch (crtc_state->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
		break;
	/* eDP 1.4 rates */
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
		break;
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;

	return true;
}
/*
 * Decode a DP-mode CTRL1 link-rate field into the port clock in kHz
 * (link rate * 2). Returns 0 (with a WARN) on an unknown encoding.
 */
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
				  const struct intel_shared_dpll *pll,
				  const struct intel_dpll_hw_state *pll_state)
{
	int link_clock = 0;

	switch ((pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0)) >>
		DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
	case DPLL_CTRL1_LINK_RATE_810:
		link_clock = 81000;
		break;
	case DPLL_CTRL1_LINK_RATE_1080:
		link_clock = 108000;
		break;
	case DPLL_CTRL1_LINK_RATE_1350:
		link_clock = 135000;
		break;
	case DPLL_CTRL1_LINK_RATE_1620:
		link_clock = 162000;
		break;
	case DPLL_CTRL1_LINK_RATE_2160:
		link_clock = 216000;
		break;
	case DPLL_CTRL1_LINK_RATE_2700:
		link_clock = 270000;
		break;
	default:
		drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
		break;
	}

	return link_clock * 2;
}
/*
 * Compute the PLL state for the crtc's output type and reserve a matching
 * shared DPLL: eDP is restricted to the always-on DPLL0, everything else
 * picks from DPLL1-3. Returns false if state computation or reservation
 * fails.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			drm_dbg_kms(&i915->drm,
				    "Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		/* Other output types have no PLL to program here. */
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1776 static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
1777 const struct intel_shared_dpll *pll,
1778 const struct intel_dpll_hw_state *pll_state)
1781 * ctrl1 register is already shifted for each pll, just use 0 to get
1782 * the internal shift for each field
1784 if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
1785 return skl_ddi_wrpll_get_freq(i915, pll, pll_state);
1786 else
1787 return skl_ddi_lcpll_get_freq(i915, pll, pll_state);
/* SKL PLLs run off the CDCLK reference clock; there is no SSC reference. */
static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
/* Log the cached SKL PLL register state for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
/* PLL ops for SKL DPLL1-3 (fully controllable PLLs). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
/* PLL ops for SKL DPLL0, which is always on and only gets CTRL1 updates. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
	.get_freq = skl_ddi_pll_get_freq,
};
/* SKL PLL pool: always-on DPLL0 plus three general-purpose DPLLs. */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};
/* Platform hooks wiring the SKL PLL pool into the shared-DPLL framework. */
static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.update_ref_clks = skl_update_dpll_ref_clks,
	.dump_hw_state = skl_dump_hw_state,
};
/*
 * Program and enable a BXT/GLK port PLL: select the non-SSC reference,
 * (on GLK) power up the PLL, write the divider/coefficient registers in
 * the DPIO PHY, trigger a recalibration, enable the PLL, wait for lock
 * and finally program the lane stagger settings.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

	if (IS_GEMINILAKE(dev_priv)) {
		/* GLK requires an explicit PLL power-up step before enabling. */
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);

	temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		drm_err(&dev_priv->drm, "PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
/* Disable a BXT/GLK port PLL; on GLK also power it down afterwards. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
	intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
				  PORT_PLL_POWER_STATE), 200))
			drm_err(&dev_priv->drm,
				"Power state not reset for PLL:%d\n", port);
	}
}
/*
 * Read back the BXT/GLK port PLL state into @hw_state, masking each
 * register to the fields the driver programs. Returns false if display
 * power is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = intel_de_read(dev_priv,
					  BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		drm_dbg(&dev_priv->drm,
			"lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
			hw_state->pcsdw12,
			intel_de_read(dev_priv,
				      BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* rate this entry applies to, matched against port_clock */
	u32 p1;
	u32 p2;
	u32 m2_int;		/* integer part of the M2 divider */
	u32 m2_frac;		/* fractional part of the M2 divider */
	bool m2_frac_en;
	u32 n;

	int vco;
};
/* pre-calculated values for DP linkrates */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{ 162000, 4, 2, 32, 1677722, 1, 1 },
	{ 270000, 4, 1, 27,       0, 0, 1 },
	{ 540000, 2, 1, 27,       0, 0, 1 },
	{ 216000, 3, 2, 32, 1677722, 1, 1 },
	{ 243000, 4, 1, 24, 1258291, 1, 1 },
	{ 324000, 4, 1, 32, 1677722, 1, 1 },
	{ 432000, 3, 1, 32, 1677722, 1, 1 }
};
/*
 * Compute PLL dividers for an HDMI clock via the generic i9xx-style dpll
 * search. Returns false if no divider combination matches the requested
 * port clock.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
			crtc_state->port_clock,
			pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	drm_WARN_ON(&i915->drm, best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* m2 is split at bit 22 into integer and fractional parts. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
2117 static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
2118 struct bxt_clk_div *clk_div)
2120 int clock = crtc_state->port_clock;
2121 int i;
2123 *clk_div = bxt_dp_clk_val[0];
2124 for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
2125 if (bxt_dp_clk_val[i].clock == clock) {
2126 *clk_div = bxt_dp_clk_val[i];
2127 break;
2131 clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
/*
 * Translate the computed dividers into the full set of PORT_PLL register
 * values in crtc_state->dpll_hw_state, picking loop-filter coefficients
 * and lane stagger by VCO/clock range. Returns false for a VCO outside
 * the supported ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
		   (vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		drm_err(&i915->drm, "Invalid VCO\n");
		return false;
	}

	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
/* Compute and store the PLL state for a DP output (table lookup never fails). */
static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
2214 static bool
2215 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2217 struct bxt_clk_div clk_div = {};
2219 bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
2221 return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
/*
 * Reassemble the dpll divider values from the cached PLL registers and
 * let chv_calc_dpll_params() compute the resulting port clock.
 */
static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
				const struct intel_shared_dpll *pll,
				const struct intel_dpll_hw_state *pll_state)
{
	struct dpll clock;

	clock.m1 = 2;
	/* m2 integer lives above bit 22, fraction (if enabled) below it. */
	clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
	if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
		clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
	clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
	clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
	clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;

	return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
}
/*
 * Compute the PLL state for the crtc's output type and take the PLL that
 * is hardwired to the encoder's port (no sharing on BXT).
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
		    crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2274 static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
2276 i915->dpll.ref_clks.ssc = 100000;
2277 i915->dpll.ref_clks.nssc = 100000;
2278 /* DSI non-SSC ref 19.2MHz */
2281 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
2282 const struct intel_dpll_hw_state *hw_state)
2284 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
2285 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
2286 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
2287 hw_state->ebb0,
2288 hw_state->ebb4,
2289 hw_state->pll0,
2290 hw_state->pll1,
2291 hw_state->pll2,
2292 hw_state->pll3,
2293 hw_state->pll6,
2294 hw_state->pll8,
2295 hw_state->pll9,
2296 hw_state->pll10,
2297 hw_state->pcsdw12);
2300 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
2301 .enable = bxt_ddi_pll_enable,
2302 .disable = bxt_ddi_pll_disable,
2303 .get_hw_state = bxt_ddi_pll_get_hw_state,
2304 .get_freq = bxt_ddi_pll_get_freq,
2307 static const struct dpll_info bxt_plls[] = {
2308 { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2309 { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2310 { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2311 { },
2314 static const struct intel_dpll_mgr bxt_pll_mgr = {
2315 .dpll_info = bxt_plls,
2316 .get_dplls = bxt_get_dpll,
2317 .put_dplls = intel_put_dpll,
2318 .update_ref_clks = bxt_update_dpll_ref_clks,
2319 .dump_hw_state = bxt_dump_hw_state,
2322 static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
2323 struct intel_shared_dpll *pll)
2325 const enum intel_dpll_id id = pll->info->id;
2326 u32 val;
2328 /* 1. Enable DPLL power in DPLL_ENABLE. */
2329 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2330 val |= PLL_POWER_ENABLE;
2331 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2333 /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
2334 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
2335 PLL_POWER_STATE, 5))
2336 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
2339 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
2340 * select DP mode, and set DP link rate.
2342 val = pll->state.hw_state.cfgcr0;
2343 intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
2345 /* 4. Reab back to ensure writes completed */
2346 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
2348 /* 3. Configure DPLL_CFGCR0 */
2349 /* Avoid touch CFGCR1 if HDMI mode is not enabled */
2350 if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
2351 val = pll->state.hw_state.cfgcr1;
2352 intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
2353 /* 4. Reab back to ensure writes completed */
2354 intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
2358 * 5. If the frequency will result in a change to the voltage
2359 * requirement, follow the Display Voltage Frequency Switching
2360 * Sequence Before Frequency Change
2362 * Note: DVFS is actually handled via the cdclk code paths,
2363 * hence we do nothing here.
2366 /* 6. Enable DPLL in DPLL_ENABLE. */
2367 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2368 val |= PLL_ENABLE;
2369 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2371 /* 7. Wait for PLL lock status in DPLL_ENABLE. */
2372 if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2373 drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
2376 * 8. If the frequency will result in a change to the voltage
2377 * requirement, follow the Display Voltage Frequency Switching
2378 * Sequence After Frequency Change
2380 * Note: DVFS is actually handled via the cdclk code paths,
2381 * hence we do nothing here.
2385 * 9. turn on the clock for the DDI and map the DPLL to the DDI
2386 * Done at intel_ddi_clk_select
2390 static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
2391 struct intel_shared_dpll *pll)
2393 const enum intel_dpll_id id = pll->info->id;
2394 u32 val;
2397 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
2398 * Done at intel_ddi_post_disable
2402 * 2. If the frequency will result in a change to the voltage
2403 * requirement, follow the Display Voltage Frequency Switching
2404 * Sequence Before Frequency Change
2406 * Note: DVFS is actually handled via the cdclk code paths,
2407 * hence we do nothing here.
2410 /* 3. Disable DPLL through DPLL_ENABLE. */
2411 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2412 val &= ~PLL_ENABLE;
2413 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2415 /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
2416 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
2417 drm_err(&dev_priv->drm, "PLL %d locked\n", id);
2420 * 5. If the frequency will result in a change to the voltage
2421 * requirement, follow the Display Voltage Frequency Switching
2422 * Sequence After Frequency Change
2424 * Note: DVFS is actually handled via the cdclk code paths,
2425 * hence we do nothing here.
2428 /* 6. Disable DPLL power in DPLL_ENABLE. */
2429 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2430 val &= ~PLL_POWER_ENABLE;
2431 intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
2433 /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
2434 if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
2435 PLL_POWER_STATE, 5))
2436 drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
2439 static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
2440 struct intel_shared_dpll *pll,
2441 struct intel_dpll_hw_state *hw_state)
2443 const enum intel_dpll_id id = pll->info->id;
2444 intel_wakeref_t wakeref;
2445 u32 val;
2446 bool ret;
2448 wakeref = intel_display_power_get_if_enabled(dev_priv,
2449 POWER_DOMAIN_DISPLAY_CORE);
2450 if (!wakeref)
2451 return false;
2453 ret = false;
2455 val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
2456 if (!(val & PLL_ENABLE))
2457 goto out;
2459 val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
2460 hw_state->cfgcr0 = val;
2462 /* avoid reading back stale values if HDMI mode is not enabled */
2463 if (val & DPLL_CFGCR0_HDMI_MODE) {
2464 hw_state->cfgcr1 = intel_de_read(dev_priv,
2465 CNL_DPLL_CFGCR1(id));
2467 ret = true;
2469 out:
2470 intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
2472 return ret;
/*
 * Decompose an overall WRPLL post-divider @bestdiv into the P/Q/K
 * multiplier triple the hardware supports, such that
 * pdiv * qdiv * kdiv == bestdiv. Even dividers use K=2 with the P/Q
 * combination that matches; odd dividers are either prime (P only) or
 * 3*prime (K=3).
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	/* even dividers */
	if (bestdiv % 2 == 0) {
		if (bestdiv == 2) {
			*pdiv = 2;
			*qdiv = 1;
			*kdiv = 1;
		} else if (bestdiv % 4 == 0) {
			*pdiv = 2;
			*qdiv = bestdiv / 4;
			*kdiv = 2;
		} else if (bestdiv % 6 == 0) {
			*pdiv = 3;
			*qdiv = bestdiv / 6;
			*kdiv = 2;
		} else if (bestdiv % 5 == 0) {
			*pdiv = 5;
			*qdiv = bestdiv / 10;
			*kdiv = 2;
		} else if (bestdiv % 14 == 0) {
			*pdiv = 7;
			*qdiv = bestdiv / 14;
			*kdiv = 2;
		}
	} else {
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else { /* 9, 15, 21 */
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
	}
}
2514 static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
2515 u32 dco_freq, u32 ref_freq,
2516 int pdiv, int qdiv, int kdiv)
2518 u32 dco;
2520 switch (kdiv) {
2521 case 1:
2522 params->kdiv = 1;
2523 break;
2524 case 2:
2525 params->kdiv = 2;
2526 break;
2527 case 3:
2528 params->kdiv = 4;
2529 break;
2530 default:
2531 WARN(1, "Incorrect KDiv\n");
2534 switch (pdiv) {
2535 case 2:
2536 params->pdiv = 1;
2537 break;
2538 case 3:
2539 params->pdiv = 2;
2540 break;
2541 case 5:
2542 params->pdiv = 4;
2543 break;
2544 case 7:
2545 params->pdiv = 8;
2546 break;
2547 default:
2548 WARN(1, "Incorrect PDiv\n");
2551 WARN_ON(kdiv != 2 && qdiv != 1);
2553 params->qdiv_ratio = qdiv;
2554 params->qdiv_mode = (qdiv == 1) ? 0 : 1;
2556 dco = div_u64((u64)dco_freq << 15, ref_freq);
2558 params->dco_integer = dco >> 15;
2559 params->dco_fraction = dco & 0x7fff;
2562 static bool
2563 __cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2564 struct skl_wrpll_params *wrpll_params,
2565 int ref_clock)
2567 u32 afe_clock = crtc_state->port_clock * 5;
2568 u32 dco_min = 7998000;
2569 u32 dco_max = 10000000;
2570 u32 dco_mid = (dco_min + dco_max) / 2;
2571 static const int dividers[] = { 2, 4, 6, 8, 10, 12, 14, 16,
2572 18, 20, 24, 28, 30, 32, 36, 40,
2573 42, 44, 48, 50, 52, 54, 56, 60,
2574 64, 66, 68, 70, 72, 76, 78, 80,
2575 84, 88, 90, 92, 96, 98, 100, 102,
2576 3, 5, 7, 9, 15, 21 };
2577 u32 dco, best_dco = 0, dco_centrality = 0;
2578 u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
2579 int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
2581 for (d = 0; d < ARRAY_SIZE(dividers); d++) {
2582 dco = afe_clock * dividers[d];
2584 if ((dco <= dco_max) && (dco >= dco_min)) {
2585 dco_centrality = abs(dco - dco_mid);
2587 if (dco_centrality < best_dco_centrality) {
2588 best_dco_centrality = dco_centrality;
2589 best_div = dividers[d];
2590 best_dco = dco;
2595 if (best_div == 0)
2596 return false;
2598 cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
2599 cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
2600 pdiv, qdiv, kdiv);
2602 return true;
2605 static bool
2606 cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
2607 struct skl_wrpll_params *wrpll_params)
2609 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2611 return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
2612 i915->dpll.ref_clks.nssc);
2615 static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
2617 u32 cfgcr0, cfgcr1;
2618 struct skl_wrpll_params wrpll_params = { 0, };
2620 cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
2622 if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
2623 return false;
2625 cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
2626 wrpll_params.dco_integer;
2628 cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
2629 DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
2630 DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
2631 DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
2632 DPLL_CFGCR1_CENTRAL_FREQ;
2634 memset(&crtc_state->dpll_hw_state, 0,
2635 sizeof(crtc_state->dpll_hw_state));
2637 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2638 crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
2639 return true;
2643 * Display WA #22010492432: ehl, tgl
2644 * Program half of the nominal DCO divider fraction value.
2646 static bool
2647 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
2649 return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
2650 IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) ||
2651 IS_TIGERLAKE(i915)) &&
2652 i915->dpll.ref_clks.nssc == 38400;
2655 static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
2656 const struct intel_shared_dpll *pll,
2657 const struct intel_dpll_hw_state *pll_state,
2658 int ref_clock)
2660 u32 dco_fraction;
2661 u32 p0, p1, p2, dco_freq;
2663 p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
2664 p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
2666 if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
2667 p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
2668 DPLL_CFGCR1_QDIV_RATIO_SHIFT;
2669 else
2670 p1 = 1;
2673 switch (p0) {
2674 case DPLL_CFGCR1_PDIV_2:
2675 p0 = 2;
2676 break;
2677 case DPLL_CFGCR1_PDIV_3:
2678 p0 = 3;
2679 break;
2680 case DPLL_CFGCR1_PDIV_5:
2681 p0 = 5;
2682 break;
2683 case DPLL_CFGCR1_PDIV_7:
2684 p0 = 7;
2685 break;
2688 switch (p2) {
2689 case DPLL_CFGCR1_KDIV_1:
2690 p2 = 1;
2691 break;
2692 case DPLL_CFGCR1_KDIV_2:
2693 p2 = 2;
2694 break;
2695 case DPLL_CFGCR1_KDIV_3:
2696 p2 = 3;
2697 break;
2700 dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
2701 ref_clock;
2703 dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
2704 DPLL_CFGCR0_DCO_FRACTION_SHIFT;
2706 if (ehl_combo_pll_div_frac_wa_needed(dev_priv))
2707 dco_fraction *= 2;
2709 dco_freq += (dco_fraction * ref_clock) / 0x8000;
2711 if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
2712 return 0;
2714 return dco_freq / (p0 * p1 * p2 * 5);
2717 static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
2718 const struct intel_shared_dpll *pll,
2719 const struct intel_dpll_hw_state *pll_state)
2721 return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
2722 i915->dpll.ref_clks.nssc);
2725 static bool
2726 cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
2728 u32 cfgcr0;
2730 cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
2732 switch (crtc_state->port_clock / 2) {
2733 case 81000:
2734 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
2735 break;
2736 case 135000:
2737 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
2738 break;
2739 case 270000:
2740 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
2741 break;
2742 /* eDP 1.4 rates */
2743 case 162000:
2744 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
2745 break;
2746 case 108000:
2747 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
2748 break;
2749 case 216000:
2750 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
2751 break;
2752 case 324000:
2753 /* Some SKUs may require elevated I/O voltage to support this */
2754 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
2755 break;
2756 case 405000:
2757 /* Some SKUs may require elevated I/O voltage to support this */
2758 cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
2759 break;
2762 memset(&crtc_state->dpll_hw_state, 0,
2763 sizeof(crtc_state->dpll_hw_state));
2765 crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
2767 return true;
2770 static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
2771 const struct intel_shared_dpll *pll,
2772 const struct intel_dpll_hw_state *pll_state)
2774 int link_clock = 0;
2776 switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
2777 case DPLL_CFGCR0_LINK_RATE_810:
2778 link_clock = 81000;
2779 break;
2780 case DPLL_CFGCR0_LINK_RATE_1080:
2781 link_clock = 108000;
2782 break;
2783 case DPLL_CFGCR0_LINK_RATE_1350:
2784 link_clock = 135000;
2785 break;
2786 case DPLL_CFGCR0_LINK_RATE_1620:
2787 link_clock = 162000;
2788 break;
2789 case DPLL_CFGCR0_LINK_RATE_2160:
2790 link_clock = 216000;
2791 break;
2792 case DPLL_CFGCR0_LINK_RATE_2700:
2793 link_clock = 270000;
2794 break;
2795 case DPLL_CFGCR0_LINK_RATE_3240:
2796 link_clock = 324000;
2797 break;
2798 case DPLL_CFGCR0_LINK_RATE_4050:
2799 link_clock = 405000;
2800 break;
2801 default:
2802 drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
2803 break;
2806 return link_clock * 2;
2809 static bool cnl_get_dpll(struct intel_atomic_state *state,
2810 struct intel_crtc *crtc,
2811 struct intel_encoder *encoder)
2813 struct intel_crtc_state *crtc_state =
2814 intel_atomic_get_new_crtc_state(state, crtc);
2815 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
2816 struct intel_shared_dpll *pll;
2817 bool bret;
2819 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
2820 bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
2821 if (!bret) {
2822 drm_dbg_kms(&i915->drm,
2823 "Could not get HDMI pll dividers.\n");
2824 return false;
2826 } else if (intel_crtc_has_dp_encoder(crtc_state)) {
2827 bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
2828 if (!bret) {
2829 drm_dbg_kms(&i915->drm,
2830 "Could not set DP dpll HW state.\n");
2831 return false;
2833 } else {
2834 drm_dbg_kms(&i915->drm,
2835 "Skip DPLL setup for output_types 0x%x\n",
2836 crtc_state->output_types);
2837 return false;
2840 pll = intel_find_shared_dpll(state, crtc,
2841 &crtc_state->dpll_hw_state,
2842 BIT(DPLL_ID_SKL_DPLL2) |
2843 BIT(DPLL_ID_SKL_DPLL1) |
2844 BIT(DPLL_ID_SKL_DPLL0));
2845 if (!pll) {
2846 drm_dbg_kms(&i915->drm, "No PLL selected\n");
2847 return false;
2850 intel_reference_shared_dpll(state, crtc,
2851 pll, &crtc_state->dpll_hw_state);
2853 crtc_state->shared_dpll = pll;
2855 return true;
2858 static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
2859 const struct intel_shared_dpll *pll,
2860 const struct intel_dpll_hw_state *pll_state)
2862 if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
2863 return cnl_ddi_wrpll_get_freq(i915, pll, pll_state);
2864 else
2865 return cnl_ddi_lcpll_get_freq(i915, pll, pll_state);
2868 static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
2870 /* No SSC reference */
2871 i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
2874 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2875 const struct intel_dpll_hw_state *hw_state)
2877 drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
2878 "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2879 hw_state->cfgcr0,
2880 hw_state->cfgcr1);
2883 static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
2884 .enable = cnl_ddi_pll_enable,
2885 .disable = cnl_ddi_pll_disable,
2886 .get_hw_state = cnl_ddi_pll_get_hw_state,
2887 .get_freq = cnl_ddi_pll_get_freq,
2890 static const struct dpll_info cnl_plls[] = {
2891 { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
2892 { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
2893 { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
2894 { },
2897 static const struct intel_dpll_mgr cnl_pll_mgr = {
2898 .dpll_info = cnl_plls,
2899 .get_dplls = cnl_get_dpll,
2900 .put_dplls = intel_put_dpll,
2901 .update_ref_clks = cnl_update_dpll_ref_clks,
2902 .dump_hw_state = cnl_dump_hw_state,
2905 struct icl_combo_pll_params {
2906 int clock;
2907 struct skl_wrpll_params wrpll;
2911 * These values alrea already adjusted: they're the bits we write to the
2912 * registers, not the logical values.
2914 static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
2915 { 540000,
2916 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
2917 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2918 { 270000,
2919 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
2920 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2921 { 162000,
2922 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
2923 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2924 { 324000,
2925 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
2926 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2927 { 216000,
2928 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
2929 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2930 { 432000,
2931 { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
2932 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2933 { 648000,
2934 { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
2935 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2936 { 810000,
2937 { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
2938 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2942 /* Also used for 38.4 MHz values. */
2943 static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
2944 { 540000,
2945 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
2946 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2947 { 270000,
2948 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
2949 .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2950 { 162000,
2951 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
2952 .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2953 { 324000,
2954 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
2955 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2956 { 216000,
2957 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
2958 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
2959 { 432000,
2960 { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
2961 .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2962 { 648000,
2963 { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
2964 .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2965 { 810000,
2966 { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
2967 .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
2970 static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
2971 .dco_integer = 0x151, .dco_fraction = 0x4000,
2972 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2975 static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
2976 .dco_integer = 0x1A5, .dco_fraction = 0x7000,
2977 .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
2980 static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
2981 .dco_integer = 0x54, .dco_fraction = 0x3000,
2982 /* the following params are unused */
2983 .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
2986 static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
2987 .dco_integer = 0x43, .dco_fraction = 0x4000,
2988 /* the following params are unused */
2991 static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
2992 struct skl_wrpll_params *pll_params)
2994 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2995 const struct icl_combo_pll_params *params =
2996 dev_priv->dpll.ref_clks.nssc == 24000 ?
2997 icl_dp_combo_pll_24MHz_values :
2998 icl_dp_combo_pll_19_2MHz_values;
2999 int clock = crtc_state->port_clock;
3000 int i;
3002 for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
3003 if (clock == params[i].clock) {
3004 *pll_params = params[i].wrpll;
3005 return true;
3009 MISSING_CASE(clock);
3010 return false;
3013 static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
3014 struct skl_wrpll_params *pll_params)
3016 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3018 if (INTEL_GEN(dev_priv) >= 12) {
3019 switch (dev_priv->dpll.ref_clks.nssc) {
3020 default:
3021 MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3022 fallthrough;
3023 case 19200:
3024 case 38400:
3025 *pll_params = tgl_tbt_pll_19_2MHz_values;
3026 break;
3027 case 24000:
3028 *pll_params = tgl_tbt_pll_24MHz_values;
3029 break;
3031 } else {
3032 switch (dev_priv->dpll.ref_clks.nssc) {
3033 default:
3034 MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
3035 fallthrough;
3036 case 19200:
3037 case 38400:
3038 *pll_params = icl_tbt_pll_19_2MHz_values;
3039 break;
3040 case 24000:
3041 *pll_params = icl_tbt_pll_24MHz_values;
3042 break;
3046 return true;
3049 static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
3050 const struct intel_shared_dpll *pll,
3051 const struct intel_dpll_hw_state *pll_state)
3054 * The PLL outputs multiple frequencies at the same time, selection is
3055 * made at DDI clock mux level.
3057 drm_WARN_ON(&i915->drm, 1);
3059 return 0;
3062 static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
3064 int ref_clock = i915->dpll.ref_clks.nssc;
3067 * For ICL+, the spec states: if reference frequency is 38.4,
3068 * use 19.2 because the DPLL automatically divides that by 2.
3070 if (ref_clock == 38400)
3071 ref_clock = 19200;
3073 return ref_clock;
3076 static bool
3077 icl_calc_wrpll(struct intel_crtc_state *crtc_state,
3078 struct skl_wrpll_params *wrpll_params)
3080 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
3082 return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
3083 icl_wrpll_ref_clock(i915));
/* ICL wrapper: decode combo-PLL frequency using the ICL effective ref clock. */
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
				      const struct intel_shared_dpll *pll,
				      const struct intel_dpll_hw_state *pll_state)
{
	return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
					icl_wrpll_ref_clock(i915));
}
3094 static void icl_calc_dpll_state(struct drm_i915_private *i915,
3095 const struct skl_wrpll_params *pll_params,
3096 struct intel_dpll_hw_state *pll_state)
3098 u32 dco_fraction = pll_params->dco_fraction;
3100 memset(pll_state, 0, sizeof(*pll_state));
3102 if (ehl_combo_pll_div_frac_wa_needed(i915))
3103 dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
3105 pll_state->cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(dco_fraction) |
3106 pll_params->dco_integer;
3108 pll_state->cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params->qdiv_ratio) |
3109 DPLL_CFGCR1_QDIV_MODE(pll_params->qdiv_mode) |
3110 DPLL_CFGCR1_KDIV(pll_params->kdiv) |
3111 DPLL_CFGCR1_PDIV(pll_params->pdiv);
3113 if (INTEL_GEN(i915) >= 12)
3114 pll_state->cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
3115 else
3116 pll_state->cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
3119 static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
3121 return id - DPLL_ID_ICL_MGPLL1;
3124 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
3126 return tc_port + DPLL_ID_ICL_MGPLL1;
3129 static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
3130 u32 *target_dco_khz,
3131 struct intel_dpll_hw_state *state,
3132 bool is_dkl)
3134 u32 dco_min_freq, dco_max_freq;
3135 int div1_vals[] = {7, 5, 3, 2};
3136 unsigned int i;
3137 int div2;
3139 dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
3140 dco_max_freq = is_dp ? 8100000 : 10000000;
3142 for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
3143 int div1 = div1_vals[i];
3145 for (div2 = 10; div2 > 0; div2--) {
3146 int dco = div1 * div2 * clock_khz * 5;
3147 int a_divratio, tlinedrv, inputsel;
3148 u32 hsdiv;
3150 if (dco < dco_min_freq || dco > dco_max_freq)
3151 continue;
3153 if (div2 >= 2) {
3155 * Note: a_divratio not matching TGL BSpec
3156 * algorithm but matching hardcoded values and
3157 * working on HW for DP alt-mode at least
3159 a_divratio = is_dp ? 10 : 5;
3160 tlinedrv = is_dkl ? 1 : 2;
3161 } else {
3162 a_divratio = 5;
3163 tlinedrv = 0;
3165 inputsel = is_dp ? 0 : 1;
3167 switch (div1) {
3168 default:
3169 MISSING_CASE(div1);
3170 fallthrough;
3171 case 2:
3172 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
3173 break;
3174 case 3:
3175 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
3176 break;
3177 case 5:
3178 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
3179 break;
3180 case 7:
3181 hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
3182 break;
3185 *target_dco_khz = dco;
3187 state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);
3189 state->mg_clktop2_coreclkctl1 =
3190 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);
3192 state->mg_clktop2_hsclkctl =
3193 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
3194 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
3195 hsdiv |
3196 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
3198 return true;
3202 return false;
3206 * The specification for this function uses real numbers, so the math had to be
3207 * adapted to integer-only calculation, that's why it looks so different.
3209 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
3210 struct intel_dpll_hw_state *pll_state)
3212 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
3213 int refclk_khz = dev_priv->dpll.ref_clks.nssc;
3214 int clock = crtc_state->port_clock;
3215 u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
3216 u32 iref_ndiv, iref_trim, iref_pulse_w;
3217 u32 prop_coeff, int_coeff;
3218 u32 tdc_targetcnt, feedfwgain;
3219 u64 ssc_stepsize, ssc_steplen, ssc_steplog;
3220 u64 tmp;
3221 bool use_ssc = false;
3222 bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
3223 bool is_dkl = INTEL_GEN(dev_priv) >= 12;
3225 memset(pll_state, 0, sizeof(*pll_state));
3227 if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
3228 pll_state, is_dkl)) {
3229 drm_dbg_kms(&dev_priv->drm,
3230 "Failed to find divisors for clock %d\n", clock);
3231 return false;
3234 m1div = 2;
3235 m2div_int = dco_khz / (refclk_khz * m1div);
3236 if (m2div_int > 255) {
3237 if (!is_dkl) {
3238 m1div = 4;
3239 m2div_int = dco_khz / (refclk_khz * m1div);
3242 if (m2div_int > 255) {
3243 drm_dbg_kms(&dev_priv->drm,
3244 "Failed to find mdiv for clock %d\n",
3245 clock);
3246 return false;
3249 m2div_rem = dco_khz % (refclk_khz * m1div);
3251 tmp = (u64)m2div_rem * (1 << 22);
3252 do_div(tmp, refclk_khz * m1div);
3253 m2div_frac = tmp;
3255 switch (refclk_khz) {
3256 case 19200:
3257 iref_ndiv = 1;
3258 iref_trim = 28;
3259 iref_pulse_w = 1;
3260 break;
3261 case 24000:
3262 iref_ndiv = 1;
3263 iref_trim = 25;
3264 iref_pulse_w = 2;
3265 break;
3266 case 38400:
3267 iref_ndiv = 2;
3268 iref_trim = 28;
3269 iref_pulse_w = 1;
3270 break;
3271 default:
3272 MISSING_CASE(refclk_khz);
3273 return false;
3277 * tdc_res = 0.000003
3278 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
3280 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
3281 * was supposed to be a division, but we rearranged the operations of
3282 * the formula to avoid early divisions so we don't multiply the
3283 * rounding errors.
3285 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
3286 * we also rearrange to work with integers.
3288 * The 0.5 transformed to 5 results in a multiplication by 10 and the
3289 * last division by 10.
3291 tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
3294 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
3295 * 32 bits. That's not a problem since we round the division down
3296 * anyway.
3298 feedfwgain = (use_ssc || m2div_rem > 0) ?
3299 m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
3301 if (dco_khz >= 9000000) {
3302 prop_coeff = 5;
3303 int_coeff = 10;
3304 } else {
3305 prop_coeff = 4;
3306 int_coeff = 8;
3309 if (use_ssc) {
3310 tmp = mul_u32_u32(dco_khz, 47 * 32);
3311 do_div(tmp, refclk_khz * m1div * 10000);
3312 ssc_stepsize = tmp;
3314 tmp = mul_u32_u32(dco_khz, 1000);
3315 ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
3316 } else {
3317 ssc_stepsize = 0;
3318 ssc_steplen = 0;
3320 ssc_steplog = 4;
3322 /* write pll_state calculations */
3323 if (is_dkl) {
3324 pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
3325 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
3326 DKL_PLL_DIV0_FBPREDIV(m1div) |
3327 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
3329 pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
3330 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
3332 pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
3333 DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
3334 DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
3335 (use_ssc ? DKL_PLL_SSC_EN : 0);
3337 pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
3338 DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
3340 pll_state->mg_pll_tdc_coldst_bias =
3341 DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
3342 DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
3344 } else {
3345 pll_state->mg_pll_div0 =
3346 (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
3347 MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
3348 MG_PLL_DIV0_FBDIV_INT(m2div_int);
3350 pll_state->mg_pll_div1 =
3351 MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
3352 MG_PLL_DIV1_DITHER_DIV_2 |
3353 MG_PLL_DIV1_NDIVRATIO(1) |
3354 MG_PLL_DIV1_FBPREDIV(m1div);
3356 pll_state->mg_pll_lf =
3357 MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
3358 MG_PLL_LF_AFCCNTSEL_512 |
3359 MG_PLL_LF_GAINCTRL(1) |
3360 MG_PLL_LF_INT_COEFF(int_coeff) |
3361 MG_PLL_LF_PROP_COEFF(prop_coeff);
3363 pll_state->mg_pll_frac_lock =
3364 MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
3365 MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
3366 MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
3367 MG_PLL_FRAC_LOCK_DCODITHEREN |
3368 MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
3369 if (use_ssc || m2div_rem > 0)
3370 pll_state->mg_pll_frac_lock |=
3371 MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
3373 pll_state->mg_pll_ssc =
3374 (use_ssc ? MG_PLL_SSC_EN : 0) |
3375 MG_PLL_SSC_TYPE(2) |
3376 MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
3377 MG_PLL_SSC_STEPNUM(ssc_steplog) |
3378 MG_PLL_SSC_FLLEN |
3379 MG_PLL_SSC_STEPSIZE(ssc_stepsize);
3381 pll_state->mg_pll_tdc_coldst_bias =
3382 MG_PLL_TDC_COLDST_COLDSTART |
3383 MG_PLL_TDC_COLDST_IREFINT_EN |
3384 MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
3385 MG_PLL_TDC_TDCOVCCORR_EN |
3386 MG_PLL_TDC_TDCSEL(3);
3388 pll_state->mg_pll_bias =
3389 MG_PLL_BIAS_BIAS_GB_SEL(3) |
3390 MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
3391 MG_PLL_BIAS_BIAS_BONUS(10) |
3392 MG_PLL_BIAS_BIASCAL_EN |
3393 MG_PLL_BIAS_CTRIM(12) |
3394 MG_PLL_BIAS_VREF_RDAC(4) |
3395 MG_PLL_BIAS_IREFTRIM(iref_trim);
3397 if (refclk_khz == 38400) {
3398 pll_state->mg_pll_tdc_coldst_bias_mask =
3399 MG_PLL_TDC_COLDST_COLDSTART;
3400 pll_state->mg_pll_bias_mask = 0;
3401 } else {
3402 pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
3403 pll_state->mg_pll_bias_mask = -1U;
3406 pll_state->mg_pll_tdc_coldst_bias &=
3407 pll_state->mg_pll_tdc_coldst_bias_mask;
3408 pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
3411 return true;
/*
 * Compute the port clock produced by an MG (gen11) / Dekel (gen12+) PHY PLL
 * from the saved register state.  The DCO runs at
 * m1 * (m2_int + m2_frac / 2^22) * refclk and the port clock is
 * DCO / (5 * div1 * div2); the result is in the same units as the
 * reference clock (ref_clks.nssc).
 */
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
				   const struct intel_shared_dpll *pll,
				   const struct intel_dpll_hw_state *pll_state)
{
	u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
	u64 tmp;

	/* MG/DKL PLLs always use the non-SSC reference. */
	ref_clock = dev_priv->dpll.ref_clks.nssc;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* Gen12+: Dekel PHY register layout. */
		m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
		m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
		m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;

		/* The fractional part is only valid if its enable bit is set. */
		if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
			m2_frac = pll_state->mg_pll_bias &
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
		} else {
			m2_frac = 0;
		}
	} else {
		/* Gen11: MG PHY register layout. */
		m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
		m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;

		if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
			m2_frac = pll_state->mg_pll_div0 &
				  MG_PLL_DIV0_FBDIV_FRAC_MASK;
			m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
		} else {
			m2_frac = 0;
		}
	}

	/* Decode the high-speed divider from the encoded ratio field. */
	switch (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
		div1 = 2;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
		div1 = 3;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
		div1 = 5;
		break;
	case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
		div1 = 7;
		break;
	default:
		MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
		return 0;
	}

	div2 = (pll_state->mg_clktop2_hsclkctl &
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;

	/* div2 value of 0 is same as 1 means no div */
	if (div2 == 0)
		div2 = 1;

	/*
	 * Adjust the original formula to delay the division by 2^22 in order to
	 * minimize possible rounding errors.
	 */
	tmp = (u64)m1 * m2_int * ref_clock +
	      (((u64)m1 * m2_frac * ref_clock) >> 22);
	tmp = div_u64(tmp, 5 * div1 * div2);

	return tmp;
}
3487 * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
3488 * @crtc_state: state for the CRTC to select the DPLL for
3489 * @port_dpll_id: the active @port_dpll_id to select
3491 * Select the given @port_dpll_id instance from the DPLLs reserved for the
3492 * CRTC.
3494 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
3495 enum icl_port_dpll_id port_dpll_id)
3497 struct icl_port_dpll *port_dpll =
3498 &crtc_state->icl_port_dplls[port_dpll_id];
3500 crtc_state->shared_dpll = port_dpll->pll;
3501 crtc_state->dpll_hw_state = port_dpll->hw_state;
3504 static void icl_update_active_dpll(struct intel_atomic_state *state,
3505 struct intel_crtc *crtc,
3506 struct intel_encoder *encoder)
3508 struct intel_crtc_state *crtc_state =
3509 intel_atomic_get_new_crtc_state(state, crtc);
3510 struct intel_digital_port *primary_port;
3511 enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
3513 primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
3514 enc_to_mst(encoder)->primary :
3515 enc_to_dig_port(encoder);
3517 if (primary_port &&
3518 (primary_port->tc_mode == TC_PORT_DP_ALT ||
3519 primary_port->tc_mode == TC_PORT_LEGACY))
3520 port_dpll_id = ICL_PORT_DPLL_MG_PHY;
3522 icl_set_active_port_dpll(crtc_state, port_dpll_id);
3525 static u32 intel_get_hti_plls(struct drm_i915_private *i915)
3527 if (!(i915->hti_state & HDPORT_ENABLED))
3528 return 0;
3530 return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
/*
 * Compute the PLL state for a combo PHY port and reserve a matching DPLL
 * from the platform-specific candidate pool.  Returns false if no state
 * could be computed or no free (HTI-excluded) DPLL was found.
 */
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll =
		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum port port = encoder->port;
	unsigned long dpll_mask;
	int ret;

	/* HDMI/DSI need a WRPLL dividers search; DP uses fixed link rates. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = icl_calc_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate combo PHY PLL state.\n");

		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	/* Build the per-platform set of DPLLs this port may use. */
	if (IS_DG1(dev_priv)) {
		/* DG1 splits the DPLLs between port pairs. */
		if (port == PORT_D || port == PORT_E) {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL2) |
				BIT(DPLL_ID_DG1_DPLL3);
		} else {
			dpll_mask =
				BIT(DPLL_ID_DG1_DPLL0) |
				BIT(DPLL_ID_DG1_DPLL1);
		}
	} else if (IS_ROCKETLAKE(dev_priv)) {
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else if (IS_JSL_EHL(dev_priv) && port != PORT_A) {
		/* On JSL/EHL, DPLL4 is not usable by port A. */
		dpll_mask =
			BIT(DPLL_ID_EHL_DPLL4) |
			BIT(DPLL_ID_ICL_DPLL1) |
			BIT(DPLL_ID_ICL_DPLL0);
	} else {
		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
	}

	/* Eliminate DPLLs from consideration if reserved by HTI */
	dpll_mask &= ~intel_get_hti_plls(dev_priv);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						dpll_mask);
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm,
			    "No combo PHY PLL found for [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name);
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;
}
/*
 * Reserve both DPLLs a TypeC port may need: the TBT PLL (default) and the
 * port-specific MG PHY PLL.  Both must be reserved up front since the port
 * mode can change; icl_update_active_dpll() then picks the active one.
 * On MG PLL failure the already-taken TBT reference is dropped.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct skl_wrpll_params pll_params = { };
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First the TBT PLL, stored in the DEFAULT slot. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate TBT PLL state.\n");
		return false;
	}

	icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	/* Then the MG PHY PLL tied to this specific TypeC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reservation taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3668 static bool icl_get_dplls(struct intel_atomic_state *state,
3669 struct intel_crtc *crtc,
3670 struct intel_encoder *encoder)
3672 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3673 enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3675 if (intel_phy_is_combo(dev_priv, phy))
3676 return icl_get_combo_phy_dpll(state, crtc, encoder);
3677 else if (intel_phy_is_tc(dev_priv, phy))
3678 return icl_get_tc_phy_dplls(state, crtc, encoder);
3680 MISSING_CASE(phy);
3682 return false;
3685 static void icl_put_dplls(struct intel_atomic_state *state,
3686 struct intel_crtc *crtc)
3688 const struct intel_crtc_state *old_crtc_state =
3689 intel_atomic_get_old_crtc_state(state, crtc);
3690 struct intel_crtc_state *new_crtc_state =
3691 intel_atomic_get_new_crtc_state(state, crtc);
3692 enum icl_port_dpll_id id;
3694 new_crtc_state->shared_dpll = NULL;
3696 for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3697 const struct icl_port_dpll *old_port_dpll =
3698 &old_crtc_state->icl_port_dplls[id];
3699 struct icl_port_dpll *new_port_dpll =
3700 &new_crtc_state->icl_port_dplls[id];
3702 new_port_dpll->pll = NULL;
3704 if (!old_port_dpll->pll)
3705 continue;
3707 intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
/*
 * Read the current HW state of a gen11 MG PHY PLL into @hw_state.
 * Returns false if display power is off or the PLL is disabled.  Fields
 * that are programmed with RMW are masked down to the bits we own, and
 * the refclk-dependent masks are recomputed to match what
 * icl_calc_mg_pll_state() would produce.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Registers are only accessible while the display core is powered. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
						   MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * The bias/tdc_coldst masks depend on the reference clock; keep this
	 * in sync with icl_calc_mg_pll_state() so state comparison works.
	 */
	if (dev_priv->dpll.ref_clks.nssc == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
/*
 * Read the current HW state of a gen12+ Dekel PHY PLL into @hw_state.
 * Returns false if display power is off or the PLL is disabled.  Each
 * register value is masked down to the fields the driver programs, since
 * all these registers are written with RMW in dkl_pll_write().
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
						  DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
/*
 * Common HW-state readout for combo/TBT PLLs: check @enable_reg, then read
 * the platform-specific CFGCR0/CFGCR1 pair.  Returns false if display power
 * is off or the PLL is disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = intel_de_read(dev_priv, enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	/* Same branching as icl_dpll_write() — keep the two in sync. */
	if (IS_DG1(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv, DG1_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv, DG1_DPLL_CFGCR1(id));
	} else if (IS_ROCKETLAKE(dev_priv)) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 RKL_DPLL_CFGCR1(id));
	} else if (INTEL_GEN(dev_priv) >= 12) {
		hw_state->cfgcr0 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = intel_de_read(dev_priv,
						 TGL_DPLL_CFGCR1(id));
	} else {
		/* JSL/EHL DPLL4 uses the register slot at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = intel_de_read(dev_priv,
							 ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3901 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3902 struct intel_shared_dpll *pll,
3903 struct intel_dpll_hw_state *hw_state)
3905 i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);
3907 return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
/* Readout for the Thunderbolt PLL, which has a single fixed enable register. */
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
}
/*
 * Program the combo/TBT PLL CFGCR0/CFGCR1 registers from the PLL's cached
 * state.  Register addresses differ per platform; the branching mirrors
 * icl_pll_get_hw_state().
 */
static void icl_dpll_write(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	const enum intel_dpll_id id = pll->info->id;
	i915_reg_t cfgcr0_reg, cfgcr1_reg;

	if (IS_DG1(dev_priv)) {
		cfgcr0_reg = DG1_DPLL_CFGCR0(id);
		cfgcr1_reg = DG1_DPLL_CFGCR1(id);
	} else if (IS_ROCKETLAKE(dev_priv)) {
		cfgcr0_reg = RKL_DPLL_CFGCR0(id);
		cfgcr1_reg = RKL_DPLL_CFGCR1(id);
	} else if (INTEL_GEN(dev_priv) >= 12) {
		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
	} else {
		/* JSL/EHL DPLL4 uses the register slot at index 4. */
		if (IS_JSL_EHL(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
		} else {
			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
		}
	}

	intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
	intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
	/* Posting read to make sure the writes landed before enabling. */
	intel_de_posting_read(dev_priv, cfgcr1_reg);
}
/*
 * Program a gen11 MG PHY PLL from its cached state.  Registers with
 * reserved fields are written read-modify-write under a mask; the
 * bias/tdc_coldst masks were computed at calc/readout time.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
		       hw_state->mg_pll_frac_lock);
	intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Refclk-dependent masks, see icl_calc_mg_pll_state(). */
	val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
/*
 * Program a gen12+ Dekel PHY PLL from its cached state.  Every register is
 * written read-modify-write, clearing only the fields the driver owns;
 * the cleared masks match those used in dkl_pll_get_hw_state().
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);

	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	/* Flush the writes before the PLL is enabled. */
	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
4069 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
4070 struct intel_shared_dpll *pll,
4071 i915_reg_t enable_reg)
4073 u32 val;
4075 val = intel_de_read(dev_priv, enable_reg);
4076 val |= PLL_POWER_ENABLE;
4077 intel_de_write(dev_priv, enable_reg, val);
4080 * The spec says we need to "wait" but it also says it should be
4081 * immediate.
4083 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
4084 drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
4085 pll->info->id);
4088 static void icl_pll_enable(struct drm_i915_private *dev_priv,
4089 struct intel_shared_dpll *pll,
4090 i915_reg_t enable_reg)
4092 u32 val;
4094 val = intel_de_read(dev_priv, enable_reg);
4095 val |= PLL_ENABLE;
4096 intel_de_write(dev_priv, enable_reg, val);
4098 /* Timeout is actually 600us. */
4099 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
4100 drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
/*
 * Full enable sequence for a combo PHY PLL: power up, program dividers,
 * then enable and wait for lock.  On JSL/EHL DPLL4 additionally holds a
 * power-domain reference to keep DC states off while the PLL is in use;
 * the reference is dropped in combo_pll_disable().
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
/* Enable sequence for the Thunderbolt PLL (fixed TBT_PLL_ENABLE register). */
static void tbt_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);

	/* DVFS post sequence would be here. See the comment above. */
}
/*
 * Enable sequence for a TypeC PHY PLL; the divider programming path
 * differs between gen12+ (Dekel) and gen11 (MG) PHYs.
 */
static void mg_pll_enable(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	if (INTEL_GEN(dev_priv) >= 12)
		dkl_pll_write(dev_priv, pll);
	else
		icl_mg_pll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
/*
 * Common disable sequence: clear the enable bit, wait for unlock, then
 * drop PLL power and wait for the power-state ack.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = intel_de_read(dev_priv, enable_reg);
	val &= ~PLL_POWER_ENABLE;
	intel_de_write(dev_priv, enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
			pll->info->id);
}
/*
 * Disable a combo PHY PLL; on JSL/EHL DPLL4 also release the DC-off
 * power-domain reference taken in combo_pll_enable().
 */
static void combo_pll_disable(struct drm_i915_private *dev_priv,
			      struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = intel_combo_pll_enable_reg(dev_priv, pll);

	icl_pll_disable(dev_priv, pll, enable_reg);

	if (IS_JSL_EHL(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4)
		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
					pll->wakeref);
}
/* Disable the Thunderbolt PLL. */
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll)
{
	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
}
/* Disable a TypeC PHY PLL via its per-port MG_PLL_ENABLE register. */
static void mg_pll_disable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg =
		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));

	icl_pll_disable(dev_priv, pll, enable_reg);
}
/* Cache the DPLL reference clock; ICL+ only uses the non-SSC reference. */
static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
{
	/* No SSC ref */
	i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
}
4248 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
4249 const struct intel_dpll_hw_state *hw_state)
4251 drm_dbg_kms(&dev_priv->drm,
4252 "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
4253 "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
4254 "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
4255 "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
4256 "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
4257 "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
4258 hw_state->cfgcr0, hw_state->cfgcr1,
4259 hw_state->mg_refclkin_ctl,
4260 hw_state->mg_clktop2_coreclkctl1,
4261 hw_state->mg_clktop2_hsclkctl,
4262 hw_state->mg_pll_div0,
4263 hw_state->mg_pll_div1,
4264 hw_state->mg_pll_lf,
4265 hw_state->mg_pll_frac_lock,
4266 hw_state->mg_pll_ssc,
4267 hw_state->mg_pll_bias,
4268 hw_state->mg_pll_tdc_coldst_bias);
/* Hooks for ICL+ combo PHY PLLs. */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
	.get_freq = icl_ddi_combo_pll_get_freq,
};

/* Hooks for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
	.get_freq = icl_ddi_tbt_pll_get_freq,
};

/* Hooks for gen11 MG PHY (TypeC) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
/*
 * Icelake PLL table; array index must equal the DPLL id (checked in
 * intel_shared_dpll_init()), terminated by an empty entry.
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/* Elkhart Lake / Jasper Lake: combo PLLs only, no TypeC ports. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/* Hooks for gen12+ Dekel PHY (TypeC) PLLs; share the MG enable/disable path. */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
	.get_freq = icl_ddi_mg_pll_get_freq,
};
/* Tiger Lake: two combo PLLs, the TBT PLL and six TypeC (Dekel) PLLs. */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },
};

static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/* Rocket Lake: combo PLLs only. */
static const struct dpll_info rkl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },
};

static const struct intel_dpll_mgr rkl_pll_mgr = {
	.dpll_info = rkl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
/* DG1 PLL pool: four combo PLLs with DG1-specific IDs. */
static const struct dpll_info dg1_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
	{ "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
	{ "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
	/* sentinel: intel_shared_dpll_init() stops at .name == NULL */
	{ },
};
/*
 * DG1 uses the ICL hooks; no .update_active_dpll since its table has no
 * Type-C PLLs (compare tgl_pll_mgr).
 */
static const struct intel_dpll_mgr dg1_pll_mgr = {
	.dpll_info = dg1_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_ref_clks = icl_update_dpll_ref_clks,
	.dump_hw_state = icl_dump_hw_state,
};
4388 * intel_shared_dpll_init - Initialize shared DPLLs
4389 * @dev: drm device
4391 * Initialize shared DPLLs for @dev.
4393 void intel_shared_dpll_init(struct drm_device *dev)
4395 struct drm_i915_private *dev_priv = to_i915(dev);
4396 const struct intel_dpll_mgr *dpll_mgr = NULL;
4397 const struct dpll_info *dpll_info;
4398 int i;
4400 if (IS_DG1(dev_priv))
4401 dpll_mgr = &dg1_pll_mgr;
4402 else if (IS_ROCKETLAKE(dev_priv))
4403 dpll_mgr = &rkl_pll_mgr;
4404 else if (INTEL_GEN(dev_priv) >= 12)
4405 dpll_mgr = &tgl_pll_mgr;
4406 else if (IS_JSL_EHL(dev_priv))
4407 dpll_mgr = &ehl_pll_mgr;
4408 else if (INTEL_GEN(dev_priv) >= 11)
4409 dpll_mgr = &icl_pll_mgr;
4410 else if (IS_CANNONLAKE(dev_priv))
4411 dpll_mgr = &cnl_pll_mgr;
4412 else if (IS_GEN9_BC(dev_priv))
4413 dpll_mgr = &skl_pll_mgr;
4414 else if (IS_GEN9_LP(dev_priv))
4415 dpll_mgr = &bxt_pll_mgr;
4416 else if (HAS_DDI(dev_priv))
4417 dpll_mgr = &hsw_pll_mgr;
4418 else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
4419 dpll_mgr = &pch_pll_mgr;
4421 if (!dpll_mgr) {
4422 dev_priv->dpll.num_shared_dpll = 0;
4423 return;
4426 dpll_info = dpll_mgr->dpll_info;
4428 for (i = 0; dpll_info[i].name; i++) {
4429 drm_WARN_ON(dev, i != dpll_info[i].id);
4430 dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
4433 dev_priv->dpll.mgr = dpll_mgr;
4434 dev_priv->dpll.num_shared_dpll = i;
4435 mutex_init(&dev_priv->dpll.lock);
4437 BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
4441 * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
4442 * @state: atomic state
4443 * @crtc: CRTC to reserve DPLLs for
4444 * @encoder: encoder
4446 * This function reserves all required DPLLs for the given CRTC and encoder
4447 * combination in the current atomic commit @state and the new @crtc atomic
4448 * state.
4450 * The new configuration in the atomic commit @state is made effective by
4451 * calling intel_shared_dpll_swap_state().
4453 * The reserved DPLLs should be released by calling
4454 * intel_release_shared_dplls().
4456 * Returns:
4457 * True if all required DPLLs were successfully reserved.
4459 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
4460 struct intel_crtc *crtc,
4461 struct intel_encoder *encoder)
4463 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4464 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4466 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4467 return false;
4469 return dpll_mgr->get_dplls(state, crtc, encoder);
4473 * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
4474 * @state: atomic state
4475 * @crtc: crtc from which the DPLLs are to be released
4477 * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
4478 * from the current atomic commit @state and the old @crtc atomic state.
4480 * The new configuration in the atomic commit @state is made effective by
4481 * calling intel_shared_dpll_swap_state().
4483 void intel_release_shared_dplls(struct intel_atomic_state *state,
4484 struct intel_crtc *crtc)
4486 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
4487 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4490 * FIXME: this function is called for every platform having a
4491 * compute_clock hook, even though the platform doesn't yet support
4492 * the shared DPLL framework and intel_reserve_shared_dplls() is not
4493 * called on those.
4495 if (!dpll_mgr)
4496 return;
4498 dpll_mgr->put_dplls(state, crtc);
4502 * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
4503 * @state: atomic state
4504 * @crtc: the CRTC for which to update the active DPLL
4505 * @encoder: encoder determining the type of port DPLL
4507 * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
4508 * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
4509 * DPLL selected will be based on the current mode of the encoder's port.
4511 void intel_update_active_dpll(struct intel_atomic_state *state,
4512 struct intel_crtc *crtc,
4513 struct intel_encoder *encoder)
4515 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4516 const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
4518 if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
4519 return;
4521 dpll_mgr->update_active_dpll(state, crtc, encoder);
4525 * intel_dpll_get_freq - calculate the DPLL's output frequency
4526 * @i915: i915 device
4527 * @pll: DPLL for which to calculate the output frequency
4528 * @pll_state: DPLL state from which to calculate the output frequency
4530 * Return the output frequency corresponding to @pll's passed in @pll_state.
4532 int intel_dpll_get_freq(struct drm_i915_private *i915,
4533 const struct intel_shared_dpll *pll,
4534 const struct intel_dpll_hw_state *pll_state)
4536 if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
4537 return 0;
4539 return pll->info->funcs->get_freq(i915, pll, pll_state);
4543 * intel_dpll_get_hw_state - readout the DPLL's hardware state
4544 * @i915: i915 device
4545 * @pll: DPLL for which to calculate the output frequency
4546 * @hw_state: DPLL's hardware state
4548 * Read out @pll's hardware state into @hw_state.
4550 bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
4551 struct intel_shared_dpll *pll,
4552 struct intel_dpll_hw_state *hw_state)
4554 return pll->info->funcs->get_hw_state(i915, pll, hw_state);
/*
 * Read out the current hardware state of @pll and rebuild the software
 * tracking (on/off state and the mask of CRTCs using it) to match.
 */
static void readout_dpll_hw_state(struct drm_i915_private *i915,
				  struct intel_shared_dpll *pll)
{
	struct intel_crtc *crtc;

	pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);

	/*
	 * On JSL/EHL an enabled DPLL4 holds a DPLL_DC_OFF power domain
	 * reference; stash the wakeref so it can be released when the PLL
	 * is disabled. NOTE(review): presumably this keeps DC states off
	 * while the PLL runs — confirm against the enable path.
	 */
	if (IS_JSL_EHL(i915) && pll->on &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		pll->wakeref = intel_display_power_get(i915,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	/* Recompute which active CRTCs are currently driven by this PLL. */
	pll->state.crtc_mask = 0;
	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
			pll->state.crtc_mask |= 1 << crtc->pipe;
	}
	pll->active_mask = pll->state.crtc_mask;

	drm_dbg_kms(&i915->drm,
		    "%s hw state readout: crtc_mask 0x%08x, on %i\n",
		    pll->info->name, pll->state.crtc_mask, pll->on);
}
4585 void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
4587 int i;
4589 if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
4590 i915->dpll.mgr->update_ref_clks(i915);
4592 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4593 readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
4596 static void sanitize_dpll_state(struct drm_i915_private *i915,
4597 struct intel_shared_dpll *pll)
4599 if (!pll->on || pll->active_mask)
4600 return;
4602 drm_dbg_kms(&i915->drm,
4603 "%s enabled but not in use, disabling\n",
4604 pll->info->name);
4606 pll->info->funcs->disable(i915, pll);
4607 pll->on = false;
4610 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
4612 int i;
4614 for (i = 0; i < i915->dpll.num_shared_dpll; i++)
4615 sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
4619 * intel_dpll_dump_hw_state - write hw_state to dmesg
4620 * @dev_priv: i915 drm device
4621 * @hw_state: hw state to be written to the log
4623 * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
4625 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
4626 const struct intel_dpll_hw_state *hw_state)
4628 if (dev_priv->dpll.mgr) {
4629 dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
4630 } else {
4631 /* fallback for platforms that don't use the shared dpll
4632 * infrastructure
4634 drm_dbg_kms(&dev_priv->drm,
4635 "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
4636 "fp0: 0x%x, fp1: 0x%x\n",
4637 hw_state->dpll,
4638 hw_state->dpll_md,
4639 hw_state->fp0,
4640 hw_state->fp1);