/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *, intel_clock_t *);
};
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
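
/*
 * Interpretation note (an assumption, not stated in the surviving code):
 * FDI_PLL_BIOS_0 appears to encode the FDI feedback divider, and the
 * "+ 2" together with the "units of 100MHz" annotation suggests the
 * return value is the FDI link frequency divided by 100 MHz, e.g. a
 * return value of 27 would correspond to the 2.7 GHz IRONLAKE_FDI_FREQ
 * defined above.
 */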
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
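
/*
 * Worked example of the convention above (illustrative numbers, assuming a
 * 120000 kHz reference clock and the intel_clock() formula further below):
 * for intel_limits_ironlake_dac the table minimum m1 = 12 corresponds to an
 * actual M1 divider of 14, and n = 1 to an actual N of 3.  With m1 = 12,
 * m2 = 9, n = 1 the effective m = 5 * (12 + 2) + (9 + 2) = 81 and
 * vco = 120000 * 81 / (1 + 2) = 3240000 kHz, which lands inside the
 * 1760000-3510000 kHz VCO range listed for that table.
 */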
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};
/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t intel_limits_vlv_dac = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_hdmi = {
	.dot = { .min = 20000, .max = 165000 },
	.vco = { .min = 5994000, .max = 4000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 60, .max = 300 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_dp = {
	.dot = { .min = 162000, .max = 270000 },
	.vco = { .min = 5994000, .max = 4000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 60, .max = 300 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");

	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");

	val = I915_READ(DPIO_DATA);

	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
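
/*
 * intel_dpio_read() and the intel_dpio_write() helper below follow the same
 * sideband pattern (as far as the surviving lines show): take dpio_lock,
 * wait for the DPIO_PKT busy bit to clear, load DPIO_REG (and DPIO_DATA for
 * writes), kick off the transaction through DPIO_PKT with the RID/opcode/
 * port-id fields, wait for busy to clear again and, for reads, fetch the
 * result from DPIO_DATA.
 */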
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");

	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
		DRM_ERROR("DPIO write wait timed out\n");

	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);

static void vlv_init_dpio(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);

static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);

static const struct dmi_system_id intel_dual_link_lvds[] = {
		.callback = intel_dual_link_lvds_callback,
		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
	{ }	/* terminating entry */
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,

	/* use the module option value if specified */
	if (i915_lvds_channel_mode > 0)
		return i915_lvds_channel_mode == 2;

	if (dmi_check_system(intel_dual_link_lvds))

	if (dev_priv->lvds_val)
		val = dev_priv->lvds_val;

		/* BIOS should set the proper LVDS register value at boot, but
		 * in reality, it doesn't set the value when the lid is closed;
		 * we need to check "the value to be set" in VBT when LVDS
		 * register is uninitialized.
		 */
		val = I915_READ(reg);
		if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
			val = dev_priv->bios_lvds_val;
		dev_priv->lvds_val = val;

	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
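
/*
 * Summary of the dual-link decision above: the i915_lvds_channel_mode
 * module option wins when set, then the DMI quirk table, and only then the
 * cached/probed LVDS register value (falling back to the VBT-provided
 * bios_lvds_val when the register looks uninitialized).  The final test
 * presumably treats "clock B powered up" as the signature of a dual-link
 * panel.
 */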
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, LVDS))
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
			limit = &intel_limits_vlv_dac;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
			limit = &intel_limits_vlv_hdmi;
		else
			limit = &intel_limits_vlv_dp;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
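
/*
 * Rough decision tree implemented above: PCH-split platforms (Ironlake+)
 * defer to intel_ironlake_limit(), G4X to intel_g4x_limit(), and
 * Pineview/Valleyview/gen3+/gen2 pick a table directly based on whether
 * the pipe drives LVDS (or, for VLV, analog, HDMI or DP).  The returned
 * table is what the find_pll() hook later searches.
 */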
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
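
/*
 * Worked example for the non-Pineview formula (illustrative numbers,
 * assuming a 96000 kHz reference clock as used on many pre-ILK parts):
 * m1 = 10, m2 = 5, n = 2, p1 = 2, p2 = 10 gives
 * m = 5 * (10 + 2) + (5 + 2) = 67, vco = 96000 * 67 / (2 + 2) = 1608000 kHz
 * and dot = 1608000 / (2 * 10) = 80400 kHz, i.e. an 80.4 MHz pixel clock.
 */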
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
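/*
 * INTELPllInvalid() is wrapped in do { ... } while (0) so it expands as a
 * single statement inside the un-braced if checks below; the DRM_DEBUG call
 * is commented out, so a failed check simply returns false from
 * intel_PLL_is_valid() without logging.
 */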
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
					    clock.p != match_clock->p)

					this_err = abs(clock.dot - target);
					if (this_err < err) {

	return (err != target);
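
/*
 * Note on the brute-force search above: err is presumably initialised to
 * target (its declaration is not shown here), every valid divisor
 * combination that lands closer to the requested dot clock replaces the
 * running best, and the function reports success precisely when at least
 * one candidate beat that initial error, i.e. when (err != target).
 */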
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
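	/*
	 * The shifts are integer approximations: target >> 8 is target/256
	 * (~0.39%) and target >> 9 is target/512 (~0.20%), so err_most is
	 * roughly 0.59% of the target clock, matching the 0.00585 figure in
	 * the comment above.  For a 270000 kHz target that allows a
	 * deviation of about 1580 kHz.
	 */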
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (HAS_PCH_SPLIT(dev))
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
					    clock.p != match_clock->p)

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
	struct drm_device *dev = crtc->dev;

	if (target < 200000) {

	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
	if (target < 200000) {

	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;

	memcpy(best_clock, &clock, sizeof(intel_clock_t));
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
	u32 updrate, minupdate, fracbits, p;
	unsigned long bestppm, ppm, absppm;

	dotclk = target * 1000;
	fastclk = dotclk / (2*100);

	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
	bestm1 = bestm2 = bestp1 = bestp2 = 0;

	/* based on hardware requirement, prefer smaller n to precision */
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
		updrate = refclk / n;
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
					m2 = (((2*(fastclk * p * n / m1)) +
						refclk) / (2*refclk));
					if (vco >= limit->vco.min && vco < limit->vco.max) {
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
						absppm = (ppm > 0) ? ppm : (-ppm);
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
						if (absppm < bestppm - 10) {

	best_clock->n = bestn;
	best_clock->m1 = bestm1;
	best_clock->m2 = bestm2;
	best_clock->p1 = bestp1;
	best_clock->p2 = bestp2;
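
/*
 * Selection logic above, in words: candidate dividers are accepted when the
 * resulting clock is within 100 ppm of the requested fast clock, with a
 * preference for larger p1*p2 products among equally good candidates, and a
 * candidate that improves the best ppm seen so far by more than 10 appears
 * to replace it outright (the bookkeeping between the two if checks is not
 * shown in the surviving lines).
 */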
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * our driver code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 *
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		DRM_DEBUG_KMS("vblank wait timed out\n");

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		line_mask = DSL_LINEMASK_GEN2;
		line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
static const char *state_string(bool enabled)
	return enabled ? "on" : "off";

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));

#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   struct intel_pch_pll *pll,
			   struct intel_crtc *crtc,
	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");

		 "asserting PCH PLL %s with no PLL\n", state_string(state)))

	val = I915_READ(pll->pll_reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
	     pll->pll_reg, state_string(state), state_string(cur_state), val);

	/* Make sure the selected PLL is correctly attached to the transcoder */
	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);
		cur_state = pll->pll_reg == _PCH_DPLL_B;
		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
			  "PLL[%d] not attached to this transcoder %d: %08x\n",
			  cur_state, crtc->pipe, pch_dpll)) {
			cur_state = !!(val >> (4*crtc->pipe + 3));
			WARN(cur_state != state,
			     "PLL[%d] not %s on this transcoder %d: %08x\n",
			     pll->pll_reg == _PCH_DPLL_B,
			     state_string(state),

#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
	if (IS_HASWELL(dev_priv->dev)) {
		/* On Haswell, DDI is used instead of FDI_TX_CTL */
		reg = DDI_FUNC_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));

#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);

	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));

#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (IS_HASWELL(dev_priv->dev))

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
	int pp_reg, lvds_reg;

	enum pipe panel_pipe = PIPE_A;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
	if ((val & DP_PORT_EN) == 0)

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
	if ((val & PORT_ENABLE) == 0)

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
	if ((val & LVDS_PORT_EN) == 0)

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
	if ((val & ADPA_DAC_ENABLE) == 0)
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",

	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	udelay(150); /* wait for warmup */

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);

static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
		DRM_ERROR("timeout waiting for SBI to become ready\n");

	I915_WRITE(SBI_ADDR,
	I915_WRITE(SBI_DATA,
	I915_WRITE(SBI_CTL_STAT,

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");

	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);

static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
		DRM_ERROR("timeout waiting for SBI to become ready\n");

	I915_WRITE(SBI_ADDR,
	I915_WRITE(SBI_CTL_STAT,

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");

	value = I915_READ(SBI_DATA);

	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
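
/*
 * Like the DPIO helpers earlier in the file, the SBI accessors above are a
 * poll-kick-poll sideband sequence under dpio_lock: wait for SBI_CTL_STAT
 * to report non-busy, load SBI_ADDR (and SBI_DATA for writes), trigger the
 * transaction via SBI_CTL_STAT, then wait again for the busy/response-fail
 * bits to clear; reads finish by pulling the result out of SBI_DATA.
 */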
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll;

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(dev_priv->info->gen < 5);
	pll = intel_crtc->pch_pll;

	if (WARN_ON(pll->refcount == 0))

	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	if (pll->active++ && pll->on) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);

	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);

	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);

static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll = intel_crtc->pch_pll;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	if (WARN_ON(pll->refcount == 0))

	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_pch_pll_disabled(dev_priv, pll, NULL);

	if (--pll->active) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);

	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);

	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);

static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv,
			       to_intel_crtc(crtc)->pch_pll,
			       to_intel_crtc(crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);

static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);

		/* if driving the PCH, we need FDI enabled */
		assert_fdi_rx_pll_enabled(dev_priv, pipe);
		assert_fdi_tx_pll_enabled(dev_priv, pipe);
		/* FIXME: assert CPU port conditions for SNB+ */

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum pipe pipe)
	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);

/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum pipe pipe)
	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);

static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum pipe pipe, int reg, u32 port_sel)
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);

static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
			     enum pipe pipe, int reg)
	u32 val = I915_READ(reg);
	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
		I915_WRITE(reg, val & ~PORT_ENABLE);
);
1884 /* Disable any ports connected to this transcoder */
1885 static void intel_disable_pch_ports(struct drm_i915_private
*dev_priv
,
1890 val
= I915_READ(PCH_PP_CONTROL
);
1891 I915_WRITE(PCH_PP_CONTROL
, val
| PANEL_UNLOCK_REGS
);
1893 disable_pch_dp(dev_priv
, pipe
, PCH_DP_B
, TRANS_DP_PORT_SEL_B
);
1894 disable_pch_dp(dev_priv
, pipe
, PCH_DP_C
, TRANS_DP_PORT_SEL_C
);
1895 disable_pch_dp(dev_priv
, pipe
, PCH_DP_D
, TRANS_DP_PORT_SEL_D
);
1898 val
= I915_READ(reg
);
1899 if (adpa_pipe_enabled(dev_priv
, pipe
, val
))
1900 I915_WRITE(reg
, val
& ~ADPA_DAC_ENABLE
);
1903 val
= I915_READ(reg
);
1904 if (lvds_pipe_enabled(dev_priv
, pipe
, val
)) {
1905 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe
, val
);
1906 I915_WRITE(reg
, val
& ~LVDS_PORT_EN
);
1911 disable_pch_hdmi(dev_priv
, pipe
, HDMIB
);
1912 disable_pch_hdmi(dev_priv
, pipe
, HDMIC
);
1913 disable_pch_hdmi(dev_priv
, pipe
, HDMID
);
1917 intel_pin_and_fence_fb_obj(struct drm_device
*dev
,
1918 struct drm_i915_gem_object
*obj
,
1919 struct intel_ring_buffer
*pipelined
)
1921 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1925 switch (obj
->tiling_mode
) {
1926 case I915_TILING_NONE
:
1927 if (IS_BROADWATER(dev
) || IS_CRESTLINE(dev
))
1928 alignment
= 128 * 1024;
1929 else if (INTEL_INFO(dev
)->gen
>= 4)
1930 alignment
= 4 * 1024;
1932 alignment
= 64 * 1024;
1935 /* pin() will align the object as required by fence */
1939 /* FIXME: Is this true? */
1940 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1946 dev_priv
->mm
.interruptible
= false;
1947 ret
= i915_gem_object_pin_to_display_plane(obj
, alignment
, pipelined
);
1949 goto err_interruptible
;
1951 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1952 * fence, whereas 965+ only requires a fence if using
1953 * framebuffer compression. For simplicity, we always install
1954 * a fence as the cost is not that onerous.
1956 ret
= i915_gem_object_get_fence(obj
);
1960 i915_gem_object_pin_fence(obj
);
1962 dev_priv
->mm
.interruptible
= true;
1966 i915_gem_object_unpin(obj
);
1968 dev_priv
->mm
.interruptible
= true;
1972 void intel_unpin_fb_obj(struct drm_i915_gem_object
*obj
)
1974 i915_gem_object_unpin_fence(obj
);
1975 i915_gem_object_unpin(obj
);
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two. */
static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
	int tile_rows, tiles;

	tiles = *x / (512/bpp);

	return tile_rows * pitch * 8 + tiles * 4096;
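
/*
 * Worked example (assuming the usual X-tile geometry of 512 bytes x 8 rows
 * = 4096 bytes per tile): with bpp = 4 bytes a tile is 512/4 = 128 pixels
 * wide, so x = 300, y = 21 with a 4096-byte pitch gives tile_rows = 2,
 * tiles = 2 and an offset of 2 * 4096 * 8 + 2 * 4096 = 73728 bytes, with
 * x and y adjusted to the in-tile remainders (44, 5).
 */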
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;

		DRM_ERROR("Can't update plane %d in SAREA\n", plane);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
		dspcntr |= DISPPLANE_8BPP;
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	if (INTEL_INFO(dev)->gen >= 4) {
		intel_crtc->dspaddr_offset =
			gen4_compute_dspaddr_offset_xtiled(&x, &y,
							   fb->bits_per_pixel / 8,
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_MODIFY_DISPBASE(DSPSURF(plane),
				     obj->gtt_offset + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;

		DRM_ERROR("Can't update plane %d in SAREA\n", plane);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
		dspcntr |= DISPPLANE_8BPP;
		if (fb->depth != 16)

		dspcntr |= DISPPLANE_16BPP;
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
	intel_crtc->dspaddr_offset =
		gen4_compute_dspaddr_offset_xtiled(&x, &y,
						   fb->bits_per_pixel / 8,
	linear_offset -= intel_crtc->dspaddr_offset;

	DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
		      obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_MODIFY_DISPBASE(DSPSURF(plane),
			     obj->gtt_offset + intel_crtc->dspaddr_offset);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPLINOFF(plane), linear_offset);
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(crtc);

	return dev_priv->display.update_plane(crtc, fb, x, y);

static int
intel_finish_fb(struct drm_framebuffer *old_fb)
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;

	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&dev_priv->mm.wedged) ||
		   atomic_read(&obj->pending_flip) == 0);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	if(intel_crtc->plane > dev_priv->num_pipe) {
		DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
			  intel_crtc->plane,
			  dev_priv->num_pipe);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	if (old_fb)
		intel_finish_fb(old_fb);

	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
	if (ret) {
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	if (old_fb) {
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}

	return 0;
}
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;

		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags |= FDI_PHASE_SYNC_OVR(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
	flags |= FDI_PHASE_SYNC_EN(pipe);
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
	POSTING_READ(SOUTH_CHICKEN1);
}
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	/* On Haswell, the PLL configuration for ports and pipes is handled
	 * separately, as part of DDI setup */
	if (!IS_HASWELL(dev)) {
		/* Enable CPU FDI TX PLL, always on for Ironlake */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		if ((temp & FDI_TX_PLL_ENABLE) == 0)
			I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
	}
}

static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flags = I915_READ(SOUTH_CHICKEN1);

	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
	POSTING_READ(SOUTH_CHICKEN1);
}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe),
			   I915_READ(FDI_RX_CHICKEN(pipe) &
				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
	} else if (HAS_PCH_CPT(dev)) {
		cpt_phase_pointer_disable(dev, pipe);
	}

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool pending;

	if (atomic_read(&dev_priv->mm.wedged))
		return false;

	spin_lock_irqsave(&dev->event_lock, flags);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}

static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (crtc->fb == NULL)
		return;

	wait_event(dev_priv->pending_flip_queue,
		   !intel_crtc_has_pending_flip(crtc));

	mutex_lock(&dev->struct_mutex);
	intel_finish_fb(crtc->fb);
	mutex_unlock(&dev->struct_mutex);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	/*
	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
	 * must be driven by its own crtc; no sharing is possible.
	 */
	for_each_encoder_on_crtc(dev, crtc, encoder) {

		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
		 * CPU handles all others */
		if (IS_HASWELL(dev)) {
			/* It is still unclear how this will work on PPT, so throw up a warning */
			WARN_ON(!HAS_PCH_LPT(dev));

			if (encoder->type == DRM_MODE_ENCODER_DAC) {
				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
				return true;
			} else {
				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
					      encoder->type);
				return false;
			}
		}

		switch (encoder->type) {
		case INTEL_OUTPUT_EDP:
			if (!intel_encoder_is_pch_edp(&encoder->base))
				return false;
			continue;
		}
	}

	return true;
}
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6) |
				SBI_SSCCTL_DISABLE);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (crtc->mode.clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the crtc->mode.clock is in kHz. To get the divisors,
		 * it is necessary to divide one by another, so we
		 * convert the virtual clock precision to kHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      crtc->mode.clock, auxdiv, divsel, phasedir, phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;

	intel_sbi_write(dev_priv,
			SBI_SSCDIVINTPHASE6,
			temp);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv,
			SBI_SSCAUXDIV6,
			temp);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv,
			SBI_SSCCTL6,
			temp);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
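/*
 * Worked example (illustrative only, with an assumed mode clock): for a
 * hypothetical crtc->mode.clock of 108000 kHz the divisor math above gives
 *   desired_divisor   = 172800000 / 108000 = 1600
 *   msb_divisor_value = 1600 / 64 = 25,  pi_value = 1600 % 64 = 0
 * so divsel = 25 - 2 = 23, phaseinc = 0 and auxdiv = 0, which is what ends
 * up being programmed into SSCDIVINTPHASE6 and SSCAUXDIV6.
 */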
/*
 * Enable PCH resources required for PCH ports:
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_transcoder_disabled(dev_priv, pipe);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	intel_enable_pch_pll(intel_crtc);

	if (HAS_PCH_LPT(dev)) {
		DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
		lpt_program_iclkip(crtc);
	} else if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		default:
		case 0:
			temp |= TRANSA_DPLL_ENABLE;
			sel = TRANSA_DPLLB_SEL;
			break;
		case 1:
			temp |= TRANSB_DPLL_ENABLE;
			sel = TRANSB_DPLLB_SEL;
			break;
		case 2:
			temp |= TRANSC_DPLL_ENABLE;
			sel = TRANSC_DPLLB_SEL;
			break;
		}
		if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
	I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));

	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
	I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));

	if (!IS_HASWELL(dev))
		intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		}

		I915_WRITE(reg, temp);
	}

	intel_enable_transcoder(dev_priv, pipe);
}
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
{
	struct intel_pch_pll *pll = intel_crtc->pch_pll;

	if (pll == NULL)
		return;

	if (pll->refcount == 0) {
		WARN(1, "bad PCH PLL refcount\n");
		return;
	}

	--pll->refcount;
	intel_crtc->pch_pll = NULL;
}

static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll;
	int i;

	pll = intel_crtc->pch_pll;
	if (pll) {
		DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
			      intel_crtc->base.base.id, pll->pll_reg);
		goto prepare;
	}

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = intel_crtc->pipe;
		pll = &dev_priv->pch_plls[i];

		DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
			      intel_crtc->base.base.id, pll->pll_reg);

		goto found;
	}

	for (i = 0; i < dev_priv->num_pch_pll; i++) {
		pll = &dev_priv->pch_plls[i];

		/* Only want to check enabled timings first */
		if (pll->refcount == 0)
			continue;

		if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
		    fp == I915_READ(pll->fp0_reg)) {
			DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, active %d)\n",
				      intel_crtc->base.base.id,
				      pll->pll_reg, pll->refcount, pll->active);

			goto found;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	for (i = 0; i < dev_priv->num_pch_pll; i++) {
		pll = &dev_priv->pch_plls[i];
		if (pll->refcount == 0) {
			DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
				      intel_crtc->base.base.id, pll->pll_reg);
			goto found;
		}
	}

	return NULL;

found:
	intel_crtc->pch_pll = pll;
	pll->refcount++;
	DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
prepare: /* separate function? */
	DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);

	/* Wait for the clocks to stabilize before rewriting the regs */
	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(pll->pll_reg);

	I915_WRITE(pll->fp0_reg, fp);
	I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
	return pll;
}
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* Without this, mode sets may fail silently on FDI */
		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
		I915_WRITE(tc2reg, 0);
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
	}
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	if (is_pch_port)
		ironlake_pch_enable(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_crtc_update_cursor(crtc, true);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	intel_disable_pch_pll(intel_crtc);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	udelay(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	mutex_lock(&dev->struct_mutex);
	intel_update_fbc(dev);
	mutex_unlock(&dev->struct_mutex);
}
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
		ironlake_crtc_enable(crtc);
		break;

	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
		ironlake_crtc_disable(crtc);
		break;
	}
}

static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_pch_pll(intel_crtc);
}
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
	if (!enable && intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}

static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */
	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		i9xx_crtc_enable(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		i9xx_crtc_disable(crtc);
		break;
	}
}

static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}
/**
 * Sets the power management mode of the pipe and plane.
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool enabled;

	if (intel_crtc->dpms_mode == mode)
		return;

	intel_crtc->dpms_mode = mode;

	dev_priv->display.dpms(crtc, mode);

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return;

	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;

	switch (pipe) {
	case 0:
		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	case 1:
		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
		break;
	default:
		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
		break;
	}
}
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	dev_priv->display.off(crtc);

	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	if (crtc->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		mutex_unlock(&dev->struct_mutex);
	}
}
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will change)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}

static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}

static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}

static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}

void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;

	if (HAS_PCH_SPLIT(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
			return false;
	}

	/* All interlaced capable intel hw wants timings in frames. Note though
	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
	 * timings, so we need to be careful not to clobber these. */
	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}
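/*
 * Illustrative check of the FDI bandwidth test above (assumed mode, not from
 * the original source): with IRONLAKE_FDI_FREQ = 2700000 a hypothetical
 * 148500 kHz mode gives 148500 * 3 = 445500, well under 2700000 * 4 =
 * 10800000, so mode_fixup accepts it; only a mode whose clock exceeds
 * IRONLAKE_FDI_FREQ * 4 / 3 would be rejected here.
 */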
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* FIXME */
}

static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_333_MHZ:
		return 333000;
	default:
	case GC_DISPLAY_CLOCK_190_200_MHZ:
		return 190000;
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
		     int link_clock, struct fdi_m_n *m_n)
{
	m_n->tu = 64; /* default size */

	/* BUG_ON(pixel_clock > INT_MAX / 36); */
	m_n->gmch_m = bits_per_pixel * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915_panel_use_ssc >= 0)
		return i915_panel_use_ssc != 0;
	return dev_priv->lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
/**
 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
 * @crtc: CRTC structure
 * @mode: requested mode
 *
 * A pipe may be connected to one or more outputs.  Based on the depth of the
 * attached framebuffer, choose a good color depth to use on the pipe.
 *
 * If possible, match the pipe depth to the fb depth.  In some cases, this
 * isn't ideal, because the connected output supports a lesser or restricted
 * set of depths.  Resolve that here:
 *   LVDS typically supports only 6bpc, so clamp down in that case
 *   HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
 *   Displays may support a restricted set as well, check EDID and clamp as
 *   appropriate.
 *   DP may want to dither down to 6bpc to fit larger modes
 *
 * RETURNS:
 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
 * true if they don't match).
 */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_encoder *intel_encoder;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != &intel_encoder->base)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
		      display_bpc, bpc);

	*pipe_bpp = display_bpc * 3;

	return display_bpc != bpc;
}
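/*
 * Example of the clamping above (assumed values, for illustration only): a
 * 24-bit framebuffer starts at bpc = 8; if a connector's EDID reports only
 * 6 bpc, display_bpc is clamped to 6, *pipe_bpp becomes 18 and the function
 * returns true, i.e. dithering is required because the display bpc no longer
 * matches the framebuffer's bpc.
 */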
static int vlv_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk = 27000; /* for DP & HDMI */

	return 100000; /* only one validated so far */

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		refclk = 96000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv))
			refclk = 100000;
		else
			refclk = 96000;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
		refclk = 100000;
	}

	return refclk;
}

static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (IS_VALLEYVIEW(dev)) {
		refclk = vlv_get_refclk(crtc);
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		   intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->lvds_ssc_freq * 1000;
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      refclk / 1000);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}
static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
				      intel_clock_t *clock)
{
	/* SDVO TV has fixed PLL values depend on its clock range,
	   this mirrors vbios setting. */
	if (adjusted_mode->clock >= 100000
	    && adjusted_mode->clock < 140500) {
		clock->p1 = 2;
		clock->p2 = 10;
		clock->n = 3;
		clock->m1 = 16;
		clock->m2 = 8;
	} else if (adjusted_mode->clock >= 140500
		   && adjusted_mode->clock <= 200000) {
		clock->p1 = 1;
		clock->p2 = 10;
		clock->n = 6;
		clock->m1 = 12;
		clock->m2 = 8;
	}
}
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
			      struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	temp = I915_READ(LVDS);
	temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
	if (pipe == 1)
		temp |= LVDS_PIPEB_SELECT;
	else
		temp &= ~LVDS_PIPEB_SELECT;

	/* set the corresponding LVDS_BORDER bit */
	temp |= dev_priv->lvds_border_bits;
	/* Set the B0-B3 data pairs corresponding to whether we're going to
	 * set the DPLLs for dual-channel mode or not.
	 */
	if (clock->p2 == 7)
		temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
	else
		temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

	/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
	 * appropriately here, but we need to look more thoroughly into how
	 * panels behave in the two modes.
	 */
	/* set the dithering flag on LVDS as needed */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (dev_priv->lvds_dither)
			temp |= LVDS_ENABLE_DITHER;
		else
			temp &= ~LVDS_ENABLE_DITHER;
	}
	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		temp |= LVDS_HSYNC_POLARITY;
	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		temp |= LVDS_VSYNC_POLARITY;
	I915_WRITE(LVDS, temp);
}
static void vlv_update_pll(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode,
			   intel_clock_t *clock, intel_clock_t *reduced_clock,
			   int refclk, int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll, mdiv, pdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	bool is_hdmi;

	is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

	bestn = clock->n;
	bestm1 = clock->m1;
	bestm2 = clock->m2;
	bestp1 = clock->p1;
	bestp2 = clock->p2;

	/* Enable DPIO clock input */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	I915_WRITE(DPLL(pipe), dpll);
	POSTING_READ(DPLL(pipe));

	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
	mdiv |= (1 << DPIO_K_SHIFT);
	mdiv |= DPIO_ENABLE_CALIBRATION;
	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);

	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);

	pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
		(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);

	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);

	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll);
	POSTING_READ(DPLL(pipe));
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);

	if (is_hdmi) {
		u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);

		if (temp > 1)
			temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
		else
			temp = 0;

		I915_WRITE(DPLL_MD(pipe), temp);
		POSTING_READ(DPLL_MD(pipe));
	}

	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
}
static void i9xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock, intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll;
	bool is_sdvo;

	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;
	if (is_sdvo) {
		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
		if (pixel_multiplier > 1) {
			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
				dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
		}
		dpll |= DPLL_DVO_HIGH_SPEED;
	}
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		intel_update_lvds(crtc, clock, adjusted_mode);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
		intel_dp_set_m_n(crtc, mode, adjusted_mode);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}
}
static void i8xx_update_pll(struct drm_crtc *crtc,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll;

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		intel_update_lvds(crtc, clock, adjusted_mode);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);
}
4236 static int i9xx_crtc_mode_set(struct drm_crtc
*crtc
,
4237 struct drm_display_mode
*mode
,
4238 struct drm_display_mode
*adjusted_mode
,
4240 struct drm_framebuffer
*old_fb
)
4242 struct drm_device
*dev
= crtc
->dev
;
4243 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4244 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
4245 int pipe
= intel_crtc
->pipe
;
4246 int plane
= intel_crtc
->plane
;
4247 int refclk
, num_connectors
= 0;
4248 intel_clock_t clock
, reduced_clock
;
4249 u32 dspcntr
, pipeconf
, vsyncshift
;
4250 bool ok
, has_reduced_clock
= false, is_sdvo
= false;
4251 bool is_lvds
= false, is_tv
= false, is_dp
= false;
4252 struct intel_encoder
*encoder
;
4253 const intel_limit_t
*limit
;
4256 for_each_encoder_on_crtc(dev
, crtc
, encoder
) {
4257 switch (encoder
->type
) {
4258 case INTEL_OUTPUT_LVDS
:
4261 case INTEL_OUTPUT_SDVO
:
4262 case INTEL_OUTPUT_HDMI
:
4264 if (encoder
->needs_tv_clock
)
4267 case INTEL_OUTPUT_TVOUT
:
4270 case INTEL_OUTPUT_DISPLAYPORT
:
4278 refclk
= i9xx_get_refclk(crtc
, num_connectors
);
4281 * Returns a set of divisors for the desired target clock with the given
4282 * refclk, or FALSE. The returned values represent the clock equation:
4283 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4285 limit
= intel_limit(crtc
, refclk
);
4286 ok
= limit
->find_pll(limit
, crtc
, adjusted_mode
->clock
, refclk
, NULL
,
4289 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4293 /* Ensure that the cursor is valid for the new mode before changing... */
4294 intel_crtc_update_cursor(crtc
, true);
4296 if (is_lvds
&& dev_priv
->lvds_downclock_avail
) {
4298 * Ensure we match the reduced clock's P to the target clock.
4299 * If the clocks don't match, we can't switch the display clock
4300 * by using the FP0/FP1. In such case we will disable the LVDS
4301 * downclock feature.
4303 has_reduced_clock
= limit
->find_pll(limit
, crtc
,
4304 dev_priv
->lvds_downclock
,
4310 if (is_sdvo
&& is_tv
)
4311 i9xx_adjust_sdvo_tv_clock(adjusted_mode
, &clock
);
4313 i9xx_update_pll_dividers(crtc
, &clock
, has_reduced_clock
?
4314 &reduced_clock
: NULL
);
4317 i8xx_update_pll(crtc
, adjusted_mode
, &clock
, num_connectors
);
4318 else if (IS_VALLEYVIEW(dev
))
4319 vlv_update_pll(crtc
, mode
,adjusted_mode
, &clock
, NULL
,
4320 refclk
, num_connectors
);
4322 i9xx_update_pll(crtc
, mode
, adjusted_mode
, &clock
,
4323 has_reduced_clock
? &reduced_clock
: NULL
,
4326 /* setup pipeconf */
4327 pipeconf
= I915_READ(PIPECONF(pipe
));
4329 /* Set up the display plane register */
4330 dspcntr
= DISPPLANE_GAMMA_ENABLE
;
4333 dspcntr
&= ~DISPPLANE_SEL_PIPE_MASK
;
4335 dspcntr
|= DISPPLANE_SEL_PIPE_B
;
4337 if (pipe
== 0 && INTEL_INFO(dev
)->gen
< 4) {
4338 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4341 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4345 dev_priv
->display
.get_display_clock_speed(dev
) * 9 / 10)
4346 pipeconf
|= PIPECONF_DOUBLE_WIDE
;
4348 pipeconf
&= ~PIPECONF_DOUBLE_WIDE
;
4351 /* default to 8bpc */
4352 pipeconf
&= ~(PIPECONF_BPP_MASK
| PIPECONF_DITHER_EN
);
4354 if (adjusted_mode
->private_flags
& INTEL_MODE_DP_FORCE_6BPC
) {
4355 pipeconf
|= PIPECONF_BPP_6
|
4356 PIPECONF_DITHER_EN
|
4357 PIPECONF_DITHER_TYPE_SP
;
4361 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe
== 0 ? 'A' : 'B');
4362 drm_mode_debug_printmodeline(mode
);
4364 if (HAS_PIPE_CXSR(dev
)) {
4365 if (intel_crtc
->lowfreq_avail
) {
4366 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4367 pipeconf
|= PIPECONF_CXSR_DOWNCLOCK
;
4369 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4370 pipeconf
&= ~PIPECONF_CXSR_DOWNCLOCK
;
4374 pipeconf
&= ~PIPECONF_INTERLACE_MASK
;
4375 if (!IS_GEN2(dev
) &&
4376 adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
4377 pipeconf
|= PIPECONF_INTERLACE_W_FIELD_INDICATION
;
4378 /* the chip adds 2 halflines automatically */
4379 adjusted_mode
->crtc_vtotal
-= 1;
4380 adjusted_mode
->crtc_vblank_end
-= 1;
4381 vsyncshift
= adjusted_mode
->crtc_hsync_start
4382 - adjusted_mode
->crtc_htotal
/2;
4384 pipeconf
|= PIPECONF_PROGRESSIVE
;
4389 I915_WRITE(VSYNCSHIFT(pipe
), vsyncshift
);
4391 I915_WRITE(HTOTAL(pipe
),
4392 (adjusted_mode
->crtc_hdisplay
- 1) |
4393 ((adjusted_mode
->crtc_htotal
- 1) << 16));
4394 I915_WRITE(HBLANK(pipe
),
4395 (adjusted_mode
->crtc_hblank_start
- 1) |
4396 ((adjusted_mode
->crtc_hblank_end
- 1) << 16));
4397 I915_WRITE(HSYNC(pipe
),
4398 (adjusted_mode
->crtc_hsync_start
- 1) |
4399 ((adjusted_mode
->crtc_hsync_end
- 1) << 16));
4401 I915_WRITE(VTOTAL(pipe
),
4402 (adjusted_mode
->crtc_vdisplay
- 1) |
4403 ((adjusted_mode
->crtc_vtotal
- 1) << 16));
4404 I915_WRITE(VBLANK(pipe
),
4405 (adjusted_mode
->crtc_vblank_start
- 1) |
4406 ((adjusted_mode
->crtc_vblank_end
- 1) << 16));
4407 I915_WRITE(VSYNC(pipe
),
4408 (adjusted_mode
->crtc_vsync_start
- 1) |
4409 ((adjusted_mode
->crtc_vsync_end
- 1) << 16));
4411 /* pipesrc and dspsize control the size that is scaled from,
4412 * which should always be the user's requested size.
4414 I915_WRITE(DSPSIZE(plane
),
4415 ((mode
->vdisplay
- 1) << 16) |
4416 (mode
->hdisplay
- 1));
4417 I915_WRITE(DSPPOS(plane
), 0);
4418 I915_WRITE(PIPESRC(pipe
),
4419 ((mode
->hdisplay
- 1) << 16) | (mode
->vdisplay
- 1));
4421 I915_WRITE(PIPECONF(pipe
), pipeconf
);
4422 POSTING_READ(PIPECONF(pipe
));
4423 intel_enable_pipe(dev_priv
, pipe
, false);
4425 intel_wait_for_vblank(dev
, pipe
);
4427 I915_WRITE(DSPCNTR(plane
), dspcntr
);
4428 POSTING_READ(DSPCNTR(plane
));
4430 ret
= intel_pipe_set_base(crtc
, x
, y
, old_fb
);
4432 intel_update_watermarks(dev
);
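/*
 * Hedged sketch, not part of the original driver: the divisor comment in
 * i9xx_crtc_mode_set() above maps onto a pixel clock like this.
 * intel_clock_t is the structure already used throughout this file; the
 * helper name is hypothetical and all frequencies are in kHz.
 */
static inline int i9xx_dot_clock_from_divisors(int refclk,
					       const intel_clock_t *clock)
{
	int m = 5 * (clock->m1 + 2) + (clock->m2 + 2);	/* effective M divisor */
	int vco = refclk * m / (clock->n + 2);		/* VCO frequency */

	return vco / (clock->p1 * clock->p2);		/* dot (pixel) clock */
}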
4438 * Initialize reference clocks when the driver loads
4440 void ironlake_init_pch_refclk(struct drm_device
*dev
)
4442 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4443 struct drm_mode_config
*mode_config
= &dev
->mode_config
;
4444 struct intel_encoder
*encoder
;
4446 bool has_lvds
= false;
4447 bool has_cpu_edp
= false;
4448 bool has_pch_edp
= false;
4449 bool has_panel
= false;
4450 bool has_ck505
= false;
4451 bool can_ssc
= false;
4453 /* We need to take the global config into account */
4454 list_for_each_entry(encoder
, &mode_config
->encoder_list
,
4456 switch (encoder
->type
) {
4457 case INTEL_OUTPUT_LVDS
:
4461 case INTEL_OUTPUT_EDP
:
4463 if (intel_encoder_is_pch_edp(&encoder
->base
))
4471 if (HAS_PCH_IBX(dev
)) {
4472 has_ck505
= dev_priv
->display_clock_mode
;
4473 can_ssc
= has_ck505
;
4479 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4480 has_panel
, has_lvds
, has_pch_edp
, has_cpu_edp
,
4483 /* Ironlake: try to setup display ref clock before DPLL
4484 * enabling. This is only under driver's control after
4485 * PCH B stepping, previous chipset stepping should be
4486 * ignoring this setting.
4488 temp
= I915_READ(PCH_DREF_CONTROL
);
4489 /* Always enable nonspread source */
4490 temp
&= ~DREF_NONSPREAD_SOURCE_MASK
;
4493 temp
|= DREF_NONSPREAD_CK505_ENABLE
;
4495 temp
|= DREF_NONSPREAD_SOURCE_ENABLE
;
4498 temp
&= ~DREF_SSC_SOURCE_MASK
;
4499 temp
|= DREF_SSC_SOURCE_ENABLE
;
4501 /* SSC must be turned on before enabling the CPU output */
4502 if (intel_panel_use_ssc(dev_priv
) && can_ssc
) {
4503 DRM_DEBUG_KMS("Using SSC on panel\n");
4504 temp
|= DREF_SSC1_ENABLE
;
4506 temp
&= ~DREF_SSC1_ENABLE
;
4508 /* Get SSC going before enabling the outputs */
4509 I915_WRITE(PCH_DREF_CONTROL
, temp
);
4510 POSTING_READ(PCH_DREF_CONTROL
);
4513 temp
&= ~DREF_CPU_SOURCE_OUTPUT_MASK
;
4515 /* Enable CPU source on CPU attached eDP */
4517 if (intel_panel_use_ssc(dev_priv
) && can_ssc
) {
4518 DRM_DEBUG_KMS("Using SSC on eDP\n");
4519 temp
|= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD
;
4522 temp
|= DREF_CPU_SOURCE_OUTPUT_NONSPREAD
;
4524 temp
|= DREF_CPU_SOURCE_OUTPUT_DISABLE
;
4526 I915_WRITE(PCH_DREF_CONTROL
, temp
);
4527 POSTING_READ(PCH_DREF_CONTROL
);
4530 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4532 temp
&= ~DREF_CPU_SOURCE_OUTPUT_MASK
;
4534 /* Turn off CPU output */
4535 temp
|= DREF_CPU_SOURCE_OUTPUT_DISABLE
;
4537 I915_WRITE(PCH_DREF_CONTROL
, temp
);
4538 POSTING_READ(PCH_DREF_CONTROL
);
4541 /* Turn off the SSC source */
4542 temp
&= ~DREF_SSC_SOURCE_MASK
;
4543 temp
|= DREF_SSC_SOURCE_DISABLE
;
4546 temp
&= ~ DREF_SSC1_ENABLE
;
4548 I915_WRITE(PCH_DREF_CONTROL
, temp
);
4549 POSTING_READ(PCH_DREF_CONTROL
);
4554 static int ironlake_get_refclk(struct drm_crtc
*crtc
)
4556 struct drm_device
*dev
= crtc
->dev
;
4557 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4558 struct intel_encoder
*encoder
;
4559 struct intel_encoder
*edp_encoder
= NULL
;
4560 int num_connectors
= 0;
4561 bool is_lvds
= false;
4563 for_each_encoder_on_crtc(dev
, crtc
, encoder
) {
4564 switch (encoder
->type
) {
4565 case INTEL_OUTPUT_LVDS
:
4568 case INTEL_OUTPUT_EDP
:
4569 edp_encoder
= encoder
;
4575 if (is_lvds
&& intel_panel_use_ssc(dev_priv
) && num_connectors
< 2) {
4576 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4577 dev_priv
->lvds_ssc_freq
);
4578 return dev_priv
->lvds_ssc_freq
* 1000;
4584 static int ironlake_crtc_mode_set(struct drm_crtc
*crtc
,
4585 struct drm_display_mode
*mode
,
4586 struct drm_display_mode
*adjusted_mode
,
4588 struct drm_framebuffer
*old_fb
)
4590 struct drm_device
*dev
= crtc
->dev
;
4591 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4592 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
4593 int pipe
= intel_crtc
->pipe
;
4594 int plane
= intel_crtc
->plane
;
4595 int refclk
, num_connectors
= 0;
4596 intel_clock_t clock
, reduced_clock
;
4597 u32 dpll
, fp
= 0, fp2
= 0, dspcntr
, pipeconf
;
4598 bool ok
, has_reduced_clock
= false, is_sdvo
= false;
4599 bool is_crt
= false, is_lvds
= false, is_tv
= false, is_dp
= false;
4600 struct intel_encoder
*encoder
, *edp_encoder
= NULL
;
4601 const intel_limit_t
*limit
;
4603 struct fdi_m_n m_n
= {0};
4605 int target_clock
, pixel_multiplier
, lane
, link_bw
, factor
;
4606 unsigned int pipe_bpp
;
4608 bool is_cpu_edp
= false, is_pch_edp
= false;
4610 for_each_encoder_on_crtc(dev
, crtc
, encoder
) {
4611 switch (encoder
->type
) {
4612 case INTEL_OUTPUT_LVDS
:
4615 case INTEL_OUTPUT_SDVO
:
4616 case INTEL_OUTPUT_HDMI
:
4618 if (encoder
->needs_tv_clock
)
4621 case INTEL_OUTPUT_TVOUT
:
4624 case INTEL_OUTPUT_ANALOG
:
4627 case INTEL_OUTPUT_DISPLAYPORT
:
4630 case INTEL_OUTPUT_EDP
:
4632 if (intel_encoder_is_pch_edp(&encoder
->base
))
4636 edp_encoder
= encoder
;
4643 refclk
= ironlake_get_refclk(crtc
);
	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
4650 limit
= intel_limit(crtc
, refclk
);
4651 ok
= limit
->find_pll(limit
, crtc
, adjusted_mode
->clock
, refclk
, NULL
,
4654 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4658 /* Ensure that the cursor is valid for the new mode before changing... */
4659 intel_crtc_update_cursor(crtc
, true);
4661 if (is_lvds
&& dev_priv
->lvds_downclock_avail
) {
4663 * Ensure we match the reduced clock's P to the target clock.
4664 * If the clocks don't match, we can't switch the display clock
4665 * by using the FP0/FP1. In such case we will disable the LVDS
4666 * downclock feature.
4668 has_reduced_clock
= limit
->find_pll(limit
, crtc
,
4669 dev_priv
->lvds_downclock
,
4675 if (is_sdvo
&& is_tv
)
4676 i9xx_adjust_sdvo_tv_clock(adjusted_mode
, &clock
);
4680 pixel_multiplier
= intel_mode_get_pixel_multiplier(adjusted_mode
);
4682 /* CPU eDP doesn't require FDI link, so just set DP M/N
4683 according to current link config */
4685 intel_edp_link_config(edp_encoder
, &lane
, &link_bw
);
4687 /* FDI is a binary signal running at ~2.7GHz, encoding
4688 * each output octet as 10 bits. The actual frequency
4689 * is stored as a divider into a 100MHz clock, and the
4690 * mode pixel clock is stored in units of 1KHz.
4691 * Hence the bw of each lane in terms of the mode signal
4694 link_bw
= intel_fdi_link_freq(dev
) * MHz(100)/KHz(1)/10;
4697 /* [e]DP over FDI requires target mode clock instead of link clock. */
4699 target_clock
= intel_edp_target_clock(edp_encoder
, mode
);
4701 target_clock
= mode
->clock
;
4703 target_clock
= adjusted_mode
->clock
;
4705 /* determine panel color depth */
4706 temp
= I915_READ(PIPECONF(pipe
));
4707 temp
&= ~PIPE_BPC_MASK
;
4708 dither
= intel_choose_pipe_bpp_dither(crtc
, &pipe_bpp
, adjusted_mode
);
4723 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4730 intel_crtc
->bpp
= pipe_bpp
;
4731 I915_WRITE(PIPECONF(pipe
), temp
);
4735 * Account for spread spectrum to avoid
4736 * oversubscribing the link. Max center spread
4737 * is 2.5%; use 5% for safety's sake.
4739 u32 bps
= target_clock
* intel_crtc
->bpp
* 21 / 20;
4740 lane
= bps
/ (link_bw
* 8) + 1;
4743 intel_crtc
->fdi_lanes
= lane
;
4745 if (pixel_multiplier
> 1)
4746 link_bw
*= pixel_multiplier
;
4747 ironlake_compute_m_n(intel_crtc
->bpp
, lane
, target_clock
, link_bw
,
4750 fp
= clock
.n
<< 16 | clock
.m1
<< 8 | clock
.m2
;
4751 if (has_reduced_clock
)
4752 fp2
= reduced_clock
.n
<< 16 | reduced_clock
.m1
<< 8 |
4755 /* Enable autotuning of the PLL clock (if permissible) */
4758 if ((intel_panel_use_ssc(dev_priv
) &&
4759 dev_priv
->lvds_ssc_freq
== 100) ||
4760 (I915_READ(PCH_LVDS
) & LVDS_CLKB_POWER_MASK
) == LVDS_CLKB_POWER_UP
)
4762 } else if (is_sdvo
&& is_tv
)
4765 if (clock
.m
< factor
* clock
.n
)
4771 dpll
|= DPLLB_MODE_LVDS
;
4773 dpll
|= DPLLB_MODE_DAC_SERIAL
;
4775 int pixel_multiplier
= intel_mode_get_pixel_multiplier(adjusted_mode
);
4776 if (pixel_multiplier
> 1) {
4777 dpll
|= (pixel_multiplier
- 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT
;
4779 dpll
|= DPLL_DVO_HIGH_SPEED
;
4781 if (is_dp
&& !is_cpu_edp
)
4782 dpll
|= DPLL_DVO_HIGH_SPEED
;
4784 /* compute bitmask from p1 value */
4785 dpll
|= (1 << (clock
.p1
- 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT
;
4787 dpll
|= (1 << (clock
.p1
- 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT
;
4791 dpll
|= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5
;
4794 dpll
|= DPLLB_LVDS_P2_CLOCK_DIV_7
;
4797 dpll
|= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10
;
4800 dpll
|= DPLLB_LVDS_P2_CLOCK_DIV_14
;
4804 if (is_sdvo
&& is_tv
)
4805 dpll
|= PLL_REF_INPUT_TVCLKINBC
;
4807 /* XXX: just matching BIOS for now */
4808 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4810 else if (is_lvds
&& intel_panel_use_ssc(dev_priv
) && num_connectors
< 2)
4811 dpll
|= PLLB_REF_INPUT_SPREADSPECTRUMIN
;
4813 dpll
|= PLL_REF_INPUT_DREFCLK
;
4815 /* setup pipeconf */
4816 pipeconf
= I915_READ(PIPECONF(pipe
));
4818 /* Set up the display plane register */
4819 dspcntr
= DISPPLANE_GAMMA_ENABLE
;
4821 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe
);
4822 drm_mode_debug_printmodeline(mode
);
4824 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4825 * pre-Haswell/LPT generation */
4826 if (HAS_PCH_LPT(dev
)) {
4827 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4829 } else if (!is_cpu_edp
) {
4830 struct intel_pch_pll
*pll
;
4832 pll
= intel_get_pch_pll(intel_crtc
, dpll
, fp
);
4834 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4839 intel_put_pch_pll(intel_crtc
);
4841 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4842 * This is an exception to the general rule that mode_set doesn't turn
4846 temp
= I915_READ(PCH_LVDS
);
4847 temp
|= LVDS_PORT_EN
| LVDS_A0A2_CLKA_POWER_UP
;
4848 if (HAS_PCH_CPT(dev
)) {
4849 temp
&= ~PORT_TRANS_SEL_MASK
;
4850 temp
|= PORT_TRANS_SEL_CPT(pipe
);
4853 temp
|= LVDS_PIPEB_SELECT
;
4855 temp
&= ~LVDS_PIPEB_SELECT
;
		/* set the corresponding LVDS_BORDER bit */
4859 temp
|= dev_priv
->lvds_border_bits
;
4860 /* Set the B0-B3 data pairs corresponding to whether we're going to
4861 * set the DPLLs for dual-channel mode or not.
4864 temp
|= LVDS_B0B3_POWER_UP
| LVDS_CLKB_POWER_UP
;
4866 temp
&= ~(LVDS_B0B3_POWER_UP
| LVDS_CLKB_POWER_UP
);
4868 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4869 * appropriately here, but we need to look more thoroughly into how
4870 * panels behave in the two modes.
4872 temp
&= ~(LVDS_HSYNC_POLARITY
| LVDS_VSYNC_POLARITY
);
4873 if (adjusted_mode
->flags
& DRM_MODE_FLAG_NHSYNC
)
4874 temp
|= LVDS_HSYNC_POLARITY
;
4875 if (adjusted_mode
->flags
& DRM_MODE_FLAG_NVSYNC
)
4876 temp
|= LVDS_VSYNC_POLARITY
;
4877 I915_WRITE(PCH_LVDS
, temp
);
4880 pipeconf
&= ~PIPECONF_DITHER_EN
;
4881 pipeconf
&= ~PIPECONF_DITHER_TYPE_MASK
;
4882 if ((is_lvds
&& dev_priv
->lvds_dither
) || dither
) {
4883 pipeconf
|= PIPECONF_DITHER_EN
;
4884 pipeconf
|= PIPECONF_DITHER_TYPE_SP
;
4886 if (is_dp
&& !is_cpu_edp
) {
4887 intel_dp_set_m_n(crtc
, mode
, adjusted_mode
);
4889 /* For non-DP output, clear any trans DP clock recovery setting.*/
4890 I915_WRITE(TRANSDATA_M1(pipe
), 0);
4891 I915_WRITE(TRANSDATA_N1(pipe
), 0);
4892 I915_WRITE(TRANSDPLINK_M1(pipe
), 0);
4893 I915_WRITE(TRANSDPLINK_N1(pipe
), 0);
4896 if (intel_crtc
->pch_pll
) {
4897 I915_WRITE(intel_crtc
->pch_pll
->pll_reg
, dpll
);
4899 /* Wait for the clocks to stabilize. */
4900 POSTING_READ(intel_crtc
->pch_pll
->pll_reg
);
4903 /* The pixel multiplier can only be updated once the
4904 * DPLL is enabled and the clocks are stable.
4906 * So write it again.
4908 I915_WRITE(intel_crtc
->pch_pll
->pll_reg
, dpll
);
4911 intel_crtc
->lowfreq_avail
= false;
4912 if (intel_crtc
->pch_pll
) {
4913 if (is_lvds
&& has_reduced_clock
&& i915_powersave
) {
4914 I915_WRITE(intel_crtc
->pch_pll
->fp1_reg
, fp2
);
4915 intel_crtc
->lowfreq_avail
= true;
4917 I915_WRITE(intel_crtc
->pch_pll
->fp1_reg
, fp
);
4921 pipeconf
&= ~PIPECONF_INTERLACE_MASK
;
4922 if (adjusted_mode
->flags
& DRM_MODE_FLAG_INTERLACE
) {
4923 pipeconf
|= PIPECONF_INTERLACED_ILK
;
4924 /* the chip adds 2 halflines automatically */
4925 adjusted_mode
->crtc_vtotal
-= 1;
4926 adjusted_mode
->crtc_vblank_end
-= 1;
4927 I915_WRITE(VSYNCSHIFT(pipe
),
4928 adjusted_mode
->crtc_hsync_start
4929 - adjusted_mode
->crtc_htotal
/2);
4931 pipeconf
|= PIPECONF_PROGRESSIVE
;
4932 I915_WRITE(VSYNCSHIFT(pipe
), 0);
4935 I915_WRITE(HTOTAL(pipe
),
4936 (adjusted_mode
->crtc_hdisplay
- 1) |
4937 ((adjusted_mode
->crtc_htotal
- 1) << 16));
4938 I915_WRITE(HBLANK(pipe
),
4939 (adjusted_mode
->crtc_hblank_start
- 1) |
4940 ((adjusted_mode
->crtc_hblank_end
- 1) << 16));
4941 I915_WRITE(HSYNC(pipe
),
4942 (adjusted_mode
->crtc_hsync_start
- 1) |
4943 ((adjusted_mode
->crtc_hsync_end
- 1) << 16));
4945 I915_WRITE(VTOTAL(pipe
),
4946 (adjusted_mode
->crtc_vdisplay
- 1) |
4947 ((adjusted_mode
->crtc_vtotal
- 1) << 16));
4948 I915_WRITE(VBLANK(pipe
),
4949 (adjusted_mode
->crtc_vblank_start
- 1) |
4950 ((adjusted_mode
->crtc_vblank_end
- 1) << 16));
4951 I915_WRITE(VSYNC(pipe
),
4952 (adjusted_mode
->crtc_vsync_start
- 1) |
4953 ((adjusted_mode
->crtc_vsync_end
- 1) << 16));
4955 /* pipesrc controls the size that is scaled from, which should
4956 * always be the user's requested size.
4958 I915_WRITE(PIPESRC(pipe
),
4959 ((mode
->hdisplay
- 1) << 16) | (mode
->vdisplay
- 1));
4961 I915_WRITE(PIPE_DATA_M1(pipe
), TU_SIZE(m_n
.tu
) | m_n
.gmch_m
);
4962 I915_WRITE(PIPE_DATA_N1(pipe
), m_n
.gmch_n
);
4963 I915_WRITE(PIPE_LINK_M1(pipe
), m_n
.link_m
);
4964 I915_WRITE(PIPE_LINK_N1(pipe
), m_n
.link_n
);
4967 ironlake_set_pll_edp(crtc
, adjusted_mode
->clock
);
4969 I915_WRITE(PIPECONF(pipe
), pipeconf
);
4970 POSTING_READ(PIPECONF(pipe
));
4972 intel_wait_for_vblank(dev
, pipe
);
4974 I915_WRITE(DSPCNTR(plane
), dspcntr
);
4975 POSTING_READ(DSPCNTR(plane
));
4977 ret
= intel_pipe_set_base(crtc
, x
, y
, old_fb
);
4979 intel_update_watermarks(dev
);
4981 intel_update_linetime_watermarks(dev
, pipe
, adjusted_mode
);
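/*
 * Hedged sketch, not part of the original driver: how the FDI lane count
 * used by ironlake_crtc_mode_set() follows from the numbers above.
 * link_bw is the per-lane FDI payload bandwidth in kHz (the ~2.7GHz link
 * carries 8b/10b encoded data, hence the divide-by-ten when it is derived
 * from intel_fdi_link_freq()), target_clock is the mode clock in kHz and
 * bpp is the pipe's bits per pixel.  The helper name is hypothetical.
 */
static inline int ironlake_fdi_lanes_for_mode(int target_clock, int bpp,
					      int link_bw)
{
	/* 5% headroom: max centre spread is 2.5%, doubled for safety */
	u32 bps = target_clock * bpp * 21 / 20;

	/* each lane carries link_bw * 8 kbit/s of pixel payload */
	return bps / (link_bw * 8) + 1;
}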
4986 static int intel_crtc_mode_set(struct drm_crtc
*crtc
,
4987 struct drm_display_mode
*mode
,
4988 struct drm_display_mode
*adjusted_mode
,
4990 struct drm_framebuffer
*old_fb
)
4992 struct drm_device
*dev
= crtc
->dev
;
4993 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4994 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
4995 int pipe
= intel_crtc
->pipe
;
4998 drm_vblank_pre_modeset(dev
, pipe
);
5000 ret
= dev_priv
->display
.crtc_mode_set(crtc
, mode
, adjusted_mode
,
5002 drm_vblank_post_modeset(dev
, pipe
);
5005 intel_crtc
->dpms_mode
= DRM_MODE_DPMS_OFF
;
5007 intel_crtc
->dpms_mode
= DRM_MODE_DPMS_ON
;
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	if (!i)
		return false;

	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
5041 static void g4x_write_eld(struct drm_connector
*connector
,
5042 struct drm_crtc
*crtc
)
5044 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
5045 uint8_t *eld
= connector
->eld
;
5050 i
= I915_READ(G4X_AUD_VID_DID
);
5052 if (i
== INTEL_AUDIO_DEVBLC
|| i
== INTEL_AUDIO_DEVCL
)
5053 eldv
= G4X_ELDV_DEVCL_DEVBLC
;
5055 eldv
= G4X_ELDV_DEVCTG
;
5057 if (intel_eld_uptodate(connector
,
5058 G4X_AUD_CNTL_ST
, eldv
,
5059 G4X_AUD_CNTL_ST
, G4X_ELD_ADDR
,
5060 G4X_HDMIW_HDMIEDID
))
5063 i
= I915_READ(G4X_AUD_CNTL_ST
);
5064 i
&= ~(eldv
| G4X_ELD_ADDR
);
5065 len
= (i
>> 9) & 0x1f; /* ELD buffer size */
5066 I915_WRITE(G4X_AUD_CNTL_ST
, i
);
5071 len
= min_t(uint8_t, eld
[2], len
);
5072 DRM_DEBUG_DRIVER("ELD size %d\n", len
);
5073 for (i
= 0; i
< len
; i
++)
5074 I915_WRITE(G4X_HDMIW_HDMIEDID
, *((uint32_t *)eld
+ i
));
5076 i
= I915_READ(G4X_AUD_CNTL_ST
);
5078 I915_WRITE(G4X_AUD_CNTL_ST
, i
);
5081 static void ironlake_write_eld(struct drm_connector
*connector
,
5082 struct drm_crtc
*crtc
)
5084 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
5085 uint8_t *eld
= connector
->eld
;
5094 if (HAS_PCH_IBX(connector
->dev
)) {
5095 hdmiw_hdmiedid
= IBX_HDMIW_HDMIEDID_A
;
5096 aud_config
= IBX_AUD_CONFIG_A
;
5097 aud_cntl_st
= IBX_AUD_CNTL_ST_A
;
5098 aud_cntrl_st2
= IBX_AUD_CNTL_ST2
;
5100 hdmiw_hdmiedid
= CPT_HDMIW_HDMIEDID_A
;
5101 aud_config
= CPT_AUD_CONFIG_A
;
5102 aud_cntl_st
= CPT_AUD_CNTL_ST_A
;
5103 aud_cntrl_st2
= CPT_AUD_CNTRL_ST2
;
5106 i
= to_intel_crtc(crtc
)->pipe
;
5107 hdmiw_hdmiedid
+= i
* 0x100;
5108 aud_cntl_st
+= i
* 0x100;
5109 aud_config
+= i
* 0x100;
5111 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i
));
5113 i
= I915_READ(aud_cntl_st
);
5114 i
= (i
>> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
5116 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5117 /* operate blindly on all ports */
5118 eldv
= IBX_ELD_VALIDB
;
5119 eldv
|= IBX_ELD_VALIDB
<< 4;
5120 eldv
|= IBX_ELD_VALIDB
<< 8;
5122 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i
);
5123 eldv
= IBX_ELD_VALIDB
<< ((i
- 1) * 4);
5126 if (intel_pipe_has_type(crtc
, INTEL_OUTPUT_DISPLAYPORT
)) {
5127 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5128 eld
[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
5129 I915_WRITE(aud_config
, AUD_CONFIG_N_VALUE_INDEX
); /* 0x1 = DP */
5131 I915_WRITE(aud_config
, 0);
5133 if (intel_eld_uptodate(connector
,
5134 aud_cntrl_st2
, eldv
,
5135 aud_cntl_st
, IBX_ELD_ADDRESS
,
5139 i
= I915_READ(aud_cntrl_st2
);
5141 I915_WRITE(aud_cntrl_st2
, i
);
5146 i
= I915_READ(aud_cntl_st
);
5147 i
&= ~IBX_ELD_ADDRESS
;
5148 I915_WRITE(aud_cntl_st
, i
);
5150 len
= min_t(uint8_t, eld
[2], 21); /* 84 bytes of hw ELD buffer */
5151 DRM_DEBUG_DRIVER("ELD size %d\n", len
);
5152 for (i
= 0; i
< len
; i
++)
5153 I915_WRITE(hdmiw_hdmiedid
, *((uint32_t *)eld
+ i
));
5155 i
= I915_READ(aud_cntrl_st2
);
5157 I915_WRITE(aud_cntrl_st2
, i
);
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)
		return;

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}
}
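/*
 * Hedged sketch, not part of the original driver: each palette entry
 * written above is a packed 8:8:8 value, i.e. the same layout this
 * hypothetical helper produces.
 */
static inline u32 intel_legacy_lut_entry(u8 red, u8 green, u8 blue)
{
	return ((u32)red << 16) | ((u32)green << 8) | blue;
}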
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
5291 static void intel_crtc_update_cursor(struct drm_crtc
*crtc
,
5294 struct drm_device
*dev
= crtc
->dev
;
5295 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5296 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5297 int pipe
= intel_crtc
->pipe
;
5298 int x
= intel_crtc
->cursor_x
;
5299 int y
= intel_crtc
->cursor_y
;
5305 if (on
&& crtc
->enabled
&& crtc
->fb
) {
5306 base
= intel_crtc
->cursor_addr
;
5307 if (x
> (int) crtc
->fb
->width
)
5310 if (y
> (int) crtc
->fb
->height
)
5316 if (x
+ intel_crtc
->cursor_width
< 0)
5319 pos
|= CURSOR_POS_SIGN
<< CURSOR_X_SHIFT
;
5322 pos
|= x
<< CURSOR_X_SHIFT
;
5325 if (y
+ intel_crtc
->cursor_height
< 0)
5328 pos
|= CURSOR_POS_SIGN
<< CURSOR_Y_SHIFT
;
5331 pos
|= y
<< CURSOR_Y_SHIFT
;
5333 visible
= base
!= 0;
5334 if (!visible
&& !intel_crtc
->cursor_visible
)
5337 if (IS_IVYBRIDGE(dev
) || IS_HASWELL(dev
)) {
5338 I915_WRITE(CURPOS_IVB(pipe
), pos
);
5339 ivb_update_cursor(crtc
, base
);
5341 I915_WRITE(CURPOS(pipe
), pos
);
5342 if (IS_845G(dev
) || IS_I865G(dev
))
5343 i845_update_cursor(crtc
, base
);
5345 i9xx_update_cursor(crtc
, base
);
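/*
 * Hedged sketch, not part of the original driver: CURPOS takes
 * sign/magnitude coordinates rather than two's complement, which is what
 * the CURSOR_POS_SIGN handling above encodes.  The helper name is
 * hypothetical; the shift and sign macros are the ones this file already
 * uses.
 */
static inline u32 intel_cursor_position(int x, int y)
{
	u32 pos = 0;

	if (x < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;	/* negative X */
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;	/* negative Y */
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	return pos;
}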
5349 static int intel_crtc_cursor_set(struct drm_crtc
*crtc
,
5350 struct drm_file
*file
,
5352 uint32_t width
, uint32_t height
)
5354 struct drm_device
*dev
= crtc
->dev
;
5355 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5356 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5357 struct drm_i915_gem_object
*obj
;
5361 DRM_DEBUG_KMS("\n");
5363 /* if we want to turn off the cursor ignore width and height */
5365 DRM_DEBUG_KMS("cursor off\n");
5368 mutex_lock(&dev
->struct_mutex
);
5372 /* Currently we only support 64x64 cursors */
5373 if (width
!= 64 || height
!= 64) {
5374 DRM_ERROR("we currently only support 64x64 cursors\n");
5378 obj
= to_intel_bo(drm_gem_object_lookup(dev
, file
, handle
));
5379 if (&obj
->base
== NULL
)
5382 if (obj
->base
.size
< width
* height
* 4) {
5383 DRM_ERROR("buffer is to small\n");
5388 /* we only need to pin inside GTT if cursor is non-phy */
5389 mutex_lock(&dev
->struct_mutex
);
5390 if (!dev_priv
->info
->cursor_needs_physical
) {
5391 if (obj
->tiling_mode
) {
5392 DRM_ERROR("cursor cannot be tiled\n");
5397 ret
= i915_gem_object_pin_to_display_plane(obj
, 0, NULL
);
5399 DRM_ERROR("failed to move cursor bo into the GTT\n");
5403 ret
= i915_gem_object_put_fence(obj
);
5405 DRM_ERROR("failed to release fence for cursor");
5409 addr
= obj
->gtt_offset
;
5411 int align
= IS_I830(dev
) ? 16 * 1024 : 256;
5412 ret
= i915_gem_attach_phys_object(dev
, obj
,
5413 (intel_crtc
->pipe
== 0) ? I915_GEM_PHYS_CURSOR_0
: I915_GEM_PHYS_CURSOR_1
,
5416 DRM_ERROR("failed to attach phys object\n");
5419 addr
= obj
->phys_obj
->handle
->busaddr
;
5423 I915_WRITE(CURSIZE
, (height
<< 12) | width
);
5426 if (intel_crtc
->cursor_bo
) {
5427 if (dev_priv
->info
->cursor_needs_physical
) {
5428 if (intel_crtc
->cursor_bo
!= obj
)
5429 i915_gem_detach_phys_object(dev
, intel_crtc
->cursor_bo
);
5431 i915_gem_object_unpin(intel_crtc
->cursor_bo
);
5432 drm_gem_object_unreference(&intel_crtc
->cursor_bo
->base
);
5435 mutex_unlock(&dev
->struct_mutex
);
5437 intel_crtc
->cursor_addr
= addr
;
5438 intel_crtc
->cursor_bo
= obj
;
5439 intel_crtc
->cursor_width
= width
;
5440 intel_crtc
->cursor_height
= height
;
5442 intel_crtc_update_cursor(crtc
, true);
5446 i915_gem_object_unpin(obj
);
5448 mutex_unlock(&dev
->struct_mutex
);
5450 drm_gem_object_unreference_unlocked(&obj
->base
);
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_x = x;
	intel_crtc->cursor_y = y;

	intel_crtc_update_cursor(crtc, true);

	return 0;
}
/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			     u16 blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->lut_r[regno] = red >> 8;
	intel_crtc->lut_g[regno] = green >> 8;
	intel_crtc->lut_b[regno] = blue >> 8;
}

void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	*red = intel_crtc->lut_r[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*blue = intel_crtc->lut_b[regno] << 8;
}
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
/**
 * Get a pipe with a simple mode set on it for doing load-based monitor
 * detection.
 *
 * It will be up to the load-detect code to adjust the pipe as appropriate for
 * its requirements.  The pipe will be connected to no other encoders.
 *
 * Currently this code will only succeed if there is a pipe with no encoders
 * configured for it.  In the future, it could choose to temporarily disable
 * some outputs to free up a pipe for its use.
 *
 * \return crtc, or NULL if no pipes are available.
 */

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(&obj->base);
		kfree(intel_fb);
		return ERR_PTR(ret);
	}

	return &intel_fb->base;
}

static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
}
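/*
 * Worked example (not part of the original driver): for a hypothetical
 * 1024x768 mode at 32bpp the two helpers above give
 *
 *   pitch = ALIGN(DIV_ROUND_UP(1024 * 32, 8), 64) = 4096 bytes
 *   size  = ALIGN(4096 * 768, PAGE_SIZE)          = 3145728 bytes (3 MiB)
 *
 * i.e. a page-aligned 3 MiB object backs the temporary load-detect
 * framebuffer.
 */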
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd;

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}
5582 static struct drm_framebuffer
*
5583 mode_fits_in_fbdev(struct drm_device
*dev
,
5584 struct drm_display_mode
*mode
)
5586 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5587 struct drm_i915_gem_object
*obj
;
5588 struct drm_framebuffer
*fb
;
5590 if (dev_priv
->fbdev
== NULL
)
5593 obj
= dev_priv
->fbdev
->ifb
.obj
;
5597 fb
= &dev_priv
->fbdev
->ifb
.base
;
5598 if (fb
->pitches
[0] < intel_framebuffer_pitch_for_width(mode
->hdisplay
,
5599 fb
->bits_per_pixel
))
5602 if (obj
->base
.size
< mode
->vdisplay
* fb
->pitches
[0])
5608 bool intel_get_load_detect_pipe(struct intel_encoder
*intel_encoder
,
5609 struct drm_connector
*connector
,
5610 struct drm_display_mode
*mode
,
5611 struct intel_load_detect_pipe
*old
)
5613 struct intel_crtc
*intel_crtc
;
5614 struct drm_crtc
*possible_crtc
;
5615 struct drm_encoder
*encoder
= &intel_encoder
->base
;
5616 struct drm_crtc
*crtc
= NULL
;
5617 struct drm_device
*dev
= encoder
->dev
;
5618 struct drm_framebuffer
*old_fb
;
5621 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5622 connector
->base
.id
, drm_get_connector_name(connector
),
5623 encoder
->base
.id
, drm_get_encoder_name(encoder
));
5626 * Algorithm gets a little messy:
5628 * - if the connector already has an assigned crtc, use it (but make
5629 * sure it's on first)
5631 * - try to find the first unused crtc that can drive this connector,
5632 * and use that if we find one
5635 /* See if we already have a CRTC for this connector */
5636 if (encoder
->crtc
) {
5637 crtc
= encoder
->crtc
;
5639 intel_crtc
= to_intel_crtc(crtc
);
5640 old
->dpms_mode
= intel_crtc
->dpms_mode
;
5641 old
->load_detect_temp
= false;
5643 /* Make sure the crtc and connector are running */
5644 if (intel_crtc
->dpms_mode
!= DRM_MODE_DPMS_ON
) {
5645 struct drm_encoder_helper_funcs
*encoder_funcs
;
5646 struct drm_crtc_helper_funcs
*crtc_funcs
;
5648 crtc_funcs
= crtc
->helper_private
;
5649 crtc_funcs
->dpms(crtc
, DRM_MODE_DPMS_ON
);
5651 encoder_funcs
= encoder
->helper_private
;
5652 encoder_funcs
->dpms(encoder
, DRM_MODE_DPMS_ON
);
5658 /* Find an unused one (if possible) */
5659 list_for_each_entry(possible_crtc
, &dev
->mode_config
.crtc_list
, head
) {
5661 if (!(encoder
->possible_crtcs
& (1 << i
)))
5663 if (!possible_crtc
->enabled
) {
5664 crtc
= possible_crtc
;
5670 * If we didn't find an unused CRTC, don't use any.
5673 DRM_DEBUG_KMS("no pipe available for load-detect\n");
5677 encoder
->crtc
= crtc
;
5678 connector
->encoder
= encoder
;
5680 intel_crtc
= to_intel_crtc(crtc
);
5681 old
->dpms_mode
= intel_crtc
->dpms_mode
;
5682 old
->load_detect_temp
= true;
5683 old
->release_fb
= NULL
;
5686 mode
= &load_detect_mode
;
5690 /* We need a framebuffer large enough to accommodate all accesses
5691 * that the plane may generate whilst we perform load detection.
5692 * We can not rely on the fbcon either being present (we get called
5693 * during its initialisation to detect all boot displays, or it may
5694 * not even exist) or that it is large enough to satisfy the
5697 crtc
->fb
= mode_fits_in_fbdev(dev
, mode
);
5698 if (crtc
->fb
== NULL
) {
5699 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5700 crtc
->fb
= intel_framebuffer_create_for_mode(dev
, mode
, 24, 32);
5701 old
->release_fb
= crtc
->fb
;
5703 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5704 if (IS_ERR(crtc
->fb
)) {
5705 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5710 if (!drm_crtc_helper_set_mode(crtc
, mode
, 0, 0, old_fb
)) {
5711 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
5712 if (old
->release_fb
)
5713 old
->release_fb
->funcs
->destroy(old
->release_fb
);
5718 /* let the connector get through one full cycle before testing */
5719 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
5724 void intel_release_load_detect_pipe(struct intel_encoder
*intel_encoder
,
5725 struct drm_connector
*connector
,
5726 struct intel_load_detect_pipe
*old
)
5728 struct drm_encoder
*encoder
= &intel_encoder
->base
;
5729 struct drm_device
*dev
= encoder
->dev
;
5730 struct drm_crtc
*crtc
= encoder
->crtc
;
5731 struct drm_encoder_helper_funcs
*encoder_funcs
= encoder
->helper_private
;
5732 struct drm_crtc_helper_funcs
*crtc_funcs
= crtc
->helper_private
;
5734 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5735 connector
->base
.id
, drm_get_connector_name(connector
),
5736 encoder
->base
.id
, drm_get_encoder_name(encoder
));
5738 if (old
->load_detect_temp
) {
5739 connector
->encoder
= NULL
;
5740 drm_helper_disable_unused_functions(dev
);
5742 if (old
->release_fb
)
5743 old
->release_fb
->funcs
->destroy(old
->release_fb
);
5748 /* Switch crtc and encoder back off if necessary */
5749 if (old
->dpms_mode
!= DRM_MODE_DPMS_ON
) {
5750 encoder_funcs
->dpms(encoder
, old
->dpms_mode
);
5751 crtc_funcs
->dpms(crtc
, old
->dpms_mode
);
5755 /* Returns the clock of the currently programmed mode of the given pipe. */
5756 static int intel_crtc_clock_get(struct drm_device
*dev
, struct drm_crtc
*crtc
)
5758 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5759 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5760 int pipe
= intel_crtc
->pipe
;
5761 u32 dpll
= I915_READ(DPLL(pipe
));
5763 intel_clock_t clock
;
5765 if ((dpll
& DISPLAY_RATE_SELECT_FPA1
) == 0)
5766 fp
= I915_READ(FP0(pipe
));
5768 fp
= I915_READ(FP1(pipe
));
5770 clock
.m1
= (fp
& FP_M1_DIV_MASK
) >> FP_M1_DIV_SHIFT
;
5771 if (IS_PINEVIEW(dev
)) {
5772 clock
.n
= ffs((fp
& FP_N_PINEVIEW_DIV_MASK
) >> FP_N_DIV_SHIFT
) - 1;
5773 clock
.m2
= (fp
& FP_M2_PINEVIEW_DIV_MASK
) >> FP_M2_DIV_SHIFT
;
5775 clock
.n
= (fp
& FP_N_DIV_MASK
) >> FP_N_DIV_SHIFT
;
5776 clock
.m2
= (fp
& FP_M2_DIV_MASK
) >> FP_M2_DIV_SHIFT
;
5779 if (!IS_GEN2(dev
)) {
5780 if (IS_PINEVIEW(dev
))
5781 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW
) >>
5782 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW
);
5784 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK
) >>
5785 DPLL_FPA01_P1_POST_DIV_SHIFT
);
5787 switch (dpll
& DPLL_MODE_MASK
) {
5788 case DPLLB_MODE_DAC_SERIAL
:
5789 clock
.p2
= dpll
& DPLL_DAC_SERIAL_P2_CLOCK_DIV_5
?
5792 case DPLLB_MODE_LVDS
:
5793 clock
.p2
= dpll
& DPLLB_LVDS_P2_CLOCK_DIV_7
?
5797 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
5798 "mode\n", (int)(dpll
& DPLL_MODE_MASK
));
5802 /* XXX: Handle the 100Mhz refclk */
5803 intel_clock(dev
, 96000, &clock
);
5805 bool is_lvds
= (pipe
== 1) && (I915_READ(LVDS
) & LVDS_PORT_EN
);
5808 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS
) >>
5809 DPLL_FPA01_P1_POST_DIV_SHIFT
);
5812 if ((dpll
& PLL_REF_INPUT_MASK
) ==
5813 PLLB_REF_INPUT_SPREADSPECTRUMIN
) {
5814 /* XXX: might not be 66MHz */
5815 intel_clock(dev
, 66000, &clock
);
5817 intel_clock(dev
, 48000, &clock
);
5819 if (dpll
& PLL_P1_DIVIDE_BY_TWO
)
5822 clock
.p1
= ((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_I830
) >>
5823 DPLL_FPA01_P1_POST_DIV_SHIFT
) + 2;
5825 if (dpll
& PLL_P2_DIVIDE_BY_4
)
5830 intel_clock(dev
, 48000, &clock
);
	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */
	return clock.dot;
}
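/*
 * Hedged sketch, not part of the original driver: P1 lives in the DPLL as
 * a one-hot bitfield, so the mode-set path encodes it with a shift and
 * intel_crtc_clock_get() above decodes it with ffs().  Helper names are
 * hypothetical; the mask/shift macros are the ones used above.
 */
static inline u32 i9xx_p1_to_dpll(int p1)
{
	return (1 << (p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
}

static inline int i9xx_dpll_to_p1(u32 dpll)
{
	return ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
		   DPLL_FPA01_P1_POST_DIV_SHIFT);
}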
5842 /** Returns the currently programmed mode of the given pipe. */
5843 struct drm_display_mode
*intel_crtc_mode_get(struct drm_device
*dev
,
5844 struct drm_crtc
*crtc
)
5846 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5847 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5848 int pipe
= intel_crtc
->pipe
;
5849 struct drm_display_mode
*mode
;
5850 int htot
= I915_READ(HTOTAL(pipe
));
5851 int hsync
= I915_READ(HSYNC(pipe
));
5852 int vtot
= I915_READ(VTOTAL(pipe
));
5853 int vsync
= I915_READ(VSYNC(pipe
));
5855 mode
= kzalloc(sizeof(*mode
), GFP_KERNEL
);
5859 mode
->clock
= intel_crtc_clock_get(dev
, crtc
);
5860 mode
->hdisplay
= (htot
& 0xffff) + 1;
5861 mode
->htotal
= ((htot
& 0xffff0000) >> 16) + 1;
5862 mode
->hsync_start
= (hsync
& 0xffff) + 1;
5863 mode
->hsync_end
= ((hsync
& 0xffff0000) >> 16) + 1;
5864 mode
->vdisplay
= (vtot
& 0xffff) + 1;
5865 mode
->vtotal
= ((vtot
& 0xffff0000) >> 16) + 1;
5866 mode
->vsync_start
= (vsync
& 0xffff) + 1;
5867 mode
->vsync_end
= ((vsync
& 0xffff0000) >> 16) + 1;
5869 drm_mode_set_name(mode
);
#define GPU_IDLE_TIMEOUT 500 /* ms */

/* When this timer fires, we've been idle for awhile */
static void intel_gpu_idle_timer(unsigned long arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!list_empty(&dev_priv->mm.active_list)) {
		/* Still processing requests, so just re-arm the timer. */
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
		return;
	}

	dev_priv->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}

#define CRTC_IDLE_TIMEOUT 1000 /* ms */

static void intel_crtc_idle_timer(unsigned long arg)
{
	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
	struct drm_crtc *crtc = &intel_crtc->base;
	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
	struct intel_framebuffer *intel_fb;

	intel_fb = to_intel_framebuffer(crtc->fb);
	if (intel_fb && intel_fb->obj->active) {
		/* The framebuffer is still being accessed by the GPU. */
		mod_timer(&intel_crtc->idle_timer, jiffies +
			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
		return;
	}

	intel_crtc->busy = false;
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}

	/* Schedule downclock */
	mod_timer(&intel_crtc->idle_timer, jiffies +
		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}

static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the locked state.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}
5986 * intel_idle_update - adjust clocks for idleness
5987 * @work: work struct
5989 * Either the GPU or display (or both) went idle. Check the busy status
5990 * here and adjust the CRTC and GPU clocks as necessary.
5992 static void intel_idle_update(struct work_struct
*work
)
5994 drm_i915_private_t
*dev_priv
= container_of(work
, drm_i915_private_t
,
5996 struct drm_device
*dev
= dev_priv
->dev
;
5997 struct drm_crtc
*crtc
;
5998 struct intel_crtc
*intel_crtc
;
6000 if (!i915_powersave
)
6003 mutex_lock(&dev
->struct_mutex
);
6005 i915_update_gfx_val(dev_priv
);
6007 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
6008 /* Skip inactive CRTCs */
6012 intel_crtc
= to_intel_crtc(crtc
);
6013 if (!intel_crtc
->busy
)
6014 intel_decrease_pllclock(crtc
);
6018 mutex_unlock(&dev
->struct_mutex
);
6022 * intel_mark_busy - mark the GPU and possibly the display busy
6024 * @obj: object we're operating on
6026 * Callers can use this function to indicate that the GPU is busy processing
6027 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
6028 * buffer), we'll also mark the display as busy, so we know to increase its
6031 void intel_mark_busy(struct drm_device
*dev
, struct drm_i915_gem_object
*obj
)
6033 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
6034 struct drm_crtc
*crtc
= NULL
;
6035 struct intel_framebuffer
*intel_fb
;
6036 struct intel_crtc
*intel_crtc
;
6038 if (!drm_core_check_feature(dev
, DRIVER_MODESET
))
6041 if (!dev_priv
->busy
) {
6042 intel_sanitize_pm(dev
);
6043 dev_priv
->busy
= true;
6045 mod_timer(&dev_priv
->idle_timer
, jiffies
+
6046 msecs_to_jiffies(GPU_IDLE_TIMEOUT
));
6051 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
6055 intel_crtc
= to_intel_crtc(crtc
);
6056 intel_fb
= to_intel_framebuffer(crtc
->fb
);
6057 if (intel_fb
->obj
== obj
) {
6058 if (!intel_crtc
->busy
) {
6059 /* Non-busy -> busy, upclock */
6060 intel_increase_pllclock(crtc
);
6061 intel_crtc
->busy
= true;
6063 /* Busy -> busy, put off timer */
6064 mod_timer(&intel_crtc
->idle_timer
, jiffies
+
6065 msecs_to_jiffies(CRTC_IDLE_TIMEOUT
));
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);

	mutex_lock(&work->dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_update_fbc(work->dev);
	mutex_unlock(&work->dev->struct_mutex);

	kfree(work);
}
6108 static void do_intel_finish_page_flip(struct drm_device
*dev
,
6109 struct drm_crtc
*crtc
)
6111 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
6112 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6113 struct intel_unpin_work
*work
;
6114 struct drm_i915_gem_object
*obj
;
6115 struct drm_pending_vblank_event
*e
;
6116 struct timeval tnow
, tvbl
;
6117 unsigned long flags
;
6119 /* Ignore early vblank irqs */
6120 if (intel_crtc
== NULL
)
6123 do_gettimeofday(&tnow
);
6125 spin_lock_irqsave(&dev
->event_lock
, flags
);
6126 work
= intel_crtc
->unpin_work
;
6127 if (work
== NULL
|| !work
->pending
) {
6128 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
6132 intel_crtc
->unpin_work
= NULL
;
6136 e
->event
.sequence
= drm_vblank_count_and_time(dev
, intel_crtc
->pipe
, &tvbl
);
6138 /* Called before vblank count and timestamps have
6139 * been updated for the vblank interval of flip
6140 * completion? Need to increment vblank count and
6141 * add one videorefresh duration to returned timestamp
6142 * to account for this. We assume this happened if we
6143 * get called over 0.9 frame durations after the last
6144 * timestamped vblank.
6146 * This calculation can not be used with vrefresh rates
6147 * below 5Hz (10Hz to be on the safe side) without
6148 * promoting to 64 integers.
6150 if (10 * (timeval_to_ns(&tnow
) - timeval_to_ns(&tvbl
)) >
6151 9 * crtc
->framedur_ns
) {
6152 e
->event
.sequence
++;
6153 tvbl
= ns_to_timeval(timeval_to_ns(&tvbl
) +
6157 e
->event
.tv_sec
= tvbl
.tv_sec
;
6158 e
->event
.tv_usec
= tvbl
.tv_usec
;
6160 list_add_tail(&e
->base
.link
,
6161 &e
->base
.file_priv
->event_list
);
6162 wake_up_interruptible(&e
->base
.file_priv
->event_wait
);
6165 drm_vblank_put(dev
, intel_crtc
->pipe
);
6167 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
6169 obj
= work
->old_fb_obj
;
6171 atomic_clear_mask(1 << intel_crtc
->plane
,
6172 &obj
->pending_flip
.counter
);
6174 wake_up(&dev_priv
->pending_flip_queue
);
6175 schedule_work(&work
->work
);
6177 trace_i915_flip_complete(intel_crtc
->plane
, work
->pending_flip_obj
);
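/*
 * Hedged sketch, not part of the original driver: the 0.9-frame test in
 * do_intel_finish_page_flip() above written out as a predicate.  It stays
 * in integer math so it remains exact for refresh rates down to roughly
 * 10Hz without promoting beyond 64-bit nanoseconds.  The function name is
 * hypothetical.
 */
static inline bool intel_flip_missed_vblank_update(s64 now_ns, s64 vbl_ns,
						   int framedur_ns)
{
	/* true when the completion handler ran more than 0.9 frame
	 * durations after the last timestamped vblank, i.e. the vblank
	 * count/timestamp have not yet been updated for the flip's own
	 * vblank interval and need to be adjusted by hand. */
	return 10 * (now_ns - vbl_ns) > 9 * (s64)framedur_ns;
}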
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work) {
		if ((++intel_crtc->unpin_work->pending) > 1)
			DRM_ERROR("Prepared flip multiple times\n");
	} else {
		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
6213 static int intel_gen2_queue_flip(struct drm_device
*dev
,
6214 struct drm_crtc
*crtc
,
6215 struct drm_framebuffer
*fb
,
6216 struct drm_i915_gem_object
*obj
)
6218 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6219 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6221 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
6224 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
6228 ret
= intel_ring_begin(ring
, 6);
6232 /* Can't queue multiple flips, so wait for the previous
6233 * one to finish before executing the next.
6235 if (intel_crtc
->plane
)
6236 flip_mask
= MI_WAIT_FOR_PLANE_B_FLIP
;
6238 flip_mask
= MI_WAIT_FOR_PLANE_A_FLIP
;
6239 intel_ring_emit(ring
, MI_WAIT_FOR_EVENT
| flip_mask
);
6240 intel_ring_emit(ring
, MI_NOOP
);
6241 intel_ring_emit(ring
, MI_DISPLAY_FLIP
|
6242 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
6243 intel_ring_emit(ring
, fb
->pitches
[0]);
6244 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
6245 intel_ring_emit(ring
, 0); /* aux display base address, unused */
6246 intel_ring_advance(ring
);
6250 intel_unpin_fb_obj(obj
);
6255 static int intel_gen3_queue_flip(struct drm_device
*dev
,
6256 struct drm_crtc
*crtc
,
6257 struct drm_framebuffer
*fb
,
6258 struct drm_i915_gem_object
*obj
)
6260 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6261 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6263 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
6266 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
6270 ret
= intel_ring_begin(ring
, 6);
6274 if (intel_crtc
->plane
)
6275 flip_mask
= MI_WAIT_FOR_PLANE_B_FLIP
;
6277 flip_mask
= MI_WAIT_FOR_PLANE_A_FLIP
;
6278 intel_ring_emit(ring
, MI_WAIT_FOR_EVENT
| flip_mask
);
6279 intel_ring_emit(ring
, MI_NOOP
);
6280 intel_ring_emit(ring
, MI_DISPLAY_FLIP_I915
|
6281 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
6282 intel_ring_emit(ring
, fb
->pitches
[0]);
6283 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
6284 intel_ring_emit(ring
, MI_NOOP
);
6286 intel_ring_advance(ring
);
6290 intel_unpin_fb_obj(obj
);
6295 static int intel_gen4_queue_flip(struct drm_device
*dev
,
6296 struct drm_crtc
*crtc
,
6297 struct drm_framebuffer
*fb
,
6298 struct drm_i915_gem_object
*obj
)
6300 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6301 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6302 uint32_t pf
, pipesrc
;
6303 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
6306 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
6310 ret
= intel_ring_begin(ring
, 4);
6314 /* i965+ uses the linear or tiled offsets from the
6315 * Display Registers (which do not change across a page-flip)
6316 * so we need only reprogram the base address.
6318 intel_ring_emit(ring
, MI_DISPLAY_FLIP
|
6319 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
6320 intel_ring_emit(ring
, fb
->pitches
[0]);
6321 intel_ring_emit(ring
,
6322 (obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
) |
6325 /* XXX Enabling the panel-fitter across page-flip is so far
6326 * untested on non-native modes, so ignore it for now.
6327 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6330 pipesrc
= I915_READ(PIPESRC(intel_crtc
->pipe
)) & 0x0fff0fff;
6331 intel_ring_emit(ring
, pf
| pipesrc
);
6332 intel_ring_advance(ring
);
6336 intel_unpin_fb_obj(obj
);
6341 static int intel_gen6_queue_flip(struct drm_device
*dev
,
6342 struct drm_crtc
*crtc
,
6343 struct drm_framebuffer
*fb
,
6344 struct drm_i915_gem_object
*obj
)
6346 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6347 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6348 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
6349 uint32_t pf
, pipesrc
;
6352 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
6356 ret
= intel_ring_begin(ring
, 4);
6360 intel_ring_emit(ring
, MI_DISPLAY_FLIP
|
6361 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
6362 intel_ring_emit(ring
, fb
->pitches
[0] | obj
->tiling_mode
);
6363 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
6365 /* Contrary to the suggestions in the documentation,
6366 * "Enable Panel Fitter" does not seem to be required when page
6367 * flipping with a non-native mode, and worse causes a normal
6369 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6372 pipesrc
= I915_READ(PIPESRC(intel_crtc
->pipe
)) & 0x0fff0fff;
6373 intel_ring_emit(ring
, pf
| pipesrc
);
6374 intel_ring_advance(ring
);
6378 intel_unpin_fb_obj(obj
);
6384 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrupts for page flip completion, which
6386 * means clients will hang after the first flip is queued. Fortunately the
6387 * blit ring generates interrupts properly, so use it instead.
6389 static int intel_gen7_queue_flip(struct drm_device
*dev
,
6390 struct drm_crtc
*crtc
,
6391 struct drm_framebuffer
*fb
,
6392 struct drm_i915_gem_object
*obj
)
6394 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6395 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6396 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[BCS
];
6397 uint32_t plane_bit
= 0;
6400 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
6404 switch(intel_crtc
->plane
) {
6406 plane_bit
= MI_DISPLAY_FLIP_IVB_PLANE_A
;
6409 plane_bit
= MI_DISPLAY_FLIP_IVB_PLANE_B
;
6412 plane_bit
= MI_DISPLAY_FLIP_IVB_PLANE_C
;
6415 WARN_ONCE(1, "unknown plane in flip command\n");
6420 ret
= intel_ring_begin(ring
, 4);
6424 intel_ring_emit(ring
, MI_DISPLAY_FLIP_I915
| plane_bit
);
6425 intel_ring_emit(ring
, (fb
->pitches
[0] | obj
->tiling_mode
));
6426 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
6427 intel_ring_emit(ring
, (MI_NOOP
));
6428 intel_ring_advance(ring
);
6432 intel_unpin_fb_obj(obj
);
6437 static int intel_default_queue_flip(struct drm_device
*dev
,
6438 struct drm_crtc
*crtc
,
6439 struct drm_framebuffer
*fb
,
6440 struct drm_i915_gem_object
*obj
)
6445 static int intel_crtc_page_flip(struct drm_crtc
*crtc
,
6446 struct drm_framebuffer
*fb
,
6447 struct drm_pending_vblank_event
*event
)
6449 struct drm_device
*dev
= crtc
->dev
;
6450 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6451 struct intel_framebuffer
*intel_fb
;
6452 struct drm_i915_gem_object
*obj
;
6453 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6454 struct intel_unpin_work
*work
;
6455 unsigned long flags
;
6458 /* Can't change pixel format via MI display flips. */
6459 if (fb
->pixel_format
!= crtc
->fb
->pixel_format
)
6463 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
6466 if (INTEL_INFO(dev
)->gen
> 3 &&
6467 (fb
->offsets
[0] != crtc
->fb
->offsets
[0] ||
6468 fb
->pitches
[0] != crtc
->fb
->pitches
[0]))
6471 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
6475 work
->event
= event
;
6476 work
->dev
= crtc
->dev
;
6477 intel_fb
= to_intel_framebuffer(crtc
->fb
);
6478 work
->old_fb_obj
= intel_fb
->obj
;
6479 INIT_WORK(&work
->work
, intel_unpin_work_fn
);
6481 ret
= drm_vblank_get(dev
, intel_crtc
->pipe
);
6485 /* We borrow the event spin lock for protecting unpin_work */
6486 spin_lock_irqsave(&dev
->event_lock
, flags
);
6487 if (intel_crtc
->unpin_work
) {
6488 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
6490 drm_vblank_put(dev
, intel_crtc
->pipe
);
6492 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6495 intel_crtc
->unpin_work
= work
;
6496 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
6498 intel_fb
= to_intel_framebuffer(fb
);
6499 obj
= intel_fb
->obj
;
6501 ret
= i915_mutex_lock_interruptible(dev
);
6505 /* Reference the objects for the scheduled work. */
6506 drm_gem_object_reference(&work
->old_fb_obj
->base
);
6507 drm_gem_object_reference(&obj
->base
);
6511 work
->pending_flip_obj
= obj
;
6513 work
->enable_stall_check
= true;
6515 /* Block clients from rendering to the new back buffer until
6516 * the flip occurs and the object is no longer visible.
6518 atomic_add(1 << intel_crtc
->plane
, &work
->old_fb_obj
->pending_flip
);
6520 ret
= dev_priv
->display
.queue_flip(dev
, crtc
, fb
, obj
);
6522 goto cleanup_pending
;
6524 intel_disable_fbc(dev
);
6525 intel_mark_busy(dev
, obj
);
6526 mutex_unlock(&dev
->struct_mutex
);
6528 trace_i915_flip_request(intel_crtc
->plane
, obj
);
6533 atomic_sub(1 << intel_crtc
->plane
, &work
->old_fb_obj
->pending_flip
);
6534 drm_gem_object_unreference(&work
->old_fb_obj
->base
);
6535 drm_gem_object_unreference(&obj
->base
);
6536 mutex_unlock(&dev
->struct_mutex
);
6539 spin_lock_irqsave(&dev
->event_lock
, flags
);
6540 intel_crtc
->unpin_work
= NULL
;
6541 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
6543 drm_vblank_put(dev
, intel_crtc
->pipe
);
static void intel_sanitize_modesetting(struct drm_device *dev,
				       int pipe, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(pipe);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	if (HAS_PCH_SPLIT(dev))
		return;

	/* Who knows what state these registers were left in by the BIOS or
	 * grub?
	 *
	 * If we leave the registers in a conflicting state (e.g. with the
	 * display plane reading from the other pipe than the one we intend
	 * to use) then when we attempt to teardown the active mode, we will
	 * not disable the pipes and planes in the correct order -- leaving
	 * a plane reading from a disabled pipe and possibly leading to
	 * undefined behaviour.
	 */

	reg = DSPCNTR(plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;
	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
		return;

	/* This display plane is active and attached to the other CPU pipe. */
	pipe = !pipe;

	/* Disable the plane and wait for it to stop reading from the pipe. */
	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
}
static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
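
/*
 * Note that intel_helper_funcs is shared by every CRTC and is not const:
 * its .prepare and .commit hooks are filled in per-platform in
 * intel_crtc_init() below.
 */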
static void intel_pch_pll_init(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	if (dev_priv->num_pch_pll == 0) {
		DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
		return;
	}

	for (i = 0; i < dev_priv->num_pch_pll; i++) {
		dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
		dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
		dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
	}
}
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	if (HAS_PCH_SPLIT(dev)) {
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
		    (unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
					   DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		DRM_ERROR("no such CRTC id\n");
		return -EINVAL;
	}

	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
	struct intel_encoder *encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		if (type_mask & encoder->clone_mask)
			index_mask |= (1 << entry);
		entry++;
	}

	return index_mask;
}
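
/*
 * eDP on port A is only available on mobile parts, and only if DP_A is
 * detected and not disabled by the ILK_eDP_A_DISABLE fuse.
 */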
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) &&
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds;

	has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (IS_HASWELL(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	} else if (IS_VALLEYVIEW(dev)) {
		int found;

		if (I915_READ(SDVOB) & PORT_DETECTED) {
			/* SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, SDVOB);
			if (!found && (I915_READ(DP_B) & DP_DETECTED))
				intel_dp_init(dev, DP_B);
		}

		if (I915_READ(SDVOC) & PORT_DETECTED)
			intel_hdmi_init(dev, SDVOC);

		/* Shares lanes with HDMI on SDVOC */
		if (I915_READ(DP_C) & DP_DETECTED)
			intel_dp_init(dev, DP_C);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC, false);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

	kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode == I915_TILING_Y)
		return -EINVAL;

	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		/* RGB formats are common across chipsets */
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format %u\n",
			      mode_cmd->pixel_format);
		return -EINVAL;
	}

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	return 0;
}
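
/*
 * Userspace framebuffer creation: look up the GEM object named by the
 * first handle in the mode_fb_cmd2 and wrap it in an intel_framebuffer.
 */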
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_HASWELL(dev)) {
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
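
/*
 * Quirk handling: some machines need driver behaviour overrides keyed off
 * their PCI IDs.  Each hook below sets a flag in dev_priv->quirks that the
 * rest of the driver checks.
 */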
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};

static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
}
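
/*
 * A quirk entry matches on the PCI device id plus the subsystem vendor and
 * device ids; PCI_ANY_ID in a subsystem field acts as a wildcard, as in the
 * "855 & before" entries above.
 */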
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
void intel_modeset_init_hw(struct drm_device *dev)
{
	/* We attempt to init the necessary power wells early in the initialization
	 * time, so the subsystems that expect power to be enabled can work.
	 */
	intel_init_power_wells(dev);

	intel_prepare_ddi(dev);

	intel_init_clock_gating(dev);

	mutex_lock(&dev->struct_mutex);
	intel_enable_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	intel_init_display(dev);

	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
	}

	intel_pch_pll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
		    (unsigned long)dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_init_dpio(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_work_sync(&dev_priv->rps_work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* Shut off idle work before the crtcs get freed. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		intel_crtc = to_intel_crtc(crtc);
		del_timer_sync(&intel_crtc->idle_timer);
	}
	del_timer_sync(&dev_priv->idle_timer);
	cancel_work_sync(&dev_priv->idle_work);

	drm_mode_config_cleanup(dev);
}
/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 gmch_ctrl;

	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
	return 0;
}
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>

struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[2];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int i;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	for (i = 0; i < 2; i++) {
		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		error->plane[i].size = I915_READ(DSPSIZE(i));
		error->plane[i].pos = I915_READ(DSPPOS(i));
		error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(i));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(i));
		error->pipe[i].hblank = I915_READ(HBLANK(i));
		error->pipe[i].hsync = I915_READ(HSYNC(i));
		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
		error->pipe[i].vblank = I915_READ(VBLANK(i));
		error->pipe[i].vsync = I915_READ(VSYNC(i));
	}

	return error;
}

void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
#endif