/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
};
#undef PLATFORM_NAME

const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
			   s, hweight8(sseu->subslice_mask[s]),
			   sseu->subslice_mask[s]);
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

void intel_device_info_dump(const struct intel_device_info *info,
			    struct drm_printer *p)
{
	struct drm_i915_private *dev_priv =
		container_of(info, struct drm_i915_private, info);

	drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
		   INTEL_DEVID(dev_priv),
		   INTEL_REVID(dev_priv),
		   intel_platform_name(info->platform),
		   info->gen);

	intel_device_info_dump_flags(info, p);
}

void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
			   s, hweight8(sseu->subslice_mask[s]),
			   sseu->subslice_mask[s]);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}
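
/*
 * The counting scheme above works because sseu->eu_mask packs the per-EU
 * enable bits of every subslice into consecutive bytes, so summing the
 * popcount (hweight8) of each byte yields the total number of enabled EUs.
 * For example, a subslice whose 8 EUs have mask 0x7f contributes
 * hweight8(0x7f) == 7 to the total.
 */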

static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	sseu->max_slices = 1;
	sseu->max_subslices = 8;
	sseu->max_eus_per_subslice = 8;

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}
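
/*
 * Worked example for the extraction above: with max_subslices == 8,
 * ss_en_mask is 0xff, so slice 0 takes bits [7:0] of the inverted
 * GEN11_GT_SUBSLICE_DISABLE value. A fuse reading of 0xf0 (subslices 4-7
 * disabled) becomes (ss_en >> 0) & 0xff == 0x0f, i.e. subslices 0-3
 * enabled.
 */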

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			   GEN10_F2_S_ENA_SHIFT;
	sseu->max_slices = 6;
	sseu->max_subslices = 4;
	sseu->max_eus_per_subslice = 8;

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can have up to 3 subslices, but there are only 2 in
	 * slice1/2.
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/*
	 * Do a second pass where we mark the subslices disabled if all
	 * their eus are off.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	sseu->max_slices = 1;
	sseu->max_subslices = 2;
	sseu->max_eus_per_subslice = 8;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				sseu->eu_total / sseu_subslice_total(sseu) :
				0;

	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
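
/*
 * Worked example for the fuse decode above: the "<< 4" implies the CHV
 * fuse splits each subslice's 8 EU-disable bits across two 4-bit fields
 * (R0 covering EUs 0-3, R1 covering EUs 4-7). If R0 reads 0x1 and R1
 * reads 0x8, disabled_mask is 0x81, so sseu_set_eus() effectively records
 * 0x7e: EUs 1-6 enabled, EUs 0 and 7 fused off.
 */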

static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				    hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
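
/*
 * Worked example for the power-gating rules above: on a hypothetical
 * 1x2x6 BXT part, sseu_subslice_total() == 2 > 1 makes has_subslice_pg
 * true and eu_per_subslice == 6 > 2 makes has_eu_pg true, while
 * has_slice_pg stays false because the !IS_GEN9_LP() test fails.
 */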

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	sseu->max_slices = 3;
	sseu->max_subslices = 3;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >>
				(ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
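
/*
 * A note on the eu_disable[] assembly above (an interpretation of the
 * register arithmetic): each slice appears to own a 24-bit disable field
 * (8 bits per subslice, three subslices), packed back to back across
 * GEN8_EU_DISABLE0/1/2. Slice 1's field starts at bit
 * GEN8_EU_DIS0_S1_SHIFT of the first register and spills into the low
 * bits of the second, which is why its two parts are OR'ed together with
 * a (32 - shift) adjustment.
 */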

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (info->gt) {
	default:
		MISSING_CASE(info->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	sseu->max_slices = hweight8(sseu->slice_mask);
	sseu->max_subslices = hweight8(sseu->subslice_mask[0]);

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}
	sseu->max_eus_per_subslice = sseu->eu_per_subslice;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
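
/*
 * Worked example: on a GT2 part (info->gt == 2) with fuse1 reporting
 * HSW_F1_EU_DIS_10EUS, the code above marks one slice with two subslices
 * of 10 EUs each, so compute_eu_total() yields 20 EUs.
 */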

static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}
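
/*
 * The result is in kHz: the divider field supplies whole MHz (converted
 * by the *= 1000) and the denominator field supplies a fractional kHz
 * part. For example, a divider field of 18 and a denominator field of 4
 * give (18 + 1) * 1000 + 1000 / (4 + 1) = 19000 + 200 = 19200 kHz, i.e.
 * a 19.2 MHz reference clock.
 */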

static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 11) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");

	return 0;
}
724 * @info: intel device info struct
726 * Determine various intel_device_info fields at runtime.
728 * Use it when either:
729 * - it's judged too laborious to fill n static structures with the limit
730 * when a simple if statement does the job,
731 * - run-time checks (eg read fuse/strap registers) are needed.
733 * This function needs to be called:
734 * - after the MMIO has been setup as we are reading registers,
735 * - after the PCH has been detected,
736 * - before the first usage of the fields it can tweak.
void intel_device_info_runtime_init(struct intel_device_info *info)
{
	struct drm_i915_private *dev_priv =
		container_of(info, struct drm_i915_private, info);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			info->num_scalers[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) == 9) {
		info->num_scalers[PIPE_A] = 2;
		info->num_scalers[PIPE_B] = 2;
		info->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(I915_NUM_ENGINES >
		     sizeof(intel_ring_mask_t) * BITS_PER_BYTE);

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

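		/*
		 * Pipes are only expected to be fused off from the last one
		 * down, i.e. the valid disable masks leave a contiguous run
		 * of enabled pipes starting at pipe A; anything else (pipe A
		 * or B disabled while a later pipe stays enabled) is treated
		 * as an invalid fuse configuration below.
		 */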
		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}
> info
->num_pipes
|| invalid
)
834 DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
837 info
->num_pipes
-= num_bits
;

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) == 9)
		gen9_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) == 10)
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	/* Initialize command stream timestamp frequency */
	info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines
 * that are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	u8 vdbox_disable, vebox_disable;
	u32 media_fuse;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
			GEN11_GT_VEBOX_DISABLE_SHIFT;

	DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_disable))
			continue;

		info->ring_mask &= ~ENGINE_MASK(_VCS(i));
		DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
	}

	DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_disable))
			continue;

		info->ring_mask &= ~ENGINE_MASK(_VECS(i));
		DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
	}
}