/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
};
#undef PLATFORM_NAME
const char *intel_platform_name(enum intel_platform platform)
{
	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}
void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

	DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
			 intel_platform_name(info->platform),
			 info->gen,
			 dev_priv->drm.pdev->device,
			 dev_priv->drm.pdev->revision);
#define PRINT_FLAG(name) \
	DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u32 fuse, eu_dis;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
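
	/*
	 * CHV has a single slice with up to two subslices; each subslice
	 * carries 8 EUs, so count the EUs whose fuse bits are not set.
	 */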
	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		sseu->subslice_mask |= BIT(0);
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		sseu->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		sseu->subslice_mask |= BIT(1);
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		sseu->eu_total += 8 - hweight32(eu_dis);
	}

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				sseu->eu_total / sseu_subslice_total(sseu) : 0;

	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable;
	u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	sseu->subslice_mask = (1 << ss_max) - 1;
	sseu->subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
				 GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
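		/*
		 * Each subslice owns an 8-bit field within this per-slice
		 * register; a set bit marks an EU as fused off.
		 */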
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (!(sseu->subslice_mask & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs, so we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);

			sseu->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
		hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_BROXTON(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask & BIT(ss)))
		/*
		 * There is a HW issue in 2x6 fused down parts that requires
		 * Pooled EU to be enabled as a WA. The pool configuration
		 * changes depending upon which subslice is fused down. This
		 * doesn't affect if the device has all 3 subslices enabled.
		 */
		/* WaEnablePooledEuFor2x6:bxt */
		info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
				       (hweight8(sseu->subslice_mask) == 2 &&
					INTEL_REVID(dev_priv) < BXT_REVID_C0));

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	sseu->subslice_mask = BIT(ss_max) - 1;
	sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
				 GEN8_F2_SS_DIS_SHIFT);
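
	/*
	 * The EU-disable bits for the three slices are spread across the
	 * GEN8_EU_DISABLE0/1/2 registers; reassemble them into one value
	 * per slice before counting.
	 */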
	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (!(sseu->subslice_mask & BIT(ss)))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;

			sseu->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}
/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 9) {
		info->num_scalers[PIPE_A] = 2;
		info->num_scalers[PIPE_B] = 2;
		info->num_scalers[PIPE_C] = 1;
	}

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;
	}

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);
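
		/*
		 * Pipes can only be fused off from the top down (no holes),
		 * so a mask that disables a lower pipe while a higher one
		 * remains enabled is treated as invalid.
		 */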
		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (INTEL_INFO(dev_priv)->gen >= 9)
		gen9_sseu_info_init(dev_priv);

	info->has_snoop = !info->has_llc;

	/* Snooping is broken on BXT A stepping. */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		info->has_snoop = false;

	DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
	DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
	DRM_DEBUG_DRIVER("subslice total: %u\n",
			 sseu_subslice_total(&info->sseu));
	DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n",
			 hweight8(info->sseu.subslice_mask));
	DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->sseu.has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->sseu.has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->sseu.has_eu_pg ? "y" : "n");
}