/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"
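/*
 * Note on units (inferred from the conversion below): each tick of the RC6
 * residency counters is worth 128/100000 ms, i.e. ~1.28 us, so multiplying
 * the raw count by 128 and dividing by 100000 yields milliseconds. The
 * intermediate product is kept in a u64 because the 32-bit raw value may
 * overflow during the fixed point math.
 */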
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */

        if (!intel_enable_rc6(dev))
                return 0;

        raw_time = I915_READ(reg) * 128ULL;
        return DIV_ROUND_UP_ULL(raw_time, 100000);
}
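/*
 * sysfs read handlers for the RC6 attributes: each recovers the drm_minor
 * from the struct device embedded in it and reports the RC6 enable mask or
 * the residency of the respective RC6 state in milliseconds.
 */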
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}
static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
        &dev_attr_rc6_enable.attr,
        &dev_attr_rc6_residency_ms.attr,
        &dev_attr_rc6p_residency_ms.attr,
        &dev_attr_rc6pp_residency_ms.attr,
        NULL
};
static struct attribute_group rc6_attr_group = {
        .name = power_group_name,
        .attrs = rc6_attrs
};
#endif /* CONFIG_PM */
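/*
 * The "l3_parity" binary sysfs file lets userspace read back, and supply
 * replacement entries for, the GEN7 L3 cache parity log. l3_access_valid()
 * gates both paths: the hardware must have an L3 GPU cache and the offset
 * must be aligned and fall inside the L3 log.
 */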
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
        if (!HAS_L3_GPU_CACHE(dev))
                return -EPERM;

        if (offset % 4 != 0)
                return -EINVAL;

        if (offset >= GEN7_L3LOG_SIZE)
                return -ENXIO;

        return 0;
}
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        uint32_t misccpctl;
        int i, ret;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        /* Disable DOP clock gating while reading the log, then restore it */
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

        for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
                *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        mutex_unlock(&drm_dev->struct_mutex);

        return i - offset;
}
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int ret;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (!dev_priv->l3_parity.remap_info) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
                        return -ENOMEM;
                }
        }

        ret = i915_gpu_idle(drm_dev);
        if (ret) {
                kfree(temp);
                mutex_unlock(&drm_dev->struct_mutex);
                return ret;
        }

        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
         */
        if (temp)
                dev_priv->l3_parity.remap_info = temp;

        memcpy(dev_priv->l3_parity.remap_info + (offset/4),
               buf + (offset/4),
               count);

        i915_gem_l3_remap(drm_dev);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}
static struct bin_attribute dpf_attrs = {
        .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL
};
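/*
 * GT frequency (RPS) sysfs attributes. The rps.*_delay fields are kept in
 * hardware ratio units; gen6+ converts to MHz with GT_FREQUENCY_MULTIPLIER,
 * while Valleyview converts through vlv_gpu_freq()/vlv_freq_opcode().
 */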
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                u32 freq;
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = vlv_gpu_freq(dev_priv->mem_freq, (freq >> 8) & 0xff);
        } else {
                ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        return snprintf(buf, PAGE_SIZE, "%d\n",
                        vlv_gpu_freq(dev_priv->mem_freq,
                                     dev_priv->rps.rpe_delay));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
        else
                ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
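/*
 * Writing gt_max_freq_mhz parses the requested MHz value, converts it to
 * hardware units, rejects anything outside the hardware limits or below the
 * software minimum, and immediately lowers the current frequency if it now
 * exceeds the new cap.
 */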
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (IS_VALLEYVIEW(dev_priv->dev)) {
                val = vlv_freq_opcode(dev_priv->mem_freq, val);

                hw_max = valleyview_rps_max_freq(dev_priv);
                hw_min = valleyview_rps_min_freq(dev_priv);
                non_oc_max = hw_max;
        } else {
                val /= GT_FREQUENCY_MULTIPLIER;

                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                hw_max = dev_priv->rps.hw_max;
                non_oc_max = (rp_state_cap & 0xff);
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }

        if (val < hw_min || val > hw_max ||
            val < dev_priv->rps.min_delay) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }

        if (val > non_oc_max)
                DRM_DEBUG("User requested overclocking to %d\n",
                          val * GT_FREQUENCY_MULTIPLIER);

        if (dev_priv->rps.cur_delay > val) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, val);
                else
                        gen6_set_rps(dev_priv->dev, val);
        }

        dev_priv->rps.max_delay = val;

        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
        else
                ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap, hw_max, hw_min;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (IS_VALLEYVIEW(dev)) {
                val = vlv_freq_opcode(dev_priv->mem_freq, val);

                hw_max = valleyview_rps_max_freq(dev_priv);
                hw_min = valleyview_rps_min_freq(dev_priv);
        } else {
                val /= GT_FREQUENCY_MULTIPLIER;

                rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
                hw_max = dev_priv->rps.hw_max;
                hw_min = ((rp_state_cap & 0xff0000) >> 16);
        }

        if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }

        if (dev_priv->rps.cur_delay < val) {
                if (IS_VALLEYVIEW(dev))
                        valleyview_set_rps(dev, val);
                else
                        gen6_set_rps(dev_priv->dev, val);
        }

        dev_priv->rps.min_delay = val;

        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
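/*
 * gt_rp_mhz_show() is forward declared because it tells the three RP
 * attributes apart by comparing the attr pointer against the DEVICE_ATTR
 * instances defined just below.
 */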
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap;
        ssize_t ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        mutex_unlock(&dev->struct_mutex);

        if (attr == &dev_attr_gt_RP0_freq_mhz) {
                val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
        } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
                val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
        } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
                val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
        } else {
                BUG();
        }
        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static const struct attribute *gen6_attrs[] = {
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        NULL,
};
static const struct attribute *vlv_attrs[] = {
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_vlv_rpe_freq_mhz.attr,
        NULL,
};
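/*
 * The "error" binary sysfs file exposes the most recent GPU error state:
 * reading serializes it to text through a drm_i915_error_state_buf, and
 * writing anything to the file destroys the stored state.
 */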
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr, char *buf,
                                loff_t off, size_t count)
{
        struct device *kdev = container_of(kobj, struct device, kobj);
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
        int ret;

        memset(&error_priv, 0, sizeof(error_priv));

        ret = i915_error_state_buf_init(&error_str, count, off);
        if (ret)
                return ret;

        error_priv.dev = dev;
        i915_error_state_get(dev, &error_priv);

        ret = i915_error_state_to_str(&error_str, &error_priv);
        if (ret)
                goto out;

        ret_count = count < error_str.bytes ? count : error_str.bytes;

        memcpy(buf, error_str.buf, ret_count);
out:
        i915_error_state_put(&error_priv);
        i915_error_state_buf_release(&error_str);

        return ret ?: ret_count;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        struct device *kdev = container_of(kobj, struct device, kobj);
        struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
        struct drm_device *dev = minor->dev;
        int ret;

        DRM_DEBUG_DRIVER("Resetting error state\n");

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_destroy_error_state(dev);
        mutex_unlock(&dev->struct_mutex);

        return count;
}
static struct bin_attribute error_state_attr = {
        .attr.name = "error",
        .attr.mode = S_IRUSR | S_IWUSR,
        .size = 0,
        .read = error_state_read,
        .write = error_state_write,
};
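/*
 * i915_setup_sysfs()/i915_teardown_sysfs() register and remove the files
 * above on the DRM primary minor's device, keyed off hardware support:
 * RC6 residency on gen6+, l3_parity only with an L3 GPU cache, and the
 * Valleyview frequency set in place of the gen6 one where applicable.
 */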
void i915_setup_sysfs(struct drm_device *dev)
{
        int ret;

#ifdef CONFIG_PM
        if (INTEL_INFO(dev)->gen >= 6) {
                ret = sysfs_merge_group(&dev->primary->kdev.kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
#endif
        if (HAS_L3_GPU_CACHE(dev)) {
                ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");
        }

        ret = 0;
        if (IS_VALLEYVIEW(dev))
                ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
        else if (INTEL_INFO(dev)->gen >= 6)
                ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");

        ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
}
void i915_teardown_sysfs(struct drm_device *dev)
{
        sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
        if (IS_VALLEYVIEW(dev))
                sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
        else
                sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
        device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
        sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
#endif
}