drm/nouveau: consume the return of large GSP message
[drm/drm-misc.git] / drivers / accel / ivpu / ivpu_sysfs.c
blob97102feaf8ddca53a554c4cd884724e6c627d568
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2024 Intel Corporation
4 */
6 #include <linux/device.h>
7 #include <linux/err.h>
9 #include "ivpu_drv.h"
10 #include "ivpu_gem.h"
11 #include "ivpu_fw.h"
12 #include "ivpu_hw.h"
13 #include "ivpu_sysfs.h"
/**
 * DOC: npu_busy_time_us
 *
 * npu_busy_time_us is the time that the device spent executing jobs.
 * The time is counted when and only when there are jobs submitted to firmware.
 *
 * This time can be used to measure the utilization of NPU, either by calculating
 * npu_busy_time_us difference between two timepoints (i.e. measuring the time
 * that the NPU was active during some workload) or monitoring utilization percentage
 * by reading npu_busy_time_us periodically.
 *
 * When reading the value periodically, it shouldn't be read too often as it may have
 * an impact on job submission performance. Recommended period is 1 second.
 */
29 static ssize_t
30 npu_busy_time_us_show(struct device *dev, struct device_attribute *attr, char *buf)
32 struct drm_device *drm = dev_get_drvdata(dev);
33 struct ivpu_device *vdev = to_ivpu_device(drm);
34 ktime_t total, now = 0;
36 mutex_lock(&vdev->submitted_jobs_lock);
38 total = vdev->busy_time;
39 if (!xa_empty(&vdev->submitted_jobs_xa))
40 now = ktime_sub(ktime_get(), vdev->busy_start_ts);
41 mutex_unlock(&vdev->submitted_jobs_lock);
43 return sysfs_emit(buf, "%lld\n", ktime_to_us(ktime_add(total, now)));
46 static DEVICE_ATTR_RO(npu_busy_time_us);
/**
 * DOC: npu_memory_utilization
 *
 * The npu_memory_utilization is used to report in bytes a current NPU memory utilization.
 */
54 static ssize_t
55 npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, char *buf)
57 struct drm_device *drm = dev_get_drvdata(dev);
58 struct ivpu_device *vdev = to_ivpu_device(drm);
59 struct ivpu_bo *bo;
60 u64 total_npu_memory = 0;
62 mutex_lock(&vdev->bo_list_lock);
63 list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
64 total_npu_memory += bo->base.base.size;
65 mutex_unlock(&vdev->bo_list_lock);
67 return sysfs_emit(buf, "%lld\n", total_npu_memory);
70 static DEVICE_ATTR_RO(npu_memory_utilization);
/**
 * DOC: sched_mode
 *
 * The sched_mode is used to report current NPU scheduling mode.
 *
 * It returns the following strings:
 * - "HW" - Hardware Scheduler mode
 * - "OS" - Operating System Scheduler mode
 */
82 static ssize_t
83 sched_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
85 struct drm_device *drm = dev_get_drvdata(dev);
86 struct ivpu_device *vdev = to_ivpu_device(drm);
88 return sysfs_emit(buf, "%s\n", vdev->fw->sched_mode ? "HW" : "OS");
91 static DEVICE_ATTR_RO(sched_mode);
93 static struct attribute *ivpu_dev_attrs[] = {
94 &dev_attr_npu_busy_time_us.attr,
95 &dev_attr_npu_memory_utilization.attr,
96 &dev_attr_sched_mode.attr,
97 NULL,
100 static struct attribute_group ivpu_dev_attr_group = {
101 .attrs = ivpu_dev_attrs,
104 void ivpu_sysfs_init(struct ivpu_device *vdev)
106 int ret;
108 ret = devm_device_add_group(vdev->drm.dev, &ivpu_dev_attr_group);
109 if (ret)
110 ivpu_warn(vdev, "Failed to add group to device, ret %d", ret);