/*
 * Source file: drivers/fpga/dfl-afu-error.c (Linux kernel)
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@linux.intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel Henry <henry.mitchel@intel.com>
 */
17 #include <linux/uaccess.h>
19 #include "dfl-afu.h"
/* Register offsets within the port error private feature. */
#define PORT_ERROR_MASK		0x8	/* mask register: 1-bits suppress errors */
#define PORT_ERROR		0x10	/* sticky error status, W1C */
#define PORT_FIRST_ERROR	0x18	/* first error captured, W1C */
#define PORT_MALFORMED_REQ0	0x20	/* malformed request, low 64 bits */
#define PORT_MALFORMED_REQ1	0x28	/* malformed request, high 64 bits */

/* All-ones value used to mask every port error bit at once. */
#define ERROR_MASK		GENMASK_ULL(63, 0)
29 /* mask or unmask port errors by the error mask register. */
30 static void __afu_port_err_mask(struct device *dev, bool mask)
32 void __iomem *base;
34 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
36 writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
39 static void afu_port_err_mask(struct device *dev, bool mask)
41 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
43 mutex_lock(&pdata->lock);
44 __afu_port_err_mask(dev, mask);
45 mutex_unlock(&pdata->lock);
48 /* clear port errors. */
49 static int afu_port_err_clear(struct device *dev, u64 err)
51 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
52 struct platform_device *pdev = to_platform_device(dev);
53 void __iomem *base_err, *base_hdr;
54 int ret = -EBUSY;
55 u64 v;
57 base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
58 base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
60 mutex_lock(&pdata->lock);
63 * clear Port Errors
65 * - Check for AP6 State
66 * - Halt Port by keeping Port in reset
67 * - Set PORT Error mask to all 1 to mask errors
68 * - Clear all errors
69 * - Set Port mask to all 0 to enable errors
70 * - All errors start capturing new errors
71 * - Enable Port by pulling the port out of reset
74 /* if device is still in AP6 power state, can not clear any error. */
75 v = readq(base_hdr + PORT_HDR_STS);
76 if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
77 dev_err(dev, "Could not clear errors, device in AP6 state.\n");
78 goto done;
81 /* Halt Port by keeping Port in reset */
82 ret = __afu_port_disable(pdev);
83 if (ret)
84 goto done;
86 /* Mask all errors */
87 __afu_port_err_mask(dev, true);
89 /* Clear errors if err input matches with current port errors.*/
90 v = readq(base_err + PORT_ERROR);
92 if (v == err) {
93 writeq(v, base_err + PORT_ERROR);
95 v = readq(base_err + PORT_FIRST_ERROR);
96 writeq(v, base_err + PORT_FIRST_ERROR);
97 } else {
98 ret = -EINVAL;
101 /* Clear mask */
102 __afu_port_err_mask(dev, false);
104 /* Enable the Port by clear the reset */
105 __afu_port_enable(pdev);
107 done:
108 mutex_unlock(&pdata->lock);
109 return ret;
112 static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
113 char *buf)
115 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
116 void __iomem *base;
117 u64 error;
119 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
121 mutex_lock(&pdata->lock);
122 error = readq(base + PORT_ERROR);
123 mutex_unlock(&pdata->lock);
125 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
128 static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
129 const char *buff, size_t count)
131 u64 value;
132 int ret;
134 if (kstrtou64(buff, 0, &value))
135 return -EINVAL;
137 ret = afu_port_err_clear(dev, value);
139 return ret ? ret : count;
141 static DEVICE_ATTR_RW(errors);
143 static ssize_t first_error_show(struct device *dev,
144 struct device_attribute *attr, char *buf)
146 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
147 void __iomem *base;
148 u64 error;
150 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
152 mutex_lock(&pdata->lock);
153 error = readq(base + PORT_FIRST_ERROR);
154 mutex_unlock(&pdata->lock);
156 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
158 static DEVICE_ATTR_RO(first_error);
160 static ssize_t first_malformed_req_show(struct device *dev,
161 struct device_attribute *attr,
162 char *buf)
164 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
165 void __iomem *base;
166 u64 req0, req1;
168 base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
170 mutex_lock(&pdata->lock);
171 req0 = readq(base + PORT_MALFORMED_REQ0);
172 req1 = readq(base + PORT_MALFORMED_REQ1);
173 mutex_unlock(&pdata->lock);
175 return sprintf(buf, "0x%016llx%016llx\n",
176 (unsigned long long)req1, (unsigned long long)req0);
178 static DEVICE_ATTR_RO(first_malformed_req);
180 static struct attribute *port_err_attrs[] = {
181 &dev_attr_errors.attr,
182 &dev_attr_first_error.attr,
183 &dev_attr_first_malformed_req.attr,
184 NULL,
187 static umode_t port_err_attrs_visible(struct kobject *kobj,
188 struct attribute *attr, int n)
190 struct device *dev = kobj_to_dev(kobj);
193 * sysfs entries are visible only if related private feature is
194 * enumerated.
196 if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
197 return 0;
199 return attr->mode;
202 const struct attribute_group port_err_group = {
203 .name = "errors",
204 .attrs = port_err_attrs,
205 .is_visible = port_err_attrs_visible,
208 static int port_err_init(struct platform_device *pdev,
209 struct dfl_feature *feature)
211 afu_port_err_mask(&pdev->dev, false);
213 return 0;
216 static void port_err_uinit(struct platform_device *pdev,
217 struct dfl_feature *feature)
219 afu_port_err_mask(&pdev->dev, true);
222 const struct dfl_feature_id port_err_id_table[] = {
223 {.id = PORT_FEATURE_ID_ERROR,},
224 {0,}
227 const struct dfl_feature_ops port_err_ops = {
228 .init = port_err_init,
229 .uinit = port_err_uinit,