// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)
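
/*
 * Note on register semantics: the error status registers above are
 * cleared by writing the latched value back (write-1-to-clear), and each
 * *_ERROR_MASK register suppresses reporting of the bits set in it.
 * Both points are inferred from the access patterns in the handlers
 * below, not from a datasheet.
 */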

static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	/* mask all error reporting while clearing, so no new event races the clear */
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	/* only clear when the caller wrote back the currently latched value */
	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	/* unmask again */
	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
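
/*
 * Usage sketch (hypothetical sysfs path; the real one depends on how the
 * DFL bus enumerates the FME platform device):
 *
 *   # cat /sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
 *   0x10
 *   # echo 0x10 > /sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
 *
 * A write only succeeds when the written value matches the currently
 * latched one, which clears it; anything else fails with -EINVAL.
 */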

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	/* same mask, clear-on-match, unmask sequence as pcie0_errors_store() */
	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);
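
/*
 * The two RAS attributes above are read-only snapshots taken with a
 * single readq(), so no pdata->lock is held (a 64-bit MMIO read is
 * atomic). "nonfat" and "catfat" likely stand for non-fatal and
 * catastrophic/fatal RAS errors; the exact split is hardware-defined
 * and not documented in this file.
 */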

static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	/* read-modify-write: replace only the injection field */
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);
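
/*
 * Only bits 2:0 (INJECT_ERROR_MASK) are writable through inject_errors;
 * a value with any other bit set is rejected with -EINVAL. Which error
 * each injection bit triggers is hardware-defined and not spelled out
 * here. Hypothetical example:
 *
 *   # echo 0x1 > inject_errors   # start injecting one error type
 *   # echo 0x0 > inject_errors   # stop injection
 */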

static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);
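
/*
 * first_error/next_error read FME_FIRST_ERROR and FME_NEXT_ERROR, which
 * (judging by their names) latch the sources of the first and second
 * error events seen since the last clear; the exact latching behavior is
 * not documented in this file.
 */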

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}
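
/*
 * fme_err_mask() serves the init/uinit callbacks below: all error
 * sources are unmasked when the feature is bound and masked again on
 * teardown, so the hardware only reports errors while the driver is
 * attached.
 */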

static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
};
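
/*
 * fme_global_err_id_table and fme_global_err_ops are consumed by the FME
 * main driver (dfl-fme-main.c in the upstream tree), which matches
 * sub-feature IDs during DFL enumeration and calls .init/.uinit when the
 * global error feature is bound or unbound.
 */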