// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)

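/*
 * Note on the layout above: each clearable error status register sits 0x8
 * above its companion mask register. fme_err_mask() below writes ERROR_MASK
 * (all ones) to a mask register to suppress error reporting, and 0 to
 * re-enable it.
 */
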
static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

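/*
 * Writing to pcie0_errors (and pcie1_errors below) clears the error status:
 * the handler masks all errors, verifies that the user-supplied value matches
 * the current hardware status, writes that value back to clear it, and then
 * unmasks again. A mismatched value is rejected with -EINVAL.
 */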
static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);

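/*
 * Example usage from user space (the device path is hypothetical; the
 * "errors" directory comes from fme_global_err_group below):
 *
 *   # cat /sys/bus/platform/devices/dfl-fme.0/errors/inject_errors
 *   0x0
 *   # echo 1 > /sys/bus/platform/devices/dfl-fme.0/errors/inject_errors
 *
 * Only the low three bits (INJECT_ERROR_MASK) are accepted; any other bits
 * set in the input are rejected with -EINVAL.
 */
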
static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if related private feature is
	 * supported.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

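/*
 * fme_global_err_group is intentionally non-static so the FME core driver
 * can register it with the platform device; because .name is set, the
 * attributes above appear under an errors/ subdirectory in sysfs rather
 * than directly in the device directory.
 */
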
static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}

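/*
 * init unmasks all error reporting when the feature is brought up; uinit
 * masks everything again on teardown, so no error bits are reported while
 * the feature driver is not attached.
 */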
static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

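/*
 * The ioctl handler only forwards the two error-IRQ requests defined in
 * <linux/fpga-dfl.h>: DFL_FPGA_FME_ERR_GET_IRQ_NUM reports how many error
 * interrupts the feature owns, and DFL_FPGA_FME_ERR_SET_IRQ binds eventfds
 * to them. Everything else falls through to -ENODEV.
 */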
static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};