// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@linux.intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel Henry <henry.mitchel@intel.com>
 */

#include <linux/uaccess.h>

#include "dfl-afu.h"

#define PORT_ERROR_MASK		0x8
#define PORT_ERROR		0x10
#define PORT_FIRST_ERROR	0x18
#define PORT_MALFORMED_REQ0	0x20
#define PORT_MALFORMED_REQ1	0x28

#define ERROR_MASK		GENMASK_ULL(63, 0)
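
/*
 * Note (added for clarity): the offsets above are relative to the port error
 * private feature's MMIO region, which is resolved at runtime with
 * dfl_get_feature_ioaddr_by_id(); they are not absolute BAR offsets.
 * ERROR_MASK covers all 64 error bits so the whole register can be masked
 * in a single write.
 */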
/* mask or unmask port errors by the error mask register. */
static void __afu_port_err_mask(struct device *dev, bool mask)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
static void afu_port_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);

	mutex_lock(&pdata->lock);
	__afu_port_err_mask(dev, mask);
	mutex_unlock(&pdata->lock);
}
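
/*
 * Locking note (added for clarity): __afu_port_err_mask() is the lock-free
 * helper used by afu_port_err_clear(), which already holds pdata->lock;
 * afu_port_err_mask() is the locked wrapper used from the init/uinit
 * callbacks at the bottom of this file.
 */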
/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *base_err, *base_hdr;
	int ret = -EBUSY;
	u64 v;

	base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
	base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);

	/*
	 * Clear port errors:
	 *
	 * - Check for AP6 state
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* if device is still in AP6 power state, cannot clear any error. */
	v = readq(base_hdr + PORT_HDR_STS);
	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		goto done;
	}

	/* Halt Port by keeping Port in reset */
	ret = __afu_port_disable(pdev);
	if (ret)
		goto done;

	/* Mask all errors */
	__afu_port_err_mask(dev, true);

	/* Clear errors only if the err input matches the current port errors. */
	v = readq(base_err + PORT_ERROR);

	if (v == err) {
		writeq(v, base_err + PORT_ERROR);

		v = readq(base_err + PORT_FIRST_ERROR);
		writeq(v, base_err + PORT_FIRST_ERROR);
	} else {
		ret = -EINVAL;
	}

	/* Clear mask to start capturing new errors */
	__afu_port_err_mask(dev, false);

	/* Enable the Port by clearing the reset */
	__afu_port_enable(pdev);

done:
	mutex_unlock(&pdata->lock);
	return ret;
}
static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 error;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	mutex_lock(&pdata->lock);
	error = readq(base + PORT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
			    const char *buff, size_t count)
{
	u64 value;
	int ret;

	if (kstrtou64(buff, 0, &value))
		return -EINVAL;

	ret = afu_port_err_clear(dev, value);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(errors);
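
/*
 * Illustrative user-space usage (added for clarity, not normative; the exact
 * sysfs path depends on how the DFL bus enumerates the port device):
 *
 *   # cat .../errors            - read the currently latched port errors
 *   # echo 0x10 > .../errors    - request a clear; it only succeeds if the
 *                                 written value matches the live register
 */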
static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 error;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	mutex_lock(&pdata->lock);
	error = readq(base + PORT_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
static DEVICE_ATTR_RO(first_error);
static ssize_t first_malformed_req_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 req0, req1;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	mutex_lock(&pdata->lock);
	req0 = readq(base + PORT_MALFORMED_REQ0);
	req1 = readq(base + PORT_MALFORMED_REQ1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%016llx%016llx\n",
		       (unsigned long long)req1, (unsigned long long)req0);
}
static DEVICE_ATTR_RO(first_malformed_req);
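
/*
 * Note (added for clarity): first_malformed_req is reported as one 128-bit
 * hex value; the high 64 bits come from PORT_MALFORMED_REQ1 and the low
 * 64 bits from PORT_MALFORMED_REQ0, matching the sprintf() format above.
 */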
static struct attribute *port_err_attrs[] = {
	&dev_attr_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_first_malformed_req.attr,
	NULL,
};
static umode_t port_err_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated on this port.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
		return 0;

	return attr->mode;
}
const struct attribute_group port_err_group = {
	.name       = "errors",
	.attrs      = port_err_attrs,
	.is_visible = port_err_attrs_visible,
};
static int port_err_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	afu_port_err_mask(&pdev->dev, false);

	return 0;
}
static void port_err_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	afu_port_err_mask(&pdev->dev, true);
}
const struct dfl_feature_id port_err_id_table[] = {
	{.id = PORT_FEATURE_ID_ERROR,},
	{0,}
};
const struct dfl_feature_ops port_err_ops = {
	.init = port_err_init,
	.uinit = port_err_uinit,
};
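
/*
 * Note (added for clarity): port_err_id_table and port_err_ops are not used
 * directly in this file; they are expected to be referenced by the AFU
 * platform driver's sub-feature table (dfl-afu-main.c in mainline) so this
 * error-reporting sub-feature is matched and initialized whenever a port
 * error private feature is enumerated.
 */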