1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2015 - 2020 Intel Corporation */
3 #include <linux/delay.h>
4 #include "adf_accel_devices.h"
5 #include "adf_common_drv.h"
6 #include "adf_pf2vf_msg.h"
/* DH895xCC error-mask CSRs used to gate VF2PF doorbell interrupts */
#define ADF_DH895XCC_EP_OFFSET	0x3A000
#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
/*
 * Parenthesize the macro argument: without the inner parens an argument
 * such as (a | b) would expand as (a | (b & 0xFFFF)) due to precedence.
 */
#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) (((vf_mask) & 0xFFFF) << 9)
#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) ((vf_mask) >> 16)
14 void adf_enable_pf2vf_interrupts(struct adf_accel_dev
*accel_dev
)
16 struct adf_accel_pci
*pci_info
= &accel_dev
->accel_pci_dev
;
17 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
18 void __iomem
*pmisc_bar_addr
=
19 pci_info
->pci_bars
[hw_data
->get_misc_bar_id(hw_data
)].virt_addr
;
21 ADF_CSR_WR(pmisc_bar_addr
, hw_data
->get_vintmsk_offset(0), 0x0);
24 void adf_disable_pf2vf_interrupts(struct adf_accel_dev
*accel_dev
)
26 struct adf_accel_pci
*pci_info
= &accel_dev
->accel_pci_dev
;
27 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
28 void __iomem
*pmisc_bar_addr
=
29 pci_info
->pci_bars
[hw_data
->get_misc_bar_id(hw_data
)].virt_addr
;
31 ADF_CSR_WR(pmisc_bar_addr
, hw_data
->get_vintmsk_offset(0), 0x2);
34 void adf_enable_vf2pf_interrupts(struct adf_accel_dev
*accel_dev
,
37 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
38 struct adf_bar
*pmisc
=
39 &GET_BARS(accel_dev
)[hw_data
->get_misc_bar_id(hw_data
)];
40 void __iomem
*pmisc_addr
= pmisc
->virt_addr
;
43 /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
44 if (vf_mask
& 0xFFFF) {
45 reg
= ADF_CSR_RD(pmisc_addr
, ADF_DH895XCC_ERRMSK3
);
46 reg
&= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask
);
47 ADF_CSR_WR(pmisc_addr
, ADF_DH895XCC_ERRMSK3
, reg
);
50 /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
52 reg
= ADF_CSR_RD(pmisc_addr
, ADF_DH895XCC_ERRMSK5
);
53 reg
&= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask
);
54 ADF_CSR_WR(pmisc_addr
, ADF_DH895XCC_ERRMSK5
, reg
);
58 void adf_disable_vf2pf_interrupts(struct adf_accel_dev
*accel_dev
, u32 vf_mask
)
60 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
61 struct adf_bar
*pmisc
=
62 &GET_BARS(accel_dev
)[hw_data
->get_misc_bar_id(hw_data
)];
63 void __iomem
*pmisc_addr
= pmisc
->virt_addr
;
66 /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
67 if (vf_mask
& 0xFFFF) {
68 reg
= ADF_CSR_RD(pmisc_addr
, ADF_DH895XCC_ERRMSK3
) |
69 ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask
);
70 ADF_CSR_WR(pmisc_addr
, ADF_DH895XCC_ERRMSK3
, reg
);
73 /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
75 reg
= ADF_CSR_RD(pmisc_addr
, ADF_DH895XCC_ERRMSK5
) |
76 ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask
);
77 ADF_CSR_WR(pmisc_addr
, ADF_DH895XCC_ERRMSK5
, reg
);
81 static int __adf_iov_putmsg(struct adf_accel_dev
*accel_dev
, u32 msg
, u8 vf_nr
)
83 struct adf_accel_pci
*pci_info
= &accel_dev
->accel_pci_dev
;
84 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
85 void __iomem
*pmisc_bar_addr
=
86 pci_info
->pci_bars
[hw_data
->get_misc_bar_id(hw_data
)].virt_addr
;
87 u32 val
, pf2vf_offset
, count
= 0;
88 u32 local_in_use_mask
, local_in_use_pattern
;
89 u32 remote_in_use_mask
, remote_in_use_pattern
;
90 struct mutex
*lock
; /* lock preventing concurrent acces of CSR */
94 if (accel_dev
->is_vf
) {
95 pf2vf_offset
= hw_data
->get_pf2vf_offset(0);
96 lock
= &accel_dev
->vf
.vf2pf_lock
;
97 local_in_use_mask
= ADF_VF2PF_IN_USE_BY_VF_MASK
;
98 local_in_use_pattern
= ADF_VF2PF_IN_USE_BY_VF
;
99 remote_in_use_mask
= ADF_PF2VF_IN_USE_BY_PF_MASK
;
100 remote_in_use_pattern
= ADF_PF2VF_IN_USE_BY_PF
;
101 int_bit
= ADF_VF2PF_INT
;
103 pf2vf_offset
= hw_data
->get_pf2vf_offset(vf_nr
);
104 lock
= &accel_dev
->pf
.vf_info
[vf_nr
].pf2vf_lock
;
105 local_in_use_mask
= ADF_PF2VF_IN_USE_BY_PF_MASK
;
106 local_in_use_pattern
= ADF_PF2VF_IN_USE_BY_PF
;
107 remote_in_use_mask
= ADF_VF2PF_IN_USE_BY_VF_MASK
;
108 remote_in_use_pattern
= ADF_VF2PF_IN_USE_BY_VF
;
109 int_bit
= ADF_PF2VF_INT
;
114 /* Check if PF2VF CSR is in use by remote function */
115 val
= ADF_CSR_RD(pmisc_bar_addr
, pf2vf_offset
);
116 if ((val
& remote_in_use_mask
) == remote_in_use_pattern
) {
117 dev_dbg(&GET_DEV(accel_dev
),
118 "PF2VF CSR in use by remote function\n");
123 /* Attempt to get ownership of PF2VF CSR */
124 msg
&= ~local_in_use_mask
;
125 msg
|= local_in_use_pattern
;
126 ADF_CSR_WR(pmisc_bar_addr
, pf2vf_offset
, msg
);
128 /* Wait in case remote func also attempting to get ownership */
129 msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY
);
131 val
= ADF_CSR_RD(pmisc_bar_addr
, pf2vf_offset
);
132 if ((val
& local_in_use_mask
) != local_in_use_pattern
) {
133 dev_dbg(&GET_DEV(accel_dev
),
134 "PF2VF CSR in use by remote - collision detected\n");
140 * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
141 * remain in the PF2VF CSR for all writes including ACK from remote
142 * until this local function relinquishes the CSR. Send the message
143 * by interrupting the remote.
145 ADF_CSR_WR(pmisc_bar_addr
, pf2vf_offset
, msg
| int_bit
);
147 /* Wait for confirmation from remote func it received the message */
149 msleep(ADF_IOV_MSG_ACK_DELAY
);
150 val
= ADF_CSR_RD(pmisc_bar_addr
, pf2vf_offset
);
151 } while ((val
& int_bit
) && (count
++ < ADF_IOV_MSG_ACK_MAX_RETRY
));
154 dev_dbg(&GET_DEV(accel_dev
), "ACK not received from remote\n");
159 /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
160 ADF_CSR_WR(pmisc_bar_addr
, pf2vf_offset
, val
& ~local_in_use_mask
);
167 * adf_iov_putmsg() - send PF2VF message
168 * @accel_dev: Pointer to acceleration device.
169 * @msg: Message to send
170 * @vf_nr: VF number to which the message will be sent
172 * Function sends a messge from the PF to a VF
174 * Return: 0 on success, error code otherwise.
176 int adf_iov_putmsg(struct adf_accel_dev
*accel_dev
, u32 msg
, u8 vf_nr
)
182 ret
= __adf_iov_putmsg(accel_dev
, msg
, vf_nr
);
184 msleep(ADF_IOV_MSG_RETRY_DELAY
);
185 } while (ret
&& (count
++ < ADF_IOV_MSG_MAX_RETRIES
));
189 EXPORT_SYMBOL_GPL(adf_iov_putmsg
);
191 void adf_vf2pf_req_hndl(struct adf_accel_vf_info
*vf_info
)
193 struct adf_accel_dev
*accel_dev
= vf_info
->accel_dev
;
194 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
195 int bar_id
= hw_data
->get_misc_bar_id(hw_data
);
196 struct adf_bar
*pmisc
= &GET_BARS(accel_dev
)[bar_id
];
197 void __iomem
*pmisc_addr
= pmisc
->virt_addr
;
198 u32 msg
, resp
= 0, vf_nr
= vf_info
->vf_nr
;
200 /* Read message from the VF */
201 msg
= ADF_CSR_RD(pmisc_addr
, hw_data
->get_pf2vf_offset(vf_nr
));
203 /* To ACK, clear the VF2PFINT bit */
204 msg
&= ~ADF_VF2PF_INT
;
205 ADF_CSR_WR(pmisc_addr
, hw_data
->get_pf2vf_offset(vf_nr
), msg
);
207 if (!(msg
& ADF_VF2PF_MSGORIGIN_SYSTEM
))
208 /* Ignore legacy non-system (non-kernel) VF2PF messages */
211 switch ((msg
& ADF_VF2PF_MSGTYPE_MASK
) >> ADF_VF2PF_MSGTYPE_SHIFT
) {
212 case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ
:
214 u8 vf_compat_ver
= msg
>> ADF_VF2PF_COMPAT_VER_REQ_SHIFT
;
216 resp
= (ADF_PF2VF_MSGORIGIN_SYSTEM
|
217 (ADF_PF2VF_MSGTYPE_VERSION_RESP
<<
218 ADF_PF2VF_MSGTYPE_SHIFT
) |
219 (ADF_PFVF_COMPATIBILITY_VERSION
<<
220 ADF_PF2VF_VERSION_RESP_VERS_SHIFT
));
222 dev_dbg(&GET_DEV(accel_dev
),
223 "Compatibility Version Request from VF%d vers=%u\n",
224 vf_nr
+ 1, vf_compat_ver
);
226 if (vf_compat_ver
< hw_data
->min_iov_compat_ver
) {
227 dev_err(&GET_DEV(accel_dev
),
228 "VF (vers %d) incompatible with PF (vers %d)\n",
229 vf_compat_ver
, ADF_PFVF_COMPATIBILITY_VERSION
);
230 resp
|= ADF_PF2VF_VF_INCOMPATIBLE
<<
231 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT
;
232 } else if (vf_compat_ver
> ADF_PFVF_COMPATIBILITY_VERSION
) {
233 dev_err(&GET_DEV(accel_dev
),
234 "VF (vers %d) compat with PF (vers %d) unkn.\n",
235 vf_compat_ver
, ADF_PFVF_COMPATIBILITY_VERSION
);
236 resp
|= ADF_PF2VF_VF_COMPAT_UNKNOWN
<<
237 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT
;
239 dev_dbg(&GET_DEV(accel_dev
),
240 "VF (vers %d) compatible with PF (vers %d)\n",
241 vf_compat_ver
, ADF_PFVF_COMPATIBILITY_VERSION
);
242 resp
|= ADF_PF2VF_VF_COMPATIBLE
<<
243 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT
;
247 case ADF_VF2PF_MSGTYPE_VERSION_REQ
:
248 dev_dbg(&GET_DEV(accel_dev
),
249 "Legacy VersionRequest received from VF%d 0x%x\n",
251 resp
= (ADF_PF2VF_MSGORIGIN_SYSTEM
|
252 (ADF_PF2VF_MSGTYPE_VERSION_RESP
<<
253 ADF_PF2VF_MSGTYPE_SHIFT
) |
254 (ADF_PFVF_COMPATIBILITY_VERSION
<<
255 ADF_PF2VF_VERSION_RESP_VERS_SHIFT
));
256 resp
|= ADF_PF2VF_VF_COMPATIBLE
<<
257 ADF_PF2VF_VERSION_RESP_RESULT_SHIFT
;
258 /* Set legacy major and minor version num */
259 resp
|= 1 << ADF_PF2VF_MAJORVERSION_SHIFT
|
260 1 << ADF_PF2VF_MINORVERSION_SHIFT
;
262 case ADF_VF2PF_MSGTYPE_INIT
:
264 dev_dbg(&GET_DEV(accel_dev
),
265 "Init message received from VF%d 0x%x\n",
267 vf_info
->init
= true;
270 case ADF_VF2PF_MSGTYPE_SHUTDOWN
:
272 dev_dbg(&GET_DEV(accel_dev
),
273 "Shutdown message received from VF%d 0x%x\n",
275 vf_info
->init
= false;
282 if (resp
&& adf_iov_putmsg(accel_dev
, resp
, vf_nr
))
283 dev_err(&GET_DEV(accel_dev
), "Failed to send response to VF\n");
285 /* re-enable interrupt on PF from this VF */
286 adf_enable_vf2pf_interrupts(accel_dev
, (1 << vf_nr
));
289 dev_dbg(&GET_DEV(accel_dev
), "Unknown message from VF%d (0x%x);\n",
293 void adf_pf2vf_notify_restarting(struct adf_accel_dev
*accel_dev
)
295 struct adf_accel_vf_info
*vf
;
296 u32 msg
= (ADF_PF2VF_MSGORIGIN_SYSTEM
|
297 (ADF_PF2VF_MSGTYPE_RESTARTING
<< ADF_PF2VF_MSGTYPE_SHIFT
));
298 int i
, num_vfs
= pci_num_vf(accel_to_pci_dev(accel_dev
));
300 for (i
= 0, vf
= accel_dev
->pf
.vf_info
; i
< num_vfs
; i
++, vf
++) {
301 if (vf
->init
&& adf_iov_putmsg(accel_dev
, msg
, i
))
302 dev_err(&GET_DEV(accel_dev
),
303 "Failed to send restarting msg to VF%d\n", i
);
307 static int adf_vf2pf_request_version(struct adf_accel_dev
*accel_dev
)
309 unsigned long timeout
= msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT
);
310 struct adf_hw_device_data
*hw_data
= accel_dev
->hw_device
;
314 msg
= ADF_VF2PF_MSGORIGIN_SYSTEM
;
315 msg
|= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ
<< ADF_VF2PF_MSGTYPE_SHIFT
;
316 msg
|= ADF_PFVF_COMPATIBILITY_VERSION
<< ADF_VF2PF_COMPAT_VER_REQ_SHIFT
;
317 BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION
> 255);
319 /* Send request from VF to PF */
320 ret
= adf_iov_putmsg(accel_dev
, msg
, 0);
322 dev_err(&GET_DEV(accel_dev
),
323 "Failed to send Compatibility Version Request.\n");
327 /* Wait for response */
328 if (!wait_for_completion_timeout(&accel_dev
->vf
.iov_msg_completion
,
330 dev_err(&GET_DEV(accel_dev
),
331 "IOV request/response message timeout expired\n");
335 /* Response from PF received, check compatibility */
336 switch (accel_dev
->vf
.compatible
) {
337 case ADF_PF2VF_VF_COMPATIBLE
:
339 case ADF_PF2VF_VF_COMPAT_UNKNOWN
:
340 /* VF is newer than PF and decides whether it is compatible */
341 if (accel_dev
->vf
.pf_version
>= hw_data
->min_iov_compat_ver
)
344 case ADF_PF2VF_VF_INCOMPATIBLE
:
345 dev_err(&GET_DEV(accel_dev
),
346 "PF (vers %d) and VF (vers %d) are not compatible\n",
347 accel_dev
->vf
.pf_version
,
348 ADF_PFVF_COMPATIBILITY_VERSION
);
351 dev_err(&GET_DEV(accel_dev
),
352 "Invalid response from PF; assume not compatible\n");
/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	/* Unmask PF2VF interrupts before negotiating the protocol version */
	adf_enable_pf2vf_interrupts(accel_dev);
	return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);