// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2015 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"

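/*
 * Delay and timeout values below are expressed in milliseconds (they are
 * consumed by msleep() and msecs_to_jiffies()); the retry values are counts.
 */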
#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY     10
#define ADF_PFVF_MSG_ACK_DELAY                  2
#define ADF_PFVF_MSG_ACK_MAX_RETRY              100
#define ADF_PFVF_MSG_RETRY_DELAY                5
#define ADF_PFVF_MSG_MAX_RETRIES                3
#define ADF_PFVF_MSG_RESP_TIMEOUT       (ADF_PFVF_MSG_ACK_DELAY * \
                                         ADF_PFVF_MSG_ACK_MAX_RETRY + \
                                         ADF_PFVF_MSG_COLLISION_DETECT_DELAY)

void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
        struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        unsigned long flags;

        spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
        hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
        spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
        struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        unsigned long flags;

        spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
        hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
        spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}

void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
                                      u32 vf_mask)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
        struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
        void __iomem *pmisc_addr = pmisc->virt_addr;

        spin_lock(&accel_dev->pf.vf2pf_ints_lock);
        hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
        spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
}

static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
        struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *pmisc_bar_addr =
                pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
        u32 val, pf2vf_offset, count = 0;
        u32 local_in_use_mask, local_in_use_pattern;
        u32 remote_in_use_mask, remote_in_use_pattern;
        struct mutex *lock;     /* lock preventing concurrent access to the CSR */
        u32 int_bit;
        int ret = 0;

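        /*
         * The PFVF CSR is shared by both ends of the channel. Select the
         * CSR offset, lock, in-use patterns and interrupt bit for the local
         * end depending on whether this device is a VF or the PF.
         */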
        if (accel_dev->is_vf) {
                pf2vf_offset = hw_data->get_pf2vf_offset(0);
                lock = &accel_dev->vf.vf2pf_lock;
                local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
                local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
                remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
                remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
                int_bit = ADF_VF2PF_INT;
        } else {
                pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
                lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
                local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
                local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
                remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
                remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
                int_bit = ADF_PF2VF_INT;
        }

        mutex_lock(lock);

        /* Check if the PFVF CSR is in use by remote function */
        val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
        if ((val & remote_in_use_mask) == remote_in_use_pattern) {
                dev_dbg(&GET_DEV(accel_dev),
                        "PFVF CSR in use by remote function\n");
                ret = -EBUSY;
                goto out;
        }

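        /* Replace the in-use field of the message with the local pattern */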
        msg &= ~local_in_use_mask;
        msg |= local_in_use_pattern;

        /* Attempt to get ownership of the PFVF CSR */
        ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);

        /* Wait for confirmation from remote func it received the message */
        do {
                msleep(ADF_PFVF_MSG_ACK_DELAY);
                val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
        } while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));

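        /*
         * The CSR should still hold the message that was just written, with
         * the interrupt bit cleared by the remote end as the ACK. Anything
         * else is treated as a collision with the remote function.
         */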
        if (val != msg) {
                dev_dbg(&GET_DEV(accel_dev),
                        "Collision - PFVF CSR overwritten by remote function\n");
                ret = -EIO;
                goto out;
        }

        if (val & int_bit) {
                dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
                val &= ~int_bit;
                ret = -EIO;
        }

        /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
        ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
        mutex_unlock(lock);
        return ret;
}

/**
 * adf_iov_putmsg() - send PFVF message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Message to send
 * @vf_nr: VF number to which the message will be sent if on PF, ignored
 *	otherwise
 *
 * Function sends a message through the PFVF channel
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
        u32 count = 0;
        int ret;

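        /*
         * Retry the transfer a few times, with a short pause in between, if
         * the CSR was busy or a collision was detected.
         */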
        do {
                ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
                if (ret)
                        msleep(ADF_PFVF_MSG_RETRY_DELAY);
        } while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));

        return ret;
}

/**
 * adf_send_pf2vf_msg() - send PF to VF message
 * @accel_dev: Pointer to acceleration device
 * @vf_nr: VF number to which the message will be sent
 * @msg: Message to send
 *
 * This function allows the PF to send a message to a specific VF.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
{
        return adf_iov_putmsg(accel_dev, msg, vf_nr);
}

/**
 * adf_send_vf2pf_msg() - send VF to PF message
 * @accel_dev: Pointer to acceleration device
 * @msg: Message to send
 *
 * This function allows the VF to send a message to the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
{
        return adf_iov_putmsg(accel_dev, msg, 0);
}

/**
 * adf_send_vf2pf_req() - send VF2PF request message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Request message to send
 *
 * This function sends a request message from the VF to the PF and waits for a
 * reply.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
{
        unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
        int ret;

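        /*
         * iov_msg_completion is completed by the VF's PF2VF response handler
         * (outside this file) once the reply from the PF arrives.
         */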
        reinit_completion(&accel_dev->vf.iov_msg_completion);

        /* Send request from VF to PF */
        ret = adf_send_vf2pf_msg(accel_dev, msg);
        if (ret) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to send request msg to PF\n");
                return ret;
        }

        /* Wait for response */
        if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
                                         timeout)) {
                dev_err(&GET_DEV(accel_dev),
                        "PFVF request/response message timeout expired\n");
                return -EIO;
        }

        return 0;
}

void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
        struct adf_accel_dev *accel_dev = vf_info->accel_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        int bar_id = hw_data->get_misc_bar_id(hw_data);
        struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        u32 msg, resp = 0, vf_nr = vf_info->vf_nr;

        /* Read message from the VF */
        msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
        if (!(msg & ADF_VF2PF_INT)) {
                dev_info(&GET_DEV(accel_dev),
                         "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
                goto out;
        }

        /* To ACK, clear the VF2PFINT bit */
        msg &= ~ADF_VF2PF_INT;
        ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);

        if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
                /* Ignore legacy non-system (non-kernel) VF2PF messages */
                goto err;

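        /* Decode the request type and build a response where one is needed */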
        switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
        case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
                {
                u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;

                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                        (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                         ADF_PF2VF_MSGTYPE_SHIFT) |
                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                         ADF_PF2VF_VERSION_RESP_VERS_SHIFT));

                dev_dbg(&GET_DEV(accel_dev),
                        "Compatibility Version Request from VF%d vers=%u\n",
                        vf_nr + 1, vf_compat_ver);

                if (vf_compat_ver < hw_data->min_iov_compat_ver) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) incompatible with PF (vers %d)\n",
                                vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else if (vf_compat_ver > ADF_PFVF_COMPAT_THIS_VERSION) {
                        dev_err(&GET_DEV(accel_dev),
                                "VF (vers %d) compat with PF (vers %d) unkn.\n",
                                vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                } else {
                        dev_dbg(&GET_DEV(accel_dev),
                                "VF (vers %d) compatible with PF (vers %d)\n",
                                vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION);
                        resp |= ADF_PF2VF_VF_COMPATIBLE <<
                                ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                }
                }
                break;
        case ADF_VF2PF_MSGTYPE_VERSION_REQ:
                dev_dbg(&GET_DEV(accel_dev),
                        "Legacy VersionRequest received from VF%d 0x%x\n",
                        vf_nr + 1, msg);
                resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                        (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
                         ADF_PF2VF_MSGTYPE_SHIFT) |
                        (ADF_PFVF_COMPAT_THIS_VERSION <<
                         ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
                resp |= ADF_PF2VF_VF_COMPATIBLE <<
                        ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
                /* Set legacy major and minor version num */
                resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
                        1 << ADF_PF2VF_MINORVERSION_SHIFT;
                break;
        case ADF_VF2PF_MSGTYPE_INIT:
                {
                dev_dbg(&GET_DEV(accel_dev),
                        "Init message received from VF%d 0x%x\n",
                        vf_nr + 1, msg);
                vf_info->init = true;
                }
                break;
        case ADF_VF2PF_MSGTYPE_SHUTDOWN:
                {
                dev_dbg(&GET_DEV(accel_dev),
                        "Shutdown message received from VF%d 0x%x\n",
                        vf_nr + 1, msg);
                vf_info->init = false;
                }
                break;
        default:
                goto err;
        }

        if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
                dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");

out:
        /* re-enable interrupt on PF from this VF */
        adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));

        return;
err:
        dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x)\n",
                vf_nr + 1, msg);
}

void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_vf_info *vf;
        u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
                   (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
        int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

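        /* Only VFs that completed the init handshake are notified */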
        for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
                if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
                        dev_err(&GET_DEV(accel_dev),
                                "Failed to send restarting msg to VF%d\n", i);
        }
}

static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 msg = 0;
        int ret;

        msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
        msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
        msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
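        /* ADF_PFVF_COMPAT_THIS_VERSION must fit in a single byte of the request */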
        BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);

        ret = adf_send_vf2pf_req(accel_dev, msg);
        if (ret) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to send Compatibility Version Request.\n");
                return ret;
        }

        /* Response from PF received, check compatibility */
        switch (accel_dev->vf.compatible) {
        case ADF_PF2VF_VF_COMPATIBLE:
                break;
        case ADF_PF2VF_VF_COMPAT_UNKNOWN:
                /* VF is newer than PF and decides whether it is compatible */
                if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver) {
                        accel_dev->vf.compatible = ADF_PF2VF_VF_COMPATIBLE;
                        break;
                }
                fallthrough;
        case ADF_PF2VF_VF_INCOMPATIBLE:
                dev_err(&GET_DEV(accel_dev),
                        "PF (vers %d) and VF (vers %d) are not compatible\n",
                        accel_dev->vf.pf_version,
                        ADF_PFVF_COMPAT_THIS_VERSION);
                return -EINVAL;
        default:
                dev_err(&GET_DEV(accel_dev),
                        "Invalid response from PF; assume not compatible\n");
                return -EINVAL;
        }
        return ret;
}

/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
        adf_enable_pf2vf_interrupts(accel_dev);
        return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);

/**
 * adf_enable_pf2vf_comms() - Function enables communication from pf to vf
 *
 * @accel_dev: Pointer to acceleration device (physical function).
 *
 * This function carries out the necessary steps to setup and start the PFVF
 * communication channel, if any.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
{
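        /* Protects the VF2PF interrupt enable/disable operations above */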
        spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);