1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/netdevice.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/eswitch.h>
36 #include <linux/mlx5/vport.h>
37 #include "lib/devcom.h"
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40 #include "lag.h"
41 #include "mp.h"
42 
43 /* General purpose, use for short periods of time.
44  * Beware of lock dependencies (preferably, no locks should be acquired
45  * under it).
46  */
47 static DEFINE_SPINLOCK(lag_lock);
48 
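/* Issue the CREATE_LAG firmware command. In the default queue-affinity
 * mode the Q-to-port mapping is taken from remap_port1/remap_port2;
 * when MLX5_LAG_FLAG_HASH_BASED is set, hash based port selection
 * (port selection flow table) is requested instead and the remap
 * arguments are not used.
 */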
49 static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
50 			       u8 remap_port2, bool shared_fdb, u8 flags)
51 {
52 	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
53 	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
54 
55 	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
56 
57 	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
58 	if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
59 		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
60 		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
61 	} else {
62 		MLX5_SET(lagc, lag_ctx, port_select_mode,
63 			 MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
64 	}
65 
66 	return mlx5_cmd_exec_in(dev, create_lag, in);
67 }
68 
69 static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
70 			       u8 remap_port2)
71 {
72 	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
73 	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
74 
75 	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
76 	MLX5_SET(modify_lag_in, in, field_select, 0x1);
77 
78 	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
79 	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
80 
81 	return mlx5_cmd_exec_in(dev, modify_lag, in);
82 }
83 
84 int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
85 {
86 	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};
87 
88 	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
89 
90 	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
91 }
92 EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
93 
94 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
95 {
96 	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};
97 
98 	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
99 
100 	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
101 }
102 EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
103 
104 static int mlx5_lag_netdev_event(struct notifier_block *this,
105 				 unsigned long event, void *ptr);
106 static void mlx5_do_bond_work(struct work_struct *work);
107 
108 static void mlx5_ldev_free(struct kref *ref)
109 {
110 	struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
111 
112 	if (ldev->nb.notifier_call)
113 		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
114 	mlx5_lag_mp_cleanup(ldev);
115 	cancel_delayed_work_sync(&ldev->bond_work);
116 	destroy_workqueue(ldev->wq);
117 	kfree(ldev);
118 }
119 
120 static void mlx5_ldev_put(struct mlx5_lag *ldev)
121 {
122 	kref_put(&ldev->ref, mlx5_ldev_free);
123 }
124 
125 static void mlx5_ldev_get(struct mlx5_lag *ldev)
126 {
127 	kref_get(&ldev->ref);
128 }
129 
130 static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
131 {
132 	struct mlx5_lag *ldev;
133 	int err;
134 
135 	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
136 	if (!ldev)
137 		return NULL;
138 
139 	ldev->wq = create_singlethread_workqueue("mlx5_lag");
140 	if (!ldev->wq) {
141 		kfree(ldev);
142 		return NULL;
143 	}
144 
145 	kref_init(&ldev->ref);
146 	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
147 
148 	ldev->nb.notifier_call = mlx5_lag_netdev_event;
149 	if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
150 		ldev->nb.notifier_call = NULL;
151 		mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
152 	}
153 
154 	err = mlx5_lag_mp_init(ldev);
155 	if (err)
156 		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
157 			      err);
158 
159 	return ldev;
160 }
161 
162 int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
163 				struct net_device *ndev)
164 {
165 	int i;
166 
167 	for (i = 0; i < MLX5_MAX_PORTS; i++)
168 		if (ldev->pf[i].netdev == ndev)
169 			return i;
170 
171 	return -ENOENT;
172 }
173 
174 static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
175 {
176 	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
177 }
178 
179 static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
180 {
181 	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
182 }
183 
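/* Derive the Q-to-physical-port affinity from the tracked slave state:
 * if both ports (or neither) are usable, keep the identity mapping
 * 1->1, 2->2; if only one port is tx-enabled and up, map both logical
 * ports to it so all traffic egresses through the active port.
 */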
184 static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
185 					   u8 *port1, u8 *port2)
186 {
187 	bool p1en;
188 	bool p2en;
189 
190 	p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
191 	       tracker->netdev_state[MLX5_LAG_P1].link_up;
192 
193 	p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
194 	       tracker->netdev_state[MLX5_LAG_P2].link_up;
195 
196 	*port1 = 1;
197 	*port2 = 2;
198 	if ((!p1en && !p2en) || (p1en && p2en))
199 		return;
200 
201 	if (p1en)
202 		*port2 = 1;
203 	else
204 		*port1 = 2;
205 }
206 
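/* Apply a new port affinity, either by updating the port selection
 * flow table (hash based mode) or via the MODIFY_LAG command.
 */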
207 static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
208 {
209 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
210 
211 	if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
212 		return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
213 	return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
214 }
215 
216 void mlx5_modify_lag(struct mlx5_lag *ldev,
217 		     struct lag_tracker *tracker)
218 {
219 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
220 	u8 v2p_port1, v2p_port2;
221 	int err;
222 
223 	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
224 				       &v2p_port2);
225 
226 	if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
227 	    v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
228 		err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
229 		if (err) {
230 			mlx5_core_err(dev0,
231 				      "Failed to modify LAG (%d)\n",
232 				      err);
233 			return;
234 		}
235 		ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
236 		ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
237 		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
238 			       ldev->v2p_map[MLX5_LAG_P1],
239 			       ldev->v2p_map[MLX5_LAG_P2]);
240 	}
241 }
242 
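/* Hash based (port selection flow table) mode is only chosen for
 * non-RoCE LAG when the device supports the port selection flow table
 * and the bond uses a hash TX policy; otherwise the default
 * queue-affinity mode is kept.
 */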
243 static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
244 				       struct lag_tracker *tracker, u8 *flags)
245 {
246 	bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
247 	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
248 
249 	if (roce_lag ||
250 	    !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
251 	    tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
252 		return;
253 	*flags |= MLX5_LAG_FLAG_HASH_BASED;
254 }
255 
256 static char *get_str_port_sel_mode(u8 flags)
257 {
258 	if (flags & MLX5_LAG_FLAG_HASH_BASED)
259 		return "hash";
260 	return "queue_affinity";
261 }
262 
263 static int mlx5_create_lag(struct mlx5_lag *ldev,
264 			   struct lag_tracker *tracker,
265 			   bool shared_fdb, u8 flags)
266 {
267 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
268 	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
269 	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
270 	int err;
271 
272 	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
273 		       ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
274 		       shared_fdb, get_str_port_sel_mode(flags));
275 
276 	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
277 				  ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
278 	if (err) {
279 		mlx5_core_err(dev0,
280 			      "Failed to create LAG (%d)\n",
281 			      err);
282 		return err;
283 	}
284 
285 	if (shared_fdb) {
286 		err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
287 							      dev1->priv.eswitch);
288 		if (err)
289 			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
290 		else
291 			mlx5_core_info(dev0, "Operation mode is single FDB\n");
292 	}
293 
294 	if (err) {
295 		MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
296 		if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
297 			mlx5_core_err(dev0,
298 				      "Failed to deactivate RoCE LAG; driver restart required\n");
299 	}
300 
301 	return err;
302 }
303 
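/* Activate hardware LAG: compute the initial port affinity, pick the
 * port selection mode, create the port selection flow table if hash
 * based mode was chosen, then issue CREATE_LAG (optionally in shared
 * FDB mode). On failure the port selection tables are cleaned up and
 * an error is logged.
 */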
304 int mlx5_activate_lag(struct mlx5_lag *ldev,
305 		      struct lag_tracker *tracker,
306 		      u8 flags,
307 		      bool shared_fdb)
308 {
309 	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
310 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
311 	int err;
312 
313 	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
314 				       &ldev->v2p_map[MLX5_LAG_P2]);
315 	mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
316 	if (flags & MLX5_LAG_FLAG_HASH_BASED) {
317 		err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
318 					       ldev->v2p_map[MLX5_LAG_P1],
319 					       ldev->v2p_map[MLX5_LAG_P2]);
320 		if (err) {
321 			mlx5_core_err(dev0,
322 				      "Failed to create LAG port selection(%d)\n",
323 				      err);
324 			return err;
325 		}
326 	}
327 
328 	err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
329 	if (err) {
330 		if (flags & MLX5_LAG_FLAG_HASH_BASED)
331 			mlx5_lag_port_sel_destroy(ldev);
332 		if (roce_lag)
333 			mlx5_core_err(dev0,
334 				      "Failed to activate RoCE LAG\n");
335 		else
336 			mlx5_core_err(dev0,
337 				      "Failed to activate VF LAG\n"
338 				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
339 		return err;
340 	}
341 
342 	ldev->flags |= flags;
343 	ldev->shared_fdb = shared_fdb;
344 	return 0;
345 }
346 
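/* Tear down hardware LAG: clear the mode flags, reset multipath state,
 * undo shared FDB setup if it was enabled, issue DESTROY_LAG and, for
 * hash based mode, destroy the port selection flow table.
 */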
347 static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
348 {
349 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
350 	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
351 	bool roce_lag = __mlx5_lag_is_roce(ldev);
352 	u8 flags = ldev->flags;
353 	int err;
354 
355 	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
356 	mlx5_lag_mp_reset(ldev);
357 
358 	if (ldev->shared_fdb) {
359 		mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch,
360 							 ldev->pf[MLX5_LAG_P2].dev->priv.eswitch);
361 		ldev->shared_fdb = false;
362 	}
363 
364 	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
365 	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
366 	if (err) {
367 		if (roce_lag) {
368 			mlx5_core_err(dev0,
369 				      "Failed to deactivate RoCE LAG; driver restart required\n");
370 		} else {
371 			mlx5_core_err(dev0,
372 				      "Failed to deactivate VF LAG; driver restart required\n"
373 				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
374 		}
375 	} else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
376 		mlx5_lag_port_sel_destroy(ldev);
377 	}
378 
379 	return err;
380 }
381 
382 static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
383 {
384 	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
385 		return false;
386 
387 #ifdef CONFIG_MLX5_ESWITCH
388 	return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
389 				   ldev->pf[MLX5_LAG_P2].dev);
390 #else
391 	return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
392 		!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
393 #endif
394 }
395 
396 static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
397 {
398 	int i;
399 
400 	for (i = 0; i < MLX5_MAX_PORTS; i++) {
401 		if (!ldev->pf[i].dev)
402 			continue;
403 
404 		if (ldev->pf[i].dev->priv.flags &
405 		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
406 			continue;
407 
408 		ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
409 		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
410 	}
411 }
412 
413 static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
414 {
415 	int i;
416 
417 	for (i = 0; i < MLX5_MAX_PORTS; i++) {
418 		if (!ldev->pf[i].dev)
419 			continue;
420 
421 		if (ldev->pf[i].dev->priv.flags &
422 		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
423 			continue;
424 
425 		ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
426 		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
427 	}
428 }
429 
430 static void mlx5_disable_lag(struct mlx5_lag *ldev)
431 {
432 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
433 	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
434 	bool shared_fdb = ldev->shared_fdb;
435 	bool roce_lag;
436 	int err;
437 
438 	roce_lag = __mlx5_lag_is_roce(ldev);
439 
440 	if (shared_fdb) {
441 		mlx5_lag_remove_devices(ldev);
442 	} else if (roce_lag) {
443 		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
444 			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
445 			mlx5_rescan_drivers_locked(dev0);
446 		}
447 		mlx5_nic_vport_disable_roce(dev1);
448 	}
449 
450 	err = mlx5_deactivate_lag(ldev);
451 	if (err)
452 		return;
453 
454 	if (shared_fdb || roce_lag)
455 		mlx5_lag_add_devices(ldev);
456 
457 	if (shared_fdb) {
458 		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
459 			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
460 		if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
461 			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
462 	}
463 }
464 
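/* Shared FDB can be used only when both devices are in switchdev mode
 * with vport match metadata enabled, their eswitches are paired
 * through devcom, and the firmware exposes the native FDB selection,
 * root FT on other eswitch and shared ingress ACL capabilities.
 */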
465 static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
466 {
467 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
468 	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
469 
470 	if (is_mdev_switchdev_mode(dev0) &&
471 	    is_mdev_switchdev_mode(dev1) &&
472 	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
473 	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
474 	    mlx5_devcom_is_paired(dev0->priv.devcom,
475 				  MLX5_DEVCOM_ESW_OFFLOADS) &&
476 	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
477 	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
478 	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
479 		return true;
480 
481 	return false;
482 }
483 
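/* Re-evaluate the bond state tracked from netdev events and move the
 * devices in or out of hardware LAG accordingly. RoCE LAG is chosen
 * when SR-IOV is disabled on both ports (and the eswitches are in
 * NONE mode); otherwise VF LAG is used, with shared FDB when
 * supported. Called with the interface list lock and both eswitch
 * mode locks held.
 */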
484 static void mlx5_do_bond(struct mlx5_lag *ldev)
485 {
486 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
487 	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
488 	struct lag_tracker tracker;
489 	bool do_bond, roce_lag;
490 	int err;
491 
492 	if (!mlx5_lag_is_ready(ldev)) {
493 		do_bond = false;
494 	} else {
495 		/* VF LAG is in multipath mode, ignore bond change requests */
496 		if (mlx5_lag_is_multipath(dev0))
497 			return;
498 
499 		tracker = ldev->tracker;
500 
501 		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
502 	}
503 
504 	if (do_bond && !__mlx5_lag_is_active(ldev)) {
505 		bool shared_fdb = mlx5_shared_fdb_supported(ldev);
506 
507 		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
508 			   !mlx5_sriov_is_enabled(dev1);
509 
510 #ifdef CONFIG_MLX5_ESWITCH
511 		roce_lag = roce_lag &&
512 			   dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
513 			   dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
514 #endif
515 
516 		if (shared_fdb || roce_lag)
517 			mlx5_lag_remove_devices(ldev);
518 
519 		err = mlx5_activate_lag(ldev, &tracker,
520 					roce_lag ? MLX5_LAG_FLAG_ROCE :
521 						   MLX5_LAG_FLAG_SRIOV,
522 					shared_fdb);
523 		if (err) {
524 			if (shared_fdb || roce_lag)
525 				mlx5_lag_add_devices(ldev);
526 
527 			return;
528 		} else if (roce_lag) {
529 			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
530 			mlx5_rescan_drivers_locked(dev0);
531 			mlx5_nic_vport_enable_roce(dev1);
532 		} else if (shared_fdb) {
533 			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
534 			mlx5_rescan_drivers_locked(dev0);
535 
536 			err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
537 			if (!err)
538 				err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
539 
540 			if (err) {
541 				dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
542 				mlx5_rescan_drivers_locked(dev0);
543 				mlx5_deactivate_lag(ldev);
544 				mlx5_lag_add_devices(ldev);
545 				mlx5_eswitch_reload_reps(dev0->priv.eswitch);
546 				mlx5_eswitch_reload_reps(dev1->priv.eswitch);
547 				mlx5_core_err(dev0, "Failed to enable lag\n");
548 				return;
549 			}
550 		}
551 	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
552 		mlx5_modify_lag(ldev, &tracker);
553 	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
554 		mlx5_disable_lag(ldev);
555 	}
556 }
557 
558 static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
559 {
560 	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
561 }
562 
563 static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
564 				    struct mlx5_core_dev *dev1)
565 {
566 	if (dev0)
567 		mlx5_esw_lock(dev0->priv.eswitch);
568 	if (dev1)
569 		mlx5_esw_lock(dev1->priv.eswitch);
570 }
571 
572 static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
573 				      struct mlx5_core_dev *dev1)
574 {
575 	if (dev1)
576 		mlx5_esw_unlock(dev1->priv.eswitch);
577 	if (dev0)
578 		mlx5_esw_unlock(dev0->priv.eswitch);
579 }
580 
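/* Delayed work that serializes bond re-evaluation: it retries later if
 * the interface list lock cannot be taken or a LAG mode change is in
 * progress, and otherwise runs mlx5_do_bond() under the eswitch mode
 * locks.
 */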
581 static void mlx5_do_bond_work(struct work_struct *work)
582 {
583 	struct delayed_work *delayed_work = to_delayed_work(work);
584 	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
585 					     bond_work);
586 	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
587 	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
588 	int status;
589 
590 	status = mlx5_dev_list_trylock();
591 	if (!status) {
592 		mlx5_queue_bond_work(ldev, HZ);
593 		return;
594 	}
595 
596 	if (ldev->mode_changes_in_progress) {
597 		mlx5_dev_list_unlock();
598 		mlx5_queue_bond_work(ldev, HZ);
599 		return;
600 	}
601 
602 	mlx5_lag_lock_eswitches(dev0, dev1);
603 	mlx5_do_bond(ldev);
604 	mlx5_lag_unlock_eswitches(dev0, dev1);
605 	mlx5_dev_list_unlock();
606 }
607 
608 static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
609 					 struct lag_tracker *tracker,
610 					 struct net_device *ndev,
611 					 struct netdev_notifier_changeupper_info *info)
612 {
613 	struct net_device *upper = info->upper_dev, *ndev_tmp;
614 	struct netdev_lag_upper_info *lag_upper_info = NULL;
615 	bool is_bonded, is_in_lag, mode_supported;
616 	int bond_status = 0;
617 	int num_slaves = 0;
618 	int changed = 0;
619 	int idx;
620 
621 	if (!netif_is_lag_master(upper))
622 		return 0;
623 
624 	if (info->linking)
625 		lag_upper_info = info->upper_info;
626 
627 	/* The event may still be of interest if the slave does not belong to
628 	 * us, but is enslaved to a master which has one or more of our netdevs
629 	 * as slaves (e.g., if a new slave is added to a master that bonds two
630 	 * of our netdevs, we should unbond).
631 	 */
632 	rcu_read_lock();
633 	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
634 		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
635 		if (idx >= 0)
636 			bond_status |= (1 << idx);
637 
638 		num_slaves++;
639 	}
640 	rcu_read_unlock();
641 
642 	/* None of this lagdev's netdevs are slaves of this master. */
643 	if (!(bond_status & 0x3))
644 		return 0;
645 
646 	if (lag_upper_info) {
647 		tracker->tx_type = lag_upper_info->tx_type;
648 		tracker->hash_type = lag_upper_info->hash_type;
649 	}
650 
651 	/* Determine bonding status:
652 	 * A device is considered bonded if both its physical ports are slaves
653 	 * of the same lag master, and only them.
654 	 */
655 	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
656 
657 	/* Lag mode must be activebackup or hash. */
658 	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
659 			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
660 
661 	is_bonded = is_in_lag && mode_supported;
662 	if (tracker->is_bonded != is_bonded) {
663 		tracker->is_bonded = is_bonded;
664 		changed = 1;
665 	}
666 
667 	if (!is_in_lag)
668 		return changed;
669 
670 	if (!mlx5_lag_is_ready(ldev))
671 		NL_SET_ERR_MSG_MOD(info->info.extack,
672 				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
673 	else if (!mode_supported)
674 		NL_SET_ERR_MSG_MOD(info->info.extack,
675 				   "Can't activate LAG offload, TX type isn't supported");
676 
677 	return changed;
678 }
679 
680 static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
681 					      struct lag_tracker *tracker,
682 					      struct net_device *ndev,
683 					      struct netdev_notifier_changelowerstate_info *info)
684 {
685 	struct netdev_lag_lower_state_info *lag_lower_info;
686 	int idx;
687 
688 	if (!netif_is_lag_port(ndev))
689 		return 0;
690 
691 	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
692 	if (idx < 0)
693 		return 0;
694 
695 	/* This information is used to determine virtual to physical
696 	 * port mapping.
697 	 */
698 	lag_lower_info = info->lower_state_info;
699 	if (!lag_lower_info)
700 		return 0;
701 
702 	tracker->netdev_state[idx] = *lag_lower_info;
703 
704 	return 1;
705 }
706 
707 static int mlx5_lag_netdev_event(struct notifier_block *this,
708 				 unsigned long event, void *ptr)
709 {
710 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
711 	struct lag_tracker tracker;
712 	struct mlx5_lag *ldev;
713 	int changed = 0;
714 
715 	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
716 		return NOTIFY_DONE;
717 
718 	ldev    = container_of(this, struct mlx5_lag, nb);
719 
720 	tracker = ldev->tracker;
721 
722 	switch (event) {
723 	case NETDEV_CHANGEUPPER:
724 		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
725 							ptr);
726 		break;
727 	case NETDEV_CHANGELOWERSTATE:
728 		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
729 							     ndev, ptr);
730 		break;
731 	}
732 
733 	ldev->tracker = tracker;
734 
735 	if (changed)
736 		mlx5_queue_bond_work(ldev, 0);
737 
738 	return NOTIFY_DONE;
739 }
740 
741 static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
742 				 struct mlx5_core_dev *dev,
743 				 struct net_device *netdev)
744 {
745 	unsigned int fn = mlx5_get_dev_index(dev);
746 
747 	if (fn >= MLX5_MAX_PORTS)
748 		return;
749 
750 	spin_lock(&lag_lock);
751 	ldev->pf[fn].netdev = netdev;
752 	ldev->tracker.netdev_state[fn].link_up = 0;
753 	ldev->tracker.netdev_state[fn].tx_enabled = 0;
754 	spin_unlock(&lag_lock);
755 }
756 
757 static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
758 				    struct net_device *netdev)
759 {
760 	int i;
761 
762 	spin_lock(&lag_lock);
763 	for (i = 0; i < MLX5_MAX_PORTS; i++) {
764 		if (ldev->pf[i].netdev == netdev) {
765 			ldev->pf[i].netdev = NULL;
766 			break;
767 		}
768 	}
769 	spin_unlock(&lag_lock);
770 }
771 
772 static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
773 			       struct mlx5_core_dev *dev)
774 {
775 	unsigned int fn = mlx5_get_dev_index(dev);
776 
777 	if (fn >= MLX5_MAX_PORTS)
778 		return;
779 
780 	ldev->pf[fn].dev = dev;
781 	dev->priv.lag = ldev;
782 }
783 
784 /* Must be called with intf_mutex held */
785 static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
786 				  struct mlx5_core_dev *dev)
787 {
788 	int i;
789 
790 	for (i = 0; i < MLX5_MAX_PORTS; i++)
791 		if (ldev->pf[i].dev == dev)
792 			break;
793 
794 	if (i == MLX5_MAX_PORTS)
795 		return;
796 
797 	ldev->pf[i].dev = NULL;
798 	dev->priv.lag = NULL;
799 }
800 
801 /* Must be called with intf_mutex held */
802 static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
803 {
804 	struct mlx5_lag *ldev = NULL;
805 	struct mlx5_core_dev *tmp_dev;
806 
807 	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
808 	    !MLX5_CAP_GEN(dev, lag_master) ||
809 	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
810 		return 0;
811 
812 	tmp_dev = mlx5_get_next_phys_dev(dev);
813 	if (tmp_dev)
814 		ldev = tmp_dev->priv.lag;
815 
816 	if (!ldev) {
817 		ldev = mlx5_lag_dev_alloc(dev);
818 		if (!ldev) {
819 			mlx5_core_err(dev, "Failed to alloc lag dev\n");
820 			return 0;
821 		}
822 	} else {
823 		if (ldev->mode_changes_in_progress)
824 			return -EAGAIN;
825 		mlx5_ldev_get(ldev);
826 	}
827 
828 	mlx5_ldev_add_mdev(ldev, dev);
829 
830 	return 0;
831 }
832 
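/* Detach a core device from its LAG context. Waits (by polling) for
 * any in-flight mode change to finish before dropping the device and
 * releasing the ldev reference.
 */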
833 void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
834 {
835 	struct mlx5_lag *ldev;
836 
837 	ldev = mlx5_lag_dev(dev);
838 	if (!ldev)
839 		return;
840 
841 recheck:
842 	mlx5_dev_list_lock();
843 	if (ldev->mode_changes_in_progress) {
844 		mlx5_dev_list_unlock();
845 		msleep(100);
846 		goto recheck;
847 	}
848 	mlx5_ldev_remove_mdev(ldev, dev);
849 	mlx5_dev_list_unlock();
850 	mlx5_ldev_put(ldev);
851 }
852 
853 void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
854 {
855 	int err;
856 
857 recheck:
858 	mlx5_dev_list_lock();
859 	err = __mlx5_lag_dev_add_mdev(dev);
860 	if (err) {
861 		mlx5_dev_list_unlock();
862 		msleep(100);
863 		goto recheck;
864 	}
865 	mlx5_dev_list_unlock();
866 }
867 
868 /* Must be called with intf_mutex held */
869 void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
870 			    struct net_device *netdev)
871 {
872 	struct mlx5_lag *ldev;
873 
874 	ldev = mlx5_lag_dev(dev);
875 	if (!ldev)
876 		return;
877 
878 	mlx5_ldev_remove_netdev(ldev, netdev);
879 	ldev->flags &= ~MLX5_LAG_FLAG_READY;
880 
881 	if (__mlx5_lag_is_active(ldev))
882 		mlx5_queue_bond_work(ldev, 0);
883 }
884 
885 /* Must be called with intf_mutex held */
886 void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
887 			 struct net_device *netdev)
888 {
889 	struct mlx5_lag *ldev;
890 	int i;
891 
892 	ldev = mlx5_lag_dev(dev);
893 	if (!ldev)
894 		return;
895 
896 	mlx5_ldev_add_netdev(ldev, dev, netdev);
897 
898 	for (i = 0; i < MLX5_MAX_PORTS; i++)
899 		if (!ldev->pf[i].dev)
900 			break;
901 
902 	if (i >= MLX5_MAX_PORTS)
903 		ldev->flags |= MLX5_LAG_FLAG_READY;
904 	mlx5_queue_bond_work(ldev, 0);
905 }
906 
907 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
908 {
909 	struct mlx5_lag *ldev;
910 	bool res;
911 
912 	spin_lock(&lag_lock);
913 	ldev = mlx5_lag_dev(dev);
914 	res  = ldev && __mlx5_lag_is_roce(ldev);
915 	spin_unlock(&lag_lock);
916 
917 	return res;
918 }
919 EXPORT_SYMBOL(mlx5_lag_is_roce);
920 
921 bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
922 {
923 	struct mlx5_lag *ldev;
924 	bool res;
925 
926 	spin_lock(&lag_lock);
927 	ldev = mlx5_lag_dev(dev);
928 	res  = ldev && __mlx5_lag_is_active(ldev);
929 	spin_unlock(&lag_lock);
930 
931 	return res;
932 }
933 EXPORT_SYMBOL(mlx5_lag_is_active);
934 
935 bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
936 {
937 	struct mlx5_lag *ldev;
938 	bool res;
939 
940 	spin_lock(&lag_lock);
941 	ldev = mlx5_lag_dev(dev);
942 	res = ldev && __mlx5_lag_is_active(ldev) &&
943 		dev == ldev->pf[MLX5_LAG_P1].dev;
944 	spin_unlock(&lag_lock);
945 
946 	return res;
947 }
948 EXPORT_SYMBOL(mlx5_lag_is_master);
949 
950 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
951 {
952 	struct mlx5_lag *ldev;
953 	bool res;
954 
955 	spin_lock(&lag_lock);
956 	ldev = mlx5_lag_dev(dev);
957 	res  = ldev && __mlx5_lag_is_sriov(ldev);
958 	spin_unlock(&lag_lock);
959 
960 	return res;
961 }
962 EXPORT_SYMBOL(mlx5_lag_is_sriov);
963 
964 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
965 {
966 	struct mlx5_lag *ldev;
967 	bool res;
968 
969 	spin_lock(&lag_lock);
970 	ldev = mlx5_lag_dev(dev);
971 	res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
972 	spin_unlock(&lag_lock);
973 
974 	return res;
975 }
976 EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
977 
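/* Temporarily forbid LAG mode changes (e.g. while the eswitch mode is
 * being changed): bump mode_changes_in_progress and break an active
 * bond. mlx5_lag_enable_change() drops the count and re-queues the
 * bond work.
 */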
978 void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
979 {
980 	struct mlx5_core_dev *dev0;
981 	struct mlx5_core_dev *dev1;
982 	struct mlx5_lag *ldev;
983 
984 	ldev = mlx5_lag_dev(dev);
985 	if (!ldev)
986 		return;
987 
988 	mlx5_dev_list_lock();
989 
990 	dev0 = ldev->pf[MLX5_LAG_P1].dev;
991 	dev1 = ldev->pf[MLX5_LAG_P2].dev;
992 
993 	ldev->mode_changes_in_progress++;
994 	if (__mlx5_lag_is_active(ldev)) {
995 		mlx5_lag_lock_eswitches(dev0, dev1);
996 		mlx5_disable_lag(ldev);
997 		mlx5_lag_unlock_eswitches(dev0, dev1);
998 	}
999 	mlx5_dev_list_unlock();
1000 }
1001 
1002 void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
1003 {
1004 	struct mlx5_lag *ldev;
1005 
1006 	ldev = mlx5_lag_dev(dev);
1007 	if (!ldev)
1008 		return;
1009 
1010 	mlx5_dev_list_lock();
1011 	ldev->mode_changes_in_progress--;
1012 	mlx5_dev_list_unlock();
1013 	mlx5_queue_bond_work(ldev, 0);
1014 }
1015 
1016 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
1017 {
1018 	struct net_device *ndev = NULL;
1019 	struct mlx5_lag *ldev;
1020 
1021 	spin_lock(&lag_lock);
1022 	ldev = mlx5_lag_dev(dev);
1023 
1024 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
1025 		goto unlock;
1026 
1027 	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
1028 		ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
1029 		       ldev->pf[MLX5_LAG_P1].netdev :
1030 		       ldev->pf[MLX5_LAG_P2].netdev;
1031 	} else {
1032 		ndev = ldev->pf[MLX5_LAG_P1].netdev;
1033 	}
1034 	if (ndev)
1035 		dev_hold(ndev);
1036 
1037 unlock:
1038 	spin_unlock(&lag_lock);
1039 
1040 	return ndev;
1041 }
1042 EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
1043 
1044 u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
1045 			   struct net_device *slave)
1046 {
1047 	struct mlx5_lag *ldev;
1048 	u8 port = 0;
1049 
1050 	spin_lock(&lag_lock);
1051 	ldev = mlx5_lag_dev(dev);
1052 	if (!(ldev && __mlx5_lag_is_roce(ldev)))
1053 		goto unlock;
1054 
1055 	if (ldev->pf[MLX5_LAG_P1].netdev == slave)
1056 		port = MLX5_LAG_P1;
1057 	else
1058 		port = MLX5_LAG_P2;
1059 
1060 	port = ldev->v2p_map[port];
1061 
1062 unlock:
1063 	spin_unlock(&lag_lock);
1064 	return port;
1065 }
1066 EXPORT_SYMBOL(mlx5_lag_get_slave_port);
1067 
1068 struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
1069 {
1070 	struct mlx5_core_dev *peer_dev = NULL;
1071 	struct mlx5_lag *ldev;
1072 
1073 	spin_lock(&lag_lock);
1074 	ldev = mlx5_lag_dev(dev);
1075 	if (!ldev)
1076 		goto unlock;
1077 
1078 	peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
1079 			   ldev->pf[MLX5_LAG_P2].dev :
1080 			   ldev->pf[MLX5_LAG_P1].dev;
1081 
1082 unlock:
1083 	spin_unlock(&lag_lock);
1084 	return peer_dev;
1085 }
1086 EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
1087 
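/* Query congestion statistics, summing the counters of both ports when
 * LAG is active so callers see aggregate values for the bond.
 */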
1088 int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
1089 				 u64 *values,
1090 				 int num_counters,
1091 				 size_t *offsets)
1092 {
1093 	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
1094 	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
1095 	struct mlx5_lag *ldev;
1096 	int num_ports;
1097 	int ret, i, j;
1098 	void *out;
1099 
1100 	out = kvzalloc(outlen, GFP_KERNEL);
1101 	if (!out)
1102 		return -ENOMEM;
1103 
1104 	memset(values, 0, sizeof(*values) * num_counters);
1105 
1106 	spin_lock(&lag_lock);
1107 	ldev = mlx5_lag_dev(dev);
1108 	if (ldev && __mlx5_lag_is_active(ldev)) {
1109 		num_ports = MLX5_MAX_PORTS;
1110 		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
1111 		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
1112 	} else {
1113 		num_ports = 1;
1114 		mdev[MLX5_LAG_P1] = dev;
1115 	}
1116 	spin_unlock(&lag_lock);
1117 
1118 	for (i = 0; i < num_ports; ++i) {
1119 		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};
1120 
1121 		MLX5_SET(query_cong_statistics_in, in, opcode,
1122 			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
1123 		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
1124 					  out);
1125 		if (ret)
1126 			goto free;
1127 
1128 		for (j = 0; j < num_counters; ++j)
1129 			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
1130 	}
1131 
1132 free:
1133 	kvfree(out);
1134 	return ret;
1135 }
1136 EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
1137