// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies Ltd */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "fs_core.h"
#include "esw/qos.h"

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

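/* Create the flow table that holds the VEPA steering rules. The table lives
 * in the FDB namespace at LEGACY_VEPA_PRIO, ahead of the legacy FDB table,
 * and needs room for exactly two entries: the uplink rule and the star rule
 * installed by _mlx5_eswitch_set_vepa_locked().
 */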
static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

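/* Tear down the legacy FDB table and its flow groups. Safe to call on a
 * partially constructed table: each group pointer is checked before being
 * destroyed, which lets esw_create_legacy_fdb_table() reuse this function
 * on its error path.
 */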
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
	atomic64_set(&esw->user_count, 0);
}

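/* Create the legacy-mode FDB table and carve it into three flow groups:
 *
 *   [0 .. size-3]  addr_grp     - exact match on DMAC (unicast/multicast)
 *   [size-2]       allmulti_grp - DMAC multicast bit, one catch-all rule
 *   [size-1]       promisc_grp  - match on source port only
 *
 * The table is sized from the log_max_ft_size capability, with the last
 * two entries reserved for the allmulti and promisc rules.
 */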
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	ft_attr.max_fte = table_size;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

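/* Destroy the VEPA flow table. The VEPA rules themselves must already have
 * been removed via esw_cleanup_vepa_rules().
 */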
static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

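/* Create both legacy-mode tables. The VEPA table sits at a higher steering
 * priority (lower prio number) than the FDB table, so once VEPA rules are
 * installed they are evaluated first and can redirect traffic into the FDB
 * table or straight to the uplink.
 */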
static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
	atomic64_set(&esw->user_count, 0);

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

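/* Remove the two VEPA steering rules, if present. Called both when VEPA is
 * switched off and before the VEPA table is destroyed.
 */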
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

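/* Bring the eswitch up in legacy (SR-IOV) mode: create the VEPA and FDB
 * tables, reset every VF link state to AUTO, and enable the PF/VF vports
 * with UC/MC address and promisc change events armed. On failure the
 * tables are torn down again.
 */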
int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int ret;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

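/* Tear down legacy mode: disable the PF/VF vports, drop the multicast
 * promisc uplink rule if one was installed, and destroy the legacy tables.
 */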
void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

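/* Install or remove the VEPA steering rules. Must be called with
 * esw->state_lock held. When enabling, two rules go into the VEPA table:
 * one that sends traffic arriving from the uplink on to the legacy FDB
 * table, and a catch-all "star" rule that forwards everything else back
 * out of the uplink vport, so the adjacent bridge performs the switching
 * (the VEPA model).
 */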
static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Uplink rule forwards uplink traffic to the FDB table */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;

	/* Star rule to forward all traffic to the uplink vport */
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	}
	esw->fdb_table.legacy.vepa_star_rule = flow_rule;

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}

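/* Toggle VEPA mode under the eswitch state lock. Only supported while the
 * eswitch is in legacy mode. (This is typically reached via the driver's
 * ndo_bridge_setlink handling of IFLA_BRIDGE_MODE; the caller lives
 * outside this file.)
 */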
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

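/* Report whether VEPA is enabled. The presence of the uplink rule is the
 * single source of truth; no separate state flag is kept.
 */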
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

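/* Set up the ingress and egress ACL tables for a vport. These enforce
 * per-VF policies such as spoof checking and VLAN handling; manager vports
 * are trusted and skipped. On egress setup failure the ingress ACL is
 * rolled back.
 */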
int esw_legacy_vport_acl_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	int ret;

	/* Only non-manager vports need ACL in legacy mode */
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

void esw_legacy_vport_acl_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

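/* Aggregate drop statistics for a vport from two sources: the ACL drop
 * flow counters (egress drops count as RX drops from the VF's point of
 * view, ingress drops as TX drops) and, where the firmware supports it,
 * the per-vport discard counters accumulated while the vport is down.
 */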
int mlx5_esw_query_vport_drop_stats(struct mlx5_core_dev *dev,
				    struct mlx5_vport *vport,
				    struct mlx5_vport_drop_stats *stats)
{
	u64 rx_discard_vport_down, tx_discard_vport_down;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.legacy.drop_counter)
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

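/* Configure VLAN push/pop (VST) for a VF vport. A non-zero vlan or qos
 * enables both VLAN stripping and insertion. Setting vlan 0 outside legacy
 * mode is silently accepted for compatibility with libvirt.
 */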
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		if (!vlan)
			goto unlock; /* compatibility with libvirt */

		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

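/* Enable or disable MAC spoof checking for a VF vport. The new setting is
 * applied by rebuilding the vport's ingress ACL; on failure the previous
 * value is restored. A warning is emitted if spoofchk is in effect while
 * the vport MAC is invalid, since the ingress ACL then has no MAC to
 * match against.
 */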
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk in set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

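/* Mark a VF vport as trusted or untrusted and re-run the vport change
 * handler so that settings gated on trust (e.g. promiscuous/allmulti
 * requests from the VF) are re-evaluated immediately.
 */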
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

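/* Set the min (guaranteed) and max (shaper) TX rates of a vport through
 * the eswitch QoS layer. The min rate is applied first; the max rate is
 * only applied if that succeeds.
 */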
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL);
	if (!err)
		err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL);
	mutex_unlock(&esw->state_lock);
	return err;
}