/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

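/* The mlx5_cmd_stub_* callbacks below implement the mlx5_flow_cmds interface
 * without issuing any firmware commands; they are used for flow-table types
 * that are not driven through the FW command path (see
 * mlx5_fs_cmd_get_default() at the bottom of this file).
 */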
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   unsigned int size,
					   struct mlx5_flow_table *next_ft)
{
	ft->max_fte = size ? roundup_pow_of_two(size) : 1;

	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

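/* In a shared-FDB LAG configuration, point the slave device's FDB root at a
 * table owned by the master (when @ft_id_valid is set) or back at the slave's
 * own FDB root namespace table otherwise.
 */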
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
				       struct mlx5_core_dev *slave,
				       bool ft_id_valid,
				       u32 ft_id)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);
	if (ft_id_valid) {
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 ft_id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}

static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
				    int definer_id)
{
	return 0;
}

static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
				   u16 format_id, u32 *match_mask)
{
	return 0;
}

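/* Set @ft as the root flow table of its namespace, or detach the current root
 * when @disconnect is true. On a shared-FDB LAG master the change is mirrored
 * to the peer device and rolled back locally if the peer update fails.
 */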
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	if (ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    !mlx5_lag_is_master(dev))
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
	if (!err &&
	    ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    mlx5_lag_is_master(dev)) {
		err = mlx5_cmd_set_slave_root_fdb(dev,
						  mlx5_lag_get_peer_mdev(dev),
						  !disconnect, (!disconnect) ?
						  ft->id : 0);
		if (err && !disconnect) {
			MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
			MLX5_SET(set_flow_table_root_in, in, table_id,
				 ns->root_ft->id);
			mlx5_cmd_exec_in(dev, set_flow_table_root, in);
		}
	}

	return err;
}

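/* Create a flow table in firmware. The requested size is rounded up through
 * the flow-table size pool (fs_ft_pool), and the table's miss behaviour is
 * programmed to forward to @next_ft when one is provided.
 */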
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      unsigned int size,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if (size != POOL_NEXT_SIZE)
		size = roundup_pow_of_two(size);
	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}

static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}

static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}

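/* Decide whether the FTE needs the extended destination format, which is
 * required when a multi-destination entry also performs per-vport packet
 * reformat. Returns -EOPNOTSUPP if firmware capabilities cannot express it.
 */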
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}
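
/* Build and execute SET_FLOW_TABLE_ENTRY: translate the software fs_fte
 * (match value, actions, vlan push, destination and counter lists) into the
 * firmware flow_context layout.
 */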
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id,   ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
					/* destination_id is reserved */
					id = 0;
					break;
				}
				id = dst->dest_attr.vport.num;
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

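/* Updating an existing FTE in place requires the flow_modify_en capability
 * (atomic modify); otherwise the operation is rejected with -EOPNOTSUPP.
 */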
static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}

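/* Flow counter commands. A bulk allocation returns a base flow_counter_id
 * for the allocated range; the bulk size is encoded in @alloc_bitmask as
 * defined by enum mlx5_fc_bulk_alloc_bitmask.
 */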
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

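/* Allocate a packet reformat (encap/decap) context in firmware. The reformat
 * data size is bounded by the max_encap_header_size capability of either the
 * FDB or the NIC flow-table namespace.
 */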
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_pkt_reformat_params *params,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (params->size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       params->size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
		     params->size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + params->size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, params->size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, params->type);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_0, params->param_0);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_1, params->param_1);
	if (params->data && params->size)
		memcpy(reformat, params->data, params->size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}

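/* Allocate a modify-header context. The namespace selects both the firmware
 * table type and the max_modify_header_actions capability used to validate
 * @num_actions.
 */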
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
#ifdef CONFIG_MLX5_IPSEC
	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
#endif
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}

static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					  int definer_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);

	return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
}

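/* Match definers are created as general objects. On success the definer's
 * object ID is returned, otherwise a negative errno.
 */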
static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
					 u16 format_id, u32 *match_mask)
{
	u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *ptr;
	int err;

	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, ptr, format_id, format_id);

	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
	return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}

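/* Dispatch tables: mlx5_flow_cmds drives flow steering through firmware
 * commands, while mlx5_flow_cmd_stubs is a no-op implementation. Namespace
 * setup (set_peer/create_ns/destroy_ns) is stubbed in both.
 */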
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_create_match_definer,
	.destroy_match_definer = mlx5_cmd_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_stub_create_match_definer,
	.destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
	case FS_FT_PORT_SEL:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}