// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"

struct nfp_mask_id_table {
	struct hlist_node link;
	u32 hash_key;
	u32 ref_cnt;
	u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
	struct net_device *netdev;
	unsigned long cookie;
};

struct nfp_fl_stats_ctx_to_flow {
	struct rhash_head ht_node;
	u32 stats_cxt;
	struct nfp_fl_payload *flow;
};

static const struct rhashtable_params stats_ctx_table_params = {
	.key_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
	.head_offset	= offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
	.key_len	= sizeof(u32),
};

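/* Return a stats context ID to the free-list ring buffer so it can be
 * reused by a later flow. Returns -ENOBUFS if the ring is already full.
 */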
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	/* Check if buffer is full. */
	if (!CIRC_SPACE(ring->head, ring->tail,
			priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
			NFP_FL_STATS_ELEM_RS + 1))
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
	ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

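/* Allocate a stats context ID, handing out never-used IDs striped across
 * the available memory units before recycling IDs from the free-list
 * ring buffer. Returns -ENOENT when no ID is available.
 */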
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;
	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);

		if (++priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
			   struct net_device *netdev)
{
	struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
	struct nfp_flower_priv *priv = app->priv;

	flower_cmp_arg.netdev = netdev;
	flower_cmp_arg.cookie = tc_flower_cookie;

	return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
				      nfp_flower_table_params);
}

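/* Handle a flow stats control message from the firmware: accumulate the
 * packet and byte counts of each stats frame into the matching host
 * stats context and refresh its last-used timestamp.
 */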
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
	unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_stats_frame *stats;
	unsigned char *msg;
	u32 ctx_id;
	int i;

	msg = nfp_flower_cmsg_get_data(skb);

	spin_lock(&priv->stats_lock);
	for (i = 0; i < msg_len / sizeof(*stats); i++) {
		stats = (struct nfp_fl_stats_frame *)msg + i;
		ctx_id = be32_to_cpu(stats->stats_con_id);
		priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
		priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
		priv->stats[ctx_id].used = jiffies;
	}
	spin_unlock(&priv->stats_lock);
}

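/* Return a mask ID to the free-list ring buffer and record the time it
 * was freed, so that nfp_mask_alloc() can hold off reusing it until
 * NFP_FL_MASK_REUSE_TIME_NS has passed.
 */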
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct circ_buf *ring;

	ring = &priv->mask_ids.mask_id_free_list;
	/* Checking if buffer is full. */
	if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
		return -ENOBUFS;

	memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	priv->mask_ids.last_used[mask_id] = ktime_get();

	return 0;
}

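/* Allocate a mask ID, preferring never-used IDs and otherwise recycling
 * the oldest freed ID, provided its reuse timeout has expired.
 */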
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
	struct nfp_flower_priv *priv = app->priv;
	ktime_t reuse_timeout;
	struct circ_buf *ring;
	u8 temp_id, freed_id;

	ring = &priv->mask_ids.mask_id_free_list;
	freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
	/* Checking for unallocated entries first. */
	if (priv->mask_ids.init_unallocated > 0) {
		*mask_id = priv->mask_ids.init_unallocated;
		priv->mask_ids.init_unallocated--;
		return 0;
	}

	/* Checking if buffer is empty. */
	if (ring->head == ring->tail)
		goto err_not_found;

	memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
	*mask_id = temp_id;

	reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
				     NFP_FL_MASK_REUSE_TIME_NS);

	if (ktime_before(ktime_get(), reuse_timeout))
		goto err_not_found;

	memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
	ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
		     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

	return 0;

err_not_found:
	*mask_id = freed_id;
	return -ENOENT;
}

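/* Allocate a mask ID and add a reference-counted entry for this mask to
 * the mask hash table. Returns the new mask ID on success or a negative
 * errno on failure.
 */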
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;
	u8 mask_id;

	if (nfp_mask_alloc(app, &mask_id))
		return -ENOENT;

	mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
	if (!mask_entry) {
		nfp_release_mask_id(app, mask_id);
		return -ENOMEM;
	}

	INIT_HLIST_NODE(&mask_entry->link);
	mask_entry->mask_id = mask_id;
	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
	mask_entry->hash_key = hash_key;
	mask_entry->ref_cnt = 1;
	hash_add(priv->mask_table, &mask_entry->link, hash_key);

	return mask_id;
}

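/* Look up a mask table entry. Note that matching is done purely on the
 * 32-bit jhash of the mask data, not on the mask bytes themselves.
 */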
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_mask_id_table *mask_entry;
	unsigned long hash_key;

	hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

	hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
		if (mask_entry->hash_key == hash_key)
			return mask_entry;

	return NULL;
}

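/* Find an existing entry for this mask and take a reference on it.
 * Returns the mask ID widened to int, or -ENOENT if not found.
 */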
static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return -ENOENT;

	mask_entry->ref_cnt++;

	/* Casting u8 to int for later use. */
	return mask_entry->mask_id;
}

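/* Find or create a mask ID for the given mask. Sets the MANAGE_MASK meta
 * flag when a new mask table entry had to be created. Returns false if
 * no mask ID could be obtained.
 */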
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
		   u8 *meta_flags, u8 *mask_id)
{
	int id;

	id = nfp_find_in_mask_table(app, mask_data, mask_len);
	if (id < 0) {
		id = nfp_add_mask_table(app, mask_data, mask_len);
		if (id < 0)
			return false;
		*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}
	*mask_id = id;

	return true;
}

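/* Drop a reference on a mask and, on the last reference, remove it from
 * the table, release its ID and set the MANAGE_MASK meta flag for the
 * caller. Returns false if the mask was not found.
 */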
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
		      u8 *meta_flags, u8 *mask_id)
{
	struct nfp_mask_id_table *mask_entry;

	mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
	if (!mask_entry)
		return false;

	*mask_id = mask_entry->mask_id;
	mask_entry->ref_cnt--;
	if (!mask_entry->ref_cnt) {
		hash_del(&mask_entry->link);
		nfp_release_mask_id(app, *mask_id);
		kfree(mask_entry);
		if (meta_flags)
			*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
	}

	return true;
}

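/* Fill in the metadata of a flow about to be offloaded: allocate a stats
 * context and a mask ID, record the stats context to flow mapping, stamp
 * the flow version and reject duplicate cookies. All allocations are
 * unwound on failure.
 */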
int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie,
			      struct nfp_fl_payload *nfp_flow,
			      struct net_device *netdev,
			      struct netlink_ext_ack *extack)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *check_entry;
	u8 new_mask_id;
	u32 stats_cxt;
	int err;

	err = nfp_get_stats_entry(app, &stats_cxt);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate new stats context");
		return err;
	}

	nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
	nfp_flow->meta.host_cookie = cpu_to_be64(cookie);
	nfp_flow->ingress_dev = netdev;

	ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
	if (!ctx_entry) {
		err = -ENOMEM;
		goto err_release_stats;
	}

	ctx_entry->stats_cxt = stats_cxt;
	ctx_entry->flow = nfp_flow;

	if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
				   stats_ctx_table_params)) {
		err = -ENOMEM;
		goto err_free_ctx_entry;
	}

	/* Do not allocate a mask-id for pre_tun_rules. These flows are used to
	 * configure the pre_tun table and are never actually sent to the
	 * firmware as an add-flow message. This causes the mask-id allocation
	 * on the firmware to get out of sync if allocated here.
	 */
	new_mask_id = 0;
	if (!nfp_flow->pre_tun_rule.dev &&
	    !nfp_check_mask_add(app, nfp_flow->mask_data,
				nfp_flow->meta.mask_len,
				&nfp_flow->meta.flags, &new_mask_id)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_rhash;
		}
		err = -ENOENT;
		goto err_remove_rhash;
	}

	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
	priv->stats[stats_cxt].pkts = 0;
	priv->stats[stats_cxt].bytes = 0;
	priv->stats[stats_cxt].used = jiffies;

	check_entry = nfp_flower_search_fl_table(app, cookie, netdev);
	if (check_entry) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry");
		if (nfp_release_stats_entry(app, stats_cxt)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release stats context");
			err = -EINVAL;
			goto err_remove_mask;
		}

		if (!nfp_flow->pre_tun_rule.dev &&
		    !nfp_check_mask_remove(app, nfp_flow->mask_data,
					   nfp_flow->meta.mask_len,
					   NULL, &new_mask_id)) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
			err = -EINVAL;
			goto err_remove_mask;
		}

		err = -EEXIST;
		goto err_remove_mask;
	}

	return 0;

err_remove_mask:
	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len,
				      NULL, &new_mask_id);
err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
err_free_ctx_entry:
	kfree(ctx_entry);
err_release_stats:
	nfp_release_stats_entry(app, stats_cxt);

	return err;
}

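/* Minimal metadata refresh for a flow: clear the MANAGE_MASK flag and
 * stamp a new flow version.
 */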
void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
				struct nfp_fl_payload *nfp_flow)
{
	nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
	nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
	priv->flower_version++;
}

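/* Release the metadata held by a flow: drop its mask reference, remove
 * the stats context to flow mapping and return the stats context ID to
 * the free list.
 */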
int nfp_modify_flow_metadata(struct nfp_app *app,
			     struct nfp_fl_payload *nfp_flow)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;
	u8 new_mask_id = 0;
	u32 temp_ctx_id;

	__nfp_modify_flow_metadata(priv, nfp_flow);

	if (!nfp_flow->pre_tun_rule.dev)
		nfp_check_mask_remove(app, nfp_flow->mask_data,
				      nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
				      &new_mask_id);

	/* Update flow payload with mask ids. */
	nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

	/* Release the stats ctx id and ctx to flow table entry. */
	temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return -ENOENT;

	WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
					    &ctx_entry->ht_node,
					    stats_ctx_table_params));
	kfree(ctx_entry);

	return nfp_release_stats_entry(app, temp_ctx_id);
}

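/* Map a stats context ID back to the flow payload it belongs to, or NULL
 * if the context is not currently in use.
 */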
struct nfp_fl_payload *
nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
{
	struct nfp_fl_stats_ctx_to_flow *ctx_entry;
	struct nfp_flower_priv *priv = app->priv;

	ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
					   stats_ctx_table_params);
	if (!ctx_entry)
		return NULL;

	return ctx_entry->flow;
}

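/* rhashtable compare callback: a stored flow matches the lookup key when
 * both its ingress netdev and its TC flower cookie match. Returns zero
 * on a match, non-zero otherwise, as rhashtable expects.
 */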
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
			    const void *obj)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
	const struct nfp_fl_payload *flow_entry = obj;

	if (flow_entry->ingress_dev == cmp_arg->netdev)
		return flow_entry->tc_flower_cookie != cmp_arg->cookie;

	return 1;
}

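/* Both hash callbacks hash only the flower cookie, so a lookup key and a
 * stored flow land in the same bucket; the ingress netdev is then
 * disambiguated by nfp_fl_obj_cmpfn().
 */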
static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_payload *flower_entry = data;

	return jhash2((u32 *)&flower_entry->tc_flower_cookie,
		      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
		      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
	const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

	return jhash2((u32 *)&cmp_arg->cookie,
		      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

const struct rhashtable_params nfp_flower_table_params = {
	.head_offset		= offsetof(struct nfp_fl_payload, fl_node),
	.hashfn			= nfp_fl_key_hashfn,
	.obj_cmpfn		= nfp_fl_obj_cmpfn,
	.obj_hashfn		= nfp_fl_obj_hashfn,
	.automatic_shrinking	= true,
};

const struct rhashtable_params merge_table_params = {
	.key_offset	= offsetof(struct nfp_merge_info, parent_ctx),
	.head_offset	= offsetof(struct nfp_merge_info, ht_node),
	.key_len	= sizeof(u64),
};

const struct rhashtable_params nfp_zone_table_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_zone_entry, hash_node),
	.key_len		= sizeof(u16),
	.key_offset		= offsetof(struct nfp_fl_ct_zone_entry, zone),
	.automatic_shrinking	= false,
};

const struct rhashtable_params nfp_ct_map_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_map_entry, hash_node),
	.key_len		= sizeof(unsigned long),
	.key_offset		= offsetof(struct nfp_fl_ct_map_entry, cookie),
	.automatic_shrinking	= true,
};

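/* Set up all flower metadata state: the flow, stats context, merge and
 * conntrack rhashtables, the mask and stats ID free lists with their
 * never-used counters, and the host stats array covering every memory
 * unit.
 */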
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
	if (err)
		goto err_free_flow_table;

	err = rhashtable_init(&priv->merge_table, &merge_table_params);
	if (err)
		goto err_free_stats_ctx_table;

	err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
	if (err)
		goto err_free_merge_table;

	err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
	if (err)
		goto err_free_ct_zone_table;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_ct_map_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask id. */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_ct_map_table:
	rhashtable_destroy(&priv->ct_map_table);
err_free_ct_zone_table:
	rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
	rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
	rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}

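/* Tear down a conntrack zone entry. The pre_ct, post_ct and nft flow
 * lists should already be empty; if not, warn once and clean up the
 * remaining entries before detaching from nft and freeing the merge
 * tables.
 */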
static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
{
	if (!zt)
		return;

	if (!list_empty(&zt->pre_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (!list_empty(&zt->post_ct_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	if (zt->nft) {
		nf_flow_table_offload_del_cb(zt->nft,
					     nfp_fl_ct_handle_nft_flow,
					     zt);
		zt->nft = NULL;
	}

	if (!list_empty(&zt->nft_flows_list)) {
		struct rhashtable *m_table = &zt->priv->ct_map_table;
		struct nfp_fl_ct_flow_entry *entry, *tmp;
		struct nfp_fl_ct_map_entry *map;

		WARN_ONCE(1, "nft_flows_list not empty as expected, cleaning up\n");
		list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
					 list_node) {
			map = rhashtable_lookup_fast(m_table,
						     &entry->cookie,
						     nfp_ct_map_params);
			WARN_ON_ONCE(rhashtable_remove_fast(m_table,
							    &map->hash_node,
							    nfp_ct_map_params));
			nfp_fl_ct_clean_flow_entry(entry);
			kfree(map);
		}
	}

	rhashtable_free_and_destroy(&zt->tc_merge_tb,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&zt->nft_merge_tb,
				    nfp_check_rhashtable_empty, NULL);

	kfree(zt);
}

static void nfp_free_zone_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_zone_entry *zt = ptr;

	nfp_zone_table_entry_destroy(zt);
}

static void nfp_free_map_table_entry(void *ptr, void *arg)
{
	struct nfp_fl_ct_map_entry *map = ptr;

	if (!map)
		return;

	kfree(map);
}

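/* Free everything set up by nfp_flower_metadata_init(), including any
 * remaining conntrack zone and map entries.
 */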
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv)
		return;

	rhashtable_free_and_destroy(&priv->flow_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->stats_ctx_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->merge_table,
				    nfp_check_rhashtable_empty, NULL);
	rhashtable_free_and_destroy(&priv->ct_zone_table,
				    nfp_free_zone_table_entry, NULL);
	nfp_zone_table_entry_destroy(priv->ct_zone_wc);

	rhashtable_free_and_destroy(&priv->ct_map_table,
				    nfp_free_map_table_entry, NULL);
	kvfree(priv->stats);
	kfree(priv->mask_ids.mask_id_free_list.buf);
	kfree(priv->mask_ids.last_used);
	vfree(priv->stats_ids.free_list.buf);
}