// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/netdevice.h>
#include <linux/dynamic_debug.h>
#include <linux/etherdevice.h>
#include <linux/list.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_rx_filter.h"
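
/* Unlink a filter from both the by_id and by_hash lists and free it.
 * Callers hold lif->rx_filters.lock.
 */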
void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
{
	struct device *dev = lif->ionic->dev;

	hlist_del(&f->by_id);
	hlist_del(&f->by_hash);
	devm_kfree(dev, f);
}
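
/* Re-post all the saved filters to the FW, e.g. after the FW's filter
 * state has been reset. Each successful replay gets a new filter id
 * back from the FW; failures are logged and the filter is dropped.
 */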
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
	struct ionic_rx_filter_add_cmd *ac;
	struct hlist_head new_id_list;
	struct ionic_admin_ctx ctx;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int key;
	unsigned int i;
	int err;

	INIT_HLIST_HEAD(&new_id_list);
	ac = &ctx.cmd.rx_filter_add;

	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
			memcpy(ac, &f->cmd, sizeof(f->cmd));
			dev_dbg(&lif->netdev->dev, "replay filter command:\n");
			dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
					 &ctx.cmd, sizeof(ctx.cmd), true);

			err = ionic_adminq_post_wait(lif, &ctx);
			if (err) {
				switch (le16_to_cpu(ac->match)) {
				case IONIC_RX_FILTER_MATCH_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan));
					break;
				case IONIC_RX_FILTER_MATCH_MAC:
					netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
						    err, ac->mac.addr);
					break;
				case IONIC_RX_FILTER_MATCH_MAC_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan),
						    ac->mac.addr);
					break;
				}
				spin_lock_bh(&lif->rx_filters.lock);
				ionic_rx_filter_free(lif, f);
				spin_unlock_bh(&lif->rx_filters.lock);

				continue;
			}

			/* remove from old id list, save new id in tmp list */
			spin_lock_bh(&lif->rx_filters.lock);
			hlist_del(&f->by_id);
			spin_unlock_bh(&lif->rx_filters.lock);
			f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
			hlist_add_head(&f->by_id, &new_id_list);
		}
	}

	/* rebuild the by_id hash lists with the new filter ids */
	spin_lock_bh(&lif->rx_filters.lock);
	hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
		key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
		head = &lif->rx_filters.by_id[key];
		hlist_add_head(&f->by_id, head);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}
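
/* Initialize the filter lock and the two sets of hash list heads:
 * by_hash is keyed on the match value (vlan id, mac address, etc.)
 * and by_id is keyed on the FW-assigned filter id.
 */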
int ionic_rx_filters_init(struct ionic_lif *lif)
{
	unsigned int i;

	spin_lock_init(&lif->rx_filters.lock);

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
		INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
	}
	spin_unlock_bh(&lif->rx_filters.lock);

	return 0;
}
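
/* Walk the by_id lists and free every remaining filter. */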
void ionic_rx_filters_deinit(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id)
			ionic_rx_filter_free(lif, f);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}
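
/* Save (or refresh) a filter in the local tables: the hash key is
 * derived from the match type, then the entry is linked into both
 * by_hash and by_id. Expects lif->rx_filters.lock to be held, hence
 * the GFP_ATOMIC allocation.
 */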
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_rx_filter_add_cmd *ac;
	struct ionic_rx_filter *f = NULL;
	struct hlist_head *head;
	unsigned int key;

	ac = &ctx->cmd.rx_filter_add;

	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		key = le16_to_cpu(ac->vlan.vlan);
		f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		key = *(u32 *)ac->mac.addr;
		f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
		break;
	case IONIC_RX_FILTER_MATCH_MAC_VLAN:
		key = le16_to_cpu(ac->mac_vlan.vlan);
		break;
	case IONIC_RX_FILTER_STEER_PKTCLASS:
		key = 0;
		break;
	default:
		return -EINVAL;
	}

	if (f) {
		/* remove from current linking so we can refresh it */
		hlist_del(&f->by_id);
		hlist_del(&f->by_hash);
	} else {
		f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;
	}

	f->flow_id = flow_id;
	f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
	f->state = state;
	f->rxq_index = rxq_index;
	memcpy(&f->cmd, ac, sizeof(f->cmd));
	netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

	INIT_HLIST_NODE(&f->by_hash);
	INIT_HLIST_NODE(&f->by_id);

	key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];
	hlist_add_head(&f->by_hash, head);

	key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
	head = &lif->rx_filters.by_id[key];
	hlist_add_head(&f->by_id, head);

	return 0;
}
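
/* Look up a VLAN-match filter by vlan id in the by_hash table. */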
struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
			continue;
		if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
			return f;
	}

	return NULL;
}
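
/* Look up a MAC-match filter by address; the hash key is the first
 * four bytes of the MAC, matching what ionic_rx_filter_save() used.
 */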
struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
						const u8 *addr)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
			continue;
		if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
			return f;
	}

	return NULL;
}
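
/* Find the packet-class steering filter, which is saved with key 0. */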
struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
{
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	unsigned int key;

	key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];

	hlist_for_each_entry(f, head, by_hash) {
		if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
			continue;
		return f;
	}

	return NULL;
}
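
/* Dispatch a filter lookup based on the add command's match type. */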
static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
						    struct ionic_rx_filter_add_cmd *ac)
{
	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
	case IONIC_RX_FILTER_MATCH_MAC:
		return ionic_rx_filter_by_addr(lif, ac->mac.addr);
	default:
		netdev_err(lif->netdev, "unsupported filter match %d\n",
			   le16_to_cpu(ac->match));
		return NULL;
	}
}
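
/* Track an address from the netdev's uc/mc lists in the local filter
 * table without touching the FW: new adds are saved in state NEW,
 * deletes either free a never-synced filter or mark it OLD, and the
 * actual FW update is deferred to ionic_rx_filter_sync().
 */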
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (mode == ADD_ADDR && !f) {
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_filter_add = {
				.opcode = IONIC_CMD_RX_FILTER_ADD,
				.lif_index = cpu_to_le16(lif->index),
				.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
			},
		};

		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_NEW);
		if (err) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return err;
		}

	} else if (mode == ADD_ADDR && f) {
		if (f->state == IONIC_FILTER_STATE_OLD)
			f->state = IONIC_FILTER_STATE_SYNCED;

	} else if (mode == DEL_ADDR && f) {
		if (f->state == IONIC_FILTER_STATE_NEW)
			ionic_rx_filter_free(lif, f);
		else if (f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_OLD;
	} else if (mode == DEL_ADDR && !f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	return 0;
}
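
/* Add a filter both locally and in the FW. The local entry is marked
 * SYNCED up front so a racing add or delete sees it as in flight; on
 * FW failure the state is rolled back to NEW for a later retry.
 */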
static int ionic_lif_filter_add(struct ionic_lif *lif,
				struct ionic_rx_filter_add_cmd *ac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
	};
	struct ionic_rx_filter *f;
	int nfilters;
	int err = 0;

	ctx.cmd.rx_filter_add = *ac;
	ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
	ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 * Since the FW doesn't have a way to tell us the vlan limit,
	 * we start max_vlans at 0 until we hit the ENOSPC error.
	 */
	switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
			   __func__,
			   le16_to_cpu(ctx.cmd.rx_filter_add.vlan.vlan));
		if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
			err = -ENOSPC;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
			   __func__, ctx.cmd.rx_filter_add.mac.addr);
		nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
		if ((lif->nucast + lif->nmcast) >= nfilters)
			err = -ENOSPC;
		break;
	}

	if (err != -ENOSPC)
		err = ionic_adminq_post_wait_nomsg(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);

	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
			f->state = IONIC_FILTER_STATE_NEW;

			/* If -ENOSPC we won't waste time trying to sync again
			 * until there is a delete that might make room
			 */
			if (err != -ENOSPC)
				set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
		}

		spin_unlock_bh(&lif->rx_filters.lock);

		if (err == -ENOSPC) {
			if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
				lif->max_vlans = lif->nvlans;
			return 0;
		}

		ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
					      ctx.comp.comp.status, err);
		switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
		case IONIC_RX_FILTER_MATCH_VLAN:
			netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
				    le16_to_cpu(ctx.cmd.rx_filter_add.vlan.vlan));
			break;
		case IONIC_RX_FILTER_MATCH_MAC:
			netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
				    ctx.cmd.rx_filter_add.mac.addr);
			break;
		}

		return err;
	}

	switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		lif->nvlans++;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
			lif->nmcast++;
		else
			lif->nucast++;
		break;
	}

	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
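
/* Add a MAC-match filter for the given address. */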
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
	};

	memcpy(&ac.mac.addr, addr, ETH_ALEN);

	return ionic_lif_filter_add(lif, &ac);
}
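
/* Add a VLAN-match filter for the given vlan id. */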
int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
		.vlan.vlan = cpu_to_le16(vid),
	};

	return ionic_lif_filter_add(lif, &ac);
}
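
/* Remove a filter from the local tables and, if it ever reached the
 * FW (state != NEW), tell the FW to delete it as well. -EEXIST from
 * the FW just means the filter is already gone, so it's not an error.
 */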
static int ionic_lif_filter_del(struct ionic_lif *lif,
				struct ionic_rx_filter_add_cmd *ac)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, ac);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
			   __func__, le16_to_cpu(ac->vlan.vlan), f->filter_id);
		lif->nvlans--;
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
			   __func__, ac->mac.addr, f->filter_id);
		if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
			lif->nucast--;
		break;
	}

	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);

	spin_unlock_bh(&lif->rx_filters.lock);

	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err && err != -EEXIST)
			return err;
	}

	return 0;
}
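
/* Delete the MAC-match filter for the given address. */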
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
	};

	memcpy(&ac.mac.addr, addr, ETH_ALEN);

	return ionic_lif_filter_del(lif, &ac);
}
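
/* Delete the VLAN-match filter for the given vlan id. */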
int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
{
	struct ionic_rx_filter_add_cmd ac = {
		.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
		.vlan.vlan = cpu_to_le16(vid),
	};

	return ionic_lif_filter_del(lif, &ac);
}
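
/* Scratch copy of a filter, used to carry pending NEW/OLD entries out
 * from under the spinlock so they can be processed without holding it.
 */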
struct sync_item {
	struct list_head list;
	struct ionic_rx_filter f;
};
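
/* Push pending local filter state out to the FW: NEW entries get
 * added, OLD entries get deleted. Deletes run first so they can make
 * room for adds when the FW filter table is full.
 */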
void ionic_rx_filter_sync(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct list_head sync_add_list;
	struct list_head sync_del_list;
	struct sync_item *sync_item;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	struct sync_item *spos;
	unsigned int i;

	INIT_LIST_HEAD(&sync_add_list);
	INIT_LIST_HEAD(&sync_del_list);

	clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	/* Copy the filters to be added and deleted
	 * into a separate local list that needs no locking.
	 */
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			if (f->state == IONIC_FILTER_STATE_NEW ||
			    f->state == IONIC_FILTER_STATE_OLD) {
				sync_item = devm_kzalloc(dev, sizeof(*sync_item),
							 GFP_ATOMIC);
				if (!sync_item)
					goto loop_out;

				sync_item->f = *f;

				if (f->state == IONIC_FILTER_STATE_NEW)
					list_add(&sync_item->list, &sync_add_list);
				else
					list_add(&sync_item->list, &sync_del_list);
			}
		}
	}
loop_out:
	spin_unlock_bh(&lif->rx_filters.lock);

	/* If the add or delete fails, it won't get marked as sync'd
	 * and will be tried again in the next sync action.
	 * Do the deletes first in case we're in an overflow state and
	 * they can clear room for some new filters.
	 */
	list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
		(void)ionic_lif_filter_del(lif, &sync_item->f.cmd);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}

	list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
		(void)ionic_lif_filter_add(lif, &sync_item->f.cmd);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}
}