// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}
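
/* A tcf_proto instance is identified by the (chain index, prio, protocol)
 * triple: destroy_obj_hashfn() above hashes exactly these fields, and
 * tcf_proto_cmp() below compares them.
 */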
static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
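
/* Example (illustrative, not part of this file): a classifier module, say a
 * hypothetical "cls_foo", would typically pair register_tcf_proto_ops() and
 * unregister_tcf_proto_ops() in its module init/exit handlers:
 *
 *	static struct tcf_proto_ops cls_foo_ops = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		...
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 *	static void __exit exit_foo(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_foo_ops);
 *	}
 */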

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
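
/* tcf_queue_work() lets a classifier defer freeing of a filter until after an
 * RCU grace period, on the cls-specific workqueue. A typical (illustrative)
 * use from a classifier's ->delete() path, assuming a filter struct with an
 * embedded struct rcu_work and a hypothetical free callback:
 *
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 */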

/* Select a new prio value from the range managed by the kernel. */
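
/* Worked example (illustrative): with no existing filter, the first
 * auto-allocated prio is TC_H_MAJ(0xC0000000) = 0xC0000000. With a top
 * filter already at prio 0xC0000000, the next one becomes
 * TC_H_MAJ(0xC0000000 - 1) = 0xBFFF0000.
 */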
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and the user
	 * should not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
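
/* tcf_chain_get_by_act() takes an action-side reference: the chain is created
 * if needed, but the reference is counted in action_refcnt, so a chain held
 * only by actions (see tcf_chain_held_by_acts_only() above) stays invisible
 * to users until a non-action reference appears. It must be paired with
 * tcf_chain_put_by_act().
 */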

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}
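
/* Note: when the device has no ndo_setup_tc, the command is handed to the
 * indirect offload infrastructure and -EOPNOTSUPP is returned regardless.
 * tcf_block_offload_bind() below turns that into the "no offload device"
 * accounting path (nooffloaddevcnt) rather than treating it as a failure.
 */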

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * a block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
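
/* Typical iteration pattern (as used by tcf_block_flush_all_chains() below):
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * Passing the previous chain back in releases its reference, so breaking out
 * of the loop early requires an explicit tcf_chain_put().
 */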

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * a chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
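
/* Mirrors tcf_get_next_chain(): passing the previous tp back in drops its
 * reference, e.g. (illustrative)
 *
 *	for (tp = tcf_get_next_proto(chain, NULL); tp;
 *	     tp = tcf_get_next_proto(chain, tp))
 *		...;
 */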

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules-update path of cls API without the
		 * rtnl lock. The caller must release the block when it is
		 * finished using it. The 'if' branch of this conditional
		 * obtains its reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when the last chain is freed. However, if
		 * chain_list is empty, the block has to be manually
		 * deallocated. Once the block reference counter reaches 0,
		 * it is no longer possible to increment it or add new
		 * chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
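
/* A classful qdisc typically calls tcf_block_get() from its ->init() and
 * pairs it with tcf_block_put() in ->destroy(). Illustrative sketch, with
 * 'q' standing for a hypothetical qdisc private struct holding a block
 * pointer and a filter list head:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *	...
 *	tcf_block_put(q->block);
 */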

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
	     chain = __tcf_get_next_chain(block, chain),
	     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && ext->chain) {
			struct tcf_chain *fchain;

			fchain = tcf_chain_lookup_rcu(block, ext->chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		struct tc_skb_cb *cb = tc_skb_cb(skb);

		ext = tc_skb_ext_alloc(skb);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
		ext->mru = cb->mru;
		ext->post_ct = cb->post_ct;
		ext->zone = cb->zone;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);
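
/* Return-value contract (as implemented above): a negative classifier result
 * keeps scanning the chain, TC_ACT_UNSPEC tells the caller to continue its
 * own lookup, and any err >= 0 is the final verdict. Reclassification and
 * goto_chain restart the scan, bounded by max_reclassify_loop (16) before
 * the packet is dropped with TC_ACT_SHOT.
 */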
1635
1636 struct tcf_chain_info {
1637 struct tcf_proto __rcu **pprev;
1638 struct tcf_proto __rcu *next;
1639 };
1640
tcf_chain_tp_prev(struct tcf_chain * chain,struct tcf_chain_info * chain_info)1641 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1642 struct tcf_chain_info *chain_info)
1643 {
1644 return tcf_chain_dereference(*chain_info->pprev, chain);
1645 }
1646
tcf_chain_tp_insert(struct tcf_chain * chain,struct tcf_chain_info * chain_info,struct tcf_proto * tp)1647 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1648 struct tcf_chain_info *chain_info,
1649 struct tcf_proto *tp)
1650 {
1651 if (chain->flushing)
1652 return -EAGAIN;
1653
1654 if (*chain_info->pprev == chain->filter_chain)
1655 tcf_chain0_head_change(chain, tp);
1656 tcf_proto_get(tp);
1657 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1658 rcu_assign_pointer(*chain_info->pprev, tp);
1659
1660 return 0;
1661 }
1662
tcf_chain_tp_remove(struct tcf_chain * chain,struct tcf_chain_info * chain_info,struct tcf_proto * tp)1663 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1664 struct tcf_chain_info *chain_info,
1665 struct tcf_proto *tp)
1666 {
1667 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1668
1669 tcf_proto_mark_delete(tp);
1670 if (tp == chain->filter_chain)
1671 tcf_chain0_head_change(chain, next);
1672 RCU_INIT_POINTER(*chain_info->pprev, next);
1673 }
1674
1675 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1676 struct tcf_chain_info *chain_info,
1677 u32 protocol, u32 prio,
1678 bool prio_allocate);
1679
1680 /* Try to insert new proto.
1681 * If proto with specified priority already exists, free new proto
1682 * and return existing one.
1683 */
1684
tcf_chain_tp_insert_unique(struct tcf_chain * chain,struct tcf_proto * tp_new,u32 protocol,u32 prio,bool rtnl_held)1685 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1686 struct tcf_proto *tp_new,
1687 u32 protocol, u32 prio,
1688 bool rtnl_held)
1689 {
1690 struct tcf_chain_info chain_info;
1691 struct tcf_proto *tp;
1692 int err = 0;
1693
1694 mutex_lock(&chain->filter_chain_lock);
1695
1696 if (tcf_proto_exists_destroying(chain, tp_new)) {
1697 mutex_unlock(&chain->filter_chain_lock);
1698 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1699 return ERR_PTR(-EAGAIN);
1700 }
1701
1702 tp = tcf_chain_tp_find(chain, &chain_info,
1703 protocol, prio, false);
1704 if (!tp)
1705 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1706 mutex_unlock(&chain->filter_chain_lock);
1707
1708 if (tp) {
1709 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1710 tp_new = tp;
1711 } else if (err) {
1712 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1713 tp_new = ERR_PTR(err);
1714 }
1715
1716 return tp_new;
1717 }
1718
tcf_chain_tp_delete_empty(struct tcf_chain * chain,struct tcf_proto * tp,bool rtnl_held,struct netlink_ext_ack * extack)1719 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1720 struct tcf_proto *tp, bool rtnl_held,
1721 struct netlink_ext_ack *extack)
1722 {
1723 struct tcf_chain_info chain_info;
1724 struct tcf_proto *tp_iter;
1725 struct tcf_proto **pprev;
1726 struct tcf_proto *next;
1727
1728 mutex_lock(&chain->filter_chain_lock);
1729
1730 /* Atomically find and remove tp from chain. */
1731 for (pprev = &chain->filter_chain;
1732 (tp_iter = tcf_chain_dereference(*pprev, chain));
1733 pprev = &tp_iter->next) {
1734 if (tp_iter == tp) {
1735 chain_info.pprev = pprev;
1736 chain_info.next = tp_iter->next;
1737 WARN_ON(tp_iter->deleting);
1738 break;
1739 }
1740 }
1741 /* Verify that tp still exists and no new filters were inserted
1742 * concurrently.
1743 * Mark tp for deletion if it is empty.
1744 */
1745 if (!tp_iter || !tcf_proto_check_delete(tp)) {
1746 mutex_unlock(&chain->filter_chain_lock);
1747 return;
1748 }
1749
1750 tcf_proto_signal_destroying(chain, tp);
1751 next = tcf_chain_dereference(chain_info.next, chain);
1752 if (tp == chain->filter_chain)
1753 tcf_chain0_head_change(chain, next);
1754 RCU_INIT_POINTER(*chain_info.pprev, next);
1755 mutex_unlock(&chain->filter_chain_lock);
1756
1757 tcf_proto_put(tp, rtnl_held, extack);
1758 }
1759
tcf_chain_tp_find(struct tcf_chain * chain,struct tcf_chain_info * chain_info,u32 protocol,u32 prio,bool prio_allocate)1760 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1761 struct tcf_chain_info *chain_info,
1762 u32 protocol, u32 prio,
1763 bool prio_allocate)
1764 {
1765 struct tcf_proto **pprev;
1766 struct tcf_proto *tp;
1767
1768 /* Check the chain for existence of proto-tcf with this priority */
1769 for (pprev = &chain->filter_chain;
1770 (tp = tcf_chain_dereference(*pprev, chain));
1771 pprev = &tp->next) {
1772 if (tp->prio >= prio) {
1773 if (tp->prio == prio) {
1774 if (prio_allocate ||
1775 (tp->protocol != protocol && protocol))
1776 return ERR_PTR(-EINVAL);
1777 } else {
1778 tp = NULL;
1779 }
1780 break;
1781 }
1782 }
1783 chain_info->pprev = pprev;
1784 if (tp) {
1785 chain_info->next = tp->next;
1786 tcf_proto_get(tp);
1787 } else {
1788 chain_info->next = NULL;
1789 }
1790 return tp;
1791 }
1792
tcf_fill_node(struct net * net,struct sk_buff * skb,struct tcf_proto * tp,struct tcf_block * block,struct Qdisc * q,u32 parent,void * fh,u32 portid,u32 seq,u16 flags,int event,bool terse_dump,bool rtnl_held)1793 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1794 struct tcf_proto *tp, struct tcf_block *block,
1795 struct Qdisc *q, u32 parent, void *fh,
1796 u32 portid, u32 seq, u16 flags, int event,
1797 bool terse_dump, bool rtnl_held)
1798 {
1799 struct tcmsg *tcm;
1800 struct nlmsghdr *nlh;
1801 unsigned char *b = skb_tail_pointer(skb);
1802
1803 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1804 if (!nlh)
1805 goto out_nlmsg_trim;
1806 tcm = nlmsg_data(nlh);
1807 tcm->tcm_family = AF_UNSPEC;
1808 tcm->tcm__pad1 = 0;
1809 tcm->tcm__pad2 = 0;
1810 if (q) {
1811 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1812 tcm->tcm_parent = parent;
1813 } else {
1814 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1815 tcm->tcm_block_index = block->index;
1816 }
1817 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1818 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1819 goto nla_put_failure;
1820 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1821 goto nla_put_failure;
1822 if (!fh) {
1823 tcm->tcm_handle = 0;
1824 } else if (terse_dump) {
1825 if (tp->ops->terse_dump) {
1826 if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1827 rtnl_held) < 0)
1828 goto nla_put_failure;
1829 } else {
1830 goto cls_op_not_supp;
1831 }
1832 } else {
1833 if (tp->ops->dump &&
1834 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1835 goto nla_put_failure;
1836 }
1837 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1838 return skb->len;
1839
1840 out_nlmsg_trim:
1841 nla_put_failure:
1842 cls_op_not_supp:
1843 nlmsg_trim(skb, b);
1844 return -1;
1845 }
1846
1847 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1848 struct nlmsghdr *n, struct tcf_proto *tp,
1849 struct tcf_block *block, struct Qdisc *q,
1850 u32 parent, void *fh, int event, bool unicast,
1851 bool rtnl_held)
1852 {
1853 struct sk_buff *skb;
1854 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1855 int err = 0;
1856
1857 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1858 if (!skb)
1859 return -ENOBUFS;
1860
1861 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1862 n->nlmsg_seq, n->nlmsg_flags, event,
1863 false, rtnl_held) <= 0) {
1864 kfree_skb(skb);
1865 return -EINVAL;
1866 }
1867
1868 if (unicast)
1869 err = rtnl_unicast(skb, net, portid);
1870 else
1871 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1872 n->nlmsg_flags & NLM_F_ECHO);
1873 return err;
1874 }
1875
1876 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1877 struct nlmsghdr *n, struct tcf_proto *tp,
1878 struct tcf_block *block, struct Qdisc *q,
1879 u32 parent, void *fh, bool unicast, bool *last,
1880 bool rtnl_held, struct netlink_ext_ack *extack)
1881 {
1882 struct sk_buff *skb;
1883 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1884 int err;
1885
1886 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1887 if (!skb)
1888 return -ENOBUFS;
1889
1890 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1891 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1892 false, rtnl_held) <= 0) {
1893 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1894 kfree_skb(skb);
1895 return -EINVAL;
1896 }
1897
1898 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1899 if (err) {
1900 kfree_skb(skb);
1901 return err;
1902 }
1903
1904 if (unicast)
1905 err = rtnl_unicast(skb, net, portid);
1906 else
1907 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1908 n->nlmsg_flags & NLM_F_ECHO);
1909 if (err < 0)
1910 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1911
1912 return err;
1913 }
1914
1915 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1916 struct tcf_block *block, struct Qdisc *q,
1917 u32 parent, struct nlmsghdr *n,
1918 struct tcf_chain *chain, int event)
1919 {
1920 struct tcf_proto *tp;
1921
1922 for (tp = tcf_get_next_proto(chain, NULL);
1923 tp; tp = tcf_get_next_proto(chain, tp))
1924 tfilter_notify(net, oskb, n, tp, block,
1925 q, parent, NULL, event, false, true);
1926 }
1927
1928 static void tfilter_put(struct tcf_proto *tp, void *fh)
1929 {
1930 if (tp->ops->put && fh)
1931 tp->ops->put(tp, fh);
1932 }
1933
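/* RTM_NEWTFILTER handler: parse attributes, locate the qdisc/class, block
 * and chain, find an existing proto-tcf or create one (NLM_F_CREATE), then
 * let tp->ops->get() and tp->ops->change() create or replace the filter and
 * notify listeners. On -EAGAIN (concurrent flush of the target chain) the
 * whole request is replayed under rtnl. For illustration only (device name
 * assumed), a request of this shape typically lands here:
 *
 *	# tc filter add dev eth0 ingress prio 1 protocol ip \
 *		flower dst_ip 10.0.0.1 action drop
 */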
1934 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1935 struct netlink_ext_ack *extack)
1936 {
1937 struct net *net = sock_net(skb->sk);
1938 struct nlattr *tca[TCA_MAX + 1];
1939 char name[IFNAMSIZ];
1940 struct tcmsg *t;
1941 u32 protocol;
1942 u32 prio;
1943 bool prio_allocate;
1944 u32 parent;
1945 u32 chain_index;
1946 struct Qdisc *q = NULL;
1947 struct tcf_chain_info chain_info;
1948 struct tcf_chain *chain = NULL;
1949 struct tcf_block *block;
1950 struct tcf_proto *tp;
1951 unsigned long cl;
1952 void *fh;
1953 int err;
1954 int tp_created;
1955 bool rtnl_held = false;
1956 u32 flags;
1957
1958 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1959 return -EPERM;
1960
1961 replay:
1962 tp_created = 0;
1963
1964 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1965 rtm_tca_policy, extack);
1966 if (err < 0)
1967 return err;
1968
1969 t = nlmsg_data(n);
1970 protocol = TC_H_MIN(t->tcm_info);
1971 prio = TC_H_MAJ(t->tcm_info);
1972 prio_allocate = false;
1973 parent = t->tcm_parent;
1974 tp = NULL;
1975 cl = 0;
1976 block = NULL;
1977 flags = 0;
1978
1979 if (prio == 0) {
1980 /* If no priority is provided by the user,
1981 * we allocate one.
1982 */
1983 if (n->nlmsg_flags & NLM_F_CREATE) {
1984 prio = TC_H_MAKE(0x80000000U, 0U);
1985 prio_allocate = true;
1986 } else {
1987 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1988 return -ENOENT;
1989 }
1990 }
1991
1992 /* Find head of filter chain. */
1993
1994 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1995 if (err)
1996 return err;
1997
1998 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
1999 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2000 err = -EINVAL;
2001 goto errout;
2002 }
2003
2004 	/* Take the rtnl mutex if rtnl_held was set to true on a previous iteration,
2005 	 * the block is shared (no qdisc found), the qdisc is not unlocked, the
2006 	 * classifier type is not specified, or the classifier is not unlocked.
2007 	 */
2008 if (rtnl_held ||
2009 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2010 !tcf_proto_is_unlocked(name)) {
2011 rtnl_held = true;
2012 rtnl_lock();
2013 }
2014
2015 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2016 if (err)
2017 goto errout;
2018
2019 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2020 extack);
2021 if (IS_ERR(block)) {
2022 err = PTR_ERR(block);
2023 goto errout;
2024 }
2025 block->classid = parent;
2026
2027 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2028 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2029 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2030 err = -EINVAL;
2031 goto errout;
2032 }
2033 chain = tcf_chain_get(block, chain_index, true);
2034 if (!chain) {
2035 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2036 err = -ENOMEM;
2037 goto errout;
2038 }
2039
2040 mutex_lock(&chain->filter_chain_lock);
2041 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2042 prio, prio_allocate);
2043 if (IS_ERR(tp)) {
2044 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2045 err = PTR_ERR(tp);
2046 goto errout_locked;
2047 }
2048
2049 if (tp == NULL) {
2050 struct tcf_proto *tp_new = NULL;
2051
2052 if (chain->flushing) {
2053 err = -EAGAIN;
2054 goto errout_locked;
2055 }
2056
2057 /* Proto-tcf does not exist, create new one */
2058
2059 if (tca[TCA_KIND] == NULL || !protocol) {
2060 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2061 err = -EINVAL;
2062 goto errout_locked;
2063 }
2064
2065 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2066 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2067 err = -ENOENT;
2068 goto errout_locked;
2069 }
2070
2071 if (prio_allocate)
2072 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2073 &chain_info));
2074
2075 mutex_unlock(&chain->filter_chain_lock);
2076 tp_new = tcf_proto_create(name, protocol, prio, chain,
2077 rtnl_held, extack);
2078 if (IS_ERR(tp_new)) {
2079 err = PTR_ERR(tp_new);
2080 goto errout_tp;
2081 }
2082
2083 tp_created = 1;
2084 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2085 rtnl_held);
2086 if (IS_ERR(tp)) {
2087 err = PTR_ERR(tp);
2088 goto errout_tp;
2089 }
2090 } else {
2091 mutex_unlock(&chain->filter_chain_lock);
2092 }
2093
2094 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2095 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2096 err = -EINVAL;
2097 goto errout;
2098 }
2099
2100 fh = tp->ops->get(tp, t->tcm_handle);
2101
2102 if (!fh) {
2103 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2104 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2105 err = -ENOENT;
2106 goto errout;
2107 }
2108 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2109 tfilter_put(tp, fh);
2110 NL_SET_ERR_MSG(extack, "Filter already exists");
2111 err = -EEXIST;
2112 goto errout;
2113 }
2114
2115 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2116 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2117 err = -EINVAL;
2118 goto errout;
2119 }
2120
2121 if (!(n->nlmsg_flags & NLM_F_CREATE))
2122 flags |= TCA_ACT_FLAGS_REPLACE;
2123 if (!rtnl_held)
2124 flags |= TCA_ACT_FLAGS_NO_RTNL;
2125 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2126 flags, extack);
2127 if (err == 0) {
2128 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2129 RTM_NEWTFILTER, false, rtnl_held);
2130 tfilter_put(tp, fh);
2131 /* q pointer is NULL for shared blocks */
2132 if (q)
2133 q->flags &= ~TCQ_F_CAN_BYPASS;
2134 }
2135
2136 errout:
2137 if (err && tp_created)
2138 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2139 errout_tp:
2140 if (chain) {
2141 if (tp && !IS_ERR(tp))
2142 tcf_proto_put(tp, rtnl_held, NULL);
2143 if (!tp_created)
2144 tcf_chain_put(chain);
2145 }
2146 tcf_block_release(q, block, rtnl_held);
2147
2148 if (rtnl_held)
2149 rtnl_unlock();
2150
2151 if (err == -EAGAIN) {
2152 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2153 * of target chain.
2154 */
2155 rtnl_held = true;
2156 /* Replay the request. */
2157 goto replay;
2158 }
2159 return err;
2160
2161 errout_locked:
2162 mutex_unlock(&chain->filter_chain_lock);
2163 goto errout;
2164 }
2165
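/* RTM_DELTFILTER handler. Three cases, keyed on the header fields:
 * prio == 0 flushes the whole chain (protocol, handle and kind must then be
 * unset); tcm_handle == 0 removes an entire proto-tcf from the chain;
 * otherwise a single filter instance is deleted via tfilter_del_notify(),
 * and the proto-tcf itself is removed once its last filter is gone (see
 * tcf_chain_tp_delete_empty() above).
 */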
2166 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2167 struct netlink_ext_ack *extack)
2168 {
2169 struct net *net = sock_net(skb->sk);
2170 struct nlattr *tca[TCA_MAX + 1];
2171 char name[IFNAMSIZ];
2172 struct tcmsg *t;
2173 u32 protocol;
2174 u32 prio;
2175 u32 parent;
2176 u32 chain_index;
2177 struct Qdisc *q = NULL;
2178 struct tcf_chain_info chain_info;
2179 struct tcf_chain *chain = NULL;
2180 struct tcf_block *block = NULL;
2181 struct tcf_proto *tp = NULL;
2182 unsigned long cl = 0;
2183 void *fh = NULL;
2184 int err;
2185 bool rtnl_held = false;
2186
2187 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2188 return -EPERM;
2189
2190 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2191 rtm_tca_policy, extack);
2192 if (err < 0)
2193 return err;
2194
2195 t = nlmsg_data(n);
2196 protocol = TC_H_MIN(t->tcm_info);
2197 prio = TC_H_MAJ(t->tcm_info);
2198 parent = t->tcm_parent;
2199
2200 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2201 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2202 return -ENOENT;
2203 }
2204
2205 /* Find head of filter chain. */
2206
2207 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2208 if (err)
2209 return err;
2210
2211 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2212 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2213 err = -EINVAL;
2214 goto errout;
2215 }
2216 	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2217 	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2218 	 * not specified, or the classifier is not unlocked.
2219 	 */
2220 if (!prio ||
2221 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2222 !tcf_proto_is_unlocked(name)) {
2223 rtnl_held = true;
2224 rtnl_lock();
2225 }
2226
2227 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2228 if (err)
2229 goto errout;
2230
2231 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2232 extack);
2233 if (IS_ERR(block)) {
2234 err = PTR_ERR(block);
2235 goto errout;
2236 }
2237
2238 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2239 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2240 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2241 err = -EINVAL;
2242 goto errout;
2243 }
2244 chain = tcf_chain_get(block, chain_index, false);
2245 if (!chain) {
2246 /* User requested flush on non-existent chain. Nothing to do,
2247 * so just return success.
2248 */
2249 if (prio == 0) {
2250 err = 0;
2251 goto errout;
2252 }
2253 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2254 err = -ENOENT;
2255 goto errout;
2256 }
2257
2258 if (prio == 0) {
2259 tfilter_notify_chain(net, skb, block, q, parent, n,
2260 chain, RTM_DELTFILTER);
2261 tcf_chain_flush(chain, rtnl_held);
2262 err = 0;
2263 goto errout;
2264 }
2265
2266 mutex_lock(&chain->filter_chain_lock);
2267 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2268 prio, false);
2269 if (!tp || IS_ERR(tp)) {
2270 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2271 err = tp ? PTR_ERR(tp) : -ENOENT;
2272 goto errout_locked;
2273 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2274 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2275 err = -EINVAL;
2276 goto errout_locked;
2277 } else if (t->tcm_handle == 0) {
2278 tcf_proto_signal_destroying(chain, tp);
2279 tcf_chain_tp_remove(chain, &chain_info, tp);
2280 mutex_unlock(&chain->filter_chain_lock);
2281
2282 tcf_proto_put(tp, rtnl_held, NULL);
2283 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2284 RTM_DELTFILTER, false, rtnl_held);
2285 err = 0;
2286 goto errout;
2287 }
2288 mutex_unlock(&chain->filter_chain_lock);
2289
2290 fh = tp->ops->get(tp, t->tcm_handle);
2291
2292 if (!fh) {
2293 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2294 err = -ENOENT;
2295 } else {
2296 bool last;
2297
2298 err = tfilter_del_notify(net, skb, n, tp, block,
2299 q, parent, fh, false, &last,
2300 rtnl_held, extack);
2301
2302 if (err)
2303 goto errout;
2304 if (last)
2305 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2306 }
2307
2308 errout:
2309 if (chain) {
2310 if (tp && !IS_ERR(tp))
2311 tcf_proto_put(tp, rtnl_held, NULL);
2312 tcf_chain_put(chain);
2313 }
2314 tcf_block_release(q, block, rtnl_held);
2315
2316 if (rtnl_held)
2317 rtnl_unlock();
2318
2319 return err;
2320
2321 errout_locked:
2322 mutex_unlock(&chain->filter_chain_lock);
2323 goto errout;
2324 }
2325
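/* RTM_GETTFILTER handler: look up one filter by chain, protocol, prio and
 * handle, and unicast it back to the requester as an RTM_NEWTFILTER
 * message. This is a read-only operation, hence no CAP_NET_ADMIN check.
 */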
2326 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2327 struct netlink_ext_ack *extack)
2328 {
2329 struct net *net = sock_net(skb->sk);
2330 struct nlattr *tca[TCA_MAX + 1];
2331 char name[IFNAMSIZ];
2332 struct tcmsg *t;
2333 u32 protocol;
2334 u32 prio;
2335 u32 parent;
2336 u32 chain_index;
2337 struct Qdisc *q = NULL;
2338 struct tcf_chain_info chain_info;
2339 struct tcf_chain *chain = NULL;
2340 struct tcf_block *block = NULL;
2341 struct tcf_proto *tp = NULL;
2342 unsigned long cl = 0;
2343 void *fh = NULL;
2344 int err;
2345 bool rtnl_held = false;
2346
2347 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2348 rtm_tca_policy, extack);
2349 if (err < 0)
2350 return err;
2351
2352 t = nlmsg_data(n);
2353 protocol = TC_H_MIN(t->tcm_info);
2354 prio = TC_H_MAJ(t->tcm_info);
2355 parent = t->tcm_parent;
2356
2357 if (prio == 0) {
2358 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2359 return -ENOENT;
2360 }
2361
2362 /* Find head of filter chain. */
2363
2364 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2365 if (err)
2366 return err;
2367
2368 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2369 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2370 err = -EINVAL;
2371 goto errout;
2372 }
2373 	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2374 	 * qdisc is not unlocked, the classifier type is not specified, or the
2375 	 * classifier is not unlocked.
2376 	 */
2377 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2378 !tcf_proto_is_unlocked(name)) {
2379 rtnl_held = true;
2380 rtnl_lock();
2381 }
2382
2383 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2384 if (err)
2385 goto errout;
2386
2387 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2388 extack);
2389 if (IS_ERR(block)) {
2390 err = PTR_ERR(block);
2391 goto errout;
2392 }
2393
2394 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2395 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2396 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2397 err = -EINVAL;
2398 goto errout;
2399 }
2400 chain = tcf_chain_get(block, chain_index, false);
2401 if (!chain) {
2402 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2403 err = -EINVAL;
2404 goto errout;
2405 }
2406
2407 mutex_lock(&chain->filter_chain_lock);
2408 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2409 prio, false);
2410 mutex_unlock(&chain->filter_chain_lock);
2411 if (!tp || IS_ERR(tp)) {
2412 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2413 err = tp ? PTR_ERR(tp) : -ENOENT;
2414 goto errout;
2415 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2416 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2417 err = -EINVAL;
2418 goto errout;
2419 }
2420
2421 fh = tp->ops->get(tp, t->tcm_handle);
2422
2423 if (!fh) {
2424 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2425 err = -ENOENT;
2426 } else {
2427 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2428 fh, RTM_NEWTFILTER, true, rtnl_held);
2429 if (err < 0)
2430 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2431 }
2432
2433 tfilter_put(tp, fh);
2434 errout:
2435 if (chain) {
2436 if (tp && !IS_ERR(tp))
2437 tcf_proto_put(tp, rtnl_held, NULL);
2438 tcf_chain_put(chain);
2439 }
2440 tcf_block_release(q, block, rtnl_held);
2441
2442 if (rtnl_held)
2443 rtnl_unlock();
2444
2445 return err;
2446 }
2447
2448 struct tcf_dump_args {
2449 struct tcf_walker w;
2450 struct sk_buff *skb;
2451 struct netlink_callback *cb;
2452 struct tcf_block *block;
2453 struct Qdisc *q;
2454 u32 parent;
2455 bool terse_dump;
2456 };
2457
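/* Filter dumps go through the classifier walker interface: tcf_chain_dump()
 * fills a struct tcf_dump_args and passes it to tp->ops->walk(), which
 * invokes tcf_node_dump() once per filter. Resume state for interrupted
 * netlink dumps lives in cb->args[]: args[0] is the flat proto-tcf index
 * across the block, args[1] the position within the current tp, and
 * args[2] an opaque cookie for the walker.
 */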
2458 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2459 {
2460 struct tcf_dump_args *a = (void *)arg;
2461 struct net *net = sock_net(a->skb->sk);
2462
2463 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2464 n, NETLINK_CB(a->cb->skb).portid,
2465 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2466 RTM_NEWTFILTER, a->terse_dump, true);
2467 }
2468
2469 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2470 struct sk_buff *skb, struct netlink_callback *cb,
2471 long index_start, long *p_index, bool terse)
2472 {
2473 struct net *net = sock_net(skb->sk);
2474 struct tcf_block *block = chain->block;
2475 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2476 struct tcf_proto *tp, *tp_prev;
2477 struct tcf_dump_args arg;
2478
2479 for (tp = __tcf_get_next_proto(chain, NULL);
2480 tp;
2481 tp_prev = tp,
2482 tp = __tcf_get_next_proto(chain, tp),
2483 tcf_proto_put(tp_prev, true, NULL),
2484 (*p_index)++) {
2485 if (*p_index < index_start)
2486 continue;
2487 if (TC_H_MAJ(tcm->tcm_info) &&
2488 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2489 continue;
2490 if (TC_H_MIN(tcm->tcm_info) &&
2491 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2492 continue;
2493 if (*p_index > index_start)
2494 memset(&cb->args[1], 0,
2495 sizeof(cb->args) - sizeof(cb->args[0]));
2496 if (cb->args[1] == 0) {
2497 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2498 NETLINK_CB(cb->skb).portid,
2499 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2500 RTM_NEWTFILTER, false, true) <= 0)
2501 goto errout;
2502 cb->args[1] = 1;
2503 }
2504 if (!tp->ops->walk)
2505 continue;
2506 arg.w.fn = tcf_node_dump;
2507 arg.skb = skb;
2508 arg.cb = cb;
2509 arg.block = block;
2510 arg.q = q;
2511 arg.parent = parent;
2512 arg.w.stop = 0;
2513 arg.w.skip = cb->args[1] - 1;
2514 arg.w.count = 0;
2515 arg.w.cookie = cb->args[2];
2516 arg.terse_dump = terse;
2517 tp->ops->walk(tp, &arg.w, true);
2518 cb->args[2] = arg.w.cookie;
2519 cb->args[1] = arg.w.count + 1;
2520 if (arg.w.stop)
2521 goto errout;
2522 }
2523 return true;
2524
2525 errout:
2526 tcf_proto_put(tp, true, NULL);
2527 return false;
2528 }
2529
2530 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2531 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2532 };
2533
2534 /* called with RTNL */
2535 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2536 {
2537 struct tcf_chain *chain, *chain_prev;
2538 struct net *net = sock_net(skb->sk);
2539 struct nlattr *tca[TCA_MAX + 1];
2540 struct Qdisc *q = NULL;
2541 struct tcf_block *block;
2542 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2543 bool terse_dump = false;
2544 long index_start;
2545 long index;
2546 u32 parent;
2547 int err;
2548
2549 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2550 return skb->len;
2551
2552 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2553 tcf_tfilter_dump_policy, cb->extack);
2554 if (err)
2555 return err;
2556
2557 if (tca[TCA_DUMP_FLAGS]) {
2558 struct nla_bitfield32 flags =
2559 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2560
2561 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2562 }
2563
2564 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2565 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2566 if (!block)
2567 goto out;
2568 		/* If we work with a block index, q is NULL and the parent value
2569 		 * will never be used in the following code. The check
2570 		 * in tcf_fill_node prevents it. However, the compiler does not
2571 		 * see that far, so set parent to zero to silence the warning
2572 		 * about parent being uninitialized.
2573 		 */
2574 parent = 0;
2575 } else {
2576 const struct Qdisc_class_ops *cops;
2577 struct net_device *dev;
2578 unsigned long cl = 0;
2579
2580 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2581 if (!dev)
2582 return skb->len;
2583
2584 parent = tcm->tcm_parent;
2585 if (!parent)
2586 q = dev->qdisc;
2587 else
2588 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2589 if (!q)
2590 goto out;
2591 cops = q->ops->cl_ops;
2592 if (!cops)
2593 goto out;
2594 if (!cops->tcf_block)
2595 goto out;
2596 if (TC_H_MIN(tcm->tcm_parent)) {
2597 cl = cops->find(q, tcm->tcm_parent);
2598 if (cl == 0)
2599 goto out;
2600 }
2601 block = cops->tcf_block(q, cl, NULL);
2602 if (!block)
2603 goto out;
2604 parent = block->classid;
2605 if (tcf_block_shared(block))
2606 q = NULL;
2607 }
2608
2609 index_start = cb->args[0];
2610 index = 0;
2611
2612 for (chain = __tcf_get_next_chain(block, NULL);
2613 chain;
2614 chain_prev = chain,
2615 chain = __tcf_get_next_chain(block, chain),
2616 tcf_chain_put(chain_prev)) {
2617 if (tca[TCA_CHAIN] &&
2618 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2619 continue;
2620 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2621 index_start, &index, terse_dump)) {
2622 tcf_chain_put(chain);
2623 err = -EMSGSIZE;
2624 break;
2625 }
2626 }
2627
2628 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2629 tcf_block_refcnt_put(block, true);
2630 cb->args[0] = index;
2631
2632 out:
2633 	/* If we made no progress, the error (EMSGSIZE) is real */
2634 if (skb->len == 0 && err)
2635 return err;
2636 return skb->len;
2637 }
2638
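/* Build a single RTM_*CHAIN message: a tcmsg header identifying the block
 * (or the qdisc it is attached to), the TCA_CHAIN index and, if the chain
 * has a template, its TCA_KIND plus whatever ops->tmplt_dump() emits.
 */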
2639 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2640 void *tmplt_priv, u32 chain_index,
2641 struct net *net, struct sk_buff *skb,
2642 struct tcf_block *block,
2643 u32 portid, u32 seq, u16 flags, int event)
2644 {
2645 unsigned char *b = skb_tail_pointer(skb);
2646 const struct tcf_proto_ops *ops;
2647 struct nlmsghdr *nlh;
2648 struct tcmsg *tcm;
2649 void *priv;
2650
2651 ops = tmplt_ops;
2652 priv = tmplt_priv;
2653
2654 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2655 if (!nlh)
2656 goto out_nlmsg_trim;
2657 tcm = nlmsg_data(nlh);
2658 tcm->tcm_family = AF_UNSPEC;
2659 tcm->tcm__pad1 = 0;
2660 tcm->tcm__pad2 = 0;
2661 tcm->tcm_handle = 0;
2662 if (block->q) {
2663 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2664 tcm->tcm_parent = block->q->handle;
2665 } else {
2666 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2667 tcm->tcm_block_index = block->index;
2668 }
2669
2670 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2671 goto nla_put_failure;
2672
2673 if (ops) {
2674 if (nla_put_string(skb, TCA_KIND, ops->kind))
2675 goto nla_put_failure;
2676 if (ops->tmplt_dump(skb, net, priv) < 0)
2677 goto nla_put_failure;
2678 }
2679
2680 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2681 return skb->len;
2682
2683 out_nlmsg_trim:
2684 nla_put_failure:
2685 nlmsg_trim(skb, b);
2686 return -EMSGSIZE;
2687 }
2688
2689 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2690 u32 seq, u16 flags, int event, bool unicast)
2691 {
2692 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2693 struct tcf_block *block = chain->block;
2694 struct net *net = block->net;
2695 struct sk_buff *skb;
2696 int err = 0;
2697
2698 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2699 if (!skb)
2700 return -ENOBUFS;
2701
2702 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2703 chain->index, net, skb, block, portid,
2704 seq, flags, event) <= 0) {
2705 kfree_skb(skb);
2706 return -EINVAL;
2707 }
2708
2709 if (unicast)
2710 err = rtnl_unicast(skb, net, portid);
2711 else
2712 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2713 flags & NLM_F_ECHO);
2714
2715 return err;
2716 }
2717
2718 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2719 void *tmplt_priv, u32 chain_index,
2720 struct tcf_block *block, struct sk_buff *oskb,
2721 u32 seq, u16 flags, bool unicast)
2722 {
2723 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2724 struct net *net = block->net;
2725 struct sk_buff *skb;
2726
2727 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2728 if (!skb)
2729 return -ENOBUFS;
2730
2731 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2732 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2733 kfree_skb(skb);
2734 return -EINVAL;
2735 }
2736
2737 if (unicast)
2738 return rtnl_unicast(skb, net, portid);
2739
2740 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2741 }
2742
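/* Attach a template to a chain. A template pins the classifier kind for the
 * whole chain (see the tmplt_ops check in tc_new_tfilter()) and lets the
 * classifier pre-validate or pre-allocate state via ops->tmplt_create();
 * only classifiers implementing all three tmplt_* ops support it. For
 * illustration only (device name assumed), a flower template could be set
 * up with:
 *
 *	# tc chain add dev eth0 ingress chain 1 protocol ip \
 *		flower dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:00
 */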
2743 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2744 struct nlattr **tca,
2745 struct netlink_ext_ack *extack)
2746 {
2747 const struct tcf_proto_ops *ops;
2748 char name[IFNAMSIZ];
2749 void *tmplt_priv;
2750
2751 	/* If kind is not set, the user did not specify a template. */
2752 if (!tca[TCA_KIND])
2753 return 0;
2754
2755 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2756 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2757 return -EINVAL;
2758 }
2759
2760 ops = tcf_proto_lookup_ops(name, true, extack);
2761 if (IS_ERR(ops))
2762 return PTR_ERR(ops);
2763 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2764 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2765 return -EOPNOTSUPP;
2766 }
2767
2768 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2769 if (IS_ERR(tmplt_priv)) {
2770 module_put(ops->owner);
2771 return PTR_ERR(tmplt_priv);
2772 }
2773 chain->tmplt_ops = ops;
2774 chain->tmplt_priv = tmplt_priv;
2775 return 0;
2776 }
2777
2778 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2779 void *tmplt_priv)
2780 {
2781 	/* If template ops are not set, there is no work to do for us. */
2782 if (!tmplt_ops)
2783 return;
2784
2785 tmplt_ops->tmplt_destroy(tmplt_priv);
2786 module_put(tmplt_ops->owner);
2787 }
2788
2789 /* Add/delete/get a chain */
2790
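/* tc_ctl_chain() serves RTM_NEWCHAIN, RTM_DELCHAIN and RTM_GETCHAIN. Chain
 * lookup and creation happen under block->lock; RTM_NEWCHAIN takes an extra
 * reference so an explicitly created, still-empty chain survives this
 * function, and RTM_DELCHAIN puts that reference again after flushing.
 * RTM_NEWCHAIN may take over a chain that so far exists only because
 * actions reference it, while RTM_DELCHAIN and RTM_GETCHAIN treat such a
 * chain as nonexistent (tcf_chain_held_by_acts_only()).
 */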
2791 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2792 struct netlink_ext_ack *extack)
2793 {
2794 struct net *net = sock_net(skb->sk);
2795 struct nlattr *tca[TCA_MAX + 1];
2796 struct tcmsg *t;
2797 u32 parent;
2798 u32 chain_index;
2799 struct Qdisc *q = NULL;
2800 struct tcf_chain *chain = NULL;
2801 struct tcf_block *block;
2802 unsigned long cl;
2803 int err;
2804
2805 if (n->nlmsg_type != RTM_GETCHAIN &&
2806 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2807 return -EPERM;
2808
2809 replay:
2810 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2811 rtm_tca_policy, extack);
2812 if (err < 0)
2813 return err;
2814
2815 t = nlmsg_data(n);
2816 parent = t->tcm_parent;
2817 cl = 0;
2818
2819 block = tcf_block_find(net, &q, &parent, &cl,
2820 t->tcm_ifindex, t->tcm_block_index, extack);
2821 if (IS_ERR(block))
2822 return PTR_ERR(block);
2823
2824 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2825 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2826 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2827 err = -EINVAL;
2828 goto errout_block;
2829 }
2830
2831 mutex_lock(&block->lock);
2832 chain = tcf_chain_lookup(block, chain_index);
2833 if (n->nlmsg_type == RTM_NEWCHAIN) {
2834 if (chain) {
2835 if (tcf_chain_held_by_acts_only(chain)) {
2836 /* The chain exists only because there is
2837 * some action referencing it.
2838 */
2839 tcf_chain_hold(chain);
2840 } else {
2841 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2842 err = -EEXIST;
2843 goto errout_block_locked;
2844 }
2845 } else {
2846 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2847 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2848 err = -ENOENT;
2849 goto errout_block_locked;
2850 }
2851 chain = tcf_chain_create(block, chain_index);
2852 if (!chain) {
2853 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2854 err = -ENOMEM;
2855 goto errout_block_locked;
2856 }
2857 }
2858 } else {
2859 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2860 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2861 err = -EINVAL;
2862 goto errout_block_locked;
2863 }
2864 tcf_chain_hold(chain);
2865 }
2866
2867 if (n->nlmsg_type == RTM_NEWCHAIN) {
2868 		/* Modifying a chain requires holding the parent block lock. If
2869 		 * the chain was successfully added, take a reference to the
2870 		 * chain. This ensures that an empty chain does not disappear at
2871 		 * the end of this function.
2872 		 */
2873 tcf_chain_hold(chain);
2874 chain->explicitly_created = true;
2875 }
2876 mutex_unlock(&block->lock);
2877
2878 switch (n->nlmsg_type) {
2879 case RTM_NEWCHAIN:
2880 err = tc_chain_tmplt_add(chain, net, tca, extack);
2881 if (err) {
2882 tcf_chain_put_explicitly_created(chain);
2883 goto errout;
2884 }
2885
2886 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2887 RTM_NEWCHAIN, false);
2888 break;
2889 case RTM_DELCHAIN:
2890 tfilter_notify_chain(net, skb, block, q, parent, n,
2891 chain, RTM_DELTFILTER);
2892 /* Flush the chain first as the user requested chain removal. */
2893 tcf_chain_flush(chain, true);
2894 		/* If the chain was successfully deleted, put the reference to
2895 		 * the chain that was taken during addition.
2896 		 */
2897 tcf_chain_put_explicitly_created(chain);
2898 break;
2899 case RTM_GETCHAIN:
2900 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2901 n->nlmsg_flags, n->nlmsg_type, true);
2902 if (err < 0)
2903 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2904 break;
2905 default:
2906 err = -EOPNOTSUPP;
2907 NL_SET_ERR_MSG(extack, "Unsupported message type");
2908 goto errout;
2909 }
2910
2911 errout:
2912 tcf_chain_put(chain);
2913 errout_block:
2914 tcf_block_release(q, block, true);
2915 if (err == -EAGAIN)
2916 /* Replay the request. */
2917 goto replay;
2918 return err;
2919
2920 errout_block_locked:
2921 mutex_unlock(&block->lock);
2922 goto errout_block;
2923 }
2924
2925 /* called with RTNL */
2926 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2927 {
2928 struct net *net = sock_net(skb->sk);
2929 struct nlattr *tca[TCA_MAX + 1];
2930 struct Qdisc *q = NULL;
2931 struct tcf_block *block;
2932 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2933 struct tcf_chain *chain;
2934 long index_start;
2935 long index;
2936 int err;
2937
2938 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2939 return skb->len;
2940
2941 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2942 rtm_tca_policy, cb->extack);
2943 if (err)
2944 return err;
2945
2946 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2947 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2948 if (!block)
2949 goto out;
2950 } else {
2951 const struct Qdisc_class_ops *cops;
2952 struct net_device *dev;
2953 unsigned long cl = 0;
2954
2955 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2956 if (!dev)
2957 return skb->len;
2958
2959 if (!tcm->tcm_parent)
2960 q = dev->qdisc;
2961 else
2962 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2963
2964 if (!q)
2965 goto out;
2966 cops = q->ops->cl_ops;
2967 if (!cops)
2968 goto out;
2969 if (!cops->tcf_block)
2970 goto out;
2971 if (TC_H_MIN(tcm->tcm_parent)) {
2972 cl = cops->find(q, tcm->tcm_parent);
2973 if (cl == 0)
2974 goto out;
2975 }
2976 block = cops->tcf_block(q, cl, NULL);
2977 if (!block)
2978 goto out;
2979 if (tcf_block_shared(block))
2980 q = NULL;
2981 }
2982
2983 index_start = cb->args[0];
2984 index = 0;
2985
2986 mutex_lock(&block->lock);
2987 list_for_each_entry(chain, &block->chain_list, list) {
2988 if ((tca[TCA_CHAIN] &&
2989 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2990 continue;
2991 if (index < index_start) {
2992 index++;
2993 continue;
2994 }
2995 if (tcf_chain_held_by_acts_only(chain))
2996 continue;
2997 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2998 chain->index, net, skb, block,
2999 NETLINK_CB(cb->skb).portid,
3000 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3001 RTM_NEWCHAIN);
3002 if (err <= 0)
3003 break;
3004 index++;
3005 }
3006 mutex_unlock(&block->lock);
3007
3008 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3009 tcf_block_refcnt_put(block, true);
3010 cb->args[0] = index;
3011
3012 out:
3013 	/* If we made no progress, the error (EMSGSIZE) is real */
3014 if (skb->len == 0 && err)
3015 return err;
3016 return skb->len;
3017 }
3018
3019 void tcf_exts_destroy(struct tcf_exts *exts)
3020 {
3021 #ifdef CONFIG_NET_CLS_ACT
3022 if (exts->actions) {
3023 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3024 kfree(exts->actions);
3025 }
3026 exts->nr_actions = 0;
3027 #endif
3028 }
3029 EXPORT_SYMBOL(tcf_exts_destroy);
3030
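/* Bind the actions referenced by a classifier's netlink attributes into
 * exts. Two input forms are accepted: the legacy standalone police
 * attribute (exts->police, stored as a single TCA_OLD_COMPAT action) and
 * the generic action list (exts->action, parsed by tcf_action_init()).
 * Without CONFIG_NET_CLS_ACT, any action or police attribute is rejected
 * with -EOPNOTSUPP.
 */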
3031 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3032 struct nlattr *rate_tlv, struct tcf_exts *exts,
3033 u32 flags, struct netlink_ext_ack *extack)
3034 {
3035 #ifdef CONFIG_NET_CLS_ACT
3036 {
3037 int init_res[TCA_ACT_MAX_PRIO] = {};
3038 struct tc_action *act;
3039 size_t attr_size = 0;
3040
3041 if (exts->police && tb[exts->police]) {
3042 struct tc_action_ops *a_o;
3043
3044 a_o = tc_action_load_ops(tb[exts->police], true,
3045 !(flags & TCA_ACT_FLAGS_NO_RTNL),
3046 extack);
3047 if (IS_ERR(a_o))
3048 return PTR_ERR(a_o);
3049 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
3050 act = tcf_action_init_1(net, tp, tb[exts->police],
3051 rate_tlv, a_o, init_res, flags,
3052 extack);
3053 module_put(a_o->owner);
3054 if (IS_ERR(act))
3055 return PTR_ERR(act);
3056
3057 act->type = exts->type = TCA_OLD_COMPAT;
3058 exts->actions[0] = act;
3059 exts->nr_actions = 1;
3060 tcf_idr_insert_many(exts->actions);
3061 } else if (exts->action && tb[exts->action]) {
3062 int err;
3063
3064 flags |= TCA_ACT_FLAGS_BIND;
3065 err = tcf_action_init(net, tp, tb[exts->action],
3066 rate_tlv, exts->actions, init_res,
3067 &attr_size, flags, extack);
3068 if (err < 0)
3069 return err;
3070 exts->nr_actions = err;
3071 }
3072 }
3073 #else
3074 if ((exts->action && tb[exts->action]) ||
3075 (exts->police && tb[exts->police])) {
3076 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3077 return -EOPNOTSUPP;
3078 }
3079 #endif
3080
3081 return 0;
3082 }
3083 EXPORT_SYMBOL(tcf_exts_validate);
3084
3085 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3086 {
3087 #ifdef CONFIG_NET_CLS_ACT
3088 struct tcf_exts old = *dst;
3089
3090 *dst = *src;
3091 tcf_exts_destroy(&old);
3092 #endif
3093 }
3094 EXPORT_SYMBOL(tcf_exts_change);
3095
3096 #ifdef CONFIG_NET_CLS_ACT
3097 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3098 {
3099 if (exts->nr_actions == 0)
3100 return NULL;
3101 else
3102 return exts->actions[0];
3103 }
3104 #endif
3105
3106 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3107 {
3108 #ifdef CONFIG_NET_CLS_ACT
3109 struct nlattr *nest;
3110
3111 if (exts->action && tcf_exts_has_actions(exts)) {
3112 		/*
3113 		 * Again for backward-compatible mode: we want to
3114 		 * work with both old and new modes of entering
3115 		 * tc data even if iproute2 is newer. - jhs
3116 		 */
3117 if (exts->type != TCA_OLD_COMPAT) {
3118 nest = nla_nest_start_noflag(skb, exts->action);
3119 if (nest == NULL)
3120 goto nla_put_failure;
3121
3122 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3123 < 0)
3124 goto nla_put_failure;
3125 nla_nest_end(skb, nest);
3126 } else if (exts->police) {
3127 struct tc_action *act = tcf_exts_first_act(exts);
3128 nest = nla_nest_start_noflag(skb, exts->police);
3129 if (nest == NULL || !act)
3130 goto nla_put_failure;
3131 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3132 goto nla_put_failure;
3133 nla_nest_end(skb, nest);
3134 }
3135 }
3136 return 0;
3137
3138 nla_put_failure:
3139 nla_nest_cancel(skb, nest);
3140 return -1;
3141 #else
3142 return 0;
3143 #endif
3144 }
3145 EXPORT_SYMBOL(tcf_exts_dump);
3146
3147 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3148 {
3149 #ifdef CONFIG_NET_CLS_ACT
3150 struct nlattr *nest;
3151
3152 if (!exts->action || !tcf_exts_has_actions(exts))
3153 return 0;
3154
3155 nest = nla_nest_start_noflag(skb, exts->action);
3156 if (!nest)
3157 goto nla_put_failure;
3158
3159 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3160 goto nla_put_failure;
3161 nla_nest_end(skb, nest);
3162 return 0;
3163
3164 nla_put_failure:
3165 nla_nest_cancel(skb, nest);
3166 return -1;
3167 #else
3168 return 0;
3169 #endif
3170 }
3171 EXPORT_SYMBOL(tcf_exts_terse_dump);
3172
3173 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3174 {
3175 #ifdef CONFIG_NET_CLS_ACT
3176 struct tc_action *a = tcf_exts_first_act(exts);
3177 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3178 return -1;
3179 #endif
3180 return 0;
3181 }
3182 EXPORT_SYMBOL(tcf_exts_dump_stats);
3183
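/* Hardware offload accounting: block->offloadcnt counts filters currently
 * in hardware, and the TCA_CLS_FLAGS_IN_HW bit in the per-filter flags
 * makes the inc/dec idempotent, so a filter offloaded through several
 * driver callbacks (tracked in its in_hw_count) still contributes exactly
 * one offloadcnt increment per block.
 */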
3184 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3185 {
3186 if (*flags & TCA_CLS_FLAGS_IN_HW)
3187 return;
3188 *flags |= TCA_CLS_FLAGS_IN_HW;
3189 atomic_inc(&block->offloadcnt);
3190 }
3191
3192 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3193 {
3194 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3195 return;
3196 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3197 atomic_dec(&block->offloadcnt);
3198 }
3199
3200 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3201 struct tcf_proto *tp, u32 *cnt,
3202 u32 *flags, u32 diff, bool add)
3203 {
3204 lockdep_assert_held(&block->cb_lock);
3205
3206 spin_lock(&tp->lock);
3207 if (add) {
3208 if (!*cnt)
3209 tcf_block_offload_inc(block, flags);
3210 *cnt += diff;
3211 } else {
3212 *cnt -= diff;
3213 if (!*cnt)
3214 tcf_block_offload_dec(block, flags);
3215 }
3216 spin_unlock(&tp->lock);
3217 }
3218
3219 static void
3220 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3221 u32 *cnt, u32 *flags)
3222 {
3223 lockdep_assert_held(&block->cb_lock);
3224
3225 spin_lock(&tp->lock);
3226 tcf_block_offload_dec(block, flags);
3227 *cnt = 0;
3228 spin_unlock(&tp->lock);
3229 }
3230
3231 static int
3232 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3233 void *type_data, bool err_stop)
3234 {
3235 struct flow_block_cb *block_cb;
3236 int ok_count = 0;
3237 int err;
3238
3239 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3240 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3241 if (err) {
3242 if (err_stop)
3243 return err;
3244 } else {
3245 ok_count++;
3246 }
3247 }
3248 return ok_count;
3249 }
3250
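/* Dispatch a setup call to every callback registered on the block. For
 * illustration only, a driver block callback has the flow_setup_cb_t shape
 * that __tc_setup_cb_call() invokes; a hypothetical driver might provide:
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *		// type_data points to a struct flow_cls_offload here;
 *		// returning 0 counts toward ok_count above.
 *		return 0;
 *	}
 */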
3251 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3252 void *type_data, bool err_stop, bool rtnl_held)
3253 {
3254 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3255 int ok_count;
3256
3257 retry:
3258 if (take_rtnl)
3259 rtnl_lock();
3260 down_read(&block->cb_lock);
3261 /* Need to obtain rtnl lock if block is bound to devs that require it.
3262 * In block bind code cb_lock is obtained while holding rtnl, so we must
3263 	 * obtain the locks in the same order here.
3264 */
3265 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3266 up_read(&block->cb_lock);
3267 take_rtnl = true;
3268 goto retry;
3269 }
3270
3271 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3272
3273 up_read(&block->cb_lock);
3274 if (take_rtnl)
3275 rtnl_unlock();
3276 return ok_count;
3277 }
3278 EXPORT_SYMBOL(tc_setup_cb_call);
3279
3280 /* Non-destructive filter add. If a filter that wasn't already in hardware
3281  * is successfully offloaded, increment the block offloads counter. On
3282  * failure, the previously offloaded filter is considered intact and the
3283  * offloads counter is not decremented.
3284  */
3285
3286 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3287 enum tc_setup_type type, void *type_data, bool err_stop,
3288 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3289 {
3290 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3291 int ok_count;
3292
3293 retry:
3294 if (take_rtnl)
3295 rtnl_lock();
3296 down_read(&block->cb_lock);
3297 /* Need to obtain rtnl lock if block is bound to devs that require it.
3298 * In block bind code cb_lock is obtained while holding rtnl, so we must
3299 	 * obtain the locks in the same order here.
3300 */
3301 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3302 up_read(&block->cb_lock);
3303 take_rtnl = true;
3304 goto retry;
3305 }
3306
3307 /* Make sure all netdevs sharing this block are offload-capable. */
3308 if (block->nooffloaddevcnt && err_stop) {
3309 ok_count = -EOPNOTSUPP;
3310 goto err_unlock;
3311 }
3312
3313 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3314 if (ok_count < 0)
3315 goto err_unlock;
3316
3317 if (tp->ops->hw_add)
3318 tp->ops->hw_add(tp, type_data);
3319 if (ok_count > 0)
3320 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3321 ok_count, true);
3322 err_unlock:
3323 up_read(&block->cb_lock);
3324 if (take_rtnl)
3325 rtnl_unlock();
3326 return ok_count < 0 ? ok_count : 0;
3327 }
3328 EXPORT_SYMBOL(tc_setup_cb_add);
3329
3330 /* Destructive filter replace. If a filter that wasn't already in hardware
3331  * is successfully offloaded, increment the block offload counter. On
3332  * failure, the previously offloaded filter is considered destroyed and the
3333  * offload counter is decremented.
3334  */
3335
3336 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3337 enum tc_setup_type type, void *type_data, bool err_stop,
3338 u32 *old_flags, unsigned int *old_in_hw_count,
3339 u32 *new_flags, unsigned int *new_in_hw_count,
3340 bool rtnl_held)
3341 {
3342 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3343 int ok_count;
3344
3345 retry:
3346 if (take_rtnl)
3347 rtnl_lock();
3348 down_read(&block->cb_lock);
3349 /* Need to obtain rtnl lock if block is bound to devs that require it.
3350 * In block bind code cb_lock is obtained while holding rtnl, so we must
3351 	 * obtain the locks in the same order here.
3352 */
3353 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3354 up_read(&block->cb_lock);
3355 take_rtnl = true;
3356 goto retry;
3357 }
3358
3359 /* Make sure all netdevs sharing this block are offload-capable. */
3360 if (block->nooffloaddevcnt && err_stop) {
3361 ok_count = -EOPNOTSUPP;
3362 goto err_unlock;
3363 }
3364
3365 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3366 if (tp->ops->hw_del)
3367 tp->ops->hw_del(tp, type_data);
3368
3369 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3370 if (ok_count < 0)
3371 goto err_unlock;
3372
3373 if (tp->ops->hw_add)
3374 tp->ops->hw_add(tp, type_data);
3375 if (ok_count > 0)
3376 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3377 new_flags, ok_count, true);
3378 err_unlock:
3379 up_read(&block->cb_lock);
3380 if (take_rtnl)
3381 rtnl_unlock();
3382 return ok_count < 0 ? ok_count : 0;
3383 }
3384 EXPORT_SYMBOL(tc_setup_cb_replace);
3385
3386 /* Destroy the filter and decrement the block offload counter if the filter
3387  * was previously offloaded.
3388  */
3389
3390 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3391 enum tc_setup_type type, void *type_data, bool err_stop,
3392 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3393 {
3394 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3395 int ok_count;
3396
3397 retry:
3398 if (take_rtnl)
3399 rtnl_lock();
3400 down_read(&block->cb_lock);
3401 /* Need to obtain rtnl lock if block is bound to devs that require it.
3402 * In block bind code cb_lock is obtained while holding rtnl, so we must
3403 	 * obtain the locks in the same order here.
3404 */
3405 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3406 up_read(&block->cb_lock);
3407 take_rtnl = true;
3408 goto retry;
3409 }
3410
3411 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3412
3413 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3414 if (tp->ops->hw_del)
3415 tp->ops->hw_del(tp, type_data);
3416
3417 up_read(&block->cb_lock);
3418 if (take_rtnl)
3419 rtnl_unlock();
3420 return ok_count < 0 ? ok_count : 0;
3421 }
3422 EXPORT_SYMBOL(tc_setup_cb_destroy);
3423
3424 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3425 bool add, flow_setup_cb_t *cb,
3426 enum tc_setup_type type, void *type_data,
3427 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3428 {
3429 int err = cb(type, type_data, cb_priv);
3430
3431 if (err) {
3432 if (add && tc_skip_sw(*flags))
3433 return err;
3434 } else {
3435 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3436 add);
3437 }
3438
3439 return 0;
3440 }
3441 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3442
3443 static int tcf_act_get_cookie(struct flow_action_entry *entry,
3444 const struct tc_action *act)
3445 {
3446 struct tc_cookie *cookie;
3447 int err = 0;
3448
3449 rcu_read_lock();
3450 cookie = rcu_dereference(act->act_cookie);
3451 if (cookie) {
3452 entry->cookie = flow_action_cookie_create(cookie->data,
3453 cookie->len,
3454 GFP_ATOMIC);
3455 if (!entry->cookie)
3456 err = -ENOMEM;
3457 }
3458 rcu_read_unlock();
3459 return err;
3460 }
3461
3462 static void tcf_act_put_cookie(struct flow_action_entry *entry)
3463 {
3464 flow_action_cookie_destroy(entry->cookie);
3465 }
3466
3467 void tc_cleanup_flow_action(struct flow_action *flow_action)
3468 {
3469 struct flow_action_entry *entry;
3470 int i;
3471
3472 flow_action_for_each(i, entry, flow_action) {
3473 tcf_act_put_cookie(entry);
3474 if (entry->destructor)
3475 entry->destructor(entry->destructor_priv);
3476 }
3477 }
3478 EXPORT_SYMBOL(tc_cleanup_flow_action);
3479
3480 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3481 const struct tc_action *act)
3482 {
3483 #ifdef CONFIG_NET_CLS_ACT
3484 entry->dev = act->ops->get_dev(act, &entry->destructor);
3485 if (!entry->dev)
3486 return;
3487 entry->destructor_priv = entry->dev;
3488 #endif
3489 }
3490
3491 static void tcf_tunnel_encap_put_tunnel(void *priv)
3492 {
3493 struct ip_tunnel_info *tunnel = priv;
3494
3495 kfree(tunnel);
3496 }
3497
3498 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3499 const struct tc_action *act)
3500 {
3501 entry->tunnel = tcf_tunnel_info_copy(act);
3502 if (!entry->tunnel)
3503 return -ENOMEM;
3504 entry->destructor = tcf_tunnel_encap_put_tunnel;
3505 entry->destructor_priv = entry->tunnel;
3506 return 0;
3507 }
3508
3509 static void tcf_sample_get_group(struct flow_action_entry *entry,
3510 const struct tc_action *act)
3511 {
3512 #ifdef CONFIG_NET_CLS_ACT
3513 entry->sample.psample_group =
3514 act->ops->get_psample_group(act, &entry->destructor);
3515 entry->destructor_priv = entry->sample.psample_group;
3516 #endif
3517 }
3518
3519 static void tcf_gate_entry_destructor(void *priv)
3520 {
3521 struct action_gate_entry *oe = priv;
3522
3523 kfree(oe);
3524 }
3525
3526 static int tcf_gate_get_entries(struct flow_action_entry *entry,
3527 const struct tc_action *act)
3528 {
3529 entry->gate.entries = tcf_gate_get_list(act);
3530
3531 if (!entry->gate.entries)
3532 return -EINVAL;
3533
3534 entry->destructor = tcf_gate_entry_destructor;
3535 entry->destructor_priv = entry->gate.entries;
3536
3537 return 0;
3538 }
3539
3540 static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3541 {
3542 if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3543 return FLOW_ACTION_HW_STATS_DONT_CARE;
3544 else if (!hw_stats)
3545 return FLOW_ACTION_HW_STATS_DISABLED;
3546
3547 return hw_stats;
3548 }
3549
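/* Translate the software actions attached to a filter (exts) into the
 * driver-facing struct flow_action representation used by offload
 * callbacks. Each action maps to one flow_action_entry, except pedit,
 * which expands to one entry per edit key; tcf_exts_num_actions() below
 * computes the matching entry count so callers can size the flow_action
 * table accordingly. Each action is snapshotted under its tcfa_lock.
 */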
3550 int tc_setup_flow_action(struct flow_action *flow_action,
3551 const struct tcf_exts *exts)
3552 {
3553 struct tc_action *act;
3554 int i, j, k, err = 0;
3555
3556 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3557 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3558 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3559
3560 if (!exts)
3561 return 0;
3562
3563 j = 0;
3564 tcf_exts_for_each_action(i, act, exts) {
3565 struct flow_action_entry *entry;
3566
3567 entry = &flow_action->entries[j];
3568 spin_lock_bh(&act->tcfa_lock);
3569 err = tcf_act_get_cookie(entry, act);
3570 if (err)
3571 goto err_out_locked;
3572
3573 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3574
3575 if (is_tcf_gact_ok(act)) {
3576 entry->id = FLOW_ACTION_ACCEPT;
3577 } else if (is_tcf_gact_shot(act)) {
3578 entry->id = FLOW_ACTION_DROP;
3579 } else if (is_tcf_gact_trap(act)) {
3580 entry->id = FLOW_ACTION_TRAP;
3581 } else if (is_tcf_gact_goto_chain(act)) {
3582 entry->id = FLOW_ACTION_GOTO;
3583 entry->chain_index = tcf_gact_goto_chain_index(act);
3584 } else if (is_tcf_mirred_egress_redirect(act)) {
3585 entry->id = FLOW_ACTION_REDIRECT;
3586 tcf_mirred_get_dev(entry, act);
3587 } else if (is_tcf_mirred_egress_mirror(act)) {
3588 entry->id = FLOW_ACTION_MIRRED;
3589 tcf_mirred_get_dev(entry, act);
3590 } else if (is_tcf_mirred_ingress_redirect(act)) {
3591 entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3592 tcf_mirred_get_dev(entry, act);
3593 } else if (is_tcf_mirred_ingress_mirror(act)) {
3594 entry->id = FLOW_ACTION_MIRRED_INGRESS;
3595 tcf_mirred_get_dev(entry, act);
3596 } else if (is_tcf_vlan(act)) {
3597 switch (tcf_vlan_action(act)) {
3598 case TCA_VLAN_ACT_PUSH:
3599 entry->id = FLOW_ACTION_VLAN_PUSH;
3600 entry->vlan.vid = tcf_vlan_push_vid(act);
3601 entry->vlan.proto = tcf_vlan_push_proto(act);
3602 entry->vlan.prio = tcf_vlan_push_prio(act);
3603 break;
3604 case TCA_VLAN_ACT_POP:
3605 entry->id = FLOW_ACTION_VLAN_POP;
3606 break;
3607 case TCA_VLAN_ACT_MODIFY:
3608 entry->id = FLOW_ACTION_VLAN_MANGLE;
3609 entry->vlan.vid = tcf_vlan_push_vid(act);
3610 entry->vlan.proto = tcf_vlan_push_proto(act);
3611 entry->vlan.prio = tcf_vlan_push_prio(act);
3612 break;
3613 default:
3614 err = -EOPNOTSUPP;
3615 goto err_out_locked;
3616 }
3617 } else if (is_tcf_tunnel_set(act)) {
3618 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3619 err = tcf_tunnel_encap_get_tunnel(entry, act);
3620 if (err)
3621 goto err_out_locked;
3622 } else if (is_tcf_tunnel_release(act)) {
3623 entry->id = FLOW_ACTION_TUNNEL_DECAP;
3624 } else if (is_tcf_pedit(act)) {
3625 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3626 switch (tcf_pedit_cmd(act, k)) {
3627 case TCA_PEDIT_KEY_EX_CMD_SET:
3628 entry->id = FLOW_ACTION_MANGLE;
3629 break;
3630 case TCA_PEDIT_KEY_EX_CMD_ADD:
3631 entry->id = FLOW_ACTION_ADD;
3632 break;
3633 default:
3634 err = -EOPNOTSUPP;
3635 goto err_out_locked;
3636 }
3637 entry->mangle.htype = tcf_pedit_htype(act, k);
3638 entry->mangle.mask = tcf_pedit_mask(act, k);
3639 entry->mangle.val = tcf_pedit_val(act, k);
3640 entry->mangle.offset = tcf_pedit_offset(act, k);
3641 entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3642 entry = &flow_action->entries[++j];
3643 }
3644 } else if (is_tcf_csum(act)) {
3645 entry->id = FLOW_ACTION_CSUM;
3646 entry->csum_flags = tcf_csum_update_flags(act);
3647 } else if (is_tcf_skbedit_mark(act)) {
3648 entry->id = FLOW_ACTION_MARK;
3649 entry->mark = tcf_skbedit_mark(act);
3650 } else if (is_tcf_sample(act)) {
3651 entry->id = FLOW_ACTION_SAMPLE;
3652 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3653 entry->sample.truncate = tcf_sample_truncate(act);
3654 entry->sample.rate = tcf_sample_rate(act);
3655 tcf_sample_get_group(entry, act);
3656 } else if (is_tcf_police(act)) {
3657 entry->id = FLOW_ACTION_POLICE;
3658 entry->police.burst = tcf_police_burst(act);
3659 entry->police.rate_bytes_ps =
3660 tcf_police_rate_bytes_ps(act);
3661 entry->police.burst_pkt = tcf_police_burst_pkt(act);
3662 entry->police.rate_pkt_ps =
3663 tcf_police_rate_pkt_ps(act);
3664 entry->police.mtu = tcf_police_tcfp_mtu(act);
3665 entry->police.index = act->tcfa_index;
3666 } else if (is_tcf_ct(act)) {
3667 entry->id = FLOW_ACTION_CT;
3668 entry->ct.action = tcf_ct_action(act);
3669 entry->ct.zone = tcf_ct_zone(act);
3670 entry->ct.flow_table = tcf_ct_ft(act);
3671 } else if (is_tcf_mpls(act)) {
3672 switch (tcf_mpls_action(act)) {
3673 case TCA_MPLS_ACT_PUSH:
3674 entry->id = FLOW_ACTION_MPLS_PUSH;
3675 entry->mpls_push.proto = tcf_mpls_proto(act);
3676 entry->mpls_push.label = tcf_mpls_label(act);
3677 entry->mpls_push.tc = tcf_mpls_tc(act);
3678 entry->mpls_push.bos = tcf_mpls_bos(act);
3679 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3680 break;
3681 case TCA_MPLS_ACT_POP:
3682 entry->id = FLOW_ACTION_MPLS_POP;
3683 entry->mpls_pop.proto = tcf_mpls_proto(act);
3684 break;
3685 case TCA_MPLS_ACT_MODIFY:
3686 entry->id = FLOW_ACTION_MPLS_MANGLE;
3687 entry->mpls_mangle.label = tcf_mpls_label(act);
3688 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3689 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3690 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3691 break;
3692 default:
3693 err = -EOPNOTSUPP;
3694 goto err_out_locked;
3695 }
3696 } else if (is_tcf_skbedit_ptype(act)) {
3697 entry->id = FLOW_ACTION_PTYPE;
3698 entry->ptype = tcf_skbedit_ptype(act);
3699 } else if (is_tcf_skbedit_priority(act)) {
3700 entry->id = FLOW_ACTION_PRIORITY;
3701 entry->priority = tcf_skbedit_priority(act);
3702 } else if (is_tcf_gate(act)) {
3703 entry->id = FLOW_ACTION_GATE;
3704 entry->gate.index = tcf_gate_index(act);
3705 entry->gate.prio = tcf_gate_prio(act);
3706 entry->gate.basetime = tcf_gate_basetime(act);
3707 entry->gate.cycletime = tcf_gate_cycletime(act);
3708 entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3709 entry->gate.num_entries = tcf_gate_num_entries(act);
3710 err = tcf_gate_get_entries(entry, act);
3711 if (err)
3712 goto err_out_locked;
3713 } else {
3714 err = -EOPNOTSUPP;
3715 goto err_out_locked;
3716 }
3717 spin_unlock_bh(&act->tcfa_lock);
3718
3719 if (!is_tcf_pedit(act))
3720 j++;
3721 }
3722
3723 err_out:
3724 if (err)
3725 tc_cleanup_flow_action(flow_action);
3726
3727 return err;
3728 err_out_locked:
3729 spin_unlock_bh(&act->tcfa_lock);
3730 goto err_out;
3731 }
3732 EXPORT_SYMBOL(tc_setup_flow_action);
3733
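/* Number of flow_action entries needed to offload @exts; matches the
 * expansion performed by tc_setup_flow_action() (one entry per pedit key).
 */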
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

#ifdef CONFIG_NET_CLS_ACT
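/* Qevents: hook points inside a qdisc (e.g. an early-drop or mark
 * decision) to which the user can bind a filter block. Block index 0 is
 * reserved to mean "no block bound".
 */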
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

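/* Bind a qevent to the block identified by @block_index_attr. When the
 * attribute is absent, the qevent stays unbound and the remaining
 * tcf_qevent_* helpers become no-ops.
 *
 * Minimal usage sketch from a hypothetical qdisc's init path (the
 * TCA_FOO_QE_BLOCK attribute is illustrative, not defined in this file):
 *
 *	err = tcf_qevent_init(&q->qe, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_FOO_QE_BLOCK], extack);
 *	if (err)
 *		return err;
 */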
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	if (!block_index)
		return 0;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

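/* Release the block reference taken by tcf_qevent_init(); a no-op when
 * no block was bound.
 */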
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

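/* Validate a qdisc change request: re-specifying the currently bound
 * block index is allowed, rebinding the qevent to a different block is
 * not.
 */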
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Bounce newly-configured block or change in block. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

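/* Run @skb through the qevent's filter block. Returns the skb when the
 * caller should continue processing it, or NULL when the filters
 * consumed it (dropped, stolen or redirected), with *ret set to the
 * value the qdisc's enqueue path should return.
 */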
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);

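/* Dump the bound block index as attribute @attr_name; dumps nothing for
 * an unbound qevent.
 */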
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

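/* Per-netns state: the IDR mapping block indexes to shareable blocks,
 * set up and torn down together with the network namespace.
 */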
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

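/* Subsystem init: create the ordered workqueue used to defer filter
 * destruction, register the per-netns state above, and hook the
 * tfilter/chain message types into rtnetlink. The tfilter handlers are
 * registered RTNL-unlocked and manage the RTNL lock themselves as
 * needed; the chain handlers still run under RTNL.
 */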
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);