// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

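/*
 * Seed one direction of the flow tuple from the conntrack tuple.
 * Reading the ports through the TCP member of the union also covers
 * UDP, which shares the same layout.
 */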
static void flow_offload_fill_dir(struct flow_offload *flow,
				  enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}

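/*
 * Allocate a flow entry for @ct, holding a conntrack reference for the
 * lifetime of the flow. Dying conntracks are refused.
 */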
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

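/*
 * IPv6 dsts carry a cookie that changes when the route is obsoleted;
 * stash it so dst_check() can later spot a stale cached route. IPv4
 * needs no cookie.
 */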
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
	const struct rt6_info *rt;

	if (flow_tuple->l3proto == NFPROTO_IPV6) {
		rt = (const struct rt6_info *)flow_tuple->dst_cache;
		return rt6_get_cookie(rt);
	}

	return 0;
}

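/*
 * Record the forwarding state for one direction: path MTU, ingress
 * interface and encapsulation stack (copied in reverse order), plus
 * either a precomputed ethernet header (direct xmit) or a cached,
 * refcounted dst for the neigh/xfrm transmit paths.
 */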
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *dst = route->tuple[dir].dst;
	int i, j = 0;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_maybe_forward(dst, true);
		break;
	}

	flow_tuple->iifidx = route->tuple[dir].in.ifindex;
	for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
		flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
		flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
		if (route->tuple[dir].in.ingress_vlans & BIT(i))
			flow_tuple->in_vlan_ingress |= BIT(j);
		j++;
	}
	flow_tuple->encap_num = route->tuple[dir].in.num_encaps;

	switch (route->tuple[dir].xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		memcpy(flow_tuple->out.h_dest, route->tuple[dir].out.h_dest,
		       ETH_ALEN);
		memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
		       ETH_ALEN);
		flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
		flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
		break;
	case FLOW_OFFLOAD_XMIT_XFRM:
	case FLOW_OFFLOAD_XMIT_NEIGH:
		if (!dst_hold_safe(route->tuple[dir].dst))
			return -1;

		flow_tuple->dst_cache = dst;
		flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	flow_tuple->xmit_type = route->tuple[dir].xmit_type;

	return 0;
}

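/* Release the dst reference taken by flow_offload_fill_route(), if any. */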
static void nft_flow_dst_release(struct flow_offload *flow,
				 enum flow_offload_tuple_dir dir)
{
	if (flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    flow->tuplehash[dir].tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
		dst_release(flow->tuplehash[dir].tuple.dst_cache);
}

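/* Fill both directions of a routed flow; on error no dst reference is kept. */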
int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);

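/*
 * Conntrack did not see the packets that took the fast path. Force the
 * state back to ESTABLISHED and zero the tracked windows so window
 * validation restarts instead of rejecting the next packets.
 */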
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

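/*
 * Bring the conntrack timeout back in line with non-offloaded
 * operation: at most the protocol's established/replied timeout minus
 * the offload extension. The remaining time is only ever shortened.
 */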
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	int l4num = nf_ct_protonum(ct);
	s32 timeout;

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
		timeout -= tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->timeouts[UDP_CT_REPLIED];
		timeout -= tn->offload_timeout;
	} else {
		return;
	}

	if (timeout < 0)
		timeout = 0;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

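/*
 * Restore conntrack to a usable software state: the protocol state
 * (TCP only) and, via flow_offload_fixup_ct(), the timeout as well.
 */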
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_REPLY);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

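/*
 * Only the tuple fields up to the __hash marker form the lookup key;
 * the cached route and transmit state behind it are not hashed or
 * compared.
 */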
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, __hash), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, __hash)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

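/* Per-protocol offload timeout; NF_FLOW_TIMEOUT for anything else. */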
unsigned long flow_offload_get_timeout(struct flow_offload *flow)
{
	unsigned long timeout = NF_FLOW_TIMEOUT;
	struct net *net = nf_ct_net(flow->ct);
	int l4num = nf_ct_protonum(flow->ct);

	if (l4num == IPPROTO_TCP) {
		struct nf_tcp_net *tn = nf_tcp_pernet(net);

		timeout = tn->offload_timeout;
	} else if (l4num == IPPROTO_UDP) {
		struct nf_udp_net *tn = nf_udp_pernet(net);

		timeout = tn->offload_timeout;
	}

	return timeout;
}

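/*
 * Insert both direction tuples into the flowtable. If the reply tuple
 * cannot be inserted, the original one is backed out again. On success
 * the conntrack timeout is bumped and, where the table supports it,
 * the flow is pushed to hardware.
 */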
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

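/*
 * Called from the packet path: push the flow timeout forward and, for
 * hardware-offloaded tables, refresh the hardware entry.
 */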
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	u32 timeout;

	timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
	if (READ_ONCE(flow->timeout) != timeout)
		WRITE_ONCE(flow->timeout, timeout);

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

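/*
 * Unlink both tuples, hand the connection back to conntrack (clearing
 * IPS_OFFLOAD and restoring a sane state/timeout) and free the flow.
 */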
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

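/*
 * Mark a flow for removal; the garbage collector performs the actual
 * deletion. Safe to call from the packet path.
 */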
void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

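/*
 * Look up the tuple hash for @tuple. Flows already marked for teardown
 * or whose conntrack is dying are treated as a miss so packets fall
 * back to the classic path.
 */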
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

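/*
 * Walk all flows, visiting each one exactly once via its original
 * direction tuple. -EAGAIN from the walker only signals a concurrent
 * resize and is not fatal.
 */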
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

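/*
 * A flow is unusable once either cached route fails dst_check(); the
 * GC then tears it down so a fresh route can be looked up.
 */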
static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
	struct dst_entry *dst;

	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
	    tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
		dst = tuple->dst_cache;
		if (!dst_check(dst, tuple->dst_cookie))
			return true;
	}

	return false;
}

static bool nf_flow_has_stale_dst(struct flow_offload *flow)
{
	return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
	       flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
}

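/*
 * One GC pass over a flow: expired, dying or stale flows are marked for
 * teardown. Hardware-offloaded flows are removed in two steps (request
 * deletion, then free once the driver reports NF_FLOW_HW_DEAD); pure
 * software flows are freed immediately. Live hardware flows get their
 * stats refreshed instead.
 */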
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) ||
	    nf_ct_is_dying(flow->ct) ||
	    nf_flow_has_stale_dst(flow))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

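/*
 * The port rewrite itself happens in nf_flow_snat_port() and
 * nf_flow_dnat_port() below; these helpers only repair the transport
 * checksum. A UDP checksum of zero means "no checksum" and is left
 * alone.
 */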
static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
}

static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				 __be16 port, __be16 new_port)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			     u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_port_udp(skb, thoff, port, new_port);
		break;
	}
}

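/*
 * NAT the source (or, below, the destination) port. The translated
 * port is read from the opposite direction's tuple, which already
 * carries the NATed values recorded by conntrack.
 */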
void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
		       unsigned int thoff, u8 protocol,
		       enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	}

	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

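/*
 * Tear down flows bound to @dev, or every flow when @dev is NULL
 * (table destruction).
 */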
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

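/*
 * Flush all flows that use @dev, typically on device unregistration:
 * mark them for teardown, let the GC run once, then drain any pending
 * hardware offload work.
 */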
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

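/*
 * Destroy a flowtable: stop the GC worker, tear down and reap all
 * flows, then run the GC step a second time for hardware-offloaded
 * tables so entries parked in the intermediate DYING state are freed
 * once the driver has acknowledged removal.
 */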
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");