1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * xfrm_policy.c
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 * Kazunori MIYAZAWA @USAGI
11 * YOSHIFUJI Hideaki
12 * Split up af-specific portion
13 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
14 *
15 */
16
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <net/dst.h>
33 #include <net/flow.h>
34 #include <net/xfrm.h>
35 #include <net/ip.h>
36 #if IS_ENABLED(CONFIG_IPV6_MIP6)
37 #include <net/mip6.h>
38 #endif
39 #ifdef CONFIG_XFRM_STATISTICS
40 #include <net/snmp.h>
41 #endif
42 #ifdef CONFIG_XFRM_ESPINTCP
43 #include <net/espintcp.h>
44 #endif
45
46 #include "xfrm_hash.h"
47
48 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
49 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
50 #define XFRM_MAX_QUEUE_LEN 100
51
52 struct xfrm_flo {
53 struct dst_entry *dst_orig;
54 u8 flags;
55 };
56
57 /* prefixes smaller than this are stored in lists, not trees. */
58 #define INEXACT_PREFIXLEN_IPV4 16
59 #define INEXACT_PREFIXLEN_IPV6 48
60
61 struct xfrm_pol_inexact_node {
62 struct rb_node node;
63 union {
64 xfrm_address_t addr;
65 struct rcu_head rcu;
66 };
67 u8 prefixlen;
68
69 struct rb_root root;
70
71 /* the policies matching this node; this list can be empty */
72 struct hlist_head hhead;
73 };
74
75 /* xfrm inexact policy search tree:
76 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
77 * |
78 * +---- root_d: sorted by daddr:prefix
79 * | |
80 * | xfrm_pol_inexact_node
81 * | |
82 * | +- root: sorted by saddr/prefix
83 * | | |
84 * | | xfrm_pol_inexact_node
85 * | | |
86 * | | + root: unused
87 * | | |
88 * | | + hhead: saddr:daddr policies
89 * | |
90 * | +- coarse policies and all any:daddr policies
91 * |
92 * +---- root_s: sorted by saddr:prefix
93 * | |
94 * | xfrm_pol_inexact_node
95 * | |
96 * | + root: unused
97 * | |
98 * | + hhead: saddr:any policies
99 * |
100 * +---- coarse policies and all any:any policies
101 *
102 * Lookups return four candidate lists:
103 * 1. any:any list from top-level xfrm_pol_inexact_bin
104 * 2. any:daddr list from daddr tree
105 * 3. saddr:daddr list from 2nd level daddr tree
106 * 4. saddr:any list from saddr tree
107 *
108 * This result set then needs to be searched for the policy with the
109 * lowest priority. If two results have the same priority, the youngest one wins (see the worked example after this comment).
110 */
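/* Illustrative example (addresses are made up, not part of the code): an IPv4
 * policy whose selector is saddr 10.0.0.0/24, daddr 192.0.2.0/24 exceeds the
 * INEXACT_PREFIXLEN_IPV4 threshold on both sides, so it ends up in the hhead
 * of the saddr node hanging off the matching daddr node in root_d.
 * A lookup for the flow 10.0.0.1 -> 192.0.2.1 then collects:
 *   1. bin->hhead                      (any:any)
 *   2. daddr node hhead in root_d      (any:daddr)
 *   3. saddr node hhead under 2.       (saddr:daddr)  <- contains it
 *   4. saddr node hhead in root_s      (saddr:any)
 * and picks the candidate with the lowest priority value.
 */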
111
112 struct xfrm_pol_inexact_key {
113 possible_net_t net;
114 u32 if_id;
115 u16 family;
116 u8 dir, type;
117 };
118
119 struct xfrm_pol_inexact_bin {
120 struct xfrm_pol_inexact_key k;
121 struct rhash_head head;
122 /* list containing '*:*' policies */
123 struct hlist_head hhead;
124
125 seqcount_spinlock_t count;
126 /* tree sorted by daddr/prefix */
127 struct rb_root root_d;
128
129 /* tree sorted by saddr/prefix */
130 struct rb_root root_s;
131
132 /* slow path below */
133 struct list_head inexact_bins;
134 struct rcu_head rcu;
135 };
136
137 enum xfrm_pol_inexact_candidate_type {
138 XFRM_POL_CAND_BOTH,
139 XFRM_POL_CAND_SADDR,
140 XFRM_POL_CAND_DADDR,
141 XFRM_POL_CAND_ANY,
142
143 XFRM_POL_CAND_MAX,
144 };
145
146 struct xfrm_pol_inexact_candidates {
147 struct hlist_head *res[XFRM_POL_CAND_MAX];
148 };
149
150 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
151 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
152
153 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
154 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
155 __read_mostly;
156
157 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
158
159 static struct rhashtable xfrm_policy_inexact_table;
160 static const struct rhashtable_params xfrm_pol_inexact_params;
161
162 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
163 static int stale_bundle(struct dst_entry *dst);
164 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
165 static void xfrm_policy_queue_process(struct timer_list *t);
166
167 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
168 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
169 int dir);
170
171 static struct xfrm_pol_inexact_bin *
172 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
173 u32 if_id);
174
175 static struct xfrm_pol_inexact_bin *
176 xfrm_policy_inexact_lookup_rcu(struct net *net,
177 u8 type, u16 family, u8 dir, u32 if_id);
178 static struct xfrm_policy *
179 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
180 bool excl);
181 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
182 struct xfrm_policy *policy);
183
184 static bool
185 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
186 struct xfrm_pol_inexact_bin *b,
187 const xfrm_address_t *saddr,
188 const xfrm_address_t *daddr);
189
190 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
191 {
192 return refcount_inc_not_zero(&policy->refcnt);
193 }
194
195 static inline bool
196 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
197 {
198 const struct flowi4 *fl4 = &fl->u.ip4;
199
200 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
201 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
202 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
203 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
204 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
205 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
206 }
207
208 static inline bool
209 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
210 {
211 const struct flowi6 *fl6 = &fl->u.ip6;
212
213 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
214 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
215 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
216 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
217 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
218 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
219 }
220
221 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
222 unsigned short family)
223 {
224 switch (family) {
225 case AF_INET:
226 return __xfrm4_selector_match(sel, fl);
227 case AF_INET6:
228 return __xfrm6_selector_match(sel, fl);
229 }
230 return false;
231 }
232
233 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
234 {
235 const struct xfrm_policy_afinfo *afinfo;
236
237 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
238 return NULL;
239 rcu_read_lock();
240 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
241 if (unlikely(!afinfo))
242 rcu_read_unlock();
243 return afinfo;
244 }
245
246 /* Called with rcu_read_lock(). */
247 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
248 {
249 return rcu_dereference(xfrm_if_cb);
250 }
251
252 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
253 const xfrm_address_t *saddr,
254 const xfrm_address_t *daddr,
255 int family, u32 mark)
256 {
257 const struct xfrm_policy_afinfo *afinfo;
258 struct dst_entry *dst;
259
260 afinfo = xfrm_policy_get_afinfo(family);
261 if (unlikely(afinfo == NULL))
262 return ERR_PTR(-EAFNOSUPPORT);
263
264 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
265
266 rcu_read_unlock();
267
268 return dst;
269 }
270 EXPORT_SYMBOL(__xfrm_dst_lookup);
271
272 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
273 int tos, int oif,
274 xfrm_address_t *prev_saddr,
275 xfrm_address_t *prev_daddr,
276 int family, u32 mark)
277 {
278 struct net *net = xs_net(x);
279 xfrm_address_t *saddr = &x->props.saddr;
280 xfrm_address_t *daddr = &x->id.daddr;
281 struct dst_entry *dst;
282
283 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
284 saddr = x->coaddr;
285 daddr = prev_daddr;
286 }
287 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
288 saddr = prev_saddr;
289 daddr = x->coaddr;
290 }
291
292 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
293
294 if (!IS_ERR(dst)) {
295 if (prev_saddr != saddr)
296 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
297 if (prev_daddr != daddr)
298 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
299 }
300
301 return dst;
302 }
303
304 static inline unsigned long make_jiffies(long secs)
305 {
306 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
307 return MAX_SCHEDULE_TIMEOUT-1;
308 else
309 return secs*HZ;
310 }
311
312 static void xfrm_policy_timer(struct timer_list *t)
313 {
314 struct xfrm_policy *xp = from_timer(xp, t, timer);
315 time64_t now = ktime_get_real_seconds();
316 time64_t next = TIME64_MAX;
317 int warn = 0;
318 int dir;
319
320 read_lock(&xp->lock);
321
322 if (unlikely(xp->walk.dead))
323 goto out;
324
325 dir = xfrm_policy_id2dir(xp->index);
326
327 if (xp->lft.hard_add_expires_seconds) {
328 time64_t tmo = xp->lft.hard_add_expires_seconds +
329 xp->curlft.add_time - now;
330 if (tmo <= 0)
331 goto expired;
332 if (tmo < next)
333 next = tmo;
334 }
335 if (xp->lft.hard_use_expires_seconds) {
336 time64_t tmo = xp->lft.hard_use_expires_seconds +
337 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
338 if (tmo <= 0)
339 goto expired;
340 if (tmo < next)
341 next = tmo;
342 }
343 if (xp->lft.soft_add_expires_seconds) {
344 time64_t tmo = xp->lft.soft_add_expires_seconds +
345 xp->curlft.add_time - now;
346 if (tmo <= 0) {
347 warn = 1;
348 tmo = XFRM_KM_TIMEOUT;
349 }
350 if (tmo < next)
351 next = tmo;
352 }
353 if (xp->lft.soft_use_expires_seconds) {
354 time64_t tmo = xp->lft.soft_use_expires_seconds +
355 (xp->curlft.use_time ? : xp->curlft.add_time) - now;
356 if (tmo <= 0) {
357 warn = 1;
358 tmo = XFRM_KM_TIMEOUT;
359 }
360 if (tmo < next)
361 next = tmo;
362 }
363
364 if (warn)
365 km_policy_expired(xp, dir, 0, 0);
366 if (next != TIME64_MAX &&
367 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
368 xfrm_pol_hold(xp);
369
370 out:
371 read_unlock(&xp->lock);
372 xfrm_pol_put(xp);
373 return;
374
375 expired:
376 read_unlock(&xp->lock);
377 if (!xfrm_policy_delete(xp, dir))
378 km_policy_expired(xp, dir, 1, 0);
379 xfrm_pol_put(xp);
380 }
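/* Note: the timer above re-arms itself for the earliest remaining soft/hard
 * expiry and, when mod_timer() newly pends it, takes an extra reference that
 * is dropped the next time the handler runs. A hard expiry removes the policy
 * via xfrm_policy_delete() and notifies key managers through
 * km_policy_expired().
 */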
381
382 /* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
383 * SPD calls.
384 */
385
386 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
387 {
388 struct xfrm_policy *policy;
389
390 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
391
392 if (policy) {
393 write_pnet(&policy->xp_net, net);
394 INIT_LIST_HEAD(&policy->walk.all);
395 INIT_HLIST_NODE(&policy->bydst_inexact_list);
396 INIT_HLIST_NODE(&policy->bydst);
397 INIT_HLIST_NODE(&policy->byidx);
398 rwlock_init(&policy->lock);
399 refcount_set(&policy->refcnt, 1);
400 skb_queue_head_init(&policy->polq.hold_queue);
401 timer_setup(&policy->timer, xfrm_policy_timer, 0);
402 timer_setup(&policy->polq.hold_timer,
403 xfrm_policy_queue_process, 0);
404 }
405 return policy;
406 }
407 EXPORT_SYMBOL(xfrm_policy_alloc);
408
409 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
410 {
411 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
412
413 security_xfrm_policy_free(policy->security);
414 kfree(policy);
415 }
416
417 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
418
419 void xfrm_policy_destroy(struct xfrm_policy *policy)
420 {
421 BUG_ON(!policy->walk.dead);
422
423 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
424 BUG();
425
426 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
427 }
428 EXPORT_SYMBOL(xfrm_policy_destroy);
429
430 /* Rule must be locked. Release descendant resources, announce
431 * the entry dead. The rule must already be unlinked from all lists.
432 */
433
434 static void xfrm_policy_kill(struct xfrm_policy *policy)
435 {
436 write_lock_bh(&policy->lock);
437 policy->walk.dead = 1;
438 write_unlock_bh(&policy->lock);
439
440 atomic_inc(&policy->genid);
441
442 if (del_timer(&policy->polq.hold_timer))
443 xfrm_pol_put(policy);
444 skb_queue_purge(&policy->polq.hold_queue);
445
446 if (del_timer(&policy->timer))
447 xfrm_pol_put(policy);
448
449 xfrm_pol_put(policy);
450 }
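/* Reference counting note: every pending timer holds a reference that was
 * taken when the timer was (re)armed, hence the xfrm_pol_put() paired with
 * each successful del_timer() above. The final xfrm_pol_put() drops the
 * reference the policy held while it was linked into the SPD lists.
 */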
451
452 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
453
454 static inline unsigned int idx_hash(struct net *net, u32 index)
455 {
456 return __idx_hash(index, net->xfrm.policy_idx_hmask);
457 }
458
459 /* calculate policy hash thresholds */
460 static void __get_hash_thresh(struct net *net,
461 unsigned short family, int dir,
462 u8 *dbits, u8 *sbits)
463 {
464 switch (family) {
465 case AF_INET:
466 *dbits = net->xfrm.policy_bydst[dir].dbits4;
467 *sbits = net->xfrm.policy_bydst[dir].sbits4;
468 break;
469
470 case AF_INET6:
471 *dbits = net->xfrm.policy_bydst[dir].dbits6;
472 *sbits = net->xfrm.policy_bydst[dir].sbits6;
473 break;
474
475 default:
476 *dbits = 0;
477 *sbits = 0;
478 }
479 }
480
481 static struct hlist_head *policy_hash_bysel(struct net *net,
482 const struct xfrm_selector *sel,
483 unsigned short family, int dir)
484 {
485 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
486 unsigned int hash;
487 u8 dbits;
488 u8 sbits;
489
490 __get_hash_thresh(net, family, dir, &dbits, &sbits);
491 hash = __sel_hash(sel, family, hmask, dbits, sbits);
492
493 if (hash == hmask + 1)
494 return NULL;
495
496 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
497 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
498 }
499
500 static struct hlist_head *policy_hash_direct(struct net *net,
501 const xfrm_address_t *daddr,
502 const xfrm_address_t *saddr,
503 unsigned short family, int dir)
504 {
505 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
506 unsigned int hash;
507 u8 dbits;
508 u8 sbits;
509
510 __get_hash_thresh(net, family, dir, &dbits, &sbits);
511 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
512
513 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
514 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
515 }
516
517 static void xfrm_dst_hash_transfer(struct net *net,
518 struct hlist_head *list,
519 struct hlist_head *ndsttable,
520 unsigned int nhashmask,
521 int dir)
522 {
523 struct hlist_node *tmp, *entry0 = NULL;
524 struct xfrm_policy *pol;
525 unsigned int h0 = 0;
526 u8 dbits;
527 u8 sbits;
528
529 redo:
530 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
531 unsigned int h;
532
533 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
534 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
535 pol->family, nhashmask, dbits, sbits);
536 if (!entry0) {
537 hlist_del_rcu(&pol->bydst);
538 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
539 h0 = h;
540 } else {
541 if (h != h0)
542 continue;
543 hlist_del_rcu(&pol->bydst);
544 hlist_add_behind_rcu(&pol->bydst, entry0);
545 }
546 entry0 = &pol->bydst;
547 }
548 if (!hlist_empty(list)) {
549 entry0 = NULL;
550 goto redo;
551 }
552 }
553
554 static void xfrm_idx_hash_transfer(struct hlist_head *list,
555 struct hlist_head *nidxtable,
556 unsigned int nhashmask)
557 {
558 struct hlist_node *tmp;
559 struct xfrm_policy *pol;
560
561 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
562 unsigned int h;
563
564 h = __idx_hash(pol->index, nhashmask);
565 hlist_add_head(&pol->byidx, nidxtable+h);
566 }
567 }
568
569 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
570 {
571 return ((old_hmask + 1) << 1) - 1;
572 }
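/* For example, an old hmask of 15 (16 buckets) becomes
 * ((15 + 1) << 1) - 1 = 31 (32 buckets): each resize doubles the table
 * while keeping the mask at a power of two minus one.
 */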
573
574 static void xfrm_bydst_resize(struct net *net, int dir)
575 {
576 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
577 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
578 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
579 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
580 struct hlist_head *odst;
581 int i;
582
583 if (!ndst)
584 return;
585
586 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
587 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
588
589 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
590 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
591
592 for (i = hmask; i >= 0; i--)
593 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
594
595 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
596 net->xfrm.policy_bydst[dir].hmask = nhashmask;
597
598 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
599 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
600
601 synchronize_rcu();
602
603 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
604 }
605
606 static void xfrm_byidx_resize(struct net *net, int total)
607 {
608 unsigned int hmask = net->xfrm.policy_idx_hmask;
609 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
610 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
611 struct hlist_head *oidx = net->xfrm.policy_byidx;
612 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
613 int i;
614
615 if (!nidx)
616 return;
617
618 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
619
620 for (i = hmask; i >= 0; i--)
621 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
622
623 net->xfrm.policy_byidx = nidx;
624 net->xfrm.policy_idx_hmask = nhashmask;
625
626 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
627
628 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
629 }
630
631 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
632 {
633 unsigned int cnt = net->xfrm.policy_count[dir];
634 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
635
636 if (total)
637 *total += cnt;
638
639 if ((hmask + 1) < xfrm_policy_hashmax &&
640 cnt > hmask)
641 return 1;
642
643 return 0;
644 }
645
646 static inline int xfrm_byidx_should_resize(struct net *net, int total)
647 {
648 unsigned int hmask = net->xfrm.policy_idx_hmask;
649
650 if ((hmask + 1) < xfrm_policy_hashmax &&
651 total > hmask)
652 return 1;
653
654 return 0;
655 }
656
657 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
658 {
659 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
660 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
661 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
662 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
663 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
664 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
665 si->spdhcnt = net->xfrm.policy_idx_hmask;
666 si->spdhmcnt = xfrm_policy_hashmax;
667 }
668 EXPORT_SYMBOL(xfrm_spd_getinfo);
669
670 static DEFINE_MUTEX(hash_resize_mutex);
671 static void xfrm_hash_resize(struct work_struct *work)
672 {
673 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
674 int dir, total;
675
676 mutex_lock(&hash_resize_mutex);
677
678 total = 0;
679 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
680 if (xfrm_bydst_should_resize(net, dir, &total))
681 xfrm_bydst_resize(net, dir);
682 }
683 if (xfrm_byidx_should_resize(net, total))
684 xfrm_byidx_resize(net, total);
685
686 mutex_unlock(&hash_resize_mutex);
687 }
688
689 /* Make sure *pol can be inserted into fastbin.
690 * Useful to check that later insert requests will be successful
691 * (provided xfrm_policy_lock is held throughout).
692 */
693 static struct xfrm_pol_inexact_bin *
694 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
695 {
696 struct xfrm_pol_inexact_bin *bin, *prev;
697 struct xfrm_pol_inexact_key k = {
698 .family = pol->family,
699 .type = pol->type,
700 .dir = dir,
701 .if_id = pol->if_id,
702 };
703 struct net *net = xp_net(pol);
704
705 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
706
707 write_pnet(&k.net, net);
708 bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
709 xfrm_pol_inexact_params);
710 if (bin)
711 return bin;
712
713 bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
714 if (!bin)
715 return NULL;
716
717 bin->k = k;
718 INIT_HLIST_HEAD(&bin->hhead);
719 bin->root_d = RB_ROOT;
720 bin->root_s = RB_ROOT;
721 seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
722
723 prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
724 &bin->k, &bin->head,
725 xfrm_pol_inexact_params);
726 if (!prev) {
727 list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
728 return bin;
729 }
730
731 kfree(bin);
732
733 return IS_ERR(prev) ? NULL : prev;
734 }
735
736 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
737 int family, u8 prefixlen)
738 {
739 if (xfrm_addr_any(addr, family))
740 return true;
741
742 if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
743 return true;
744
745 if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
746 return true;
747
748 return false;
749 }
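/* Illustration (IPv4, INEXACT_PREFIXLEN_IPV4 == 16): a selector address such
 * as 10.0.0.0/8 is too coarse and stays on the plain "any" list, whereas
 * 10.0.0.0/24 is specific enough to be placed in the per-address rbtree.
 * Fully wildcarded addresses are always treated as "any", regardless of
 * family.
 */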
750
751 static bool
752 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
753 {
754 const xfrm_address_t *addr;
755 bool saddr_any, daddr_any;
756 u8 prefixlen;
757
758 addr = &policy->selector.saddr;
759 prefixlen = policy->selector.prefixlen_s;
760
761 saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
762 policy->family,
763 prefixlen);
764 addr = &policy->selector.daddr;
765 prefixlen = policy->selector.prefixlen_d;
766 daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
767 policy->family,
768 prefixlen);
769 return saddr_any && daddr_any;
770 }
771
772 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
773 const xfrm_address_t *addr, u8 prefixlen)
774 {
775 node->addr = *addr;
776 node->prefixlen = prefixlen;
777 }
778
779 static struct xfrm_pol_inexact_node *
780 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
781 {
782 struct xfrm_pol_inexact_node *node;
783
784 node = kzalloc(sizeof(*node), GFP_ATOMIC);
785 if (node)
786 xfrm_pol_inexact_node_init(node, addr, prefixlen);
787
788 return node;
789 }
790
791 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
792 const xfrm_address_t *b,
793 u8 prefixlen, u16 family)
794 {
795 u32 ma, mb, mask;
796 unsigned int pdw, pbi;
797 int delta = 0;
798
799 switch (family) {
800 case AF_INET:
801 if (prefixlen == 0)
802 return 0;
803 mask = ~0U << (32 - prefixlen);
804 ma = ntohl(a->a4) & mask;
805 mb = ntohl(b->a4) & mask;
806 if (ma < mb)
807 delta = -1;
808 else if (ma > mb)
809 delta = 1;
810 break;
811 case AF_INET6:
812 pdw = prefixlen >> 5;
813 pbi = prefixlen & 0x1f;
814
815 if (pdw) {
816 delta = memcmp(a->a6, b->a6, pdw << 2);
817 if (delta)
818 return delta;
819 }
820 if (pbi) {
821 mask = ~0U << (32 - pbi);
822 ma = ntohl(a->a6[pdw]) & mask;
823 mb = ntohl(b->a6[pdw]) & mask;
824 if (ma < mb)
825 delta = -1;
826 else if (ma > mb)
827 delta = 1;
828 }
829 break;
830 default:
831 break;
832 }
833
834 return delta;
835 }
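/* Worked example for the IPv4 branch: with prefixlen 16 both 10.1.2.3 and
 * 10.1.200.200 mask to 10.1.0.0, so the delta is 0 (equal within the prefix).
 * With prefixlen 24 the masked values 10.1.2.0 and 10.1.200.0 differ, and the
 * sign of the comparison (-1 or 1) orders the nodes in the rbtree.
 */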
836
837 static void xfrm_policy_inexact_list_reinsert(struct net *net,
838 struct xfrm_pol_inexact_node *n,
839 u16 family)
840 {
841 unsigned int matched_s, matched_d;
842 struct xfrm_policy *policy, *p;
843
844 matched_s = 0;
845 matched_d = 0;
846
847 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
848 struct hlist_node *newpos = NULL;
849 bool matches_s, matches_d;
850
851 if (!policy->bydst_reinsert)
852 continue;
853
854 WARN_ON_ONCE(policy->family != family);
855
856 policy->bydst_reinsert = false;
857 hlist_for_each_entry(p, &n->hhead, bydst) {
858 if (policy->priority > p->priority)
859 newpos = &p->bydst;
860 else if (policy->priority == p->priority &&
861 policy->pos > p->pos)
862 newpos = &p->bydst;
863 else
864 break;
865 }
866
867 if (newpos)
868 hlist_add_behind_rcu(&policy->bydst, newpos);
869 else
870 hlist_add_head_rcu(&policy->bydst, &n->hhead);
871
872 /* paranoia checks follow.
873 * Check that the reinserted policy matches at least
874 * saddr or daddr for current node prefix.
875 *
876 * Matching both is fine, matching saddr in one policy
877 * (but not daddr) and then matching only daddr in another
878 * is a bug.
879 */
880 matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
881 &n->addr,
882 n->prefixlen,
883 family) == 0;
884 matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
885 &n->addr,
886 n->prefixlen,
887 family) == 0;
888 if (matches_s && matches_d)
889 continue;
890
891 WARN_ON_ONCE(!matches_s && !matches_d);
892 if (matches_s)
893 matched_s++;
894 if (matches_d)
895 matched_d++;
896 WARN_ON_ONCE(matched_s && matched_d);
897 }
898 }
899
900 static void xfrm_policy_inexact_node_reinsert(struct net *net,
901 struct xfrm_pol_inexact_node *n,
902 struct rb_root *new,
903 u16 family)
904 {
905 struct xfrm_pol_inexact_node *node;
906 struct rb_node **p, *parent;
907
908 /* we should not have another subtree here */
909 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
910 restart:
911 parent = NULL;
912 p = &new->rb_node;
913 while (*p) {
914 u8 prefixlen;
915 int delta;
916
917 parent = *p;
918 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
919
920 prefixlen = min(node->prefixlen, n->prefixlen);
921
922 delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
923 prefixlen, family);
924 if (delta < 0) {
925 p = &parent->rb_left;
926 } else if (delta > 0) {
927 p = &parent->rb_right;
928 } else {
929 bool same_prefixlen = node->prefixlen == n->prefixlen;
930 struct xfrm_policy *tmp;
931
932 hlist_for_each_entry(tmp, &n->hhead, bydst) {
933 tmp->bydst_reinsert = true;
934 hlist_del_rcu(&tmp->bydst);
935 }
936
937 node->prefixlen = prefixlen;
938
939 xfrm_policy_inexact_list_reinsert(net, node, family);
940
941 if (same_prefixlen) {
942 kfree_rcu(n, rcu);
943 return;
944 }
945
946 rb_erase(*p, new);
947 kfree_rcu(n, rcu);
948 n = node;
949 goto restart;
950 }
951 }
952
953 rb_link_node_rcu(&n->node, parent, p);
954 rb_insert_color(&n->node, new);
955 }
956
957 /* merge nodes v and n */
958 static void xfrm_policy_inexact_node_merge(struct net *net,
959 struct xfrm_pol_inexact_node *v,
960 struct xfrm_pol_inexact_node *n,
961 u16 family)
962 {
963 struct xfrm_pol_inexact_node *node;
964 struct xfrm_policy *tmp;
965 struct rb_node *rnode;
966
967 /* To-be-merged node v has a subtree.
968 *
969 * Dismantle it and insert its nodes to n->root.
970 */
971 while ((rnode = rb_first(&v->root)) != NULL) {
972 node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
973 rb_erase(&node->node, &v->root);
974 xfrm_policy_inexact_node_reinsert(net, node, &n->root,
975 family);
976 }
977
978 hlist_for_each_entry(tmp, &v->hhead, bydst) {
979 tmp->bydst_reinsert = true;
980 hlist_del_rcu(&tmp->bydst);
981 }
982
983 xfrm_policy_inexact_list_reinsert(net, n, family);
984 }
985
986 static struct xfrm_pol_inexact_node *
987 xfrm_policy_inexact_insert_node(struct net *net,
988 struct rb_root *root,
989 xfrm_address_t *addr,
990 u16 family, u8 prefixlen, u8 dir)
991 {
992 struct xfrm_pol_inexact_node *cached = NULL;
993 struct rb_node **p, *parent = NULL;
994 struct xfrm_pol_inexact_node *node;
995
996 p = &root->rb_node;
997 while (*p) {
998 int delta;
999
1000 parent = *p;
1001 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1002
1003 delta = xfrm_policy_addr_delta(addr, &node->addr,
1004 node->prefixlen,
1005 family);
1006 if (delta == 0 && prefixlen >= node->prefixlen) {
1007 WARN_ON_ONCE(cached); /* ipsec policies got lost */
1008 return node;
1009 }
1010
1011 if (delta < 0)
1012 p = &parent->rb_left;
1013 else
1014 p = &parent->rb_right;
1015
1016 if (prefixlen < node->prefixlen) {
1017 delta = xfrm_policy_addr_delta(addr, &node->addr,
1018 prefixlen,
1019 family);
1020 if (delta)
1021 continue;
1022
1023 /* This node is a subnet of the new prefix. It needs
1024 * to be removed and re-inserted with the smaller
1025 * prefix and all nodes that are now also covered
1026 * by the reduced prefixlen.
1027 */
1028 rb_erase(&node->node, root);
1029
1030 if (!cached) {
1031 xfrm_pol_inexact_node_init(node, addr,
1032 prefixlen);
1033 cached = node;
1034 } else {
1035 /* This node also falls within the new
1036 * prefixlen. Merge the to-be-reinserted
1037 * node and this one.
1038 */
1039 xfrm_policy_inexact_node_merge(net, node,
1040 cached, family);
1041 kfree_rcu(node, rcu);
1042 }
1043
1044 /* restart */
1045 p = &root->rb_node;
1046 parent = NULL;
1047 }
1048 }
1049
1050 node = cached;
1051 if (!node) {
1052 node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1053 if (!node)
1054 return NULL;
1055 }
1056
1057 rb_link_node_rcu(&node->node, parent, p);
1058 rb_insert_color(&node->node, root);
1059
1060 return node;
1061 }
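/* Summary of the restart case above: when the new prefix is shorter than an
 * existing node's and covers it, that node is unlinked, reused (or merged
 * into the node already chosen) for the shorter prefix, and the scan restarts
 * from the root so that every other node now covered by the reduced prefixlen
 * gets folded in as well.
 */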
1062
1063 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1064 {
1065 struct xfrm_pol_inexact_node *node;
1066 struct rb_node *rn = rb_first(r);
1067
1068 while (rn) {
1069 node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1070
1071 xfrm_policy_inexact_gc_tree(&node->root, rm);
1072 rn = rb_next(rn);
1073
1074 if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1075 WARN_ON_ONCE(rm);
1076 continue;
1077 }
1078
1079 rb_erase(&node->node, r);
1080 kfree_rcu(node, rcu);
1081 }
1082 }
1083
1084 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1085 {
1086 write_seqcount_begin(&b->count);
1087 xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1088 xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1089 write_seqcount_end(&b->count);
1090
1091 if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1092 !hlist_empty(&b->hhead)) {
1093 WARN_ON_ONCE(net_exit);
1094 return;
1095 }
1096
1097 if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1098 xfrm_pol_inexact_params) == 0) {
1099 list_del(&b->inexact_bins);
1100 kfree_rcu(b, rcu);
1101 }
1102 }
1103
1104 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1105 {
1106 struct net *net = read_pnet(&b->k.net);
1107
1108 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1109 __xfrm_policy_inexact_prune_bin(b, false);
1110 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1111 }
1112
1113 static void __xfrm_policy_inexact_flush(struct net *net)
1114 {
1115 struct xfrm_pol_inexact_bin *bin, *t;
1116
1117 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1118
1119 list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1120 __xfrm_policy_inexact_prune_bin(bin, false);
1121 }
1122
1123 static struct hlist_head *
1124 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1125 struct xfrm_policy *policy, u8 dir)
1126 {
1127 struct xfrm_pol_inexact_node *n;
1128 struct net *net;
1129
1130 net = xp_net(policy);
1131 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1132
1133 if (xfrm_policy_inexact_insert_use_any_list(policy))
1134 return &bin->hhead;
1135
1136 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1137 policy->family,
1138 policy->selector.prefixlen_d)) {
1139 write_seqcount_begin(&bin->count);
1140 n = xfrm_policy_inexact_insert_node(net,
1141 &bin->root_s,
1142 &policy->selector.saddr,
1143 policy->family,
1144 policy->selector.prefixlen_s,
1145 dir);
1146 write_seqcount_end(&bin->count);
1147 if (!n)
1148 return NULL;
1149
1150 return &n->hhead;
1151 }
1152
1153 /* daddr is fixed */
1154 write_seqcount_begin(&bin->count);
1155 n = xfrm_policy_inexact_insert_node(net,
1156 &bin->root_d,
1157 &policy->selector.daddr,
1158 policy->family,
1159 policy->selector.prefixlen_d, dir);
1160 write_seqcount_end(&bin->count);
1161 if (!n)
1162 return NULL;
1163
1164 /* saddr is wildcard */
1165 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1166 policy->family,
1167 policy->selector.prefixlen_s))
1168 return &n->hhead;
1169
1170 write_seqcount_begin(&bin->count);
1171 n = xfrm_policy_inexact_insert_node(net,
1172 &n->root,
1173 &policy->selector.saddr,
1174 policy->family,
1175 policy->selector.prefixlen_s, dir);
1176 write_seqcount_end(&bin->count);
1177 if (!n)
1178 return NULL;
1179
1180 return &n->hhead;
1181 }
1182
1183 static struct xfrm_policy *
1184 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1185 {
1186 struct xfrm_pol_inexact_bin *bin;
1187 struct xfrm_policy *delpol;
1188 struct hlist_head *chain;
1189 struct net *net;
1190
1191 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1192 if (!bin)
1193 return ERR_PTR(-ENOMEM);
1194
1195 net = xp_net(policy);
1196 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1197
1198 chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1199 if (!chain) {
1200 __xfrm_policy_inexact_prune_bin(bin, false);
1201 return ERR_PTR(-ENOMEM);
1202 }
1203
1204 delpol = xfrm_policy_insert_list(chain, policy, excl);
1205 if (delpol && excl) {
1206 __xfrm_policy_inexact_prune_bin(bin, false);
1207 return ERR_PTR(-EEXIST);
1208 }
1209
1210 chain = &net->xfrm.policy_inexact[dir];
1211 xfrm_policy_insert_inexact_list(chain, policy);
1212
1213 if (delpol)
1214 __xfrm_policy_inexact_prune_bin(bin, false);
1215
1216 return delpol;
1217 }
1218
1219 static void xfrm_hash_rebuild(struct work_struct *work)
1220 {
1221 struct net *net = container_of(work, struct net,
1222 xfrm.policy_hthresh.work);
1223 unsigned int hmask;
1224 struct xfrm_policy *pol;
1225 struct xfrm_policy *policy;
1226 struct hlist_head *chain;
1227 struct hlist_head *odst;
1228 struct hlist_node *newpos;
1229 int i;
1230 int dir;
1231 unsigned seq;
1232 u8 lbits4, rbits4, lbits6, rbits6;
1233
1234 mutex_lock(&hash_resize_mutex);
1235
1236 /* read selector prefixlen thresholds */
1237 do {
1238 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1239
1240 lbits4 = net->xfrm.policy_hthresh.lbits4;
1241 rbits4 = net->xfrm.policy_hthresh.rbits4;
1242 lbits6 = net->xfrm.policy_hthresh.lbits6;
1243 rbits6 = net->xfrm.policy_hthresh.rbits6;
1244 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1245
1246 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1247 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1248
1249 /* make sure that we can insert the inexact policies again before
1250 * we start with destructive action.
1251 */
1252 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1253 struct xfrm_pol_inexact_bin *bin;
1254 u8 dbits, sbits;
1255
1256 dir = xfrm_policy_id2dir(policy->index);
1257 if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1258 continue;
1259
1260 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1261 if (policy->family == AF_INET) {
1262 dbits = rbits4;
1263 sbits = lbits4;
1264 } else {
1265 dbits = rbits6;
1266 sbits = lbits6;
1267 }
1268 } else {
1269 if (policy->family == AF_INET) {
1270 dbits = lbits4;
1271 sbits = rbits4;
1272 } else {
1273 dbits = lbits6;
1274 sbits = rbits6;
1275 }
1276 }
1277
1278 if (policy->selector.prefixlen_d < dbits ||
1279 policy->selector.prefixlen_s < sbits)
1280 continue;
1281
1282 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1283 if (!bin)
1284 goto out_unlock;
1285
1286 if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1287 goto out_unlock;
1288 }
1289
1290 /* reset the bydst and inexact table in all directions */
1291 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1292 struct hlist_node *n;
1293
1294 hlist_for_each_entry_safe(policy, n,
1295 &net->xfrm.policy_inexact[dir],
1296 bydst_inexact_list) {
1297 hlist_del_rcu(&policy->bydst);
1298 hlist_del_init(&policy->bydst_inexact_list);
1299 }
1300
1301 hmask = net->xfrm.policy_bydst[dir].hmask;
1302 odst = net->xfrm.policy_bydst[dir].table;
1303 for (i = hmask; i >= 0; i--) {
1304 hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1305 hlist_del_rcu(&policy->bydst);
1306 }
1307 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1308 /* dir out => dst = remote, src = local */
1309 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1310 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1311 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1312 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1313 } else {
1314 /* dir in/fwd => dst = local, src = remote */
1315 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1316 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1317 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1318 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1319 }
1320 }
1321
1322 /* re-insert all policies by order of creation */
1323 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1324 if (policy->walk.dead)
1325 continue;
1326 dir = xfrm_policy_id2dir(policy->index);
1327 if (dir >= XFRM_POLICY_MAX) {
1328 /* skip socket policies */
1329 continue;
1330 }
1331 newpos = NULL;
1332 chain = policy_hash_bysel(net, &policy->selector,
1333 policy->family, dir);
1334
1335 if (!chain) {
1336 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1337
1338 WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1339 continue;
1340 }
1341
1342 hlist_for_each_entry(pol, chain, bydst) {
1343 if (policy->priority >= pol->priority)
1344 newpos = &pol->bydst;
1345 else
1346 break;
1347 }
1348 if (newpos)
1349 hlist_add_behind_rcu(&policy->bydst, newpos);
1350 else
1351 hlist_add_head_rcu(&policy->bydst, chain);
1352 }
1353
1354 out_unlock:
1355 __xfrm_policy_inexact_flush(net);
1356 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1357 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1358
1359 mutex_unlock(&hash_resize_mutex);
1360 }
1361
1362 void xfrm_policy_hash_rebuild(struct net *net)
1363 {
1364 schedule_work(&net->xfrm.policy_hthresh.work);
1365 }
1366 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1367
1368 /* Generate new index... KAME seems to generate them ordered by cost
1369 * of an absolute unpredictability of rule ordering. This will not pass. */
1370 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1371 {
1372 static u32 idx_generator;
1373
1374 for (;;) {
1375 struct hlist_head *list;
1376 struct xfrm_policy *p;
1377 u32 idx;
1378 int found;
1379
1380 if (!index) {
1381 idx = (idx_generator | dir);
1382 idx_generator += 8;
1383 } else {
1384 idx = index;
1385 index = 0;
1386 }
1387
1388 if (idx == 0)
1389 idx = 8;
1390 list = net->xfrm.policy_byidx + idx_hash(net, idx);
1391 found = 0;
1392 hlist_for_each_entry(p, list, byidx) {
1393 if (p->index == idx) {
1394 found = 1;
1395 break;
1396 }
1397 }
1398 if (!found)
1399 return idx;
1400 }
1401 }
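/* Index layout: idx_generator advances in steps of 8, so the low three bits
 * of a generated index are free to carry the direction
 * (cf. xfrm_policy_id2dir()). The loop simply retries until it finds an
 * index not already present in policy_byidx.
 */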
1402
1403 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1404 {
1405 u32 *p1 = (u32 *) s1;
1406 u32 *p2 = (u32 *) s2;
1407 int len = sizeof(struct xfrm_selector) / sizeof(u32);
1408 int i;
1409
1410 for (i = 0; i < len; i++) {
1411 if (p1[i] != p2[i])
1412 return 1;
1413 }
1414
1415 return 0;
1416 }
1417
1418 static void xfrm_policy_requeue(struct xfrm_policy *old,
1419 struct xfrm_policy *new)
1420 {
1421 struct xfrm_policy_queue *pq = &old->polq;
1422 struct sk_buff_head list;
1423
1424 if (skb_queue_empty(&pq->hold_queue))
1425 return;
1426
1427 __skb_queue_head_init(&list);
1428
1429 spin_lock_bh(&pq->hold_queue.lock);
1430 skb_queue_splice_init(&pq->hold_queue, &list);
1431 if (del_timer(&pq->hold_timer))
1432 xfrm_pol_put(old);
1433 spin_unlock_bh(&pq->hold_queue.lock);
1434
1435 pq = &new->polq;
1436
1437 spin_lock_bh(&pq->hold_queue.lock);
1438 skb_queue_splice(&list, &pq->hold_queue);
1439 pq->timeout = XFRM_QUEUE_TMO_MIN;
1440 if (!mod_timer(&pq->hold_timer, jiffies))
1441 xfrm_pol_hold(new);
1442 spin_unlock_bh(&pq->hold_queue.lock);
1443 }
1444
1445 static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1446 struct xfrm_policy *pol)
1447 {
1448 return mark->v == pol->mark.v && mark->m == pol->mark.m;
1449 }
1450
1451 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1452 {
1453 const struct xfrm_pol_inexact_key *k = data;
1454 u32 a = k->type << 24 | k->dir << 16 | k->family;
1455
1456 return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1457 seed);
1458 }
1459
1460 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1461 {
1462 const struct xfrm_pol_inexact_bin *b = data;
1463
1464 return xfrm_pol_bin_key(&b->k, 0, seed);
1465 }
1466
1467 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1468 const void *ptr)
1469 {
1470 const struct xfrm_pol_inexact_key *key = arg->key;
1471 const struct xfrm_pol_inexact_bin *b = ptr;
1472 int ret;
1473
1474 if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1475 return -1;
1476
1477 ret = b->k.dir ^ key->dir;
1478 if (ret)
1479 return ret;
1480
1481 ret = b->k.type ^ key->type;
1482 if (ret)
1483 return ret;
1484
1485 ret = b->k.family ^ key->family;
1486 if (ret)
1487 return ret;
1488
1489 return b->k.if_id ^ key->if_id;
1490 }
1491
1492 static const struct rhashtable_params xfrm_pol_inexact_params = {
1493 .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
1494 .hashfn = xfrm_pol_bin_key,
1495 .obj_hashfn = xfrm_pol_bin_obj,
1496 .obj_cmpfn = xfrm_pol_bin_cmp,
1497 .automatic_shrinking = true,
1498 };
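/* rhashtable glue: lookups hash the caller-supplied key with
 * xfrm_pol_bin_key(), stored bins are hashed via their embedded key by
 * xfrm_pol_bin_obj(), and xfrm_pol_bin_cmp() reports a match only when
 * netns, dir, type, family and if_id all agree.
 */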
1499
1500 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1501 struct xfrm_policy *policy)
1502 {
1503 struct xfrm_policy *pol, *delpol = NULL;
1504 struct hlist_node *newpos = NULL;
1505 int i = 0;
1506
1507 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1508 if (pol->type == policy->type &&
1509 pol->if_id == policy->if_id &&
1510 !selector_cmp(&pol->selector, &policy->selector) &&
1511 xfrm_policy_mark_match(&policy->mark, pol) &&
1512 xfrm_sec_ctx_match(pol->security, policy->security) &&
1513 !WARN_ON(delpol)) {
1514 delpol = pol;
1515 if (policy->priority > pol->priority)
1516 continue;
1517 } else if (policy->priority >= pol->priority) {
1518 newpos = &pol->bydst_inexact_list;
1519 continue;
1520 }
1521 if (delpol)
1522 break;
1523 }
1524
1525 if (newpos)
1526 hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1527 else
1528 hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1529
1530 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1531 pol->pos = i;
1532 i++;
1533 }
1534 }
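/* The pol->pos renumbering above gives each policy on the inexact chain its
 * insertion-order rank; callers such as xfrm_policy_bysel_ctx() use it as the
 * tie-breaker between candidates collected from different lists.
 */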
1535
1536 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1537 struct xfrm_policy *policy,
1538 bool excl)
1539 {
1540 struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1541
1542 hlist_for_each_entry(pol, chain, bydst) {
1543 if (pol->type == policy->type &&
1544 pol->if_id == policy->if_id &&
1545 !selector_cmp(&pol->selector, &policy->selector) &&
1546 xfrm_policy_mark_match(&policy->mark, pol) &&
1547 xfrm_sec_ctx_match(pol->security, policy->security) &&
1548 !WARN_ON(delpol)) {
1549 if (excl)
1550 return ERR_PTR(-EEXIST);
1551 delpol = pol;
1552 if (policy->priority > pol->priority)
1553 continue;
1554 } else if (policy->priority >= pol->priority) {
1555 newpos = pol;
1556 continue;
1557 }
1558 if (delpol)
1559 break;
1560 }
1561
1562 if (newpos)
1563 hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1564 else
1565 hlist_add_head_rcu(&policy->bydst, chain);
1566
1567 return delpol;
1568 }
1569
1570 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1571 {
1572 struct net *net = xp_net(policy);
1573 struct xfrm_policy *delpol;
1574 struct hlist_head *chain;
1575
1576 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1577 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1578 if (chain)
1579 delpol = xfrm_policy_insert_list(chain, policy, excl);
1580 else
1581 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1582
1583 if (IS_ERR(delpol)) {
1584 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1585 return PTR_ERR(delpol);
1586 }
1587
1588 __xfrm_policy_link(policy, dir);
1589
1590 /* After previous checking, family can either be AF_INET or AF_INET6 */
1591 if (policy->family == AF_INET)
1592 rt_genid_bump_ipv4(net);
1593 else
1594 rt_genid_bump_ipv6(net);
1595
1596 if (delpol) {
1597 xfrm_policy_requeue(delpol, policy);
1598 __xfrm_policy_unlink(delpol, dir);
1599 }
1600 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1601 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1602 policy->curlft.add_time = ktime_get_real_seconds();
1603 policy->curlft.use_time = 0;
1604 if (!mod_timer(&policy->timer, jiffies + HZ))
1605 xfrm_pol_hold(policy);
1606 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1607
1608 if (delpol)
1609 xfrm_policy_kill(delpol);
1610 else if (xfrm_bydst_should_resize(net, dir, NULL))
1611 schedule_work(&net->xfrm.policy_hash_work);
1612
1613 return 0;
1614 }
1615 EXPORT_SYMBOL(xfrm_policy_insert);
1616
1617 static struct xfrm_policy *
1618 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1619 u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1620 struct xfrm_sec_ctx *ctx)
1621 {
1622 struct xfrm_policy *pol;
1623
1624 if (!chain)
1625 return NULL;
1626
1627 hlist_for_each_entry(pol, chain, bydst) {
1628 if (pol->type == type &&
1629 pol->if_id == if_id &&
1630 xfrm_policy_mark_match(mark, pol) &&
1631 !selector_cmp(sel, &pol->selector) &&
1632 xfrm_sec_ctx_match(ctx, pol->security))
1633 return pol;
1634 }
1635
1636 return NULL;
1637 }
1638
1639 struct xfrm_policy *
1640 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1641 u8 type, int dir, struct xfrm_selector *sel,
1642 struct xfrm_sec_ctx *ctx, int delete, int *err)
1643 {
1644 struct xfrm_pol_inexact_bin *bin = NULL;
1645 struct xfrm_policy *pol, *ret = NULL;
1646 struct hlist_head *chain;
1647
1648 *err = 0;
1649 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1650 chain = policy_hash_bysel(net, sel, sel->family, dir);
1651 if (!chain) {
1652 struct xfrm_pol_inexact_candidates cand;
1653 int i;
1654
1655 bin = xfrm_policy_inexact_lookup(net, type,
1656 sel->family, dir, if_id);
1657 if (!bin) {
1658 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1659 return NULL;
1660 }
1661
1662 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1663 &sel->saddr,
1664 &sel->daddr)) {
1665 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1666 return NULL;
1667 }
1668
1669 pol = NULL;
1670 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1671 struct xfrm_policy *tmp;
1672
1673 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1674 if_id, type, dir,
1675 sel, ctx);
1676 if (!tmp)
1677 continue;
1678
1679 if (!pol || tmp->pos < pol->pos)
1680 pol = tmp;
1681 }
1682 } else {
1683 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1684 sel, ctx);
1685 }
1686
1687 if (pol) {
1688 xfrm_pol_hold(pol);
1689 if (delete) {
1690 *err = security_xfrm_policy_delete(pol->security);
1691 if (*err) {
1692 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1693 return pol;
1694 }
1695 __xfrm_policy_unlink(pol, dir);
1696 }
1697 ret = pol;
1698 }
1699 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1700
1701 if (ret && delete)
1702 xfrm_policy_kill(ret);
1703 if (bin && delete)
1704 xfrm_policy_inexact_prune_bin(bin);
1705 return ret;
1706 }
1707 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1708
1709 struct xfrm_policy *
1710 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1711 u8 type, int dir, u32 id, int delete, int *err)
1712 {
1713 struct xfrm_policy *pol, *ret;
1714 struct hlist_head *chain;
1715
1716 *err = -ENOENT;
1717 if (xfrm_policy_id2dir(id) != dir)
1718 return NULL;
1719
1720 *err = 0;
1721 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1722 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1723 ret = NULL;
1724 hlist_for_each_entry(pol, chain, byidx) {
1725 if (pol->type == type && pol->index == id &&
1726 pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1727 xfrm_pol_hold(pol);
1728 if (delete) {
1729 *err = security_xfrm_policy_delete(
1730 pol->security);
1731 if (*err) {
1732 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1733 return pol;
1734 }
1735 __xfrm_policy_unlink(pol, dir);
1736 }
1737 ret = pol;
1738 break;
1739 }
1740 }
1741 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1742
1743 if (ret && delete)
1744 xfrm_policy_kill(ret);
1745 return ret;
1746 }
1747 EXPORT_SYMBOL(xfrm_policy_byid);
1748
1749 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1750 static inline int
1751 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1752 {
1753 struct xfrm_policy *pol;
1754 int err = 0;
1755
1756 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1757 if (pol->walk.dead ||
1758 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1759 pol->type != type)
1760 continue;
1761
1762 err = security_xfrm_policy_delete(pol->security);
1763 if (err) {
1764 xfrm_audit_policy_delete(pol, 0, task_valid);
1765 return err;
1766 }
1767 }
1768 return err;
1769 }
1770 #else
1771 static inline int
1772 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1773 {
1774 return 0;
1775 }
1776 #endif
1777
1778 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1779 {
1780 int dir, err = 0, cnt = 0;
1781 struct xfrm_policy *pol;
1782
1783 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1784
1785 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1786 if (err)
1787 goto out;
1788
1789 again:
1790 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1791 dir = xfrm_policy_id2dir(pol->index);
1792 if (pol->walk.dead ||
1793 dir >= XFRM_POLICY_MAX ||
1794 pol->type != type)
1795 continue;
1796
1797 __xfrm_policy_unlink(pol, dir);
1798 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1799 cnt++;
1800 xfrm_audit_policy_delete(pol, 1, task_valid);
1801 xfrm_policy_kill(pol);
1802 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1803 goto again;
1804 }
1805 if (cnt)
1806 __xfrm_policy_inexact_flush(net);
1807 else
1808 err = -ESRCH;
1809 out:
1810 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1811 return err;
1812 }
1813 EXPORT_SYMBOL(xfrm_policy_flush);
1814
1815 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1816 int (*func)(struct xfrm_policy *, int, int, void*),
1817 void *data)
1818 {
1819 struct xfrm_policy *pol;
1820 struct xfrm_policy_walk_entry *x;
1821 int error = 0;
1822
1823 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1824 walk->type != XFRM_POLICY_TYPE_ANY)
1825 return -EINVAL;
1826
1827 if (list_empty(&walk->walk.all) && walk->seq != 0)
1828 return 0;
1829
1830 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1831 if (list_empty(&walk->walk.all))
1832 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1833 else
1834 x = list_first_entry(&walk->walk.all,
1835 struct xfrm_policy_walk_entry, all);
1836
1837 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1838 if (x->dead)
1839 continue;
1840 pol = container_of(x, struct xfrm_policy, walk);
1841 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1842 walk->type != pol->type)
1843 continue;
1844 error = func(pol, xfrm_policy_id2dir(pol->index),
1845 walk->seq, data);
1846 if (error) {
1847 list_move_tail(&walk->walk.all, &x->all);
1848 goto out;
1849 }
1850 walk->seq++;
1851 }
1852 if (walk->seq == 0) {
1853 error = -ENOENT;
1854 goto out;
1855 }
1856 list_del_init(&walk->walk.all);
1857 out:
1858 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1859 return error;
1860 }
1861 EXPORT_SYMBOL(xfrm_policy_walk);
1862
1863 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1864 {
1865 INIT_LIST_HEAD(&walk->walk.all);
1866 walk->walk.dead = 1;
1867 walk->type = type;
1868 walk->seq = 0;
1869 }
1870 EXPORT_SYMBOL(xfrm_policy_walk_init);
1871
1872 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1873 {
1874 if (list_empty(&walk->walk.all))
1875 return;
1876
1877 spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1878 list_del(&walk->walk.all);
1879 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1880 }
1881 EXPORT_SYMBOL(xfrm_policy_walk_done);
1882
1883 /*
1884 * Find policy to apply to this flow.
1885 *
1886 * Returns 0 if policy found, else an -errno.
1887 */
1888 static int xfrm_policy_match(const struct xfrm_policy *pol,
1889 const struct flowi *fl,
1890 u8 type, u16 family, int dir, u32 if_id)
1891 {
1892 const struct xfrm_selector *sel = &pol->selector;
1893 int ret = -ESRCH;
1894 bool match;
1895
1896 if (pol->family != family ||
1897 pol->if_id != if_id ||
1898 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1899 pol->type != type)
1900 return ret;
1901
1902 match = xfrm_selector_match(sel, fl, family);
1903 if (match)
1904 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1905 return ret;
1906 }
1907
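/* Lockless lookup in an inexact-policy rbtree, protected by @count.
 * Walk the tree for the node whose prefix covers @addr; an unsuccessful
 * walk is retried if the tree changed underneath us. Returns the
 * matching node, or NULL if no prefix covers @addr.
 */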
1908 static struct xfrm_pol_inexact_node *
1909 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1910 seqcount_spinlock_t *count,
1911 const xfrm_address_t *addr, u16 family)
1912 {
1913 const struct rb_node *parent;
1914 int seq;
1915
1916 again:
1917 seq = read_seqcount_begin(count);
1918
1919 parent = rcu_dereference_raw(r->rb_node);
1920 while (parent) {
1921 struct xfrm_pol_inexact_node *node;
1922 int delta;
1923
1924 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1925
1926 delta = xfrm_policy_addr_delta(addr, &node->addr,
1927 node->prefixlen, family);
1928 if (delta < 0) {
1929 parent = rcu_dereference_raw(parent->rb_left);
1930 continue;
1931 } else if (delta > 0) {
1932 parent = rcu_dereference_raw(parent->rb_right);
1933 continue;
1934 }
1935
1936 return node;
1937 }
1938
1939 if (read_seqcount_retry(count, seq))
1940 goto again;
1941
1942 return NULL;
1943 }
1944
1945 static bool
1946 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1947 struct xfrm_pol_inexact_bin *b,
1948 const xfrm_address_t *saddr,
1949 const xfrm_address_t *daddr)
1950 {
1951 struct xfrm_pol_inexact_node *n;
1952 u16 family;
1953
1954 if (!b)
1955 return false;
1956
1957 family = b->k.family;
1958 memset(cand, 0, sizeof(*cand));
1959 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1960
1961 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1962 family);
1963 if (n) {
1964 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1965 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1966 family);
1967 if (n)
1968 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1969 }
1970
1971 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1972 family);
1973 if (n)
1974 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1975
1976 return true;
1977 }
1978
1979 static struct xfrm_pol_inexact_bin *
1980 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1981 u8 dir, u32 if_id)
1982 {
1983 struct xfrm_pol_inexact_key k = {
1984 .family = family,
1985 .type = type,
1986 .dir = dir,
1987 .if_id = if_id,
1988 };
1989
1990 write_pnet(&k.net, net);
1991
1992 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1993 xfrm_pol_inexact_params);
1994 }
1995
1996 static struct xfrm_pol_inexact_bin *
1997 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1998 u8 dir, u32 if_id)
1999 {
2000 struct xfrm_pol_inexact_bin *bin;
2001
2002 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2003
2004 rcu_read_lock();
2005 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2006 rcu_read_unlock();
2007
2008 return bin;
2009 }
2010
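/* Scan one inexact candidate chain (kept sorted by priority) for a
 * policy matching @fl. The scan stops early once the chain priority
 * exceeds that of @prefer; on a priority tie the older policy (smaller
 * ->pos) wins. Returns the winning policy, NULL if nothing matched, or
 * an ERR_PTR from the security lookup hook.
 */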
2011 static struct xfrm_policy *
2012 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2013 struct xfrm_policy *prefer,
2014 const struct flowi *fl,
2015 u8 type, u16 family, int dir, u32 if_id)
2016 {
2017 u32 priority = prefer ? prefer->priority : ~0u;
2018 struct xfrm_policy *pol;
2019
2020 if (!chain)
2021 return NULL;
2022
2023 hlist_for_each_entry_rcu(pol, chain, bydst) {
2024 int err;
2025
2026 if (pol->priority > priority)
2027 break;
2028
2029 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2030 if (err) {
2031 if (err != -ESRCH)
2032 return ERR_PTR(err);
2033
2034 continue;
2035 }
2036
2037 if (prefer) {
2038 /* matches. Is it older than *prefer? */
2039 if (pol->priority == priority &&
2040 prefer->pos < pol->pos)
2041 return prefer;
2042 }
2043
2044 return pol;
2045 }
2046
2047 return NULL;
2048 }
2049
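/* Evaluate all candidate lists produced by the inexact lookup, starting
 * from the best match already found in the exact hash table (@prefer),
 * and return the overall best match or an ERR_PTR on error.
 */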
2050 static struct xfrm_policy *
2051 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2052 struct xfrm_policy *prefer,
2053 const struct flowi *fl,
2054 u8 type, u16 family, int dir, u32 if_id)
2055 {
2056 struct xfrm_policy *tmp;
2057 int i;
2058
2059 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2060 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2061 prefer,
2062 fl, type, family, dir,
2063 if_id);
2064 if (!tmp)
2065 continue;
2066
2067 if (IS_ERR(tmp))
2068 return tmp;
2069 prefer = tmp;
2070 }
2071
2072 return prefer;
2073 }
2074
2075 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2076 const struct flowi *fl,
2077 u16 family, u8 dir,
2078 u32 if_id)
2079 {
2080 struct xfrm_pol_inexact_candidates cand;
2081 const xfrm_address_t *daddr, *saddr;
2082 struct xfrm_pol_inexact_bin *bin;
2083 struct xfrm_policy *pol, *ret;
2084 struct hlist_head *chain;
2085 unsigned int sequence;
2086 int err;
2087
2088 daddr = xfrm_flowi_daddr(fl, family);
2089 saddr = xfrm_flowi_saddr(fl, family);
2090 if (unlikely(!daddr || !saddr))
2091 return NULL;
2092
2093 rcu_read_lock();
2094 retry:
2095 do {
2096 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2097 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2098 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2099
2100 ret = NULL;
2101 hlist_for_each_entry_rcu(pol, chain, bydst) {
2102 err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2103 if (err) {
2104 if (err == -ESRCH)
2105 continue;
2106 else {
2107 ret = ERR_PTR(err);
2108 goto fail;
2109 }
2110 } else {
2111 ret = pol;
2112 break;
2113 }
2114 }
2115 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2116 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2117 daddr))
2118 goto skip_inexact;
2119
2120 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2121 family, dir, if_id);
2122 if (pol) {
2123 ret = pol;
2124 if (IS_ERR(pol))
2125 goto fail;
2126 }
2127
2128 skip_inexact:
2129 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2130 goto retry;
2131
2132 if (ret && !xfrm_pol_hold_rcu(ret))
2133 goto retry;
2134 fail:
2135 rcu_read_unlock();
2136
2137 return ret;
2138 }
2139
2140 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2141 const struct flowi *fl,
2142 u16 family, u8 dir, u32 if_id)
2143 {
2144 #ifdef CONFIG_XFRM_SUB_POLICY
2145 struct xfrm_policy *pol;
2146
2147 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2148 dir, if_id);
2149 if (pol != NULL)
2150 return pol;
2151 #endif
2152 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2153 dir, if_id);
2154 }
2155
2156 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2157 const struct flowi *fl,
2158 u16 family, u32 if_id)
2159 {
2160 struct xfrm_policy *pol;
2161
2162 rcu_read_lock();
2163 again:
2164 pol = rcu_dereference(sk->sk_policy[dir]);
2165 if (pol != NULL) {
2166 bool match;
2167 int err = 0;
2168
2169 if (pol->family != family) {
2170 pol = NULL;
2171 goto out;
2172 }
2173
2174 match = xfrm_selector_match(&pol->selector, fl, family);
2175 if (match) {
2176 if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2177 pol->if_id != if_id) {
2178 pol = NULL;
2179 goto out;
2180 }
2181 err = security_xfrm_policy_lookup(pol->security,
2182 fl->flowi_secid);
2183 if (!err) {
2184 if (!xfrm_pol_hold_rcu(pol))
2185 goto again;
2186 } else if (err == -ESRCH) {
2187 pol = NULL;
2188 } else {
2189 pol = ERR_PTR(err);
2190 }
2191 } else
2192 pol = NULL;
2193 }
2194 out:
2195 rcu_read_unlock();
2196 return pol;
2197 }
2198
2199 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2200 {
2201 struct net *net = xp_net(pol);
2202
2203 list_add(&pol->walk.all, &net->xfrm.policy_all);
2204 net->xfrm.policy_count[dir]++;
2205 xfrm_pol_hold(pol);
2206 }
2207
2208 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2209 int dir)
2210 {
2211 struct net *net = xp_net(pol);
2212
2213 if (list_empty(&pol->walk.all))
2214 return NULL;
2215
2216 /* Socket policies are not hashed. */
2217 if (!hlist_unhashed(&pol->bydst)) {
2218 hlist_del_rcu(&pol->bydst);
2219 hlist_del_init(&pol->bydst_inexact_list);
2220 hlist_del(&pol->byidx);
2221 }
2222
2223 list_del_init(&pol->walk.all);
2224 net->xfrm.policy_count[dir]--;
2225
2226 return pol;
2227 }
2228
2229 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2230 {
2231 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2232 }
2233
2234 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2235 {
2236 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2237 }
2238
2239 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2240 {
2241 struct net *net = xp_net(pol);
2242
2243 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2244 pol = __xfrm_policy_unlink(pol, dir);
2245 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2246 if (pol) {
2247 xfrm_policy_kill(pol);
2248 return 0;
2249 }
2250 return -ENOENT;
2251 }
2252 EXPORT_SYMBOL(xfrm_policy_delete);
2253
2254 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2255 {
2256 struct net *net = sock_net(sk);
2257 struct xfrm_policy *old_pol;
2258
2259 #ifdef CONFIG_XFRM_SUB_POLICY
2260 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2261 return -EINVAL;
2262 #endif
2263
2264 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2265 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2266 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2267 if (pol) {
2268 pol->curlft.add_time = ktime_get_real_seconds();
2269 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2270 xfrm_sk_policy_link(pol, dir);
2271 }
2272 rcu_assign_pointer(sk->sk_policy[dir], pol);
2273 if (old_pol) {
2274 if (pol)
2275 xfrm_policy_requeue(old_pol, pol);
2276
2277 /* Unlinking always succeeds. This is the only function
2278 * allowed to delete or replace a socket policy.
2279 */
2280 xfrm_sk_policy_unlink(old_pol, dir);
2281 }
2282 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2283
2284 if (old_pol) {
2285 xfrm_policy_kill(old_pol);
2286 }
2287 return 0;
2288 }
2289
2290 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2291 {
2292 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2293 struct net *net = xp_net(old);
2294
2295 if (newp) {
2296 newp->selector = old->selector;
2297 if (security_xfrm_policy_clone(old->security,
2298 &newp->security)) {
2299 kfree(newp);
2300 return NULL; /* ENOMEM */
2301 }
2302 newp->lft = old->lft;
2303 newp->curlft = old->curlft;
2304 newp->mark = old->mark;
2305 newp->if_id = old->if_id;
2306 newp->action = old->action;
2307 newp->flags = old->flags;
2308 newp->xfrm_nr = old->xfrm_nr;
2309 newp->index = old->index;
2310 newp->type = old->type;
2311 newp->family = old->family;
2312 memcpy(newp->xfrm_vec, old->xfrm_vec,
2313 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2314 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2315 xfrm_sk_policy_link(newp, dir);
2316 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2317 xfrm_pol_put(newp);
2318 }
2319 return newp;
2320 }
2321
2322 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2323 {
2324 const struct xfrm_policy *p;
2325 struct xfrm_policy *np;
2326 int i, ret = 0;
2327
2328 rcu_read_lock();
2329 for (i = 0; i < 2; i++) {
2330 p = rcu_dereference(osk->sk_policy[i]);
2331 if (p) {
2332 np = clone_policy(p, i);
2333 if (unlikely(!np)) {
2334 ret = -ENOMEM;
2335 break;
2336 }
2337 rcu_assign_pointer(sk->sk_policy[i], np);
2338 }
2339 }
2340 rcu_read_unlock();
2341 return ret;
2342 }
2343
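/* Resolve a local source address towards @remote via the per-family
 * afinfo hook. The RCU read lock taken by xfrm_policy_get_afinfo() is
 * dropped here once the ->get_saddr() call has completed.
 */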
2344 static int
2345 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2346 xfrm_address_t *remote, unsigned short family, u32 mark)
2347 {
2348 int err;
2349 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2350
2351 if (unlikely(afinfo == NULL))
2352 return -EINVAL;
2353 err = afinfo->get_saddr(net, oif, local, remote, mark);
2354 rcu_read_unlock();
2355 return err;
2356 }
2357
2358 /* Resolve list of templates for the flow, given policy. */
2359
2360 static int
2361 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2362 struct xfrm_state **xfrm, unsigned short family)
2363 {
2364 struct net *net = xp_net(policy);
2365 int nx;
2366 int i, error;
2367 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2368 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2369 xfrm_address_t tmp;
2370
2371 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2372 struct xfrm_state *x;
2373 xfrm_address_t *remote = daddr;
2374 xfrm_address_t *local = saddr;
2375 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2376
2377 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2378 tmpl->mode == XFRM_MODE_BEET) {
2379 remote = &tmpl->id.daddr;
2380 local = &tmpl->saddr;
2381 if (xfrm_addr_any(local, tmpl->encap_family)) {
2382 error = xfrm_get_saddr(net, fl->flowi_oif,
2383 &tmp, remote,
2384 tmpl->encap_family, 0);
2385 if (error)
2386 goto fail;
2387 local = &tmp;
2388 }
2389 }
2390
2391 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2392 family, policy->if_id);
2393
2394 if (x && x->km.state == XFRM_STATE_VALID) {
2395 xfrm[nx++] = x;
2396 daddr = remote;
2397 saddr = local;
2398 continue;
2399 }
2400 if (x) {
2401 error = (x->km.state == XFRM_STATE_ERROR ?
2402 -EINVAL : -EAGAIN);
2403 xfrm_state_put(x);
2404 } else if (error == -ESRCH) {
2405 error = -EAGAIN;
2406 }
2407
2408 if (!tmpl->optional)
2409 goto fail;
2410 }
2411 return nx;
2412
2413 fail:
2414 for (nx--; nx >= 0; nx--)
2415 xfrm_state_put(xfrm[nx]);
2416 return error;
2417 }
2418
2419 static int
2420 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2421 struct xfrm_state **xfrm, unsigned short family)
2422 {
2423 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2424 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2425 int cnx = 0;
2426 int error;
2427 int ret;
2428 int i;
2429
2430 for (i = 0; i < npols; i++) {
2431 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2432 error = -ENOBUFS;
2433 goto fail;
2434 }
2435
2436 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2437 if (ret < 0) {
2438 error = ret;
2439 goto fail;
2440 } else
2441 cnx += ret;
2442 }
2443
2444 /* found states are sorted for outbound processing */
2445 if (npols > 1)
2446 xfrm_state_sort(xfrm, tpp, cnx, family);
2447
2448 return cnx;
2449
2450 fail:
2451 for (cnx--; cnx >= 0; cnx--)
2452 xfrm_state_put(tpp[cnx]);
2453 return error;
2454
2455 }
2456
2457 static int xfrm_get_tos(const struct flowi *fl, int family)
2458 {
2459 if (family == AF_INET)
2460 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2461
2462 return 0;
2463 }
2464
2465 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2466 {
2467 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2468 struct dst_ops *dst_ops;
2469 struct xfrm_dst *xdst;
2470
2471 if (!afinfo)
2472 return ERR_PTR(-EINVAL);
2473
2474 switch (family) {
2475 case AF_INET:
2476 dst_ops = &net->xfrm.xfrm4_dst_ops;
2477 break;
2478 #if IS_ENABLED(CONFIG_IPV6)
2479 case AF_INET6:
2480 dst_ops = &net->xfrm.xfrm6_dst_ops;
2481 break;
2482 #endif
2483 default:
2484 BUG();
2485 }
2486 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2487
2488 if (likely(xdst)) {
2489 memset_after(xdst, 0, u.dst);
2490 } else
2491 xdst = ERR_PTR(-ENOBUFS);
2492
2493 rcu_read_unlock();
2494
2495 return xdst;
2496 }
2497
2498 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2499 int nfheader_len)
2500 {
2501 if (dst->ops->family == AF_INET6) {
2502 struct rt6_info *rt = (struct rt6_info *)dst;
2503 path->path_cookie = rt6_get_cookie(rt);
2504 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2505 }
2506 }
2507
2508 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2509 const struct flowi *fl)
2510 {
2511 const struct xfrm_policy_afinfo *afinfo =
2512 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2513 int err;
2514
2515 if (!afinfo)
2516 return -EINVAL;
2517
2518 err = afinfo->fill_dst(xdst, dev, fl);
2519
2520 rcu_read_unlock();
2521
2522 return err;
2523 }
2524
2525
2526 /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
2527 * all the metrics... In short, bundle a bundle.
2528 */
2529
2530 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2531 struct xfrm_state **xfrm,
2532 struct xfrm_dst **bundle,
2533 int nx,
2534 const struct flowi *fl,
2535 struct dst_entry *dst)
2536 {
2537 const struct xfrm_state_afinfo *afinfo;
2538 const struct xfrm_mode *inner_mode;
2539 struct net *net = xp_net(policy);
2540 unsigned long now = jiffies;
2541 struct net_device *dev;
2542 struct xfrm_dst *xdst_prev = NULL;
2543 struct xfrm_dst *xdst0 = NULL;
2544 int i = 0;
2545 int err;
2546 int header_len = 0;
2547 int nfheader_len = 0;
2548 int trailer_len = 0;
2549 int tos;
2550 int family = policy->selector.family;
2551 xfrm_address_t saddr, daddr;
2552
2553 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2554
2555 tos = xfrm_get_tos(fl, family);
2556
2557 dst_hold(dst);
2558
2559 for (; i < nx; i++) {
2560 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2561 struct dst_entry *dst1 = &xdst->u.dst;
2562
2563 err = PTR_ERR(xdst);
2564 if (IS_ERR(xdst)) {
2565 dst_release(dst);
2566 goto put_states;
2567 }
2568
2569 bundle[i] = xdst;
2570 if (!xdst_prev)
2571 xdst0 = xdst;
2572 else
2573 /* Ref count is taken during xfrm_alloc_dst()
2574 * No need to do dst_clone() on dst1
2575 */
2576 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2577
2578 if (xfrm[i]->sel.family == AF_UNSPEC) {
2579 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2580 xfrm_af2proto(family));
2581 if (!inner_mode) {
2582 err = -EAFNOSUPPORT;
2583 dst_release(dst);
2584 goto put_states;
2585 }
2586 } else
2587 inner_mode = &xfrm[i]->inner_mode;
2588
2589 xdst->route = dst;
2590 dst_copy_metrics(dst1, dst);
2591
2592 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2593 __u32 mark = 0;
2594
2595 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2596 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2597
2598 family = xfrm[i]->props.family;
2599 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2600 &saddr, &daddr, family, mark);
2601 err = PTR_ERR(dst);
2602 if (IS_ERR(dst))
2603 goto put_states;
2604 } else
2605 dst_hold(dst);
2606
2607 dst1->xfrm = xfrm[i];
2608 xdst->xfrm_genid = xfrm[i]->genid;
2609
2610 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2611 dst1->lastuse = now;
2612
2613 dst1->input = dst_discard;
2614
2615 rcu_read_lock();
2616 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2617 if (likely(afinfo))
2618 dst1->output = afinfo->output;
2619 else
2620 dst1->output = dst_discard_out;
2621 rcu_read_unlock();
2622
2623 xdst_prev = xdst;
2624
2625 header_len += xfrm[i]->props.header_len;
2626 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2627 nfheader_len += xfrm[i]->props.header_len;
2628 trailer_len += xfrm[i]->props.trailer_len;
2629 }
2630
2631 xfrm_dst_set_child(xdst_prev, dst);
2632 xdst0->path = dst;
2633
2634 err = -ENODEV;
2635 dev = dst->dev;
2636 if (!dev)
2637 goto free_dst;
2638
2639 xfrm_init_path(xdst0, dst, nfheader_len);
2640 xfrm_init_pmtu(bundle, nx);
2641
2642 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2643 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2644 err = xfrm_fill_dst(xdst_prev, dev, fl);
2645 if (err)
2646 goto free_dst;
2647
2648 xdst_prev->u.dst.header_len = header_len;
2649 xdst_prev->u.dst.trailer_len = trailer_len;
2650 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2651 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2652 }
2653
2654 return &xdst0->u.dst;
2655
2656 put_states:
2657 for (; i < nx; i++)
2658 xfrm_state_put(xfrm[i]);
2659 free_dst:
2660 if (xdst0)
2661 dst_release_immediate(&xdst0->u.dst);
2662
2663 return ERR_PTR(err);
2664 }
2665
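/* Given the policy found for a flow (pols[0]), pull in the matching
 * main policy when sub-policies are in use and count the templates.
 * *num_xfrms is set to -1 if any of the policies blocks the flow.
 */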
2666 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2667 struct xfrm_policy **pols,
2668 int *num_pols, int *num_xfrms)
2669 {
2670 int i;
2671
2672 if (*num_pols == 0 || !pols[0]) {
2673 *num_pols = 0;
2674 *num_xfrms = 0;
2675 return 0;
2676 }
2677 if (IS_ERR(pols[0]))
2678 return PTR_ERR(pols[0]);
2679
2680 *num_xfrms = pols[0]->xfrm_nr;
2681
2682 #ifdef CONFIG_XFRM_SUB_POLICY
2683 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2684 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2685 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2686 XFRM_POLICY_TYPE_MAIN,
2687 fl, family,
2688 XFRM_POLICY_OUT,
2689 pols[0]->if_id);
2690 if (pols[1]) {
2691 if (IS_ERR(pols[1])) {
2692 xfrm_pols_put(pols, *num_pols);
2693 return PTR_ERR(pols[1]);
2694 }
2695 (*num_pols)++;
2696 (*num_xfrms) += pols[1]->xfrm_nr;
2697 }
2698 }
2699 #endif
2700 for (i = 0; i < *num_pols; i++) {
2701 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2702 *num_xfrms = -1;
2703 break;
2704 }
2705 }
2706
2707 return 0;
2708
2709 }
2710
2711 static struct xfrm_dst *
2712 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2713 const struct flowi *fl, u16 family,
2714 struct dst_entry *dst_orig)
2715 {
2716 struct net *net = xp_net(pols[0]);
2717 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2718 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2719 struct xfrm_dst *xdst;
2720 struct dst_entry *dst;
2721 int err;
2722
2723 /* Try to instantiate a bundle */
2724 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2725 if (err <= 0) {
2726 if (err == 0)
2727 return NULL;
2728
2729 if (err != -EAGAIN)
2730 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2731 return ERR_PTR(err);
2732 }
2733
2734 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2735 if (IS_ERR(dst)) {
2736 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2737 return ERR_CAST(dst);
2738 }
2739
2740 xdst = (struct xfrm_dst *)dst;
2741 xdst->num_xfrms = err;
2742 xdst->num_pols = num_pols;
2743 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2744 xdst->policy_genid = atomic_read(&pols[0]->genid);
2745
2746 return xdst;
2747 }
2748
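/* Hold-queue timer handler: try to resolve the bundle again for the
 * packets queued on this policy. If the bundle is still incomplete
 * (DST_XFRM_QUEUE), re-arm the timer with a doubled timeout, giving up
 * once XFRM_QUEUE_TMO_MAX is reached; otherwise re-route and transmit
 * every queued packet.
 */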
2749 static void xfrm_policy_queue_process(struct timer_list *t)
2750 {
2751 struct sk_buff *skb;
2752 struct sock *sk;
2753 struct dst_entry *dst;
2754 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2755 struct net *net = xp_net(pol);
2756 struct xfrm_policy_queue *pq = &pol->polq;
2757 struct flowi fl;
2758 struct sk_buff_head list;
2759 __u32 skb_mark;
2760
2761 spin_lock(&pq->hold_queue.lock);
2762 skb = skb_peek(&pq->hold_queue);
2763 if (!skb) {
2764 spin_unlock(&pq->hold_queue.lock);
2765 goto out;
2766 }
2767 dst = skb_dst(skb);
2768 sk = skb->sk;
2769
2770 /* Fixup the mark to support VTI. */
2771 skb_mark = skb->mark;
2772 skb->mark = pol->mark.v;
2773 xfrm_decode_session(skb, &fl, dst->ops->family);
2774 skb->mark = skb_mark;
2775 spin_unlock(&pq->hold_queue.lock);
2776
2777 dst_hold(xfrm_dst_path(dst));
2778 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2779 if (IS_ERR(dst))
2780 goto purge_queue;
2781
2782 if (dst->flags & DST_XFRM_QUEUE) {
2783 dst_release(dst);
2784
2785 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2786 goto purge_queue;
2787
2788 pq->timeout = pq->timeout << 1;
2789 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2790 xfrm_pol_hold(pol);
2791 goto out;
2792 }
2793
2794 dst_release(dst);
2795
2796 __skb_queue_head_init(&list);
2797
2798 spin_lock(&pq->hold_queue.lock);
2799 pq->timeout = 0;
2800 skb_queue_splice_init(&pq->hold_queue, &list);
2801 spin_unlock(&pq->hold_queue.lock);
2802
2803 while (!skb_queue_empty(&list)) {
2804 skb = __skb_dequeue(&list);
2805
2806 /* Fixup the mark to support VTI. */
2807 skb_mark = skb->mark;
2808 skb->mark = pol->mark.v;
2809 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2810 skb->mark = skb_mark;
2811
2812 dst_hold(xfrm_dst_path(skb_dst(skb)));
2813 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2814 if (IS_ERR(dst)) {
2815 kfree_skb(skb);
2816 continue;
2817 }
2818
2819 nf_reset_ct(skb);
2820 skb_dst_drop(skb);
2821 skb_dst_set(skb, dst);
2822
2823 dst_output(net, skb->sk, skb);
2824 }
2825
2826 out:
2827 xfrm_pol_put(pol);
2828 return;
2829
2830 purge_queue:
2831 pq->timeout = 0;
2832 skb_queue_purge(&pq->hold_queue);
2833 xfrm_pol_put(pol);
2834 }
2835
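/* Output hook installed on dummy bundles: park the packet on the
 * policy's hold queue and (re)arm the hold timer. Packets are dropped
 * once the queue exceeds XFRM_MAX_QUEUE_LEN.
 */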
2836 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2837 {
2838 unsigned long sched_next;
2839 struct dst_entry *dst = skb_dst(skb);
2840 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2841 struct xfrm_policy *pol = xdst->pols[0];
2842 struct xfrm_policy_queue *pq = &pol->polq;
2843
2844 if (unlikely(skb_fclone_busy(sk, skb))) {
2845 kfree_skb(skb);
2846 return 0;
2847 }
2848
2849 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2850 kfree_skb(skb);
2851 return -EAGAIN;
2852 }
2853
2854 skb_dst_force(skb);
2855
2856 spin_lock_bh(&pq->hold_queue.lock);
2857
2858 if (!pq->timeout)
2859 pq->timeout = XFRM_QUEUE_TMO_MIN;
2860
2861 sched_next = jiffies + pq->timeout;
2862
2863 if (del_timer(&pq->hold_timer)) {
2864 if (time_before(pq->hold_timer.expires, sched_next))
2865 sched_next = pq->hold_timer.expires;
2866 xfrm_pol_put(pol);
2867 }
2868
2869 __skb_queue_tail(&pq->hold_queue, skb);
2870 if (!mod_timer(&pq->hold_timer, sched_next))
2871 xfrm_pol_hold(pol);
2872
2873 spin_unlock_bh(&pq->hold_queue.lock);
2874
2875 return 0;
2876 }
2877
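/* Build a bundle that carries no xfrm states. If the caller allows
 * queueing, larval drop is disabled and the policy requires
 * transformations, packets sent through it are parked on the policy
 * hold queue (via xdst_queue_output) until the KM provides the missing
 * states; otherwise a bare xfrm_dst is returned.
 */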
2878 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2879 struct xfrm_flo *xflo,
2880 const struct flowi *fl,
2881 int num_xfrms,
2882 u16 family)
2883 {
2884 int err;
2885 struct net_device *dev;
2886 struct dst_entry *dst;
2887 struct dst_entry *dst1;
2888 struct xfrm_dst *xdst;
2889
2890 xdst = xfrm_alloc_dst(net, family);
2891 if (IS_ERR(xdst))
2892 return xdst;
2893
2894 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2895 net->xfrm.sysctl_larval_drop ||
2896 num_xfrms <= 0)
2897 return xdst;
2898
2899 dst = xflo->dst_orig;
2900 dst1 = &xdst->u.dst;
2901 dst_hold(dst);
2902 xdst->route = dst;
2903
2904 dst_copy_metrics(dst1, dst);
2905
2906 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2907 dst1->flags |= DST_XFRM_QUEUE;
2908 dst1->lastuse = jiffies;
2909
2910 dst1->input = dst_discard;
2911 dst1->output = xdst_queue_output;
2912
2913 dst_hold(dst);
2914 xfrm_dst_set_child(xdst, dst);
2915 xdst->path = dst;
2916
2917 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2918
2919 err = -ENODEV;
2920 dev = dst->dev;
2921 if (!dev)
2922 goto free_dst;
2923
2924 err = xfrm_fill_dst(xdst, dev, fl);
2925 if (err)
2926 goto free_dst;
2927
2928 out:
2929 return xdst;
2930
2931 free_dst:
2932 dst_release(dst1);
2933 xdst = ERR_PTR(err);
2934 goto out;
2935 }
2936
2937 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2938 const struct flowi *fl,
2939 u16 family, u8 dir,
2940 struct xfrm_flo *xflo, u32 if_id)
2941 {
2942 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2943 int num_pols = 0, num_xfrms = 0, err;
2944 struct xfrm_dst *xdst;
2945
2946 /* Resolve policies to use if we couldn't get them from
2947 * the previous cache entry */
2948 num_pols = 1;
2949 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2950 err = xfrm_expand_policies(fl, family, pols,
2951 &num_pols, &num_xfrms);
2952 if (err < 0)
2953 goto inc_error;
2954 if (num_pols == 0)
2955 return NULL;
2956 if (num_xfrms <= 0)
2957 goto make_dummy_bundle;
2958
2959 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2960 xflo->dst_orig);
2961 if (IS_ERR(xdst)) {
2962 err = PTR_ERR(xdst);
2963 if (err == -EREMOTE) {
2964 xfrm_pols_put(pols, num_pols);
2965 return NULL;
2966 }
2967
2968 if (err != -EAGAIN)
2969 goto error;
2970 goto make_dummy_bundle;
2971 } else if (xdst == NULL) {
2972 num_xfrms = 0;
2973 goto make_dummy_bundle;
2974 }
2975
2976 return xdst;
2977
2978 make_dummy_bundle:
2979 /* We found policies, but there are no bundles to instantiate:
2980 * either because the policy blocks, has no transformations, or
2981 * we could not build a template (no xfrm_states). */
2982 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2983 if (IS_ERR(xdst)) {
2984 xfrm_pols_put(pols, num_pols);
2985 return ERR_CAST(xdst);
2986 }
2987 xdst->num_pols = num_pols;
2988 xdst->num_xfrms = num_xfrms;
2989 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2990
2991 return xdst;
2992
2993 inc_error:
2994 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2995 error:
2996 xfrm_pols_put(pols, num_pols);
2997 return ERR_PTR(err);
2998 }
2999
3000 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3001 struct dst_entry *dst_orig)
3002 {
3003 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3004 struct dst_entry *ret;
3005
3006 if (!afinfo) {
3007 dst_release(dst_orig);
3008 return ERR_PTR(-EINVAL);
3009 } else {
3010 ret = afinfo->blackhole_route(net, dst_orig);
3011 }
3012 rcu_read_unlock();
3013
3014 return ret;
3015 }
3016
3017 /* Finds/creates a bundle for a given flow and if_id
3018 *
3019 * At the moment we eat a raw IP route. Mostly to speed up lookups
3020 * on interfaces with disabled IPsec.
3021 *
3022 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3023 * compatibility.
3024 */
3025 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3026 struct dst_entry *dst_orig,
3027 const struct flowi *fl,
3028 const struct sock *sk,
3029 int flags, u32 if_id)
3030 {
3031 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3032 struct xfrm_dst *xdst;
3033 struct dst_entry *dst, *route;
3034 u16 family = dst_orig->ops->family;
3035 u8 dir = XFRM_POLICY_OUT;
3036 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3037
3038 dst = NULL;
3039 xdst = NULL;
3040 route = NULL;
3041
3042 sk = sk_const_to_full_sk(sk);
3043 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3044 num_pols = 1;
3045 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3046 if_id);
3047 err = xfrm_expand_policies(fl, family, pols,
3048 &num_pols, &num_xfrms);
3049 if (err < 0)
3050 goto dropdst;
3051
3052 if (num_pols) {
3053 if (num_xfrms <= 0) {
3054 drop_pols = num_pols;
3055 goto no_transform;
3056 }
3057
3058 xdst = xfrm_resolve_and_create_bundle(
3059 pols, num_pols, fl,
3060 family, dst_orig);
3061
3062 if (IS_ERR(xdst)) {
3063 xfrm_pols_put(pols, num_pols);
3064 err = PTR_ERR(xdst);
3065 if (err == -EREMOTE)
3066 goto nopol;
3067
3068 goto dropdst;
3069 } else if (xdst == NULL) {
3070 num_xfrms = 0;
3071 drop_pols = num_pols;
3072 goto no_transform;
3073 }
3074
3075 route = xdst->route;
3076 }
3077 }
3078
3079 if (xdst == NULL) {
3080 struct xfrm_flo xflo;
3081
3082 xflo.dst_orig = dst_orig;
3083 xflo.flags = flags;
3084
3085 /* To accelerate a bit... */
3086 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3087 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3088 goto nopol;
3089
3090 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3091 if (xdst == NULL)
3092 goto nopol;
3093 if (IS_ERR(xdst)) {
3094 err = PTR_ERR(xdst);
3095 goto dropdst;
3096 }
3097
3098 num_pols = xdst->num_pols;
3099 num_xfrms = xdst->num_xfrms;
3100 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3101 route = xdst->route;
3102 }
3103
3104 dst = &xdst->u.dst;
3105 if (route == NULL && num_xfrms > 0) {
3106 /* The only case in which xfrm_bundle_lookup() returns a
3107 * bundle with a null route is when the template could
3108 * not be resolved. It means policies are there, but the
3109 * bundle could not be created, since we don't yet
3110 * have the xfrm_states. We need to wait for the KM to
3111 * negotiate new SAs or bail out with an error. */
3112 if (net->xfrm.sysctl_larval_drop) {
3113 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3114 err = -EREMOTE;
3115 goto error;
3116 }
3117
3118 err = -EAGAIN;
3119
3120 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3121 goto error;
3122 }
3123
3124 no_transform:
3125 if (num_pols == 0)
3126 goto nopol;
3127
3128 if ((flags & XFRM_LOOKUP_ICMP) &&
3129 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3130 err = -ENOENT;
3131 goto error;
3132 }
3133
3134 for (i = 0; i < num_pols; i++)
3135 pols[i]->curlft.use_time = ktime_get_real_seconds();
3136
3137 if (num_xfrms < 0) {
3138 /* Prohibit the flow */
3139 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3140 err = -EPERM;
3141 goto error;
3142 } else if (num_xfrms > 0) {
3143 /* Flow transformed */
3144 dst_release(dst_orig);
3145 } else {
3146 /* Flow passes untransformed */
3147 dst_release(dst);
3148 dst = dst_orig;
3149 }
3150 ok:
3151 xfrm_pols_put(pols, drop_pols);
3152 if (dst && dst->xfrm &&
3153 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3154 dst->flags |= DST_XFRM_TUNNEL;
3155 return dst;
3156
3157 nopol:
3158 if (!(dst_orig->dev->flags & IFF_LOOPBACK) &&
3159 !xfrm_default_allow(net, dir)) {
3160 err = -EPERM;
3161 goto error;
3162 }
3163 if (!(flags & XFRM_LOOKUP_ICMP)) {
3164 dst = dst_orig;
3165 goto ok;
3166 }
3167 err = -ENOENT;
3168 error:
3169 dst_release(dst);
3170 dropdst:
3171 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3172 dst_release(dst_orig);
3173 xfrm_pols_put(pols, drop_pols);
3174 return ERR_PTR(err);
3175 }
3176 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3177
3178 /* Main function: finds/creates a bundle for a given flow.
3179 *
3180 * At the moment we eat a raw IP route. Mostly to speed up lookups
3181 * on interfaces with disabled IPsec.
3182 */
3183 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3184 const struct flowi *fl, const struct sock *sk,
3185 int flags)
3186 {
3187 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3188 }
3189 EXPORT_SYMBOL(xfrm_lookup);
3190
3191 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3192 * Otherwise we may send out blackholed packets.
3193 */
3194 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3195 const struct flowi *fl,
3196 const struct sock *sk, int flags)
3197 {
3198 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3199 flags | XFRM_LOOKUP_QUEUE |
3200 XFRM_LOOKUP_KEEP_DST_REF);
3201
3202 if (PTR_ERR(dst) == -EREMOTE)
3203 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3204
3205 if (IS_ERR(dst))
3206 dst_release(dst_orig);
3207
3208 return dst;
3209 }
3210 EXPORT_SYMBOL(xfrm_lookup_route);
3211
3212 static inline int
3213 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3214 {
3215 struct sec_path *sp = skb_sec_path(skb);
3216 struct xfrm_state *x;
3217
3218 if (!sp || idx < 0 || idx >= sp->len)
3219 return 0;
3220 x = sp->xvec[idx];
3221 if (!x->type->reject)
3222 return 0;
3223 return x->type->reject(x, skb, fl);
3224 }
3225
3226 /* When skb is transformed back to its "native" form, we have to
3227 * check policy restrictions. At the moment we do this in a maximally
3228 * stupid way. Shame on me. :-) Of course, connected sockets must
3229 * have their policy cached on them.
3230 */
3231
3232 static inline int
3233 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3234 unsigned short family)
3235 {
3236 if (xfrm_state_kern(x))
3237 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3238 return x->id.proto == tmpl->id.proto &&
3239 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3240 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3241 x->props.mode == tmpl->mode &&
3242 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3243 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3244 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3245 xfrm_state_addr_cmp(tmpl, x, family));
3246 }
3247
3248 /*
3249 * 0 or a positive value is returned when validation succeeds: either a
3250 * bypass because of an optional transport-mode template, or the index
3251 * just past the secpath state that matched the template.
3252 * -1 is returned when no matching template is found.
3253 * Otherwise "-2 - errored_index" is returned.
3254 */
3255 static inline int
3256 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3257 unsigned short family)
3258 {
3259 int idx = start;
3260
3261 if (tmpl->optional) {
3262 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3263 return start;
3264 } else
3265 start = -1;
3266 for (; idx < sp->len; idx++) {
3267 if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3268 return ++idx;
3269 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3270 if (start == -1)
3271 start = -2-idx;
3272 break;
3273 }
3274 }
3275 return start;
3276 }
3277
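/* Populate an IPv4 flowi from the packet headers. With @reverse the
 * source/destination addresses and ports are swapped so the flow
 * describes the reply direction.
 */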
3278 static void
3279 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3280 {
3281 const struct iphdr *iph = ip_hdr(skb);
3282 int ihl = iph->ihl;
3283 u8 *xprth = skb_network_header(skb) + ihl * 4;
3284 struct flowi4 *fl4 = &fl->u.ip4;
3285 int oif = 0;
3286
3287 if (skb_dst(skb) && skb_dst(skb)->dev)
3288 oif = skb_dst(skb)->dev->ifindex;
3289
3290 memset(fl4, 0, sizeof(struct flowi4));
3291 fl4->flowi4_mark = skb->mark;
3292 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3293
3294 fl4->flowi4_proto = iph->protocol;
3295 fl4->daddr = reverse ? iph->saddr : iph->daddr;
3296 fl4->saddr = reverse ? iph->daddr : iph->saddr;
3297 fl4->flowi4_tos = iph->tos;
3298
3299 if (!ip_is_fragment(iph)) {
3300 switch (iph->protocol) {
3301 case IPPROTO_UDP:
3302 case IPPROTO_UDPLITE:
3303 case IPPROTO_TCP:
3304 case IPPROTO_SCTP:
3305 case IPPROTO_DCCP:
3306 if (xprth + 4 < skb->data ||
3307 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3308 __be16 *ports;
3309
3310 xprth = skb_network_header(skb) + ihl * 4;
3311 ports = (__be16 *)xprth;
3312
3313 fl4->fl4_sport = ports[!!reverse];
3314 fl4->fl4_dport = ports[!reverse];
3315 }
3316 break;
3317 case IPPROTO_ICMP:
3318 if (xprth + 2 < skb->data ||
3319 pskb_may_pull(skb, xprth + 2 - skb->data)) {
3320 u8 *icmp;
3321
3322 xprth = skb_network_header(skb) + ihl * 4;
3323 icmp = xprth;
3324
3325 fl4->fl4_icmp_type = icmp[0];
3326 fl4->fl4_icmp_code = icmp[1];
3327 }
3328 break;
3329 case IPPROTO_GRE:
3330 if (xprth + 12 < skb->data ||
3331 pskb_may_pull(skb, xprth + 12 - skb->data)) {
3332 __be16 *greflags;
3333 __be32 *gre_hdr;
3334
3335 xprth = skb_network_header(skb) + ihl * 4;
3336 greflags = (__be16 *)xprth;
3337 gre_hdr = (__be32 *)xprth;
3338
3339 if (greflags[0] & GRE_KEY) {
3340 if (greflags[0] & GRE_CSUM)
3341 gre_hdr++;
3342 fl4->fl4_gre_key = gre_hdr[1];
3343 }
3344 }
3345 break;
3346 default:
3347 break;
3348 }
3349 }
3350 }
3351
3352 #if IS_ENABLED(CONFIG_IPV6)
3353 static void
3354 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3355 {
3356 struct flowi6 *fl6 = &fl->u.ip6;
3357 int onlyproto = 0;
3358 const struct ipv6hdr *hdr = ipv6_hdr(skb);
3359 u32 offset = sizeof(*hdr);
3360 struct ipv6_opt_hdr *exthdr;
3361 const unsigned char *nh = skb_network_header(skb);
3362 u16 nhoff = IP6CB(skb)->nhoff;
3363 int oif = 0;
3364 u8 nexthdr;
3365
3366 if (!nhoff)
3367 nhoff = offsetof(struct ipv6hdr, nexthdr);
3368
3369 nexthdr = nh[nhoff];
3370
3371 if (skb_dst(skb) && skb_dst(skb)->dev)
3372 oif = skb_dst(skb)->dev->ifindex;
3373
3374 memset(fl6, 0, sizeof(struct flowi6));
3375 fl6->flowi6_mark = skb->mark;
3376 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3377
3378 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3379 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3380
3381 while (nh + offset + sizeof(*exthdr) < skb->data ||
3382 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3383 nh = skb_network_header(skb);
3384 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3385
3386 switch (nexthdr) {
3387 case NEXTHDR_FRAGMENT:
3388 onlyproto = 1;
3389 fallthrough;
3390 case NEXTHDR_ROUTING:
3391 case NEXTHDR_HOP:
3392 case NEXTHDR_DEST:
3393 offset += ipv6_optlen(exthdr);
3394 nexthdr = exthdr->nexthdr;
3395 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3396 break;
3397 case IPPROTO_UDP:
3398 case IPPROTO_UDPLITE:
3399 case IPPROTO_TCP:
3400 case IPPROTO_SCTP:
3401 case IPPROTO_DCCP:
3402 if (!onlyproto && (nh + offset + 4 < skb->data ||
3403 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3404 __be16 *ports;
3405
3406 nh = skb_network_header(skb);
3407 ports = (__be16 *)(nh + offset);
3408 fl6->fl6_sport = ports[!!reverse];
3409 fl6->fl6_dport = ports[!reverse];
3410 }
3411 fl6->flowi6_proto = nexthdr;
3412 return;
3413 case IPPROTO_ICMPV6:
3414 if (!onlyproto && (nh + offset + 2 < skb->data ||
3415 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3416 u8 *icmp;
3417
3418 nh = skb_network_header(skb);
3419 icmp = (u8 *)(nh + offset);
3420 fl6->fl6_icmp_type = icmp[0];
3421 fl6->fl6_icmp_code = icmp[1];
3422 }
3423 fl6->flowi6_proto = nexthdr;
3424 return;
3425 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3426 case IPPROTO_MH:
3427 offset += ipv6_optlen(exthdr);
3428 if (!onlyproto && (nh + offset + 3 < skb->data ||
3429 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3430 struct ip6_mh *mh;
3431
3432 nh = skb_network_header(skb);
3433 mh = (struct ip6_mh *)(nh + offset);
3434 fl6->fl6_mh_type = mh->ip6mh_type;
3435 }
3436 fl6->flowi6_proto = nexthdr;
3437 return;
3438 #endif
3439 default:
3440 fl6->flowi6_proto = nexthdr;
3441 return;
3442 }
3443 }
3444 }
3445 #endif
3446
3447 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3448 unsigned int family, int reverse)
3449 {
3450 switch (family) {
3451 case AF_INET:
3452 decode_session4(skb, fl, reverse);
3453 break;
3454 #if IS_ENABLED(CONFIG_IPV6)
3455 case AF_INET6:
3456 decode_session6(skb, fl, reverse);
3457 break;
3458 #endif
3459 default:
3460 return -EAFNOSUPPORT;
3461 }
3462
3463 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3464 }
3465 EXPORT_SYMBOL(__xfrm_decode_session);
3466
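/* Return 1 (recording the index in *idxp) if any secpath entry from
 * position @k onwards uses a non-transport mode, 0 otherwise.
 */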
3467 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3468 {
3469 for (; k < sp->len; k++) {
3470 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3471 *idxp = k;
3472 return 1;
3473 }
3474 }
3475
3476 return 0;
3477 }
3478
3479 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3480 unsigned short family)
3481 {
3482 struct net *net = dev_net(skb->dev);
3483 struct xfrm_policy *pol;
3484 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3485 int npols = 0;
3486 int xfrm_nr;
3487 int pi;
3488 int reverse;
3489 struct flowi fl;
3490 int xerr_idx = -1;
3491 const struct xfrm_if_cb *ifcb;
3492 struct sec_path *sp;
3493 struct xfrm_if *xi;
3494 u32 if_id = 0;
3495
3496 rcu_read_lock();
3497 ifcb = xfrm_if_get_cb();
3498
3499 if (ifcb) {
3500 xi = ifcb->decode_session(skb, family);
3501 if (xi) {
3502 if_id = xi->p.if_id;
3503 net = xi->net;
3504 }
3505 }
3506 rcu_read_unlock();
3507
3508 reverse = dir & ~XFRM_POLICY_MASK;
3509 dir &= XFRM_POLICY_MASK;
3510
3511 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3512 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3513 return 0;
3514 }
3515
3516 nf_nat_decode_session(skb, &fl, family);
3517
3518 /* First, check used SA against their selectors. */
3519 sp = skb_sec_path(skb);
3520 if (sp) {
3521 int i;
3522
3523 for (i = sp->len - 1; i >= 0; i--) {
3524 struct xfrm_state *x = sp->xvec[i];
3525 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3526 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3527 return 0;
3528 }
3529 }
3530 }
3531
3532 pol = NULL;
3533 sk = sk_to_full_sk(sk);
3534 if (sk && sk->sk_policy[dir]) {
3535 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3536 if (IS_ERR(pol)) {
3537 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3538 return 0;
3539 }
3540 }
3541
3542 if (!pol)
3543 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3544
3545 if (IS_ERR(pol)) {
3546 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3547 return 0;
3548 }
3549
3550 if (!pol) {
3551 if (!xfrm_default_allow(net, dir)) {
3552 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3553 return 0;
3554 }
3555
3556 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3557 xfrm_secpath_reject(xerr_idx, skb, &fl);
3558 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3559 return 0;
3560 }
3561 return 1;
3562 }
3563
3564 pol->curlft.use_time = ktime_get_real_seconds();
3565
3566 pols[0] = pol;
3567 npols++;
3568 #ifdef CONFIG_XFRM_SUB_POLICY
3569 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3570 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3571 &fl, family,
3572 XFRM_POLICY_IN, if_id);
3573 if (pols[1]) {
3574 if (IS_ERR(pols[1])) {
3575 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3576 return 0;
3577 }
3578 pols[1]->curlft.use_time = ktime_get_real_seconds();
3579 npols++;
3580 }
3581 }
3582 #endif
3583
3584 if (pol->action == XFRM_POLICY_ALLOW) {
3585 static struct sec_path dummy;
3586 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3587 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3588 struct xfrm_tmpl **tpp = tp;
3589 int ti = 0;
3590 int i, k;
3591
3592 sp = skb_sec_path(skb);
3593 if (!sp)
3594 sp = &dummy;
3595
3596 for (pi = 0; pi < npols; pi++) {
3597 if (pols[pi] != pol &&
3598 pols[pi]->action != XFRM_POLICY_ALLOW) {
3599 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3600 goto reject;
3601 }
3602 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3603 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3604 goto reject_error;
3605 }
3606 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3607 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3608 }
3609 xfrm_nr = ti;
3610
3611 if (!xfrm_default_allow(net, dir) && !xfrm_nr) {
3612 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
3613 goto reject;
3614 }
3615
3616 if (npols > 1) {
3617 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3618 tpp = stp;
3619 }
3620
3621 /* For each tunnel xfrm, find the first matching tmpl.
3622 * For each tmpl before that, find corresponding xfrm.
3623 * Order is _important_. Later we will implement
3624 * some barriers, but at the moment barriers
3625 * are implied between each two transformations.
3626 */
3627 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3628 k = xfrm_policy_ok(tpp[i], sp, k, family);
3629 if (k < 0) {
3630 if (k < -1)
3631 /* "-2 - errored_index" returned */
3632 xerr_idx = -(2+k);
3633 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3634 goto reject;
3635 }
3636 }
3637
3638 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3639 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3640 goto reject;
3641 }
3642
3643 xfrm_pols_put(pols, npols);
3644 return 1;
3645 }
3646 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3647
3648 reject:
3649 xfrm_secpath_reject(xerr_idx, skb, &fl);
3650 reject_error:
3651 xfrm_pols_put(pols, npols);
3652 return 0;
3653 }
3654 EXPORT_SYMBOL(__xfrm_policy_check);
3655
3656 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3657 {
3658 struct net *net = dev_net(skb->dev);
3659 struct flowi fl;
3660 struct dst_entry *dst;
3661 int res = 1;
3662
3663 if (xfrm_decode_session(skb, &fl, family) < 0) {
3664 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3665 return 0;
3666 }
3667
3668 skb_dst_force(skb);
3669 if (!skb_dst(skb)) {
3670 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3671 return 0;
3672 }
3673
3674 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3675 if (IS_ERR(dst)) {
3676 res = 0;
3677 dst = NULL;
3678 }
3679 skb_dst_set(skb, dst);
3680 return res;
3681 }
3682 EXPORT_SYMBOL(__xfrm_route_forward);
3683
3684 /* Optimize later using cookies and generation ids. */
3685
3686 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3687 {
3688 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3689 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3690 * get validated by dst_ops->check on every use. We do this
3691 * because when a normal route referenced by an XFRM dst is
3692 * obsoleted we do not go looking around for all parent
3693 * referencing XFRM dsts so that we can invalidate them. It
3694 * is just too much work. Instead we make the checks here on
3695 * every use. For example:
3696 *
3697 * XFRM dst A --> IPv4 dst X
3698 *
3699 * X is the "xdst->route" of A (X is also the "dst->path" of A
3700 * in this example). If X is marked obsolete, "A" will not
3701 * notice. That's what we are validating here via the
3702 * stale_bundle() check.
3703 *
3704 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3705 * be marked on it.
3706 * This will force stale_bundle() to fail on any xdst bundle with
3707 * this dst linked in it.
3708 */
3709 if (dst->obsolete < 0 && !stale_bundle(dst))
3710 return dst;
3711
3712 return NULL;
3713 }
3714
3715 static int stale_bundle(struct dst_entry *dst)
3716 {
3717 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3718 }
3719
3720 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3721 {
3722 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3723 dst->dev = dev_net(dev)->loopback_dev;
3724 dev_hold(dst->dev);
3725 dev_put(dev);
3726 }
3727 }
3728 EXPORT_SYMBOL(xfrm_dst_ifdown);
3729
3730 static void xfrm_link_failure(struct sk_buff *skb)
3731 {
3732 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3733 }
3734
3735 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3736 {
3737 if (dst) {
3738 if (dst->obsolete) {
3739 dst_release(dst);
3740 dst = NULL;
3741 }
3742 }
3743 return dst;
3744 }
3745
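/* Seed the cached child and route MTUs for every xfrm_dst in the
 * bundle and set each entry's path MTU to the smaller of the
 * state-adjusted child MTU and the route MTU.
 */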
3746 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3747 {
3748 while (nr--) {
3749 struct xfrm_dst *xdst = bundle[nr];
3750 u32 pmtu, route_mtu_cached;
3751 struct dst_entry *dst;
3752
3753 dst = &xdst->u.dst;
3754 pmtu = dst_mtu(xfrm_dst_child(dst));
3755 xdst->child_mtu_cached = pmtu;
3756
3757 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3758
3759 route_mtu_cached = dst_mtu(xdst->route);
3760 xdst->route_mtu_cached = route_mtu_cached;
3761
3762 if (pmtu > route_mtu_cached)
3763 pmtu = route_mtu_cached;
3764
3765 dst_metric_set(dst, RTAX_MTU, pmtu);
3766 }
3767 }
3768
3769 /* Check that the bundle accepts the flow and its components are
3770 * still valid.
3771 */
3772
3773 static int xfrm_bundle_ok(struct xfrm_dst *first)
3774 {
3775 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3776 struct dst_entry *dst = &first->u.dst;
3777 struct xfrm_dst *xdst;
3778 int start_from, nr;
3779 u32 mtu;
3780
3781 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3782 (dst->dev && !netif_running(dst->dev)))
3783 return 0;
3784
3785 if (dst->flags & DST_XFRM_QUEUE)
3786 return 1;
3787
3788 start_from = nr = 0;
3789 do {
3790 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3791
3792 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3793 return 0;
3794 if (xdst->xfrm_genid != dst->xfrm->genid)
3795 return 0;
3796 if (xdst->num_pols > 0 &&
3797 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3798 return 0;
3799
3800 bundle[nr++] = xdst;
3801
3802 mtu = dst_mtu(xfrm_dst_child(dst));
3803 if (xdst->child_mtu_cached != mtu) {
3804 start_from = nr;
3805 xdst->child_mtu_cached = mtu;
3806 }
3807
3808 if (!dst_check(xdst->route, xdst->route_cookie))
3809 return 0;
3810 mtu = dst_mtu(xdst->route);
3811 if (xdst->route_mtu_cached != mtu) {
3812 start_from = nr;
3813 xdst->route_mtu_cached = mtu;
3814 }
3815
3816 dst = xfrm_dst_child(dst);
3817 } while (dst->xfrm);
3818
3819 if (likely(!start_from))
3820 return 1;
3821
3822 xdst = bundle[start_from - 1];
3823 mtu = xdst->child_mtu_cached;
3824 while (start_from--) {
3825 dst = &xdst->u.dst;
3826
3827 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3828 if (mtu > xdst->route_mtu_cached)
3829 mtu = xdst->route_mtu_cached;
3830 dst_metric_set(dst, RTAX_MTU, mtu);
3831 if (!start_from)
3832 break;
3833
3834 xdst = bundle[start_from - 1];
3835 xdst->child_mtu_cached = mtu;
3836 }
3837
3838 return 1;
3839 }
3840
3841 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3842 {
3843 return dst_metric_advmss(xfrm_dst_path(dst));
3844 }
3845
3846 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3847 {
3848 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3849
3850 return mtu ? : dst_mtu(xfrm_dst_path(dst));
3851 }
3852
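/* Walk down the bundle and pick the address that neighbour resolution
 * should use: for each non-transport state prefer the remote care-of
 * address when the type supplies one, otherwise the outer tunnel
 * destination.
 */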
3853 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3854 const void *daddr)
3855 {
3856 while (dst->xfrm) {
3857 const struct xfrm_state *xfrm = dst->xfrm;
3858
3859 dst = xfrm_dst_child(dst);
3860
3861 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3862 continue;
3863 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3864 daddr = xfrm->coaddr;
3865 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3866 daddr = &xfrm->id.daddr;
3867 }
3868 return daddr;
3869 }
3870
xfrm_neigh_lookup(const struct dst_entry * dst,struct sk_buff * skb,const void * daddr)3871 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3872 struct sk_buff *skb,
3873 const void *daddr)
3874 {
3875 const struct dst_entry *path = xfrm_dst_path(dst);
3876
3877 if (!skb)
3878 daddr = xfrm_get_dst_nexthop(dst, daddr);
3879 return path->ops->neigh_lookup(path, skb, daddr);
3880 }
3881
xfrm_confirm_neigh(const struct dst_entry * dst,const void * daddr)3882 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3883 {
3884 const struct dst_entry *path = xfrm_dst_path(dst);
3885
3886 daddr = xfrm_get_dst_nexthop(dst, daddr);
3887 path->ops->confirm_neigh(path, daddr);
3888 }
3889
xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo * afinfo,int family)3890 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3891 {
3892 int err = 0;
3893
3894 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3895 return -EAFNOSUPPORT;
3896
3897 spin_lock(&xfrm_policy_afinfo_lock);
3898 if (unlikely(xfrm_policy_afinfo[family] != NULL))
3899 err = -EEXIST;
3900 else {
3901 struct dst_ops *dst_ops = afinfo->dst_ops;
3902 if (likely(dst_ops->kmem_cachep == NULL))
3903 dst_ops->kmem_cachep = xfrm_dst_cache;
3904 if (likely(dst_ops->check == NULL))
3905 dst_ops->check = xfrm_dst_check;
3906 if (likely(dst_ops->default_advmss == NULL))
3907 dst_ops->default_advmss = xfrm_default_advmss;
3908 if (likely(dst_ops->mtu == NULL))
3909 dst_ops->mtu = xfrm_mtu;
3910 if (likely(dst_ops->negative_advice == NULL))
3911 dst_ops->negative_advice = xfrm_negative_advice;
3912 if (likely(dst_ops->link_failure == NULL))
3913 dst_ops->link_failure = xfrm_link_failure;
3914 if (likely(dst_ops->neigh_lookup == NULL))
3915 dst_ops->neigh_lookup = xfrm_neigh_lookup;
3916 if (likely(!dst_ops->confirm_neigh))
3917 dst_ops->confirm_neigh = xfrm_confirm_neigh;
3918 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3919 }
3920 spin_unlock(&xfrm_policy_afinfo_lock);
3921
3922 return err;
3923 }
3924 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3925
xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo * afinfo)3926 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3927 {
3928 struct dst_ops *dst_ops = afinfo->dst_ops;
3929 int i;
3930
3931 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3932 if (xfrm_policy_afinfo[i] != afinfo)
3933 continue;
3934 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3935 break;
3936 }
3937
3938 synchronize_rcu();
3939
3940 dst_ops->kmem_cachep = NULL;
3941 dst_ops->check = NULL;
3942 dst_ops->negative_advice = NULL;
3943 dst_ops->link_failure = NULL;
3944 }
3945 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3946
xfrm_if_register_cb(const struct xfrm_if_cb * ifcb)3947 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3948 {
3949 spin_lock(&xfrm_if_cb_lock);
3950 rcu_assign_pointer(xfrm_if_cb, ifcb);
3951 spin_unlock(&xfrm_if_cb_lock);
3952 }
3953 EXPORT_SYMBOL(xfrm_if_register_cb);
3954
xfrm_if_unregister_cb(void)3955 void xfrm_if_unregister_cb(void)
3956 {
3957 RCU_INIT_POINTER(xfrm_if_cb, NULL);
3958 synchronize_rcu();
3959 }
3960 EXPORT_SYMBOL(xfrm_if_unregister_cb);
3961
3962 #ifdef CONFIG_XFRM_STATISTICS
xfrm_statistics_init(struct net * net)3963 static int __net_init xfrm_statistics_init(struct net *net)
3964 {
3965 int rv;
3966 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3967 if (!net->mib.xfrm_statistics)
3968 return -ENOMEM;
3969 rv = xfrm_proc_init(net);
3970 if (rv < 0)
3971 free_percpu(net->mib.xfrm_statistics);
3972 return rv;
3973 }
3974
xfrm_statistics_fini(struct net * net)3975 static void xfrm_statistics_fini(struct net *net)
3976 {
3977 xfrm_proc_fini(net);
3978 free_percpu(net->mib.xfrm_statistics);
3979 }
3980 #else
xfrm_statistics_init(struct net * net)3981 static int __net_init xfrm_statistics_init(struct net *net)
3982 {
3983 return 0;
3984 }
3985
xfrm_statistics_fini(struct net * net)3986 static void xfrm_statistics_fini(struct net *net)
3987 {
3988 }
3989 #endif
3990
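/* Set up the per-netns policy state: the by-index hash table, the
 * per-direction "bydst" hash tables and inexact lists, the hash threshold
 * defaults and the resize/rebuild work items.  The xfrm_dst cache and the
 * inexact-policy rhashtable are global and only created for init_net.
 */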
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
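/* Append the policy's security context (if any) and its selector addresses,
 * including non-default prefix lengths, to an audit record.
 */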
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

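/* Find the policy a migration request refers to: first the exact-match hash
 * chain for the selector, then the per-direction inexact list, preferring
 * the lower priority.  The result, if any, is returned with a reference held.
 */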
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

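/* Validate a migration request: the entry count must be within
 * XFRM_MAX_DEPTH, no new address may be the unspecified address, and no two
 * entries may describe the same old (addr, proto, mode, reqid, family) tuple.
 */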
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif