Lines matching refs: skb

106 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,  in skb_panic()  argument
110 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
111 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
112 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
116 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
118 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
121 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
123 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
173 struct sk_buff *skb; in napi_skb_cache_get() local
183 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
184 kasan_unpoison_object_data(skbuff_head_cache, skb); in napi_skb_cache_get()
186 return skb; in napi_skb_cache_get()
190 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
199 skb->truesize = SKB_TRUESIZE(size); in __build_skb_around()
200 refcount_set(&skb->users, 1); in __build_skb_around()
201 skb->head = data; in __build_skb_around()
202 skb->data = data; in __build_skb_around()
203 skb_reset_tail_pointer(skb); in __build_skb_around()
204 skb->end = skb->tail + size; in __build_skb_around()
205 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb_around()
206 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb_around()
209 shinfo = skb_shinfo(skb); in __build_skb_around()
213 skb_set_kcov_handle(skb, kcov_common_handle()); in __build_skb_around()
237 struct sk_buff *skb; in __build_skb() local
239 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
240 if (unlikely(!skb)) in __build_skb()
243 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
244 __build_skb_around(skb, data, frag_size); in __build_skb()
246 return skb; in __build_skb()
256 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
258 if (skb && frag_size) { in build_skb()
259 skb->head_frag = 1; in build_skb()
261 skb->pfmemalloc = 1; in build_skb()
263 return skb; in build_skb()
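
The build_skb() entries above wrap a caller-provided, page-backed buffer in an sk_buff without copying. A minimal sketch of how an RX path might use it, assuming the buffer is a full page whose packet data starts NET_SKB_PAD bytes in (buffer layout and names are illustrative, not from this file):

    #include <linux/skbuff.h>

    /* Illustrative only: wrap a page-allocator buffer that already has
     * NET_SKB_PAD headroom and tailroom for struct skb_shared_info. */
    static struct sk_buff *demo_rx_to_skb(void *buf, unsigned int pkt_len)
    {
            struct sk_buff *skb;

            /* frag_size covers the whole buffer, including the space
             * __build_skb_around() reserves for skb_shared_info */
            skb = build_skb(buf, PAGE_SIZE);
            if (unlikely(!skb))
                    return NULL;

            skb_reserve(skb, NET_SKB_PAD);  /* skip the headroom */
            skb_put(skb, pkt_len);          /* mark received bytes as data */
            return skb;
    }
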
273 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
276 if (unlikely(!skb)) in build_skb_around()
279 __build_skb_around(skb, data, frag_size); in build_skb_around()
282 skb->head_frag = 1; in build_skb_around()
284 skb->pfmemalloc = 1; in build_skb_around()
286 return skb; in build_skb_around()
302 struct sk_buff *skb; in __napi_build_skb() local
304 skb = napi_skb_cache_get(); in __napi_build_skb()
305 if (unlikely(!skb)) in __napi_build_skb()
308 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
309 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
311 return skb; in __napi_build_skb()
326 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
328 if (likely(skb) && frag_size) { in napi_build_skb()
329 skb->head_frag = 1; in napi_build_skb()
330 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
333 return skb; in napi_build_skb()
398 struct sk_buff *skb; in __alloc_skb() local
412 skb = napi_skb_cache_get(); in __alloc_skb()
414 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
415 if (unlikely(!skb)) in __alloc_skb()
417 prefetchw(skb); in __alloc_skb()
442 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
443 __build_skb_around(skb, data, osize); in __alloc_skb()
444 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
449 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
451 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
457 return skb; in __alloc_skb()
460 kmem_cache_free(cache, skb); in __alloc_skb()
482 struct sk_buff *skb; in __netdev_alloc_skb() local
494 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
495 if (!skb) in __netdev_alloc_skb()
521 skb = __build_skb(data, len); in __netdev_alloc_skb()
522 if (unlikely(!skb)) { in __netdev_alloc_skb()
528 skb->pfmemalloc = 1; in __netdev_alloc_skb()
529 skb->head_frag = 1; in __netdev_alloc_skb()
532 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
533 skb->dev = dev; in __netdev_alloc_skb()
536 return skb; in __netdev_alloc_skb()
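
__netdev_alloc_skb() above is normally reached through the netdev_alloc_skb() wrapper; a hedged sketch of the classic copy-based RX path (device, frame source and error handling are invented for illustration):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative RX handler: the NET_SKB_PAD headroom is reserved
     * internally by __netdev_alloc_skb(), as seen in the lines above. */
    static int demo_rx_packet(struct net_device *dev, const void *frame,
                              unsigned int len)
    {
            struct sk_buff *skb;

            skb = netdev_alloc_skb(dev, len);
            if (unlikely(!skb))
                    return -ENOMEM;

            skb_put_data(skb, frame, len);            /* copy payload in */
            skb->protocol = eth_type_trans(skb, dev); /* also pulls ETH_HLEN */

            return netif_rx(skb);
    }
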
557 struct sk_buff *skb; in __napi_alloc_skb() local
568 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in __napi_alloc_skb()
570 if (!skb) in __napi_alloc_skb()
586 skb = __napi_build_skb(data, len); in __napi_alloc_skb()
587 if (unlikely(!skb)) { in __napi_alloc_skb()
593 skb->pfmemalloc = 1; in __napi_alloc_skb()
594 skb->head_frag = 1; in __napi_alloc_skb()
597 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in __napi_alloc_skb()
598 skb->dev = napi->dev; in __napi_alloc_skb()
601 return skb; in __napi_alloc_skb()
605 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
608 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
609 skb->len += size; in skb_add_rx_frag()
610 skb->data_len += size; in skb_add_rx_frag()
611 skb->truesize += truesize; in skb_add_rx_frag()
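
skb_add_rx_frag() is how page-based RX paths attach payload without copying, bumping len, data_len and truesize as shown above. A small sketch under assumed names (the page/offset would come from the driver's page pool, and the truesize value here is only an example):

    #include <linux/skbuff.h>

    /* Illustrative: append one received fragment to an skb, charging
     * half a page of truesize for the buffer that backs it. */
    static void demo_attach_frag(struct sk_buff *skb, struct page *page,
                                 unsigned int offset, unsigned int frag_len)
    {
            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                            frag_len, PAGE_SIZE / 2);
    }
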
615 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
618 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
621 skb->len += size; in skb_coalesce_rx_frag()
622 skb->data_len += size; in skb_coalesce_rx_frag()
623 skb->truesize += truesize; in skb_coalesce_rx_frag()
633 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
635 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
638 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
642 skb_walk_frags(skb, list) in skb_clone_fraglist()
646 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
648 unsigned char *head = skb->head; in skb_free_head()
650 if (skb->head_frag) { in skb_free_head()
651 if (skb_pp_recycle(skb, head)) in skb_free_head()
659 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
661 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
664 if (skb->cloned && in skb_release_data()
665 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
669 skb_zcopy_clear(skb, true); in skb_release_data()
672 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
677 skb_free_head(skb); in skb_release_data()
688 skb->pp_recycle = 0; in skb_release_data()
694 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
698 switch (skb->fclone) { in kfree_skbmem()
700 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
704 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
715 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
724 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
726 skb_dst_drop(skb); in skb_release_head_state()
727 if (skb->destructor) { in skb_release_head_state()
729 skb->destructor(skb); in skb_release_head_state()
732 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
734 skb_ext_put(skb); in skb_release_head_state()
738 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
740 skb_release_head_state(skb); in skb_release_all()
741 if (likely(skb->head)) in skb_release_all()
742 skb_release_data(skb); in skb_release_all()
754 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
756 skb_release_all(skb); in __kfree_skb()
757 kfree_skbmem(skb); in __kfree_skb()
768 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
770 if (!skb_unref(skb)) in kfree_skb()
773 trace_kfree_skb(skb, __builtin_return_address(0)); in kfree_skb()
774 __kfree_skb(skb); in kfree_skb()
795 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
797 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
798 struct net_device *dev = skb->dev; in skb_dump()
799 struct sock *sk = skb->sk; in skb_dump()
806 len = skb->len; in skb_dump()
808 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
810 headroom = skb_headroom(skb); in skb_dump()
811 tailroom = skb_tailroom(skb); in skb_dump()
813 has_mac = skb_mac_header_was_set(skb); in skb_dump()
814 has_trans = skb_transport_header_was_set(skb); in skb_dump()
821 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
822 has_mac ? skb->mac_header : -1, in skb_dump()
823 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
824 skb->network_header, in skb_dump()
825 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
826 has_trans ? skb->transport_header : -1, in skb_dump()
829 skb->csum, skb->ip_summed, skb->csum_complete_sw, in skb_dump()
830 skb->csum_valid, skb->csum_level, in skb_dump()
831 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
832 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); in skb_dump()
843 16, 1, skb->head, headroom, false); in skb_dump()
845 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
848 16, 1, skb->data, seg_len, false); in skb_dump()
853 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
855 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
856 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
876 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
878 skb_walk_frags(skb, list_skb) in skb_dump()
891 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
893 skb_zcopy_clear(skb, true); in skb_tx_error()
906 void consume_skb(struct sk_buff *skb) in consume_skb() argument
908 if (!skb_unref(skb)) in consume_skb()
911 trace_consume_skb(skb); in consume_skb()
912 __kfree_skb(skb); in consume_skb()
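
kfree_skb() and consume_skb() above free identically; they differ only in tracing semantics (trace_kfree_skb vs trace_consume_skb), so drops and normal completions can be told apart. A sketch of the usual convention in a TX completion handler (names invented):

    #include <linux/skbuff.h>

    /* Illustrative TX-completion cleanup following the drop-vs-consume
     * convention. */
    static void demo_tx_done(struct sk_buff *skb, bool tx_ok)
    {
            if (tx_ok)
                    consume_skb(skb);       /* normal completion, not a drop */
            else
                    kfree_skb(skb);         /* counted/traced as a drop */
    }
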
924 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
926 trace_consume_skb(skb); in __consume_stateless_skb()
927 skb_release_data(skb); in __consume_stateless_skb()
928 kfree_skbmem(skb); in __consume_stateless_skb()
931 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
936 kasan_poison_object_data(skbuff_head_cache, skb); in napi_skb_cache_put()
937 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
950 void __kfree_skb_defer(struct sk_buff *skb) in __kfree_skb_defer() argument
952 skb_release_all(skb); in __kfree_skb_defer()
953 napi_skb_cache_put(skb); in __kfree_skb_defer()
956 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
958 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
959 nf_reset_ct(skb); in napi_skb_free_stolen_head()
960 skb_dst_drop(skb); in napi_skb_free_stolen_head()
961 skb_ext_put(skb); in napi_skb_free_stolen_head()
962 skb_orphan(skb); in napi_skb_free_stolen_head()
963 skb->slow_gro = 0; in napi_skb_free_stolen_head()
965 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
968 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
972 dev_consume_skb_any(skb); in napi_consume_skb()
978 if (!skb_unref(skb)) in napi_consume_skb()
982 trace_consume_skb(skb); in napi_consume_skb()
985 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
986 __kfree_skb(skb); in napi_consume_skb()
990 skb_release_all(skb); in napi_consume_skb()
991 napi_skb_cache_put(skb); in napi_consume_skb()
1054 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1056 #define C(x) n->x = skb->x in __skb_clone()
1060 __copy_skb_header(n, skb); in __skb_clone()
1065 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1080 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1081 skb->cloned = 1; in __skb_clone()
1172 struct sk_buff *skb; in msg_zerocopy_alloc() local
1176 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1177 if (!skb) in msg_zerocopy_alloc()
1180 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1181 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1185 kfree_skb(skb); in msg_zerocopy_alloc()
1251 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1253 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1273 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1275 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1295 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1309 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1310 skb = NULL; in __msg_zerocopy_callback()
1317 consume_skb(skb); in __msg_zerocopy_callback()
1321 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_callback() argument
1343 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) in skb_zerocopy_iter_dgram() argument
1345 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1349 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1353 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1355 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1363 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1364 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1365 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1369 skb->sk = sk; in skb_zerocopy_iter_stream()
1370 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1371 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1375 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1376 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1415 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1417 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1422 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1428 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; in skb_copy_ubufs()
1446 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1473 skb_frag_unref(skb, i); in skb_copy_ubufs()
1477 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); in skb_copy_ubufs()
1480 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); in skb_copy_ubufs()
1481 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1484 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
1503 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
1505 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
1510 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
1513 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1518 if (skb_pfmemalloc(skb)) in skb_clone()
1528 return __skb_clone(n, skb); in skb_clone()
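
skb_clone() duplicates only the sk_buff metadata and shares the data buffer (dataref is bumped in __skb_clone() above), so a writer must un-clone before touching payload. A small usage sketch; the secondary consumer is hypothetical:

    #include <linux/skbuff.h>

    /* Illustrative: hand a read-only clone to a secondary consumer while
     * the original continues down the stack. Data is shared, headers are
     * private to each clone. */
    static void demo_mirror(struct sk_buff *skb,
                            void (*deliver)(struct sk_buff *))
    {
            struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

            if (clone)
                    deliver(clone);         /* must not modify the payload */
    }
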
1532 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
1535 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1536 skb->csum_start += off; in skb_headers_offset_update()
1538 skb->transport_header += off; in skb_headers_offset_update()
1539 skb->network_header += off; in skb_headers_offset_update()
1540 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
1541 skb->mac_header += off; in skb_headers_offset_update()
1542 skb->inner_transport_header += off; in skb_headers_offset_update()
1543 skb->inner_network_header += off; in skb_headers_offset_update()
1544 skb->inner_mac_header += off; in skb_headers_offset_update()
1558 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
1560 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
1582 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
1584 int headerlen = skb_headroom(skb); in skb_copy()
1585 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
1587 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
1595 skb_put(n, skb->len); in skb_copy()
1597 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
1599 skb_copy_header(n, skb); in skb_copy()
1621 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1624 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1625 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1634 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1636 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1638 n->truesize += skb->data_len; in __pskb_copy_fclone()
1639 n->data_len = skb->data_len; in __pskb_copy_fclone()
1640 n->len = skb->len; in __pskb_copy_fclone()
1642 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1645 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
1646 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
1651 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1652 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1653 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1658 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1659 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1663 skb_copy_header(n, skb); in __pskb_copy_fclone()
1685 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1688 int i, osize = skb_end_offset(skb); in pskb_expand_head()
1695 BUG_ON(skb_shared(skb)); in pskb_expand_head()
1699 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1710 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1713 skb_shinfo(skb), in pskb_expand_head()
1714 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1721 if (skb_cloned(skb)) { in pskb_expand_head()
1722 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1724 if (skb_zcopy(skb)) in pskb_expand_head()
1725 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
1726 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1727 skb_frag_ref(skb, i); in pskb_expand_head()
1729 if (skb_has_frag_list(skb)) in pskb_expand_head()
1730 skb_clone_fraglist(skb); in pskb_expand_head()
1732 skb_release_data(skb); in pskb_expand_head()
1734 skb_free_head(skb); in pskb_expand_head()
1736 off = (data + nhead) - skb->head; in pskb_expand_head()
1738 skb->head = data; in pskb_expand_head()
1739 skb->head_frag = 0; in pskb_expand_head()
1740 skb->data += off; in pskb_expand_head()
1742 skb->end = size; in pskb_expand_head()
1745 skb->end = skb->head + size; in pskb_expand_head()
1747 skb->tail += off; in pskb_expand_head()
1748 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1749 skb->cloned = 0; in pskb_expand_head()
1750 skb->hdr_len = 0; in pskb_expand_head()
1751 skb->nohdr = 0; in pskb_expand_head()
1752 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1754 skb_metadata_clear(skb); in pskb_expand_head()
1760 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
1761 skb->truesize += size - osize; in pskb_expand_head()
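
pskb_expand_head() reallocates the head to create extra headroom/tailroom and un-clones the skb, re-homing the header offsets via skb_headers_offset_update() as listed above. A hedged sketch of growing headroom before pushing an outer header (the header size and helper name are invented; real callers often go through skb_cow_head()):

    #include <linux/skbuff.h>

    #define DEMO_TUN_HLEN 16        /* illustrative encapsulation header */

    /* Illustrative: make sure there is room to skb_push() an outer
     * header; the caller must hold the only reference to the skb. */
    static int demo_make_headroom(struct sk_buff *skb)
    {
            unsigned int needed = DEMO_TUN_HLEN;

            if (skb_headroom(skb) >= needed && !skb_header_cloned(skb))
                    return 0;

            return pskb_expand_head(skb,
                                    needed > skb_headroom(skb) ?
                                            needed - skb_headroom(skb) : 0,
                                    0, GFP_ATOMIC);
    }
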
1774 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1777 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1780 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1782 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1805 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
1807 int delta = headroom - skb_headroom(skb); in skb_expand_head()
1808 int osize = skb_end_offset(skb); in skb_expand_head()
1809 struct sock *sk = skb->sk; in skb_expand_head()
1813 return skb; in skb_expand_head()
1817 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
1818 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
1825 consume_skb(skb); in skb_expand_head()
1826 skb = nskb; in skb_expand_head()
1828 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
1831 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
1832 delta = skb_end_offset(skb) - osize; in skb_expand_head()
1834 skb->truesize += delta; in skb_expand_head()
1836 return skb; in skb_expand_head()
1839 kfree_skb(skb); in skb_expand_head()
1862 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1869 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1870 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1872 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1881 skb_put(n, skb->len); in skb_copy_expand()
1891 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1892 skb->len + head_copy_len)); in skb_copy_expand()
1894 skb_copy_header(n, skb); in skb_copy_expand()
1916 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
1922 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
1923 memset(skb->data+skb->len, 0, pad); in __skb_pad()
1927 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
1928 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
1929 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
1937 err = skb_linearize(skb); in __skb_pad()
1941 memset(skb->data + skb->len, 0, pad); in __skb_pad()
1946 kfree_skb(skb); in __skb_pad()
1964 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
1966 if (tail != skb) { in pskb_put()
1967 skb->data_len += len; in pskb_put()
1968 skb->len += len; in pskb_put()
1983 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
1985 void *tmp = skb_tail_pointer(skb); in skb_put()
1986 SKB_LINEAR_ASSERT(skb); in skb_put()
1987 skb->tail += len; in skb_put()
1988 skb->len += len; in skb_put()
1989 if (unlikely(skb->tail > skb->end)) in skb_put()
1990 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2004 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2006 skb->data -= len; in skb_push()
2007 skb->len += len; in skb_push()
2008 if (unlikely(skb->data < skb->head)) in skb_push()
2009 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2010 return skb->data; in skb_push()
2024 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2026 return skb_pull_inline(skb, len); in skb_pull()
2039 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2041 if (skb->len > len) in skb_trim()
2042 __skb_trim(skb, len); in skb_trim()
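
skb_put(), skb_push(), skb_pull() and skb_trim() above are the canonical data-pointer operations. A compact sketch building a frame by hand; the header layout and sizes are invented for illustration:

    #include <linux/skbuff.h>

    struct demo_hdr { __be16 type; __be16 len; };   /* illustrative header */

    /* Illustrative: reserve headroom, append payload (skb_put), then
     * prepend a header (skb_push). skb_pull()/skb_trim() are the
     * corresponding removal operations at head and tail. */
    static struct sk_buff *demo_build_frame(const void *payload,
                                            unsigned int len)
    {
            struct demo_hdr *hdr;
            struct sk_buff *skb;

            skb = alloc_skb(sizeof(*hdr) + len, GFP_KERNEL);
            if (!skb)
                    return NULL;

            skb_reserve(skb, sizeof(*hdr));         /* headroom for header */
            skb_put_data(skb, payload, len);        /* tail grows */

            hdr = skb_push(skb, sizeof(*hdr));      /* data grows at front */
            hdr->type = cpu_to_be16(1);
            hdr->len = cpu_to_be16(len);

            return skb;
    }
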
2049 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2053 int offset = skb_headlen(skb); in ___pskb_trim()
2054 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2058 if (skb_cloned(skb) && in ___pskb_trim()
2059 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2067 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2074 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2077 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2080 skb_frag_unref(skb, i); in ___pskb_trim()
2082 if (skb_has_frag_list(skb)) in ___pskb_trim()
2083 skb_drop_fraglist(skb); in ___pskb_trim()
2087 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2119 if (len > skb_headlen(skb)) { in ___pskb_trim()
2120 skb->data_len -= skb->len - len; in ___pskb_trim()
2121 skb->len = len; in ___pskb_trim()
2123 skb->len = len; in ___pskb_trim()
2124 skb->data_len = 0; in ___pskb_trim()
2125 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2128 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2129 skb_condense(skb); in ___pskb_trim()
2136 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2138 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2139 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2141 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2142 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2144 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2145 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2146 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2151 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2180 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2186 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2188 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2189 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2194 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2195 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2200 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2205 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2206 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2221 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2255 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2256 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2262 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2270 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2271 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2274 skb_frag_unref(skb, i); in __pskb_pull_tail()
2277 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2279 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2290 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2293 skb->tail += delta; in __pskb_pull_tail()
2294 skb->data_len -= delta; in __pskb_pull_tail()
2296 if (!skb->data_len) in __pskb_pull_tail()
2297 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2299 return skb_tail_pointer(skb); in __pskb_pull_tail()
2318 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2320 int start = skb_headlen(skb); in skb_copy_bits()
2324 if (offset > (int)skb->len - len) in skb_copy_bits()
2331 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2338 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2340 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2369 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
2503 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
2515 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
2516 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
2517 skb_headlen(skb), in __skb_splice_bits()
2519 skb_head_is_locked(skb), in __skb_splice_bits()
2526 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
2527 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
2535 skb_walk_frags(skb, iter) { in __skb_splice_bits()
2555 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
2570 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
2603 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
2607 struct sk_buff *head = skb; in __skb_send_sock()
2614 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
2618 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
2619 kv.iov_base = skb->data + offset; in __skb_send_sock()
2638 offset -= skb_headlen(skb); in __skb_send_sock()
2641 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
2642 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
2650 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
2651 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
2675 if (skb == head) { in __skb_send_sock()
2676 if (skb_has_frag_list(skb)) { in __skb_send_sock()
2677 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
2680 } else if (skb->next) { in __skb_send_sock()
2681 skb = skb->next; in __skb_send_sock()
2694 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
2697 return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, in skb_send_sock_locked()
2703 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
2705 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, in skb_send_sock()
2721 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
2723 int start = skb_headlen(skb); in skb_store_bits()
2727 if (offset > (int)skb->len - len) in skb_store_bits()
2733 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
2740 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
2741 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
2771 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
2799 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
2802 int start = skb_headlen(skb); in __skb_checksum()
2812 skb->data + offset, copy, csum); in __skb_checksum()
2819 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
2821 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
2856 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
2883 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2891 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
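
skb_checksum() folds the linear area, the page frags and the frag_list into one wsum. A sketch of the usual software-verification pattern for a received datagram, assuming the caller has already seeded the pseudo-header sum (e.g. with csum_tcpudp_nofold()) and validated offset/len:

    #include <linux/skbuff.h>
    #include <net/checksum.h>

    /* Illustrative: software-verify a checksum over 'len' bytes starting
     * at 'offset', seeded with a pseudo-header sum. */
    static bool demo_csum_ok(const struct sk_buff *skb, int offset, int len,
                             __wsum seed)
    {
            return !csum_fold(skb_checksum(skb, offset, len, seed));
    }
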
2897 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2900 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2910 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2919 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2924 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2926 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
2955 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
2982 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
2986 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
2989 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
2990 !skb->csum_complete_sw) in __skb_checksum_complete_head()
2991 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
2993 if (!skb_shared(skb)) in __skb_checksum_complete_head()
2994 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3008 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3013 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3015 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3024 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3025 !skb->csum_complete_sw) in __skb_checksum_complete()
3026 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3029 if (!skb_shared(skb)) { in __skb_checksum_complete()
3031 skb->csum = csum; in __skb_checksum_complete()
3032 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3033 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3034 skb->csum_valid = !sum; in __skb_checksum_complete()
3173 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3178 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3179 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3181 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3183 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3185 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3188 if (csstart != skb->len) in skb_copy_and_csum_dev()
3189 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3190 skb->len - csstart); in skb_copy_and_csum_dev()
3192 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3193 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3251 struct sk_buff *skb; in skb_queue_purge() local
3252 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
3253 kfree_skb(skb); in skb_queue_purge()
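
skb_queue_purge() drains a whole sk_buff_head, freeing each entry with kfree_skb() as shown above. A minimal teardown sketch; the private structure and queue are hypothetical:

    #include <linux/skbuff.h>

    struct demo_priv { struct sk_buff_head deferred; };     /* illustrative */

    static void demo_init(struct demo_priv *p)
    {
            skb_queue_head_init(&p->deferred);
    }

    static void demo_teardown(struct demo_priv *p)
    {
            /* dequeues and kfree_skb()s everything still pending */
            skb_queue_purge(&p->deferred);
    }
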
3273 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3276 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3277 sum += skb->truesize; in skb_rbtree_purge()
3278 kfree_skb(skb); in skb_rbtree_purge()
3335 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
3340 __skb_unlink(skb, list); in skb_unlink()
3365 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
3371 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3374 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3375 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
3377 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
3378 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
3379 skb1->data_len = skb->data_len; in skb_split_inside_header()
3381 skb->data_len = 0; in skb_split_inside_header()
3382 skb->len = len; in skb_split_inside_header()
3383 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
3386 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
3391 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
3393 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
3394 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
3395 skb->len = len; in skb_split_no_header()
3396 skb->data_len = len - pos; in skb_split_no_header()
3399 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
3402 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
3413 skb_frag_ref(skb, i); in skb_split_no_header()
3416 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
3417 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3421 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3433 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
3435 int pos = skb_headlen(skb); in skb_split()
3438 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags; in skb_split()
3439 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
3441 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
3443 skb_split_no_header(skb, skb1, len, pos); in skb_split()
3451 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
3453 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
3474 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
3479 BUG_ON(shiftlen > skb->len); in skb_shift()
3481 if (skb_headlen(skb)) in skb_shift()
3483 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
3489 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3503 if (skb_prepare_for_shift(skb) || in skb_shift()
3508 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3522 if ((shiftlen == skb->len) && in skb_shift()
3523 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
3526 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
3529 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
3533 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3561 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
3565 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
3570 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
3571 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
3572 skb_shinfo(skb)->nr_frags = to; in skb_shift()
3574 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
3581 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
3584 skb->len -= shiftlen; in skb_shift()
3585 skb->data_len -= shiftlen; in skb_shift()
3586 skb->truesize -= shiftlen; in skb_shift()
3604 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
3609 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
3765 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
3776 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
3783 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
3786 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
3788 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
3789 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
3792 skb_fill_page_desc(skb, i, page, offset, size); in skb_append_pagefrags()
3812 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
3814 unsigned char *data = skb->data; in skb_pull_rcsum()
3816 BUG_ON(len > skb->len); in skb_pull_rcsum()
3817 __skb_pull(skb, len); in skb_pull_rcsum()
3818 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
3819 return skb->data; in skb_pull_rcsum()
3836 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
3840 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
3841 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
3848 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
3850 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
3869 skb->next = nskb; in skb_segment_list()
3886 __copy_skb_header(nskb, skb); in skb_segment_list()
3888 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
3889 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
3899 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
3900 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
3901 skb->len = skb->len - delta_len; in skb_segment_list()
3903 skb_gso_reset(skb); in skb_segment_list()
3905 skb->prev = tail; in skb_segment_list()
3907 if (skb_needs_linearize(skb, features) && in skb_segment_list()
3908 __skb_linearize(skb)) in skb_segment_list()
3911 skb_get(skb); in skb_segment_list()
3913 return skb; in skb_segment_list()
3916 kfree_skb_list(skb->next); in skb_segment_list()
3917 skb->next = NULL; in skb_segment_list()
3922 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive_list() argument
3924 if (unlikely(p->len + skb->len >= 65536)) in skb_gro_receive_list()
3928 skb_shinfo(p)->frag_list = skb; in skb_gro_receive_list()
3930 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive_list()
3932 skb_pull(skb, skb_gro_offset(skb)); in skb_gro_receive_list()
3934 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive_list()
3936 p->data_len += skb->len; in skb_gro_receive_list()
3939 skb->destructor = NULL; in skb_gro_receive_list()
3940 p->truesize += skb->truesize; in skb_gro_receive_list()
3941 p->len += skb->len; in skb_gro_receive_list()
3943 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive_list()
4300 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive() argument
4302 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
4303 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
4304 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
4305 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
4310 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) in skb_gro_receive()
4339 new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
4340 delta_truesize = skb->truesize - new_truesize; in skb_gro_receive()
4342 skb->truesize = new_truesize; in skb_gro_receive()
4343 skb->len -= skb->data_len; in skb_gro_receive()
4344 skb->data_len = 0; in skb_gro_receive()
4346 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
4348 } else if (skb->head_frag) { in skb_gro_receive()
4351 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
4358 first_offset = skb->data - in skb_gro_receive()
4372 delta_truesize = skb->truesize - new_truesize; in skb_gro_receive()
4373 skb->truesize = new_truesize; in skb_gro_receive()
4374 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
4380 skb->destructor = NULL; in skb_gro_receive()
4381 delta_truesize = skb->truesize; in skb_gro_receive()
4387 skb->data_len -= eat; in skb_gro_receive()
4388 skb->len -= eat; in skb_gro_receive()
4392 __skb_pull(skb, offset); in skb_gro_receive()
4395 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
4397 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
4398 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
4399 __skb_header_release(skb); in skb_gro_receive()
4412 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
4492 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
4495 int start = skb_headlen(skb); in __skb_to_sgvec()
4506 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
4513 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
4518 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
4520 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
4536 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
4575 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
4577 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
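
skb_to_sgvec() maps an skb (linear part, frags and frag_list) onto a scatterlist, typically for crypto as in IPsec. A hedged sketch, assuming the skb has no frag_list so MAX_SKB_FRAGS + 1 entries suffice:

    #include <linux/scatterlist.h>
    #include <linux/skbuff.h>

    /* Illustrative: map the whole skb into a scatterlist. */
    static int demo_map_skb(struct sk_buff *skb,
                            struct scatterlist sg[MAX_SKB_FRAGS + 1])
    {
            int nsg;

            sg_init_table(sg, MAX_SKB_FRAGS + 1);
            nsg = skb_to_sgvec(skb, sg, 0, skb->len);
            if (nsg < 0)
                    return nsg;     /* e.g. -EMSGSIZE if it did not fit */

            return nsg;             /* number of entries actually used */
    }
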
4607 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
4610 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
4633 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
4643 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
4644 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
4648 if (!skb_has_frag_list(skb)) { in skb_cow_data()
4654 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
4655 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
4659 *trailer = skb; in skb_cow_data()
4666 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
4726 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
4728 struct sock *sk = skb->sk; in sock_rmem_free()
4730 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
4733 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
4738 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
4745 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
4747 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
4751 skb_orphan(skb); in sock_queue_err_skb()
4752 skb->sk = sk; in sock_queue_err_skb()
4753 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
4754 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
4755 skb_set_err_queue(skb); in sock_queue_err_skb()
4758 skb_dst_force(skb); in sock_queue_err_skb()
4760 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
4767 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
4769 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
4770 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
4776 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
4781 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
4782 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
4789 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
4795 return skb; in sock_dequeue_err_skb()
4812 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
4814 struct sock *sk = skb->sk; in skb_clone_sk()
4820 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
4833 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
4841 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
4843 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
4849 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
4851 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
4857 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
4860 kfree_skb(skb); in __skb_complete_tx_timestamp()
4877 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
4880 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
4889 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
4890 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
4896 kfree_skb(skb); in skb_complete_tx_timestamp()
4905 struct sk_buff *skb; in __skb_tstamp_tx() local
4924 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
4929 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
4931 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
4933 if (!skb) in __skb_tstamp_tx()
4937 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
4939 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
4943 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
4945 skb->tstamp = ktime_get_real(); in __skb_tstamp_tx()
4947 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
4959 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
4961 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
4965 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
4966 skb->wifi_acked = acked; in skb_complete_wifi_ack()
4968 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
4977 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
4981 kfree_skb(skb); in skb_complete_wifi_ack()
4997 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5000 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5002 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5004 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5007 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5008 skb->csum_start = csum_start; in skb_partial_csum_set()
5009 skb->csum_offset = off; in skb_partial_csum_set()
5010 skb_set_transport_header(skb, start); in skb_partial_csum_set()
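
skb_partial_csum_set() marks a packet CHECKSUM_PARTIAL from out-of-band metadata, as paravirtual receive paths do. A sketch with invented descriptor fields:

    #include <linux/skbuff.h>

    /* Illustrative: csum_start/csum_offset come from some out-of-band
     * descriptor delivered with the packet (names are made up). */
    static int demo_apply_csum_hint(struct sk_buff *skb, u16 csum_start,
                                    u16 csum_offset)
    {
            if (!skb_partial_csum_set(skb, csum_start, csum_offset))
                    return -EINVAL; /* offsets outside the linear head */

            /* skb->ip_summed is now CHECKSUM_PARTIAL; the device or
             * skb_checksum_help() fills in the checksum later. */
            return 0;
    }
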
5015 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5018 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5024 if (max > skb->len) in skb_maybe_pull_tail()
5025 max = skb->len; in skb_maybe_pull_tail()
5027 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5030 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5038 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5046 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5048 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5052 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5055 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5057 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5061 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5072 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5081 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5087 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5090 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5097 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5102 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5103 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5104 skb->len - off, in skb_checksum_setup_ipv4()
5105 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5117 #define OPT_HDR(type, skb, off) \ argument
5118 (type *)(skb_network_header(skb) + (off))
5120 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5135 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5139 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5141 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5149 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5156 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5164 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5171 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5179 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5186 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5206 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5211 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5212 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5213 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5225 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5229 switch (skb->protocol) { in skb_checksum_setup()
5231 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5235 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
5260 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5264 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5267 if (skb->len < len) in skb_checksum_maybe_trim()
5269 else if (skb->len == len) in skb_checksum_maybe_trim()
5270 return skb; in skb_checksum_maybe_trim()
5272 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5300 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
5302 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
5305 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
5308 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
5325 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
5333 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
5336 skb->dev->name); in __skb_warn_lro_forwarding()
5340 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
5343 skb_release_head_state(skb); in kfree_skb_partial()
5344 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
5346 __kfree_skb(skb); in kfree_skb_partial()
5455 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
5457 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
5458 skb->skb_iif = 0; in skb_scrub_packet()
5459 skb->ignore_df = 0; in skb_scrub_packet()
5460 skb_dst_drop(skb); in skb_scrub_packet()
5461 skb_ext_reset(skb); in skb_scrub_packet()
5462 nf_reset_ct(skb); in skb_scrub_packet()
5463 nf_reset_trace(skb); in skb_scrub_packet()
5466 skb->offload_fwd_mark = 0; in skb_scrub_packet()
5467 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
5473 ipvs_reset(skb); in skb_scrub_packet()
5474 skb->mark = 0; in skb_scrub_packet()
5475 skb->tstamp = 0; in skb_scrub_packet()
5489 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
5491 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
5494 if (skb->encapsulation) { in skb_gso_transport_seglen()
5495 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
5496 skb_transport_header(skb); in skb_gso_transport_seglen()
5499 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
5501 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
5502 } else if (unlikely(skb_is_gso_sctp(skb))) { in skb_gso_transport_seglen()
5524 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) in skb_gso_network_seglen() argument
5526 unsigned int hdr_len = skb_transport_header(skb) - in skb_gso_network_seglen()
5527 skb_network_header(skb); in skb_gso_network_seglen()
5529 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_network_seglen()
5541 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) in skb_gso_mac_seglen() argument
5543 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in skb_gso_mac_seglen()
5545 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_mac_seglen()
5569 static inline bool skb_gso_size_check(const struct sk_buff *skb, in skb_gso_size_check() argument
5572 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_size_check()
5581 skb_walk_frags(skb, iter) { in skb_gso_size_check()
5599 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) in skb_gso_validate_network_len() argument
5601 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); in skb_gso_validate_network_len()
5614 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) in skb_gso_validate_mac_len() argument
5616 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); in skb_gso_validate_mac_len()
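
skb_gso_validate_network_len() and skb_gso_validate_mac_len() above check whether every segment of a GSO packet would fit a given limit. A simplified sketch of the forwarding-style MTU check built on the former (mirrors what ip_forward() does, reduced for illustration):

    #include <linux/skbuff.h>

    /* Illustrative: decide whether a packet (GSO or not) exceeds the
     * egress MTU and therefore needs fragmentation or an ICMP error. */
    static bool demo_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
    {
            if (skb->len <= mtu)
                    return false;

            /* a GSO packet is fine if each resegmented packet fits */
            if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                    return false;

            return true;
    }
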
5620 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
5625 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
5626 kfree_skb(skb); in skb_reorder_vlan_header()
5630 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
5632 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
5636 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
5638 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
5642 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
5643 return skb; in skb_reorder_vlan_header()
5646 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
5651 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
5653 return skb; in skb_vlan_untag()
5656 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
5657 if (unlikely(!skb)) in skb_vlan_untag()
5660 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
5663 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
5665 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
5667 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
5668 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
5670 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
5671 if (unlikely(!skb)) in skb_vlan_untag()
5674 skb_reset_network_header(skb); in skb_vlan_untag()
5675 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
5676 skb_reset_transport_header(skb); in skb_vlan_untag()
5677 skb_reset_mac_len(skb); in skb_vlan_untag()
5679 return skb; in skb_vlan_untag()
5682 kfree_skb(skb); in skb_vlan_untag()
5687 int skb_ensure_writable(struct sk_buff *skb, int write_len) in skb_ensure_writable() argument
5689 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
5692 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
5695 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
5702 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
5705 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
5714 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
5718 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
5720 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in __skb_vlan_pop()
5723 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); in __skb_vlan_pop()
5724 __skb_pull(skb, VLAN_HLEN); in __skb_vlan_pop()
5726 vlan_set_encap_proto(skb, vhdr); in __skb_vlan_pop()
5727 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
5729 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
5730 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
5732 skb_reset_mac_len(skb); in __skb_vlan_pop()
5741 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
5747 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
5748 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
5750 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5753 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5758 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5761 vlan_proto = skb->protocol; in skb_vlan_pop()
5762 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5766 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
5774 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
5776 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
5777 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
5786 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
5787 skb_vlan_tag_get(skb)); in skb_vlan_push()
5791 skb->protocol = skb->vlan_proto; in skb_vlan_push()
5792 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
5794 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
5796 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
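skb_vlan_pop() and skb_vlan_push() are the building blocks of tc's act_vlan and Open vSwitch's VLAN actions: pop prefers the accelerated tag in skb->vlan_tci and only mangles the payload when it has to, while push installs the new tag as an accelerated one. A minimal re-tagging sketch (retag_vlan is a hypothetical name):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static int retag_vlan(struct sk_buff *skb, u16 new_vid)
{
	int err;

	err = skb_vlan_pop(skb);	/* drop the existing 802.1Q tag, if any */
	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}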
5813 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
5815 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
5816 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
5819 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
5820 skb_reset_mac_header(skb); in skb_eth_pop()
5821 skb_reset_mac_len(skb); in skb_eth_pop()
5840 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
5846 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
5849 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
5853 skb_push(skb, sizeof(*eth)); in skb_eth_push()
5854 skb_reset_mac_header(skb); in skb_eth_push()
5855 skb_reset_mac_len(skb); in skb_eth_push()
5857 eth = eth_hdr(skb); in skb_eth_push()
5860 eth->h_proto = skb->protocol; in skb_eth_push()
5862 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
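skb_eth_pop() and skb_eth_push() serve Open vSwitch's pop_eth/push_eth actions: pop requires an untagged frame whose network header directly follows the MAC header, and push requires a packet with no MAC header at all (network offset 0). A hypothetical sketch that rewrites the Ethernet header by popping and pushing:

#include <linux/skbuff.h>

static int replace_eth_header(struct sk_buff *skb,
			      const unsigned char *new_dst,
			      const unsigned char *new_src)
{
	int err;

	err = skb_eth_pop(skb);		/* packet is now L3-only */
	if (err)
		return err;

	return skb_eth_push(skb, new_dst, new_src);
}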
5869 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
5872 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
5875 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
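The diff[] idiom in skb_mod_eth_type() is the generic way to keep a CHECKSUM_COMPLETE value coherent after rewriting bytes it covers: folding in the one's complement of the old value and the new value has the same effect as subtracting one and adding the other. A standalone sketch of the same idiom for a 32-bit field (the helper name is illustrative):

#include <linux/skbuff.h>
#include <net/checksum.h>

static void csum_complete_replace_be32(struct sk_buff *skb,
				       __be32 old_val, __be32 new_val)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~old_val, new_val };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}
}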
5896 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
5906 if (skb->encapsulation) in skb_mpls_push()
5909 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
5913 if (!skb->inner_protocol) { in skb_mpls_push()
5914 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
5915 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
5918 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
5919 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
5921 skb_reset_mac_header(skb); in skb_mpls_push()
5922 skb_set_network_header(skb, mac_len); in skb_mpls_push()
5923 skb_reset_mac_len(skb); in skb_mpls_push()
5925 lse = mpls_hdr(skb); in skb_mpls_push()
5927 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
5930 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
5931 skb->protocol = mpls_proto; in skb_mpls_push()
5949 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
5954 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
5957 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
5961 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
5962 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
5965 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
5966 skb_reset_mac_header(skb); in skb_mpls_pop()
5967 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
5973 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
5974 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
5976 skb->protocol = next_proto; in skb_mpls_pop()
5992 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
5996 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
5999 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6003 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6004 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6006 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6009 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6024 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6029 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6032 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6035 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6043 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
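The skb_mpls_* helpers above implement the Open vSwitch MPLS datapath actions. A minimal sketch of pushing one label onto an Ethernet frame and later popping back to IPv4; the label stack entry is passed in ready-made, and the final 'true' argument tells the helpers to rewrite the ethertype in the MAC header as well. Function names are hypothetical.

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int push_one_label(struct sk_buff *skb, __be32 lse)
{
	return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}

static int pop_to_ipv4(struct sk_buff *skb)
{
	return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
}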
6066 struct sk_buff *skb; in alloc_skb_with_frags() local
6078 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6079 if (!skb) in alloc_skb_with_frags()
6082 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
6107 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
6111 return skb; in alloc_skb_with_frags()
6114 kfree_skb(skb); in alloc_skb_with_frags()
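alloc_skb_with_frags() allocates a small linear area for headers and places the payload in page fragments; it is what sock_alloc_send_pskb() uses for large datagrams. The allocator fills the frag descriptors but leaves the len/data_len accounting to the caller, as in this sketch (build_fragged_skb is a hypothetical name; the page-order limit mirrors the AF_UNIX caller):

#include <linux/mmzone.h>
#include <linux/skbuff.h>

static struct sk_buff *build_fragged_skb(unsigned long hdr_len,
					 unsigned long payload_len, int *err)
{
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(hdr_len, payload_len,
				   PAGE_ALLOC_COSTLY_ORDER, err, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, hdr_len);		/* linear part reserved for headers */
	skb->data_len = payload_len;	/* payload lives in the page frags */
	skb->len += payload_len;
	return skb;
}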
6120 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6124 int size = skb_end_offset(skb); in pskb_carve_inside_header()
6130 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6141 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6142 skb->len -= off; in pskb_carve_inside_header()
6145 skb_shinfo(skb), in pskb_carve_inside_header()
6147 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6148 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6150 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6154 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6155 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6156 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6157 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6158 skb_release_data(skb); in pskb_carve_inside_header()
6163 skb_free_head(skb); in pskb_carve_inside_header()
6166 skb->head = data; in pskb_carve_inside_header()
6167 skb->data = data; in pskb_carve_inside_header()
6168 skb->head_frag = 0; in pskb_carve_inside_header()
6170 skb->end = size; in pskb_carve_inside_header()
6172 skb->end = skb->head + size; in pskb_carve_inside_header()
6174 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6175 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6176 skb->cloned = 0; in pskb_carve_inside_header()
6177 skb->hdr_len = 0; in pskb_carve_inside_header()
6178 skb->nohdr = 0; in pskb_carve_inside_header()
6179 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6184 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6189 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
6243 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6247 int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6249 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6254 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6265 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6266 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6272 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6275 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6289 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6295 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6296 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6299 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6301 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6302 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6306 skb_release_data(skb); in pskb_carve_inside_nonlinear()
6308 skb->head = data; in pskb_carve_inside_nonlinear()
6309 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6310 skb->data = data; in pskb_carve_inside_nonlinear()
6312 skb->end = size; in pskb_carve_inside_nonlinear()
6314 skb->end = skb->head + size; in pskb_carve_inside_nonlinear()
6316 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6317 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6318 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6319 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6320 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6321 skb->len -= off; in pskb_carve_inside_nonlinear()
6322 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6323 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6328 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6330 int headlen = skb_headlen(skb); in pskb_carve()
6333 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6335 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
6341 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6344 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
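pskb_extract() clones the skb and then carves off everything outside [off, off + to_copy), so a sub-range of a large receive buffer becomes its own skb without copying payload; SCTP uses it to separate chunks. A sketch that splits a buffer into fixed-size records (names and record size are illustrative):

#include <linux/errno.h>
#include <linux/skbuff.h>

static int split_into_records(struct sk_buff *skb, int rec_len,
			      struct sk_buff_head *out)
{
	int off;

	for (off = 0; off + rec_len <= skb->len; off += rec_len) {
		struct sk_buff *rec;

		rec = pskb_extract(skb, off, rec_len, GFP_ATOMIC);
		if (!rec)
			return -ENOMEM;
		__skb_queue_tail(out, rec);	/* caller holds the queue lock */
	}
	return 0;
}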
6370 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6372 if (skb->data_len) { in skb_condense()
6373 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6374 skb_cloned(skb)) in skb_condense()
6378 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6387 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
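skb_condense() targets packets that are about to sit on a queue for a while (TCP calls it before parking packets on the socket backlog): it pulls small fragment data into spare tailroom and recomputes truesize so queued packets charge less memory. A trivial sketch of that usage (queue_for_later is a hypothetical name):

#include <linux/skbuff.h>

static void queue_for_later(struct sk_buff *skb, struct sk_buff_head *queue)
{
	skb_condense(skb);	/* may shrink truesize before long-term queuing */
	skb_queue_tail(queue, skb);
}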
6455 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
6460 skb_ext_put(skb); in __skb_ext_set()
6464 skb->extensions = ext; in __skb_ext_set()
6465 skb->active_extensions = 1 << id; in __skb_ext_set()
6483 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
6488 if (skb->active_extensions) { in skb_ext_add()
6489 old = skb->extensions; in skb_ext_add()
6491 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6511 skb->slow_gro = 1; in skb_ext_add()
6512 skb->extensions = new; in skb_ext_add()
6513 skb->active_extensions |= 1 << id; in skb_ext_add()
6536 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
6538 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6540 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6541 if (skb->active_extensions == 0) { in __skb_ext_del()
6542 skb->extensions = NULL; in __skb_ext_del()
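The extension helpers above manage the reference-counted skb_ext block hanging off skb->extensions. A minimal sketch of the add/find/del lifecycle, using the IPsec secure path as the example id (assumes CONFIG_XFRM, so SKB_EXT_SEC_PATH exists); the pointer returned by skb_ext_add() is only valid until the next extension operation, since the block may be reallocated:

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/xfrm.h>

static int attach_secpath(struct sk_buff *skb)
{
	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

	if (!sp)
		return -ENOMEM;	/* allocation or copy-on-write failed */

	sp->len = 0;		/* storage may be fresh or reused: init it */
	sp->olen = 0;
	return 0;
}

static bool has_secpath(const struct sk_buff *skb)
{
	return skb_ext_find(skb, SKB_EXT_SEC_PATH) != NULL;
}

static void drop_secpath(struct sk_buff *skb)
{
	skb_ext_del(skb, SKB_EXT_SEC_PATH);	/* no-op if not present */
}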