// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, Microsoft Corporation.
 *
 * Author:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
#include <net/xdp.h>

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

#include "hyperv_net.h"

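/* netvsc_run_xdp - run the channel's attached XDP program on a received frame.
 *
 * The frame is copied into a freshly allocated page so the program sees
 * writable memory with NETVSC_XDP_HDRM bytes of headroom.  On XDP_PASS or
 * XDP_TX the caller takes over the page via xdp->data_hard_start; for any
 * other verdict the page is freed here.
 */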
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
		   struct xdp_buff *xdp)
{
	void *data = nvchan->rsc.data[0];
	u32 len = nvchan->rsc.len[0];
	struct page *page = NULL;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	xdp->data_hard_start = NULL;

	rcu_read_lock();
	prog = rcu_dereference(nvchan->bpf_prog);

	if (!prog)
		goto out;

	/* Ensure that the below memcpy() won't overflow the page buffer. */
	if (len > ndev->mtu + ETH_HLEN) {
		act = XDP_DROP;
		goto out;
	}

	/* allocate page buffer for data */
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		act = XDP_DROP;
		goto out;
	}

	xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(page), NETVSC_XDP_HDRM, len, false);

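	/* Only the first RSC fragment is handed to XDP; the program runs on
	 * this private copy, not on the original receive data.
	 */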
	memcpy(xdp->data, data, len);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
	}

out:
	rcu_read_unlock();

	if (page && act != XDP_PASS && act != XDP_TX) {
		__free_page(page);
		xdp->data_hard_start = NULL;
	}

	return act;
}

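/* Linear buffer size needed for a frame of @len bytes once the trailing
 * skb_shared_info and cache-line alignment are accounted for.
 */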
unsigned int netvsc_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

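/* Return the XDP program currently attached to the device.  All channels
 * share the same program, so channel 0 is representative.  Caller must
 * hold the RTNL lock.
 */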
struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
{
	return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
}

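/* netvsc_xdp_set - attach @prog to (or, when @prog is NULL, detach it from)
 * every channel of the synthetic device.  The MTU must be small enough for
 * the copied frame to fit in a single page, and LRO must be disabled first.
 */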
int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack,
		   struct netvsc_device *nvdev)
{
	struct bpf_prog *old_prog;
	int buf_max, i;

	old_prog = netvsc_xdp_get(nvdev);

	if (!old_prog && !prog)
		return 0;

	buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
			   dev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	if (prog && (dev->features & NETIF_F_LRO)) {
		netdev_err(dev, "XDP: not support LRO\n");
		NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO");

		return -EOPNOTSUPP;
	}

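	/* Each channel holds its own reference on the program; take one per
	 * extra channel here, the caller's reference covers the first.
	 */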
	if (prog)
		bpf_prog_add(prog, nvdev->num_chn - 1);

	for (i = 0; i < nvdev->num_chn; i++)
		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);

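	/* Drop the per-channel references held on the old program. */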
	if (old_prog)
		for (i = 0; i < nvdev->num_chn; i++)
			bpf_prog_put(old_prog);

	return 0;
}

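/* netvsc_vf_setxdp - mirror the XDP program onto the companion VF device,
 * if one is present and its driver implements ndo_bpf.  Called under RTNL.
 */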
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;
	int ret;

	ASSERT_RTNL();

	if (!vf_netdev)
		return 0;

	ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return 0;

	memset(&xdp, 0, sizeof(xdp));

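	/* The VF keeps its own reference on the program; take it up front
	 * and drop it again if the VF driver rejects the setup.
	 */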
	if (prog)
		bpf_prog_inc(prog);

	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;

	ret = ndo_bpf(vf_netdev, &xdp);

	if (ret && prog)
		bpf_prog_put(prog);

	return ret;
}

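/* netvsc_bpf - ndo_bpf handler for the synthetic device.  XDP programs are
 * installed on the netvsc channels first and then propagated to the VF; if
 * the VF driver refuses the program, the synthetic side is rolled back.
 */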
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct net_device_context *ndevctx = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netlink_ext_ack *extack = bpf->extack;
	int ret;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);

		if (ret)
			return ret;

		ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);

		if (ret) {
			netdev_err(dev, "vf_setxdp failed:%d\n", ret);
			NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");

			netvsc_xdp_set(dev, NULL, extack, nvdev);
		}

		return ret;

	default:
		return -EINVAL;
	}
}