/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

/* Tx path: drivers peek descriptors off the Tx ring and report finished
 * frames back to the completion ring.
 */
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);

/* need_wakeup flags: when set, user space must explicitly wake up the
 * kernel (e.g. via sendto() or poll()) for the corresponding ring to make
 * progress.
 */
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

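/* Illustrative sketch (not taken from any in-tree driver; the my_*()
 * helpers and types are hypothetical) of how a driver's NAPI poll function
 * might use the Tx and need_wakeup interfaces above:
 *
 *	static void my_clean_xsk_tx(struct my_ring *ring, unsigned int budget)
 *	{
 *		struct xsk_buff_pool *pool = ring->xsk_pool;
 *		struct xdp_desc desc;
 *
 *		while (budget-- && xsk_tx_peek_desc(pool, &desc))
 *			my_post_tx_desc(ring, &desc);
 *		xsk_tx_release(pool);
 *
 *		xsk_tx_completed(pool, my_count_tx_completions(ring));
 *
 *		if (xsk_uses_need_wakeup(pool))
 *			xsk_set_tx_need_wakeup(pool);
 *	}
 */
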
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

/* Largest frame the device is allowed to write into one chunk. */
static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

/* Map all umem pages backing the pool for DMA with the given device. */
static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

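/* Illustrative sketch (hypothetical my_*() helper and surrounding error
 * handling) of pool setup when zero-copy is enabled on a queue; attrs takes
 * DMA_ATTR_* flags:
 *
 *	pool = xsk_get_pool_from_qid(netdev, qid);
 *	if (!pool)
 *		return -EINVAL;
 *	err = xsk_pool_dma_map(pool, my_dma_dev(adapter), 0);
 *	if (err)
 *		return err;
 *	xsk_pool_set_rxq_info(pool, &rxq->xdp_rxq);
 *
 * and on teardown:
 *
 *	xsk_pool_dma_unmap(pool, 0);
 */
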
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}

/* Set up the data pointers for a frame of @size bytes received into @xdp. */
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
}

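/* Illustrative sketch (hypothetical my_*() helpers and MY_RX_BATCH) of the
 * Rx side: refill the HW ring from the pool, then size and sync each
 * completed frame before running the XDP program on it:
 *
 *	struct xdp_buff *bufs[MY_RX_BATCH];
 *	u32 i, n;
 *
 *	n = xsk_buff_alloc_batch(pool, bufs, MY_RX_BATCH);
 *	for (i = 0; i < n; i++)
 *		my_post_rx_buffer(ring, xsk_buff_xdp_get_dma(bufs[i]));
 *
 * and on Rx completion, with size taken from the HW descriptor:
 *
 *	xsk_buff_set_size(xdp, size);
 *	xsk_buff_dma_sync_for_cpu(xdp, pool);
 *	act = bpf_prog_run_xdp(prog, xdp);
 */
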
/* The "raw" accessors below take a umem address straight from a descriptor
 * instead of an xdp_buff, which the Tx path typically does not have.
 */
static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

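/* Illustrative sketch (hypothetical my_post_tx_frame() helper) of consuming
 * a Tx descriptor: desc.addr comes straight from the Tx ring, so the raw
 * accessors are used instead of going through an xdp_buff:
 *
 *	dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *	xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *	my_post_tx_frame(ring, dma, desc.len);
 */
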
#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc,
						 u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */