// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

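/* Allocate a fresh page for an Rx buffer and DMA-map it for device
 * writes; the page may later be split into smaller chunks by the
 * recycling logic in ionic_rx_buf_recycle().
 */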
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_rx_stats *stats;
	struct device *dev;
	struct page *page;

	dev = q->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
	if (unlikely(!page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	buf_info->dma_addr = dma_map_page(dev, page, 0,
					  IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
		__free_pages(page, 0);
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	buf_info->page = page;
	buf_info->page_offset = 0;

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_buf_info *buf_info)
{
	struct net_device *netdev = q->lif->netdev;
	struct device *dev = q->dev;

	if (unlikely(!buf_info)) {
		net_err_ratelimited("%s: %s invalid buf_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (!buf_info->page)
		return;

	dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
	__free_pages(buf_info->page, 0);
	buf_info->page = NULL;
}

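/* Decide whether an Rx page can be reused for another buffer: refuse
 * pfmemalloc and remote-NUMA pages, then advance the page offset by the
 * aligned amount just used and take a page reference if room remains.
 */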
static bool ionic_rx_buf_recycle(struct ionic_queue *q,
				 struct ionic_buf_info *buf_info, u32 used)
{
	u32 size;

	/* don't re-use pages allocated in low-mem condition */
	if (page_is_pfmemalloc(buf_info->page))
		return false;

	/* don't re-use buffers from non-local numa nodes */
	if (page_to_nid(buf_info->page) != numa_mem_id())
		return false;

	size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
	buf_info->page_offset += size;
	if (buf_info->page_offset >= IONIC_PAGE_SIZE)
		return false;

	get_page(buf_info->page);

	return true;
}

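/* Build a frag-based skb from the one-or-more buffers of a completed
 * Rx descriptor, recycling each page where possible; the caller hands
 * the result to napi_gro_frags().
 */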
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	prefetchw(buf_info->page);

	skb = napi_get_frags(&q_to_qcq(q)->napi);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!buf_info->page)) {
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		len -= frag_len;

		dma_sync_single_for_cpu(dev,
					buf_info->dma_addr + buf_info->page_offset,
					frag_len, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				buf_info->page, buf_info->page_offset, frag_len,
				IONIC_PAGE_SIZE);

		if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
			dma_unmap_page(dev, buf_info->dma_addr,
				       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
			buf_info->page = NULL;
		}

		buf_info++;

		i--;
	} while (i > 0);

	return skb;
}

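/* For packets no longer than rx_copybreak, copy the data into a small
 * linear skb and leave the mapped page in place so it can be reposted
 * to the ring without a new allocation.
 */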
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_rxq_comp *comp)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_buf_info *buf_info;
	struct ionic_rx_stats *stats;
	struct device *dev = q->dev;
	struct sk_buff *skb;
	u16 len;

	stats = q_to_rx_stats(q);

	buf_info = &desc_info->bufs[0];
	len = le16_to_cpu(comp->len);

	skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	if (unlikely(!buf_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
	dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

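/* Handle one Rx completion: build the skb, fill in the RSS hash,
 * checksum, VLAN, and hardware-timestamp metadata, then hand the
 * packet up through GRO.
 */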
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct ionic_rxq_comp *comp;
	struct sk_buff *skb;

	comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);

	stats = q_to_rx_stats(q);

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, comp);
	else
		skb = ionic_rx_frags(q, desc_info, comp);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
		__le64 *cq_desc_hwstamp;
		u64 hwstamp;

		cq_desc_hwstamp =
			cq_info->cq_desc +
			qcq->cq.desc_size -
			sizeof(struct ionic_rxq_comp) -
			IONIC_HWSTAMP_CQ_NEGOFFSET;

		hwstamp = le64_to_cpu(*cq_desc_hwstamp);

		if (hwstamp != IONIC_HWSTAMP_INVALID) {
			skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
			stats->hwstamp_valid++;
		} else {
			stats->hwstamp_invalid++;
		}
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

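/* Service one Rx completion if its color bit shows it is ready;
 * returns true when a descriptor was consumed so the caller keeps
 * polling the CQ.
 */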
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_comp *comp;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

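/* Refill the Rx ring: for each free slot, reuse or allocate page
 * buffers sized to cover the MTU, chaining extra pages through SG
 * elements, then ring the doorbell once for the whole batch.
 */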
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_buf_info *buf_info;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int frag_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;

	for (i = ionic_q_space_avail(q); i; i--) {
		nfrags = 0;
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		buf_info = &desc_info->bufs[0];

		if (!buf_info->page) { /* alloc a new buffer? */
			if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
				desc->addr = 0;
				desc->len = 0;
				return;
			}
		}

		/* fill main descriptor - buf[0] */
		desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
		frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
		desc->len = cpu_to_le16(frag_len);
		remain_len -= frag_len;
		buf_info++;
		nfrags++;

		/* fill sg descriptors - buf[1..n] */
		sg_desc = desc_info->sg_desc;
		for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
			sg_elem = &sg_desc->elems[j];
			if (!buf_info->page) { /* alloc a new sg buffer? */
				if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
					sg_elem->addr = 0;
					sg_elem->len = 0;
					return;
				}
			}

			sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
			frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
			sg_elem->len = cpu_to_le16(frag_len);
			remain_len -= frag_len;
			buf_info++;
			nfrags++;
		}

		/* clear end sg element as a sentinel */
		if (j < q->max_sg_elems) {
			sg_elem = &sg_desc->elems[j];
			memset(sg_elem, 0, sizeof(*sg_elem));
		}

		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->nbufs = nfrags;

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			buf_info = &desc_info->bufs[j];
			if (buf_info->page)
				ionic_rx_page_free(q, buf_info);
		}

		desc_info->nbufs = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	q->head_idx = 0;
	q->tail_idx = 0;
}

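/* Feed a packet/byte sample for this queue to the dim library so it
 * can suggest a new interrupt coalescing setting.
 */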
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u16 rx_fill_threshold;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  cq->num_descs / IONIC_RX_FILL_DIV);
	if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	return work_done;
}

int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u16 rx_fill_threshold;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);

	rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
				  rxcq->num_descs / IONIC_RX_FILL_DIV);
	if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
		ionic_rx_fill(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

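/* DMA-map the skb head and all of its frags, recording each mapping in
 * the descriptor's buf_info array; on a frag mapping failure, unwind
 * every mapping made so far.
 */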
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
			    struct ionic_desc_info *desc_info)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct device *dev = q->dev;
	dma_addr_t dma_addr;
	unsigned int nfrags;
	skb_frag_t *frag;
	int frag_idx;

	/* the map helpers return 0 on failure and have already counted
	 * the error in dma_map_err, so check for 0 rather than calling
	 * dma_mapping_error() on the returned value
	 */
	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (!dma_addr)
		return -EIO;
	buf_info->dma_addr = dma_addr;
	buf_info->len = skb_headlen(skb);
	buf_info++;

	frag = skb_shinfo(skb)->frags;
	nfrags = skb_shinfo(skb)->nr_frags;
	for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
		dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
		if (!dma_addr)
			goto dma_fail;
		buf_info->dma_addr = dma_addr;
		buf_info->len = skb_frag_size(frag);
		buf_info++;
	}

	desc_info->nbufs = 1 + nfrags;

	return 0;

dma_fail:
	/* unwind the frag mappings and the head mapping */
	while (frag_idx > 0) {
		frag_idx--;
		buf_info--;
		dma_unmap_page(dev, buf_info->dma_addr,
			       buf_info->len, DMA_TO_DEVICE);
	}
	dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
			 desc_info->bufs[0].len, DMA_TO_DEVICE);
	return -EIO;
}

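/* Release a completed Tx descriptor: unmap its buffers, report any
 * hardware Tx timestamp, wake the subqueue if it had been stopped,
 * and consume the skb.
 */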
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct sk_buff *skb = cb_arg;
	struct device *dev = q->dev;
	unsigned int i;
	u16 qi;

	if (desc_info->nbufs) {
		dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
				 buf_info->len, DMA_TO_DEVICE);
		buf_info++;
		for (i = 1; i < desc_info->nbufs; i++, buf_info++)
			dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
				       buf_info->len, DMA_TO_DEVICE);
	}

	if (!skb)
		return;

	qi = skb_get_queue_mapping(skb);

	if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
		if (cq_info) {
			struct skb_shared_hwtstamps hwts = {};
			__le64 *cq_desc_hwstamp;
			u64 hwstamp;

			cq_desc_hwstamp =
				cq_info->cq_desc +
				qcq->cq.desc_size -
				sizeof(struct ionic_txq_comp) -
				IONIC_HWSTAMP_CQ_NEGOFFSET;

			hwstamp = le64_to_cpu(*cq_desc_hwstamp);

			if (hwstamp != IONIC_HWSTAMP_INVALID) {
				hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);

				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
				skb_tstamp_tx(skb, &hwts);

				stats->hwstamp_valid++;
			} else {
				stats->hwstamp_invalid++;
			}
		}

	} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
		netif_wake_subqueue(q->lif->netdev, qi);
	}

	desc_info->bytes = skb->len;
	stats->clean++;

	dev_consume_skb_any(skb);
}

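/* Service one Tx completion; a single completion can cover several
 * descriptors, so clean queue entries up through comp_index and then
 * report the finished work to BQL.
 */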
bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	struct ionic_txq_comp *comp;
	int bytes = 0;
	int pkts = 0;
	u16 index;

	comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	int bytes = 0;
	int pkts = 0;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		desc_info->bytes = 0;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		if (desc_info->cb_arg) {
			pkts++;
			bytes += desc_info->bytes;
		}
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}

	if (pkts && bytes && !unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
}

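/* Seed the inner-most TCP checksum with the pseudo-header sum computed
 * over a zero IP length, so the device can finish the checksum for
 * each segment it emits during TSO.
 */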
static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (start) {
		skb_tx_timestamp(skb);
		if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
			netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, false, ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, done, NULL, NULL);
	}
}

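/* Split a GSO skb across Tx descriptors: walk the DMA-mapped
 * fragments, emitting one descriptor (plus SG elements) per MSS-sized
 * segment, flagging the first and last descriptors as the start and
 * end of the TSO.
 */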
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *desc_info;
	struct ionic_buf_info *buf_info;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int chunk_len;
	unsigned int frag_rem;
	unsigned int tso_rem;
	unsigned int seg_rem;
	dma_addr_t desc_addr;
	dma_addr_t frag_addr;
	unsigned int hdrlen;
	unsigned int len;
	unsigned int mss;
	bool start, done;
	bool outer_csum;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	desc_info = &q->info[q->head_idx];
	buf_info = desc_info->bufs;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	len = skb->len;
	mss = skb_shinfo(skb)->gso_size;
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_rem = len;
	seg_rem = min(tso_rem, hdrlen + mss);

	frag_addr = 0;
	frag_rem = 0;

	start = true;

	while (tso_rem > 0) {
		desc = NULL;
		elem = NULL;
		desc_addr = 0;
		desc_len = 0;
		desc_nsge = 0;
		/* use fragments until we have enough to post a single descriptor */
		while (seg_rem > 0) {
			/* if the fragment is exhausted then move to the next one */
			if (frag_rem == 0) {
				/* grab the next fragment */
				frag_addr = buf_info->dma_addr;
				frag_rem = buf_info->len;
				buf_info++;
			}
			chunk_len = min(frag_rem, seg_rem);
			if (!desc) {
				/* fill main descriptor */
				desc = desc_info->txq_desc;
				elem = desc_info->txq_sg_desc->elems;
				desc_addr = frag_addr;
				desc_len = chunk_len;
			} else {
				/* fill sg descriptor */
				elem->addr = cpu_to_le64(frag_addr);
				elem->len = cpu_to_le16(chunk_len);
				elem++;
				desc_nsge++;
			}
			frag_addr += chunk_len;
			frag_rem -= chunk_len;
			tso_rem -= chunk_len;
			seg_rem -= chunk_len;
		}
		seg_rem = min(tso_rem, mss);
		done = (tso_rem == 0);
		/* post descriptor */
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
				  start, done);
		start = false;
		/* Buffer information is stored with the first tso descriptor */
		desc_info = &q->info[q->head_idx];
		desc_info->nbufs = 0;
	}

	stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
	stats->bytes += len;
	stats->tso++;
	stats->tso_bytes = len;

	return 0;
}

static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);

	if (skb_csum_is_sctp(skb))
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
				 struct ionic_desc_info *desc_info)
{
	struct ionic_txq_desc *desc = desc_info->txq_desc;
	struct ionic_buf_info *buf_info = desc_info->bufs;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags,
				  buf_info->dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(buf_info->len);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	} else {
		desc->vlan_tci = 0;
	}
	desc->csum_start = 0;
	desc->csum_offset = 0;

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
			      struct ionic_desc_info *desc_info)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
	struct ionic_buf_info *buf_info = &desc_info->bufs[1];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
		elem->addr = cpu_to_le64(buf_info->dma_addr);
		elem->len = cpu_to_le16(buf_info->len);
	}

	stats->frags += skb_shinfo(skb)->nr_frags;

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_desc_info *desc_info = &q->info[q->head_idx];
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
		return -EIO;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb, desc_info);
	else
		err = ionic_tx_calc_no_csum(q, skb, desc_info);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb, desc_info);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

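/* Estimate how many descriptors this skb needs: one per GSO segment
 * for TSO, otherwise one; if there are more frags than SG elements per
 * descriptor, linearize the skb first.
 */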
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

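/* Stop the subqueue when there is not enough ring space for ndescs,
 * re-checking once after stopping to close the race with
 * ionic_tx_clean() freeing descriptors; returns nonzero if the queue
 * is (still) stopped.
 */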
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q = &lif->hwstamp_txq->q;
	int err, ndescs;

	/* Does not stop/start txq, because we post to a separate tx queue
	 * for timestamping, and if a packet can't be posted immediately to
	 * the timestamping queue, it is dropped.
	 */

	ndescs = ionic_tx_descs_needed(q, skb);
	if (unlikely(ndescs < 0))
		goto err_out_drop;

	if (unlikely(!ionic_q_has_space(q, ndescs)))
		goto err_out_drop;

	skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
			return ionic_start_hwstamp_xmit(skb, netdev);

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}