References to the struct xdp_dev_bulk_queue pointer bq in the kernel's XDP devmap bulk-transmit path (kernel/bpf/devmap.c), grouped by function:
In bq_xmit_all() (bq is the function argument): drains one device's bulk queue by running any attached devmap program over the queued frames, handing the rest to the driver's ndo_xdp_xmit(), returning frames the driver did not accept, and resetting the count:

  364  static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
  366          struct net_device *dev = bq->dev;
  367          unsigned int cnt = bq->count;
  376          struct xdp_frame *xdpf = bq->q[i];
  381          if (bq->xdp_prog) {
  382                  to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
  387          sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
  400          xdp_return_frame_rx_napi(bq->q[i]);
  403          bq->count = 0;
  404          trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
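
The drain step above has one subtlety worth spelling out: ndo_xdp_xmit() reports how many frames the driver accepted, a negative value means nothing was sent, and every frame the driver did not take must be freed by the caller before the count is reset. A minimal user-space sketch of that accounting follows; send_batch() and drop_frame() are made-up stand-ins for ndo_xdp_xmit() and xdp_return_frame_rx_napi(), and the "accepts at most two frames" behaviour is purely illustrative.

#include <stdio.h>

/* Stand-in for ndo_xdp_xmit(): returns how many frames were accepted,
 * or a negative errno if nothing was transmitted. Illustrative only. */
static int send_batch(void **frames, int n)
{
    (void)frames;
    return n > 2 ? 2 : n;   /* pretend the driver only has room for two */
}

/* Stand-in for xdp_return_frame_rx_napi(): free a frame we still own. */
static void drop_frame(void *f)
{
    printf("dropping frame %p\n", f);
}

int main(void)
{
    char f0, f1, f2, f3;
    void *q[4] = { &f0, &f1, &f2, &f3 };
    int to_send = 4, sent, err = 0, i;

    sent = send_batch(q, to_send);
    if (sent < 0) {                      /* error: the driver took none of them */
        err = sent;
        sent = 0;
    }
    for (i = sent; i < to_send; i++)     /* caller frees whatever was not accepted */
        drop_frame(q[i]);

    printf("sent=%d dropped=%d err=%d\n", sent, to_send - sent, err);
    return 0;
}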
In __dev_flush() (bq is a local): walks the flush list, drains each registered queue, clears its per-burst state, and unlinks it:

  414          struct xdp_dev_bulk_queue *bq, *tmp;
  416          list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
  417                  bq_xmit_all(bq, XDP_XMIT_FLUSH);
  418                  bq->dev_rx = NULL;
  419                  bq->xdp_prog = NULL;
  420                  __list_del_clearprev(&bq->flush_node);
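
A rough user-space model of the flush loop, with a hand-rolled singly linked list standing in for the kernel's list_head machinery; the point it tries to capture is why the _safe iterator is needed, since each queue is unlinked from the flush list as it is drained.

#include <stdio.h>

struct bulkq {
    int count;
    struct bulkq *next;   /* stand-in for the flush_node linkage */
};

/* Stand-in for bq_xmit_all(): pretend to transmit and reset the queue. */
static void xmit_all(struct bulkq *bq)
{
    printf("flushing %d frames\n", bq->count);
    bq->count = 0;
}

/* Walk-and-unlink in the spirit of list_for_each_entry_safe(): the next
 * pointer is saved before the current entry is drained and unlinked. */
static void flush_all(struct bulkq **head)
{
    struct bulkq *bq = *head, *tmp;

    while (bq) {
        tmp = bq->next;
        xmit_all(bq);
        bq->next = NULL;  /* unlink, like __list_del_clearprev() */
        bq = tmp;
    }
    *head = NULL;
}

int main(void)
{
    struct bulkq a = { .count = 3 }, b = { .count = 7 };
    struct bulkq *head = &a;

    a.next = &b;
    flush_all(&head);
    return 0;
}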
In bq_enqueue() (bq is a local, the per-CPU bulk queue of the target device): drains the queue if it is already full, registers it on the flush list when the first frame of a burst arrives, and appends the frame:

  449          struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
  451          if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
  452                  bq_xmit_all(bq, 0);
  461          if (!bq->dev_rx) {
  462                  bq->dev_rx = dev_rx;
  463                  bq->xdp_prog = xdp_prog;
  464                  list_add(&bq->flush_node, flush_list);
  467          bq->q[bq->count++] = xdpf;
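
Putting the three pieces together, bq_enqueue() is the producer side of the pattern: drain when the queue is already full, register the queue for a later __dev_flush() the first time a frame of the burst arrives (in the kernel this is the lazy list_add() guarded by bq->dev_rx, typically flushed at the end of the driver's NAPI poll), then append the frame. The sketch below models that flow in user space; BULK_SIZE and the registered flag are illustrative stand-ins, not the kernel's names.

#include <stdio.h>

#define BULK_SIZE 16          /* illustrative stand-in for DEV_MAP_BULK_SIZE */

struct bulkq {
    void *q[BULK_SIZE];
    int count;
    int registered;           /* models "already on the flush list" (bq->dev_rx set) */
};

/* Stand-in for bq_xmit_all(): pretend to transmit and reset the queue. */
static void xmit_all(struct bulkq *bq)
{
    printf("sending %d queued frames\n", bq->count);
    bq->count = 0;
}

static void enqueue(struct bulkq *bq, void *frame)
{
    if (bq->count == BULK_SIZE)   /* queue full: drain before appending */
        xmit_all(bq);

    if (!bq->registered)          /* first frame of the burst: register for the final flush */
        bq->registered = 1;

    bq->q[bq->count++] = frame;
}

int main(void)
{
    static char frames[40];
    struct bulkq bq = { .count = 0 };
    int i;

    for (i = 0; i < 40; i++)
        enqueue(&bq, &frames[i]);
    xmit_all(&bq);                /* final flush of the partial batch */
    return 0;
}

Batching like this amortizes the per-frame cost of calling into the driver's transmit routine, while the bounded queue size caps how many frames can sit buffered between flushes.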