Lines matching refs: queue (drivers/net/xen-netback/rx.c)

42 static void xenvif_update_needed_slots(struct xenvif_queue *queue,
55 	WRITE_ONCE(queue->rx_slots_needed, needed);
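
The rx_slots_needed field is updated while the rx_queue lock is held but is read locklessly elsewhere, so the driver pairs WRITE_ONCE() with READ_ONCE() to keep the unsynchronized accesses from being torn or cached. A minimal userspace model of that pairing, with C11 relaxed atomics standing in for the kernel macros; the "frags + 1" slot estimate is illustrative only, not the driver's real calculation:

/* Userspace model of the WRITE_ONCE()/READ_ONCE() pairing around
 * rx_slots_needed. */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int rx_slots_needed;

static void update_needed_slots(unsigned int frags)
{
	/* writer side: called with the queue lock held in the driver */
	atomic_store_explicit(&rx_slots_needed, frags + 1,
			      memory_order_relaxed);
}

static unsigned int needed_snapshot(void)
{
	/* lockless reader side */
	return atomic_load_explicit(&rx_slots_needed,
				    memory_order_relaxed);
}

int main(void)
{
	update_needed_slots(3);
	printf("slots needed: %u\n", needed_snapshot());
	return 0;
}
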
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
63 	needed = READ_ONCE(queue->rx_slots_needed);
68 		prod = queue->rx.sring->req_prod;
69 		cons = queue->rx.req_cons;
74 		queue->rx.sring->req_event = prod + 1;
80 	} while (queue->rx.sring->req_prod != prod);
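
The do/while above closes a race against the frontend: when too few requests are queued, the backend publishes req_event = prod + 1 to ask for an event past that point, then re-reads req_prod; if the frontend produced more requests in between, the loop counts again rather than going to sleep on a stale snapshot. A compilable sketch of that shape (the memory barriers the real ring code issues around the req_event write are reduced to a comment):

/* Sketch of the lost-event avoidance in xenvif_rx_ring_slots_available().
 * RING_IDX arithmetic is free-running unsigned. */
typedef unsigned int RING_IDX;

struct sring_model {
	RING_IDX req_prod;   /* advanced by the frontend */
	RING_IDX req_event;  /* threshold at which the frontend notifies */
};

static int rx_ring_slots_available(volatile struct sring_model *s,
				   RING_IDX req_cons, unsigned int needed)
{
	RING_IDX prod;

	do {
		prod = s->req_prod;
		if (prod - req_cons >= needed)
			return 1;                /* enough requests queued */
		s->req_event = prod + 1;         /* ask to be woken later */
		/* the kernel places a full barrier here before re-reading */
	} while (s->req_prod != prod);           /* frontend raced us: retry */

	return 0;
}
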
85 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
89 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
91 	if (queue->rx_queue_len >= queue->rx_queue_max) {
92 	struct net_device *dev = queue->vif->dev;
94 	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
96 	queue->vif->dev->stats.rx_dropped++;
98 	if (skb_queue_empty(&queue->rx_queue))
99 	xenvif_update_needed_slots(queue, skb);
101 	__skb_queue_tail(&queue->rx_queue, skb);
103 	queue->rx_queue_len += skb->len;
106 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

109 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
113 	spin_lock_irq(&queue->rx_queue.lock);
115 	skb = __skb_dequeue(&queue->rx_queue);
117 	xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
119 	queue->rx_queue_len -= skb->len;
120 	if (queue->rx_queue_len < queue->rx_queue_max) {
123 	txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
128 	spin_unlock_irq(&queue->rx_queue.lock);
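
Enqueue and dequeue cooperate on flow control: xenvif_rx_queue_tail() stops the per-queue TX path and counts the packet as rx_dropped once the byte budget rx_queue_max is exceeded, and xenvif_rx_dequeue() wakes the queue again as soon as the total falls back under the budget. A small userspace model of that byte-counted backpressure; the stop/wake hooks and constants are stand-ins for the netif_tx_stop_queue()/netif_tx_wake_queue() calls in the driver:

/* Byte-budget backpressure model: stop intake when over budget,
 * resume as soon as we fall back below it. */
#include <stdbool.h>
#include <stdio.h>

#define RX_QUEUE_MAX 4096		/* stand-in for queue->rx_queue_max */

static unsigned int rx_queue_len;	/* bytes currently queued */
static bool tx_stopped;

static bool rx_enqueue(unsigned int len)
{
	if (rx_queue_len >= RX_QUEUE_MAX) {
		tx_stopped = true;	/* netif_tx_stop_queue() here */
		return false;		/* packet is dropped */
	}
	rx_queue_len += len;
	return true;
}

static void rx_dequeue(unsigned int len)
{
	rx_queue_len -= len;
	if (rx_queue_len < RX_QUEUE_MAX && tx_stopped)
		tx_stopped = false;	/* netif_tx_wake_queue() here */
}

int main(void)
{
	rx_enqueue(3000);
	rx_enqueue(2000);				/* now over budget */
	printf("enqueue while full: %d\n", rx_enqueue(100)); /* 0: dropped */
	rx_dequeue(3000);
	printf("stopped: %d\n", tx_stopped);		/* 0: woken again */
	return 0;
}
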
133 static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
137 	while ((skb = xenvif_rx_dequeue(queue)) != NULL)

141 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
146 	skb = skb_peek(&queue->rx_queue);
151 	xenvif_rx_dequeue(queue);
153 	queue->vif->dev->stats.rx_dropped++;
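
Each queued skb is stamped with an expiry when it is enqueued (the driver keeps the deadline in the skb control buffer), and xenvif_rx_queue_drop_expired() pops and drops from the head while that deadline has passed, so a frontend that stops consuming cannot pin backend memory forever. A minimal model of the head-of-queue expiry scan; the deadline field and list layout here are stand-ins, not the driver's structures:

/* Head-of-queue expiry: entries are queued in arrival order, so once
 * the head is still fresh, everything behind it is fresh too. */
#include <time.h>

struct pkt {
	struct pkt *next;
	time_t expires;			/* stand-in for the skb->cb deadline */
};

static struct pkt *drop_expired(struct pkt *head, unsigned int *dropped)
{
	time_t now = time(NULL);

	while (head && head->expires <= now) {	/* deadline passed: drop */
		struct pkt *gone = head;

		head = head->next;
		(*dropped)++;			/* stats.rx_dropped++ */
		(void)gone;			/* kfree_skb() in the driver */
	}
	return head;				/* first still-fresh packet */
}
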
157 static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
162 	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
164 	for (i = 0; i < queue->rx_copy.num; i++) {
167 	op = &queue->rx_copy.op[i];
175 	rsp = RING_GET_RESPONSE(&queue->rx,
176 	queue->rx_copy.idx[i]);
181 	queue->rx_copy.num = 0;
184 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
186 	notify_remote_via_irq(queue->rx_irq);
188 	__skb_queue_purge(queue->rx_copy.completed);
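
After gnttab_batch_copy() completes, the responses are filled in and pushed; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() then sets notify only if the frontend's rsp_event threshold falls inside the batch of responses just published, which is what keeps interrupts rare on a busy ring. A model of that decision following the macro in include/xen/interface/io/ring.h (the barriers around the publish are omitted):

/* notify is needed iff rsp_event lies in (old, new]: with free-running
 * unsigned indices that is exactly new - rsp_event < new - old. */
typedef unsigned int RING_IDX;

static int push_responses_and_check_notify(RING_IDX *shared_rsp_prod,
					   RING_IDX rsp_prod_pvt,
					   RING_IDX rsp_event)
{
	RING_IDX old = *shared_rsp_prod;

	*shared_rsp_prod = rsp_prod_pvt;	/* publish the batch */
	return (RING_IDX)(rsp_prod_pvt - rsp_event) <
	       (RING_IDX)(rsp_prod_pvt - old);
}
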
191 static void xenvif_rx_copy_add(struct xenvif_queue *queue,
199 	if (queue->rx_copy.num == COPY_BATCH_SIZE)
200 	xenvif_rx_copy_flush(queue);
202 	op = &queue->rx_copy.op[queue->rx_copy.num];
220 	op->dest.domid = queue->vif->domid;
224 	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
225 	queue->rx_copy.num++;
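
The add side is a plain fixed-capacity batch: when the op array is full it is flushed synchronously, then the new grant-copy op is appended and the current ring request index is recorded next to it so the flush can locate the matching response slot. A generic, runnable sketch of the pattern with the op type reduced to an int:

/* Flush-when-full batching as used by xenvif_rx_copy_add(). */
#include <stdio.h>

#define COPY_BATCH_SIZE 64	/* same name as the driver's constant */

struct copy_batch {
	unsigned int num;
	int op[COPY_BATCH_SIZE];		/* stand-in for gnttab ops */
	unsigned int idx[COPY_BATCH_SIZE];	/* matching ring indices */
};

static void copy_flush(struct copy_batch *b)
{
	/* gnttab_batch_copy() and response fill-in would happen here */
	printf("flushing %u ops\n", b->num);
	b->num = 0;
}

static void copy_add(struct copy_batch *b, int op, unsigned int req_cons)
{
	if (b->num == COPY_BATCH_SIZE)		/* no room: flush first */
		copy_flush(b);
	b->op[b->num] = op;
	b->idx[b->num] = req_cons;		/* remember the request slot */
	b->num++;
}

int main(void)
{
	struct copy_batch b = { 0 };

	for (unsigned int i = 0; i < 130; i++)
		copy_add(&b, 0, i);
	copy_flush(&b);				/* drain the tail */
	return 0;
}
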
250 static void xenvif_rx_next_skb(struct xenvif_queue *queue,
256 	skb = xenvif_rx_dequeue(queue);
258 	queue->stats.tx_bytes += skb->len;
259 	queue->stats.tx_packets++;
270 	if ((1 << gso_type) & queue->vif->gso_mask) {
285 	if (queue->vif->xdp_headroom) {
291 	extra->u.xdp.headroom = queue->vif->xdp_headroom;

326 static void xenvif_rx_complete(struct xenvif_queue *queue,
330 	queue->rx.rsp_prod_pvt = queue->rx.req_cons;
332 	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);

353 static void xenvif_rx_next_chunk(struct xenvif_queue *queue,

391 static void xenvif_rx_data_slot(struct xenvif_queue *queue,
396 	unsigned int offset = queue->vif->xdp_headroom;
403 	xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
404 	xenvif_rx_copy_add(queue, req, offset, data, len);

435 static void xenvif_rx_extra_slot(struct xenvif_queue *queue,

459 static void xenvif_rx_skb(struct xenvif_queue *queue)
463 	xenvif_rx_next_skb(queue, &pkt);
465 	queue->last_rx_time = jiffies;
471 	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
472 	rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
476 	xenvif_rx_extra_slot(queue, &pkt, req, rsp);
478 	xenvif_rx_data_slot(queue, &pkt, req, rsp);
480 	queue->rx.req_cons++;
484 	xenvif_rx_complete(queue, &pkt);

489 void xenvif_rx_action(struct xenvif_queue *queue)
495 	queue->rx_copy.completed = &completed_skbs;
497 	while (xenvif_rx_ring_slots_available(queue) &&
499 	xenvif_rx_skb(queue);
504 	xenvif_rx_copy_flush(queue);

507 static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
511 	prod = queue->rx.sring->req_prod;
512 	cons = queue->rx.req_cons;
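
xenvif_rx_queue_slots() returns prod - cons on free-running 32-bit ring indices; because the subtraction is unsigned, the count stays correct even after the counters wrap past zero. A short runnable demonstration:

/* Unsigned wraparound keeps prod - cons correct across overflow. */
#include <stdio.h>

typedef unsigned int RING_IDX;

int main(void)
{
	RING_IDX cons = 0xfffffff0u;
	RING_IDX prod = cons + 24;			/* wraps past zero */

	printf("queued slots: %u\n", prod - cons);	/* prints 24 */
	return 0;
}
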
517 static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
519 	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
521 	return !queue->stalled &&
522 	xenvif_rx_queue_slots(queue) < needed &&
524 	queue->last_rx_time + queue->vif->stall_timeout);

527 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
529 	unsigned int needed = READ_ONCE(queue->rx_slots_needed);
531 	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
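
Together with the stalled flag these two predicates form a hysteresis: a queue is declared stalled only when it is not already stalled, has fewer ring slots than the head packet needs, and the shortage has lasted longer than stall_timeout since last_rx_time; it becomes ready again only from the stalled state, as soon as enough slots are back. A compact model of that state machine, with the jiffies-based timing replaced by a plain counter:

/* Two-state stall detector: a time threshold on the way down,
 * an immediate transition on the way up. */
#include <stdbool.h>

struct stall_model {
	bool stalled;
	unsigned long last_rx_time;	/* stand-in for the jiffies stamp */
};

static bool queue_stalled(const struct stall_model *q, unsigned long now,
			  unsigned long stall_timeout,
			  unsigned int slots, unsigned int needed)
{
	return !q->stalled && slots < needed &&
	       now - q->last_rx_time > stall_timeout;
}

static bool queue_ready(const struct stall_model *q,
			unsigned int slots, unsigned int needed)
{
	return q->stalled && slots >= needed;
}
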
534 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
536 	return xenvif_rx_ring_slots_available(queue) ||
537 	(queue->vif->stall_timeout &&
538 	(xenvif_rx_queue_stalled(queue) ||
539 	xenvif_rx_queue_ready(queue))) ||
541 	queue->vif->disabled;

544 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
549 	skb = skb_peek(&queue->rx_queue);

567 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
571 	if (xenvif_have_rx_work(queue, true))
577 	prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
578 	if (xenvif_have_rx_work(queue, true))
581 	&queue->eoi_pending) &
583 	xen_irq_lateeoi(queue->rx_irq, 0);
585 	ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
589 	finish_wait(&queue->wq, &wait);
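
xenvif_wait_for_rx_work() is the classic open-coded wait: the condition is re-tested after prepare_to_wait(), so a wakeup arriving between the test and the sleep cannot be lost, and the sleep itself is bounded by the head packet's expiry via xenvif_rx_queue_timeout(). A condensed sketch of that shape; the early-return fast path and the lateeoi handling at lines 581 to 583 are left out:

	DEFINE_WAIT(wait);

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			break;			/* re-check after queuing */
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;			/* timed out: caller acts */
	}
	finish_wait(&queue->wq, &wait);
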
592 static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
594 	struct xenvif *vif = queue->vif;
596 	queue->stalled = true;

607 static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
609 	struct xenvif *vif = queue->vif;
611 	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
612 	queue->stalled = false;

In xenvif_kthread_guest_rx():
625 	struct xenvif_queue *queue = data;
626 	struct xenvif *vif = queue->vif;
629 	xenvif_queue_carrier_on(queue);
632 	xenvif_wait_for_rx_work(queue);
644 	if (unlikely(vif->disabled && queue->id == 0)) {
649 	if (!skb_queue_empty(&queue->rx_queue))
650 	xenvif_rx_action(queue);
657 	if (xenvif_rx_queue_stalled(queue))
658 	xenvif_queue_carrier_off(queue);
659 	else if (xenvif_rx_queue_ready(queue))
660 	xenvif_queue_carrier_on(queue);
668 	xenvif_rx_queue_drop_expired(queue);
674 	xenvif_rx_queue_purge(queue);
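
Putting it together, the per-queue kthread sleeps until xenvif_have_rx_work() reports something to do, runs one pass of xenvif_rx_action(), re-evaluates the stall detector, drops expired packets, and purges the queue on exit. A condensed sketch of that loop, not the verbatim function; the queue-0 handling of a fatally disabled vif at line 644 is reduced to a comment:

static int guest_rx_thread(void *data)	/* shape of xenvif_kthread_guest_rx */
{
	struct xenvif_queue *queue = data;

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;
		/* a fatally disabled vif is handled here on queue 0 */

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		if (xenvif_rx_queue_stalled(queue))
			xenvif_queue_carrier_off(queue);
		else if (xenvif_rx_queue_ready(queue))
			xenvif_queue_carrier_on(queue);

		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* bin any remaining skbs before the thread exits */
	xenvif_rx_queue_purge(queue);

	return 0;
}
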