Lines matching refs:vq in drivers/virtio/virtio_ring.c
21 dev_err(&(_vq)->vq.vdev->dev, \
22 "%s:"fmt, (_vq)->vq.name, ##args); \
30 (_vq)->vq.name, (_vq)->in_use); \
58 dev_err(&_vq->vq.vdev->dev, \
59 "%s:"fmt, (_vq)->vq.name, ##args); \
62 #define START_USE(vq) argument
63 #define END_USE(vq) argument
64 #define LAST_ADD_TIME_UPDATE(vq) argument
65 #define LAST_ADD_TIME_CHECK(vq) argument
66 #define LAST_ADD_TIME_INVALID(vq) argument
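The stubs above are the !DEBUG fallbacks. The DEBUG variants implement a reentrancy check (the `(_vq)->vq.name, (_vq)->in_use` fragment above belongs to this panic call); a sketch from memory, not a verbatim copy:

        #define START_USE(_vq)                                          \
                do {                                                    \
                        if ((_vq)->in_use)                              \
                                panic("%s:in_use = %i\n",               \
                                      (_vq)->vq.name, (_vq)->in_use);   \
                        (_vq)->in_use = __LINE__;                       \
                } while (0)

        #define END_USE(_vq)                                            \
                do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)

START_USE() records the call site in in_use so a second entry into the same virtqueue panics with the offending line; END_USE() asserts the flag was set and clears it.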
89 struct virtqueue vq; member
186 bool (*notify)(struct virtqueue *vq);
206 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
211 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_use_indirect() local
217 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
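Every exported entry point recovers the private vring_virtqueue from the embedded public handle via the to_vvq() container_of() above, and the add paths then apply the indirect-descriptor gate. A minimal sketch restating both, with field names taken from the listing:

        static inline struct vring_virtqueue *sketch_to_vvq(struct virtqueue *_vq)
        {
                /* struct vring_virtqueue embeds struct virtqueue as .vq */
                return container_of(_vq, struct vring_virtqueue, vq);
        }

        static inline bool sketch_use_indirect(struct vring_virtqueue *vq,
                                               unsigned int total_sg)
        {
                /* Indirect tables only pay off for multi-element requests,
                 * and need the feature negotiated plus a free main-ring slot. */
                return vq->indirect && total_sg > 1 && vq->vq.num_free;
        }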
324 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq) in vring_dma_dev() argument
326 return vq->vq.vdev->dev.parent; in vring_dma_dev()
330 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq, in vring_map_one_sg() argument
334 if (!vq->use_dma_api) in vring_map_one_sg()
342 return dma_map_page(vring_dma_dev(vq), in vring_map_one_sg()
347 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, in vring_map_single() argument
351 if (!vq->use_dma_api) in vring_map_single()
354 return dma_map_single(vring_dma_dev(vq), in vring_map_single()
358 static int vring_mapping_error(const struct vring_virtqueue *vq, in vring_mapping_error() argument
361 if (!vq->use_dma_api) in vring_mapping_error()
364 return dma_mapping_error(vring_dma_dev(vq), addr); in vring_mapping_error()
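The three helpers above share one gate: when use_dma_api is false the device can address guest memory directly, so mapping degenerates to a physical address and error checking to a no-op. A sketch of the pattern (sketch_map_single is illustrative; the listed helpers take sg and page/offset variants and an explicit direction):

        static dma_addr_t sketch_map_single(const struct vring_virtqueue *vq,
                                            void *cpu_addr, size_t size)
        {
                if (!vq->use_dma_api)
                        return (dma_addr_t)virt_to_phys(cpu_addr);

                /* Otherwise map through the transport's parent device,
                 * which is what vring_dma_dev() returns. */
                return dma_map_single(vring_dma_dev(vq), cpu_addr, size,
                                      DMA_BIDIRECTIONAL);
        }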
372 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq, in vring_unmap_one_split_indirect() argument
377 if (!vq->use_dma_api) in vring_unmap_one_split_indirect()
380 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); in vring_unmap_one_split_indirect()
383 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split_indirect()
384 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split_indirect()
385 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split_indirect()
389 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split_indirect()
390 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split_indirect()
391 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split_indirect()
397 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, in vring_unmap_one_split() argument
400 struct vring_desc_extra *extra = vq->split.desc_extra; in vring_unmap_one_split()
403 if (!vq->use_dma_api) in vring_unmap_one_split()
409 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split()
415 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split()
449 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, in virtqueue_add_desc_split() argument
457 struct vring_virtqueue *vring = to_vvq(vq); in virtqueue_add_desc_split()
461 desc[i].flags = cpu_to_virtio16(vq->vdev, flags); in virtqueue_add_desc_split()
462 desc[i].addr = cpu_to_virtio64(vq->vdev, addr); in virtqueue_add_desc_split()
463 desc[i].len = cpu_to_virtio32(vq->vdev, len); in virtqueue_add_desc_split()
467 desc[i].next = cpu_to_virtio16(vq->vdev, next); in virtqueue_add_desc_split()
473 next = virtio16_to_cpu(vq->vdev, desc[i].next); in virtqueue_add_desc_split()
487 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split() local
494 START_USE(vq); in virtqueue_add_split()
497 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
499 if (unlikely(vq->broken)) { in virtqueue_add_split()
500 END_USE(vq); in virtqueue_add_split()
504 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_split()
508 head = vq->free_head; in virtqueue_add_split()
514 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
525 desc = vq->split.vring.desc; in virtqueue_add_split()
530 if (vq->vq.num_free < descs_used) { in virtqueue_add_split()
532 descs_used, vq->vq.num_free); in virtqueue_add_split()
537 vq->notify(&vq->vq); in virtqueue_add_split()
540 END_USE(vq); in virtqueue_add_split()
546 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); in virtqueue_add_split()
547 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
561 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); in virtqueue_add_split()
562 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
578 if (!indirect && vq->use_dma_api) in virtqueue_add_split()
579 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
585 vq, desc, total_sg * sizeof(struct vring_desc), in virtqueue_add_split()
587 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
590 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
598 vq->vq.num_free -= descs_used; in virtqueue_add_split()
602 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
604 vq->free_head = i; in virtqueue_add_split()
607 vq->split.desc_state[head].data = data; in virtqueue_add_split()
609 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
611 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
615 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
616 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
620 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
621 vq->split.avail_idx_shadow++; in virtqueue_add_split()
622 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
623 vq->split.avail_idx_shadow); in virtqueue_add_split()
624 vq->num_added++; in virtqueue_add_split()
626 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_split()
627 END_USE(vq); in virtqueue_add_split()
631 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
648 vring_unmap_one_split_indirect(vq, &desc[i]); in virtqueue_add_split()
651 i = vring_unmap_one_split(vq, i); in virtqueue_add_split()
657 END_USE(vq); in virtqueue_add_split()
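The tail of virtqueue_add_split() publishes the new chain in two ordered steps. A compressed sketch of that ordering, assuming the field names from the listing:

        static void sketch_publish_split(struct vring_virtqueue *vq,
                                         struct virtio_device *vdev, u16 head)
        {
                u16 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);

                /* Step 1: slot the chain head into the avail ring. */
                vq->split.vring.avail->ring[avail] = cpu_to_virtio16(vdev, head);

                /* Step 2: barrier, then bump idx; the device must never
                 * observe the new idx before the ring entry and the
                 * descriptors it points at. */
                virtio_wmb(vq->weak_barriers);
                vq->split.avail_idx_shadow++;
                vq->split.vring.avail->idx =
                        cpu_to_virtio16(vdev, vq->split.avail_idx_shadow);
        }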
663 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split() local
667 START_USE(vq); in virtqueue_kick_prepare_split()
670 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
672 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
673 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
674 vq->num_added = 0; in virtqueue_kick_prepare_split()
676 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_split()
677 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_split()
679 if (vq->event) { in virtqueue_kick_prepare_split()
681 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
684 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
688 END_USE(vq); in virtqueue_kick_prepare_split()
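When VIRTIO_RING_F_EVENT_IDX is negotiated (vq->event), the kick decision above compares the device's advertised avail event index against the window of entries published since the last kick. The comparison is the standard vring_need_event() helper from the virtio UAPI header, reproduced for reference:

        static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
                                           __u16 old)
        {
                /* Kick iff event_idx lies in [old, new_idx), modulo 2^16. */
                return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
        }

Without EVENT_IDX the fallback is simply to kick unless the device has set VRING_USED_F_NO_NOTIFY in used->flags, which is the check visible above.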
692 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, in detach_buf_split() argument
696 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
699 vq->split.desc_state[head].data = NULL; in detach_buf_split()
704 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
705 vring_unmap_one_split(vq, i); in detach_buf_split()
706 i = vq->split.desc_extra[i].next; in detach_buf_split()
707 vq->vq.num_free++; in detach_buf_split()
710 vring_unmap_one_split(vq, i); in detach_buf_split()
711 vq->split.desc_extra[i].next = vq->free_head; in detach_buf_split()
712 vq->free_head = head; in detach_buf_split()
715 vq->vq.num_free++; in detach_buf_split()
717 if (vq->indirect) { in detach_buf_split()
719 vq->split.desc_state[head].indir_desc; in detach_buf_split()
726 len = vq->split.desc_extra[head].len; in detach_buf_split()
728 BUG_ON(!(vq->split.desc_extra[head].flags & in detach_buf_split()
733 vring_unmap_one_split_indirect(vq, &indir_desc[j]); in detach_buf_split()
736 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
738 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
742 static inline bool more_used_split(const struct vring_virtqueue *vq) in more_used_split() argument
744 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
745 vq->split.vring.used->idx); in more_used_split()
752 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split() local
757 START_USE(vq); in virtqueue_get_buf_ctx_split()
759 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
760 END_USE(vq); in virtqueue_get_buf_ctx_split()
764 if (!more_used_split(vq)) { in virtqueue_get_buf_ctx_split()
766 END_USE(vq); in virtqueue_get_buf_ctx_split()
771 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
773 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
775 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
777 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
779 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
780 BAD_RING(vq, "id %u out of range\n", i); in virtqueue_get_buf_ctx_split()
783 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
784 BAD_RING(vq, "id %u is not a head!\n", i); in virtqueue_get_buf_ctx_split()
789 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
790 detach_buf_split(vq, i, ctx); in virtqueue_get_buf_ctx_split()
791 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
795 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
796 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
797 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
798 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
800 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_split()
802 END_USE(vq); in virtqueue_get_buf_ctx_split()
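A typical driver consumes completions by looping on the getter above until it returns NULL. A minimal sketch (sketch_complete() is a hypothetical driver helper):

        static void sketch_drain(struct virtqueue *vq)
        {
                unsigned int len;
                void *buf;

                /* buf is the data cookie passed to virtqueue_add_*();
                 * len is how many bytes the device wrote into the buffer. */
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        sketch_complete(buf, len);
        }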
808 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split() local
810 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
811 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
812 if (vq->event) in virtqueue_disable_cb_split()
814 vring_used_event(&vq->split.vring) = 0x0; in virtqueue_disable_cb_split()
816 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
818 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
824 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split() local
827 START_USE(vq); in virtqueue_enable_cb_prepare_split()
834 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
835 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
836 if (!vq->event) in virtqueue_enable_cb_prepare_split()
837 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
839 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
841 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
842 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
843 END_USE(vq); in virtqueue_enable_cb_prepare_split()
849 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split() local
852 vq->split.vring.used->idx); in virtqueue_poll_split()
857 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split() local
860 START_USE(vq); in virtqueue_enable_cb_delayed_split()
867 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
868 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
869 if (!vq->event) in virtqueue_enable_cb_delayed_split()
870 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
872 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
875 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
877 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
878 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
879 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
881 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
882 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
883 END_USE(vq); in virtqueue_enable_cb_delayed_split()
887 END_USE(vq); in virtqueue_enable_cb_delayed_split()
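The prepare/poll pair and the delayed variant above exist to close the classic race between deciding "no more work" and turning interrupts back on. The canonical consumer idiom, sketched with sketch_drain() from the previous sketch:

        static void sketch_poll_until_empty(struct virtqueue *vq)
        {
                virtqueue_disable_cb(vq);
                do {
                        sketch_drain(vq);
                } while (!virtqueue_enable_cb(vq));     /* false: buffers
                                                         * arrived while
                                                         * re-enabling */
        }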
893 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split() local
897 START_USE(vq); in virtqueue_detach_unused_buf_split()
899 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
900 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
903 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
904 detach_buf_split(vq, i, NULL); in virtqueue_detach_unused_buf_split()
905 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
906 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
907 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
908 END_USE(vq); in virtqueue_detach_unused_buf_split()
912 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
914 END_USE(vq); in virtqueue_detach_unused_buf_split()
930 struct virtqueue *vq; in vring_create_virtqueue_split() local
967 vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, in vring_create_virtqueue_split()
969 if (!vq) { in vring_create_virtqueue_split()
975 to_vvq(vq)->split.queue_dma_addr = dma_addr; in vring_create_virtqueue_split()
976 to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes; in vring_create_virtqueue_split()
977 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
979 return vq; in vring_create_virtqueue_split()
987 static void vring_unmap_state_packed(const struct vring_virtqueue *vq, in vring_unmap_state_packed() argument
992 if (!vq->use_dma_api) in vring_unmap_state_packed()
998 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_state_packed()
1003 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_state_packed()
1010 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, in vring_unmap_desc_packed() argument
1015 if (!vq->use_dma_api) in vring_unmap_desc_packed()
1021 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_desc_packed()
1027 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_desc_packed()
1052 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, in virtqueue_add_indirect_packed() argument
1066 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
1071 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1074 END_USE(vq); in virtqueue_add_indirect_packed()
1079 id = vq->free_head; in virtqueue_add_indirect_packed()
1080 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1084 addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_indirect_packed()
1086 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1098 addr = vring_map_single(vq, desc, in virtqueue_add_indirect_packed()
1101 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1104 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1105 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1107 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1109 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1110 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1111 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1113 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1114 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1122 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1123 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1124 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1127 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1131 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1133 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1134 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1138 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1139 vq->free_head = vq->packed.desc_extra[id].next; in virtqueue_add_indirect_packed()
1142 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1143 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1144 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1145 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1147 vq->num_added += 1; in virtqueue_add_indirect_packed()
1149 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_indirect_packed()
1150 END_USE(vq); in virtqueue_add_indirect_packed()
1158 vring_unmap_desc_packed(vq, &desc[i]); in virtqueue_add_indirect_packed()
1162 END_USE(vq); in virtqueue_add_indirect_packed()
1175 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed() local
1183 START_USE(vq); in virtqueue_add_packed()
1186 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1188 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1189 END_USE(vq); in virtqueue_add_packed()
1193 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_packed()
1198 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in virtqueue_add_packed()
1206 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1207 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1209 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1211 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1215 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1217 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1218 END_USE(vq); in virtqueue_add_packed()
1222 id = vq->free_head; in virtqueue_add_packed()
1223 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1229 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_packed()
1231 if (vring_mapping_error(vq, addr)) in virtqueue_add_packed()
1234 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1246 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1247 vq->packed.desc_extra[curr].addr = addr; in virtqueue_add_packed()
1248 vq->packed.desc_extra[curr].len = sg->length; in virtqueue_add_packed()
1249 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1253 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1255 if ((unlikely(++i >= vq->packed.vring.num))) { in virtqueue_add_packed()
1257 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1265 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1268 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1271 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1272 vq->free_head = curr; in virtqueue_add_packed()
1275 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1276 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1277 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1278 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1285 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1286 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1287 vq->num_added += descs_used; in virtqueue_add_packed()
1289 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_packed()
1290 END_USE(vq); in virtqueue_add_packed()
1297 curr = vq->free_head; in virtqueue_add_packed()
1299 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1304 vring_unmap_state_packed(vq, in virtqueue_add_packed()
1305 &vq->packed.desc_extra[curr]); in virtqueue_add_packed()
1306 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1308 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1312 END_USE(vq); in virtqueue_add_packed()
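The packed add path mirrors the split publish but inverts the mechanism: every descriptor except the head gets its flags up front, while the head's flags, carrying the avail/used wrap bits, are stored only after a write barrier, making the whole chain visible to the device at once. A compressed sketch of the fragment above:

        /* head_flags was computed earlier with the wrap-state bits;
         * writing it last is the packed-ring equivalent of bumping
         * avail->idx on the split ring. */
        virtio_wmb(vq->weak_barriers);
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;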
1318 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed() local
1329 START_USE(vq); in virtqueue_kick_prepare_packed()
1335 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1337 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1338 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1339 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1341 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1344 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_packed()
1345 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_packed()
1356 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1357 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1361 END_USE(vq); in virtqueue_kick_prepare_packed()
1365 static void detach_buf_packed(struct vring_virtqueue *vq, in detach_buf_packed() argument
1372 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1377 vq->packed.desc_extra[state->last].next = vq->free_head; in detach_buf_packed()
1378 vq->free_head = id; in detach_buf_packed()
1379 vq->vq.num_free += state->num; in detach_buf_packed()
1381 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1384 vring_unmap_state_packed(vq, in detach_buf_packed()
1385 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1386 curr = vq->packed.desc_extra[curr].next; in detach_buf_packed()
1390 if (vq->indirect) { in detach_buf_packed()
1398 if (vq->use_dma_api) { in detach_buf_packed()
1399 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1402 vring_unmap_desc_packed(vq, &desc[i]); in detach_buf_packed()
1411 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, in is_used_desc_packed() argument
1417 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
1424 static inline bool more_used_packed(const struct vring_virtqueue *vq) in more_used_packed() argument
1426 return is_used_desc_packed(vq, vq->last_used_idx, in more_used_packed()
1427 vq->packed.used_wrap_counter); in more_used_packed()
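A packed-ring descriptor is "used" when its AVAIL and USED flag bits agree with each other and with the ring's current used wrap counter; that is what the flags load above feeds. Restated as a standalone sketch:

        static inline bool sketch_desc_is_used(u16 flags, bool used_wrap_counter)
        {
                bool avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
                bool used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

                /* avail == used means the device has flipped USED to match
                 * AVAIL; matching the wrap counter rules out stale entries
                 * left over from the previous lap around the ring. */
                return avail == used && used == used_wrap_counter;
        }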
1434 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed() local
1438 START_USE(vq); in virtqueue_get_buf_ctx_packed()
1440 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1441 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1445 if (!more_used_packed(vq)) { in virtqueue_get_buf_ctx_packed()
1447 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1452 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1454 last_used = vq->last_used_idx; in virtqueue_get_buf_ctx_packed()
1455 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1456 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1458 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1459 BAD_RING(vq, "id %u out of range\n", id); in virtqueue_get_buf_ctx_packed()
1462 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1463 BAD_RING(vq, "id %u is not a head!\n", id); in virtqueue_get_buf_ctx_packed()
1468 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1469 detach_buf_packed(vq, id, ctx); in virtqueue_get_buf_ctx_packed()
1471 vq->last_used_idx += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1472 if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1473 vq->last_used_idx -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1474 vq->packed.used_wrap_counter ^= 1; in virtqueue_get_buf_ctx_packed()
1482 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1483 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1484 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1485 cpu_to_le16(vq->last_used_idx | in virtqueue_get_buf_ctx_packed()
1486 (vq->packed.used_wrap_counter << in virtqueue_get_buf_ctx_packed()
1489 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_packed()
1491 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1497 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed() local
1499 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1500 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1501 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1502 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1508 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed() local
1510 START_USE(vq); in virtqueue_enable_cb_prepare_packed()
1517 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1518 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1519 cpu_to_le16(vq->last_used_idx | in virtqueue_enable_cb_prepare_packed()
1520 (vq->packed.used_wrap_counter << in virtqueue_enable_cb_prepare_packed()
1526 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1529 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1530 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1533 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1534 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1537 END_USE(vq); in virtqueue_enable_cb_prepare_packed()
1538 return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter << in virtqueue_enable_cb_prepare_packed()
1544 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed() local
1551 return is_used_desc_packed(vq, used_idx, wrap_counter); in virtqueue_poll_packed()
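The packed poll/enable paths pass ring positions around in the off_wrap format: the low 15 bits are the descriptor offset and bit VRING_PACKED_EVENT_F_WRAP_CTR (bit 15) is the wrap counter the offset was observed under. A sketch of the encoding and its inverse:

        static inline u16 sketch_encode_off_wrap(u16 off, bool wrap)
        {
                return off | ((u16)wrap << VRING_PACKED_EVENT_F_WRAP_CTR);
        }

        static inline void sketch_decode_off_wrap(u16 off_wrap,
                                                  u16 *off, bool *wrap)
        {
                *wrap = !!(off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR);
                *off = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
        }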
1556 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed() local
1560 START_USE(vq); in virtqueue_enable_cb_delayed_packed()
1567 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1569 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1570 wrap_counter = vq->packed.used_wrap_counter; in virtqueue_enable_cb_delayed_packed()
1572 used_idx = vq->last_used_idx + bufs; in virtqueue_enable_cb_delayed_packed()
1573 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1574 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1578 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1585 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1588 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1589 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1592 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1593 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1600 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1602 if (is_used_desc_packed(vq, in virtqueue_enable_cb_delayed_packed()
1603 vq->last_used_idx, in virtqueue_enable_cb_delayed_packed()
1604 vq->packed.used_wrap_counter)) { in virtqueue_enable_cb_delayed_packed()
1605 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1609 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1615 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed() local
1619 START_USE(vq); in virtqueue_detach_unused_buf_packed()
1621 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1622 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1625 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1626 detach_buf_packed(vq, i, NULL); in virtqueue_detach_unused_buf_packed()
1627 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1631 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1633 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1637 static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq, in vring_alloc_desc_extra() argument
1668 struct vring_virtqueue *vq; in vring_create_virtqueue_packed() local
1696 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in vring_create_virtqueue_packed()
1697 if (!vq) in vring_create_virtqueue_packed()
1700 vq->vq.callback = callback; in vring_create_virtqueue_packed()
1701 vq->vq.vdev = vdev; in vring_create_virtqueue_packed()
1702 vq->vq.name = name; in vring_create_virtqueue_packed()
1703 vq->vq.num_free = num; in vring_create_virtqueue_packed()
1704 vq->vq.index = index; in vring_create_virtqueue_packed()
1705 vq->we_own_ring = true; in vring_create_virtqueue_packed()
1706 vq->notify = notify; in vring_create_virtqueue_packed()
1707 vq->weak_barriers = weak_barriers; in vring_create_virtqueue_packed()
1708 vq->broken = false; in vring_create_virtqueue_packed()
1709 vq->last_used_idx = 0; in vring_create_virtqueue_packed()
1710 vq->event_triggered = false; in vring_create_virtqueue_packed()
1711 vq->num_added = 0; in vring_create_virtqueue_packed()
1712 vq->packed_ring = true; in vring_create_virtqueue_packed()
1713 vq->use_dma_api = vring_use_dma_api(vdev); in vring_create_virtqueue_packed()
1715 vq->in_use = false; in vring_create_virtqueue_packed()
1716 vq->last_add_time_valid = false; in vring_create_virtqueue_packed()
1719 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in vring_create_virtqueue_packed()
1721 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_create_virtqueue_packed()
1724 vq->weak_barriers = false; in vring_create_virtqueue_packed()
1726 vq->packed.ring_dma_addr = ring_dma_addr; in vring_create_virtqueue_packed()
1727 vq->packed.driver_event_dma_addr = driver_event_dma_addr; in vring_create_virtqueue_packed()
1728 vq->packed.device_event_dma_addr = device_event_dma_addr; in vring_create_virtqueue_packed()
1730 vq->packed.ring_size_in_bytes = ring_size_in_bytes; in vring_create_virtqueue_packed()
1731 vq->packed.event_size_in_bytes = event_size_in_bytes; in vring_create_virtqueue_packed()
1733 vq->packed.vring.num = num; in vring_create_virtqueue_packed()
1734 vq->packed.vring.desc = ring; in vring_create_virtqueue_packed()
1735 vq->packed.vring.driver = driver; in vring_create_virtqueue_packed()
1736 vq->packed.vring.device = device; in vring_create_virtqueue_packed()
1738 vq->packed.next_avail_idx = 0; in vring_create_virtqueue_packed()
1739 vq->packed.avail_wrap_counter = 1; in vring_create_virtqueue_packed()
1740 vq->packed.used_wrap_counter = 1; in vring_create_virtqueue_packed()
1741 vq->packed.event_flags_shadow = 0; in vring_create_virtqueue_packed()
1742 vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL; in vring_create_virtqueue_packed()
1744 vq->packed.desc_state = kmalloc_array(num, in vring_create_virtqueue_packed()
1747 if (!vq->packed.desc_state) in vring_create_virtqueue_packed()
1750 memset(vq->packed.desc_state, 0, in vring_create_virtqueue_packed()
1754 vq->free_head = 0; in vring_create_virtqueue_packed()
1756 vq->packed.desc_extra = vring_alloc_desc_extra(vq, num); in vring_create_virtqueue_packed()
1757 if (!vq->packed.desc_extra) in vring_create_virtqueue_packed()
1762 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in vring_create_virtqueue_packed()
1763 vq->packed.vring.driver->flags = in vring_create_virtqueue_packed()
1764 cpu_to_le16(vq->packed.event_flags_shadow); in vring_create_virtqueue_packed()
1768 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_create_virtqueue_packed()
1770 return &vq->vq; in vring_create_virtqueue_packed()
1773 kfree(vq->packed.desc_state); in vring_create_virtqueue_packed()
1775 kfree(vq); in vring_create_virtqueue_packed()
1800 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add() local
1802 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
1856 int virtqueue_add_outbuf(struct virtqueue *vq, in virtqueue_add_outbuf() argument
1861 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); in virtqueue_add_outbuf()
1878 int virtqueue_add_inbuf(struct virtqueue *vq, in virtqueue_add_inbuf() argument
1883 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); in virtqueue_add_inbuf()
1901 int virtqueue_add_inbuf_ctx(struct virtqueue *vq, in virtqueue_add_inbuf_ctx() argument
1907 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); in virtqueue_add_inbuf_ctx()
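The three wrappers above are the usual driver entry points: build a one-entry scatterlist and hand the device a buffer, with the buf pointer doubling as the cookie that virtqueue_get_buf() later returns. A receive-side sketch:

        static int sketch_post_rx(struct virtqueue *vq, void *buf, size_t len)
        {
                struct scatterlist sg;

                sg_init_one(&sg, buf, len);
                /* 0 out_sgs, 1 in_sg: the device writes into buf. */
                return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
        }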
1924 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare() local
1926 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
1941 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify() local
1943 if (unlikely(vq->broken)) in virtqueue_notify()
1947 if (!vq->notify(_vq)) { in virtqueue_notify()
1948 vq->broken = true; in virtqueue_notify()
1967 bool virtqueue_kick(struct virtqueue *vq) in virtqueue_kick() argument
1969 if (virtqueue_kick_prepare(vq)) in virtqueue_kick()
1970 return virtqueue_notify(vq); in virtqueue_kick()
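virtqueue_kick() is just prepare + notify, and the split exists so a driver can run the cheap index arithmetic under its own lock while issuing the potentially slow doorbell write outside it. A sketch of that idiom (sketch_lock and req are illustrative):

        static int sketch_submit(struct virtqueue *vq, spinlock_t *sketch_lock,
                                 void *req, size_t len)
        {
                struct scatterlist sg;
                unsigned long flags;
                bool kick;
                int err;

                sg_init_one(&sg, req, len);

                spin_lock_irqsave(sketch_lock, flags);
                err = virtqueue_add_outbuf(vq, &sg, 1, req, GFP_ATOMIC);
                kick = !err && virtqueue_kick_prepare(vq);
                spin_unlock_irqrestore(sketch_lock, flags);

                /* virtqueue_notify() returns false once the queue is
                 * marked broken. */
                if (kick && !virtqueue_notify(vq))
                        return -EIO;

                return err;     /* e.g. -ENOSPC when the ring is full */
        }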
1995 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx() local
1997 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
2018 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb() local
2023 if (vq->event_triggered) in virtqueue_disable_cb()
2026 if (vq->packed_ring) in virtqueue_disable_cb()
2047 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare() local
2049 if (vq->event_triggered) in virtqueue_enable_cb_prepare()
2050 vq->event_triggered = false; in virtqueue_enable_cb_prepare()
2052 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2068 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll() local
2070 if (unlikely(vq->broken)) in virtqueue_poll()
2073 virtio_mb(vq->weak_barriers); in virtqueue_poll()
2074 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
2113 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed() local
2115 if (vq->event_triggered) in virtqueue_enable_cb_delayed()
2116 vq->event_triggered = false; in virtqueue_enable_cb_delayed()
2118 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2133 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf() local
2135 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2140 static inline bool more_used(const struct vring_virtqueue *vq) in more_used() argument
2142 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2147 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt() local
2149 if (!more_used(vq)) { in vring_interrupt()
2150 pr_debug("virtqueue interrupt with no work for %p\n", vq); in vring_interrupt()
2154 if (unlikely(vq->broken)) in vring_interrupt()
2158 if (vq->event) in vring_interrupt()
2159 vq->event_triggered = true; in vring_interrupt()
2161 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2162 if (vq->vq.callback) in vring_interrupt()
2163 vq->vq.callback(&vq->vq); in vring_interrupt()
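vring_interrupt() ends by invoking the driver's per-queue callback in hard-irq context, so real drivers defer the heavy lifting. A sketch of a minimal callback (sketch_dev and its work item are hypothetical):

        static void sketch_vq_callback(struct virtqueue *vq)
        {
                struct sketch_dev *d = vq->vdev->priv;

                /* Keep irq context short; drain the ring from a workqueue
                 * (network drivers typically use NAPI instead). */
                schedule_work(&d->completion_work);
        }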
2179 struct vring_virtqueue *vq; in __vring_new_virtqueue() local
2184 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in __vring_new_virtqueue()
2185 if (!vq) in __vring_new_virtqueue()
2188 vq->packed_ring = false; in __vring_new_virtqueue()
2189 vq->vq.callback = callback; in __vring_new_virtqueue()
2190 vq->vq.vdev = vdev; in __vring_new_virtqueue()
2191 vq->vq.name = name; in __vring_new_virtqueue()
2192 vq->vq.num_free = vring.num; in __vring_new_virtqueue()
2193 vq->vq.index = index; in __vring_new_virtqueue()
2194 vq->we_own_ring = false; in __vring_new_virtqueue()
2195 vq->notify = notify; in __vring_new_virtqueue()
2196 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue()
2197 vq->broken = false; in __vring_new_virtqueue()
2198 vq->last_used_idx = 0; in __vring_new_virtqueue()
2199 vq->event_triggered = false; in __vring_new_virtqueue()
2200 vq->num_added = 0; in __vring_new_virtqueue()
2201 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue()
2203 vq->in_use = false; in __vring_new_virtqueue()
2204 vq->last_add_time_valid = false; in __vring_new_virtqueue()
2207 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue()
2209 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue()
2212 vq->weak_barriers = false; in __vring_new_virtqueue()
2214 vq->split.queue_dma_addr = 0; in __vring_new_virtqueue()
2215 vq->split.queue_size_in_bytes = 0; in __vring_new_virtqueue()
2217 vq->split.vring = vring; in __vring_new_virtqueue()
2218 vq->split.avail_flags_shadow = 0; in __vring_new_virtqueue()
2219 vq->split.avail_idx_shadow = 0; in __vring_new_virtqueue()
2223 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in __vring_new_virtqueue()
2224 if (!vq->event) in __vring_new_virtqueue()
2225 vq->split.vring.avail->flags = cpu_to_virtio16(vdev, in __vring_new_virtqueue()
2226 vq->split.avail_flags_shadow); in __vring_new_virtqueue()
2229 vq->split.desc_state = kmalloc_array(vring.num, in __vring_new_virtqueue()
2231 if (!vq->split.desc_state) in __vring_new_virtqueue()
2234 vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num); in __vring_new_virtqueue()
2235 if (!vq->split.desc_extra) in __vring_new_virtqueue()
2239 vq->free_head = 0; in __vring_new_virtqueue()
2240 memset(vq->split.desc_state, 0, vring.num * in __vring_new_virtqueue()
2244 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue()
2246 return &vq->vq; in __vring_new_virtqueue()
2249 kfree(vq->split.desc_state); in __vring_new_virtqueue()
2251 kfree(vq); in __vring_new_virtqueue()
2288 bool (*notify)(struct virtqueue *vq), in vring_new_virtqueue() argument
2289 void (*callback)(struct virtqueue *vq), in vring_new_virtqueue() argument
2305 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue() local
2307 spin_lock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2309 spin_unlock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2311 if (vq->we_own_ring) { in vring_del_virtqueue()
2312 if (vq->packed_ring) { in vring_del_virtqueue()
2313 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2314 vq->packed.ring_size_in_bytes, in vring_del_virtqueue()
2315 vq->packed.vring.desc, in vring_del_virtqueue()
2316 vq->packed.ring_dma_addr); in vring_del_virtqueue()
2318 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2319 vq->packed.event_size_in_bytes, in vring_del_virtqueue()
2320 vq->packed.vring.driver, in vring_del_virtqueue()
2321 vq->packed.driver_event_dma_addr); in vring_del_virtqueue()
2323 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2324 vq->packed.event_size_in_bytes, in vring_del_virtqueue()
2325 vq->packed.vring.device, in vring_del_virtqueue()
2326 vq->packed.device_event_dma_addr); in vring_del_virtqueue()
2328 kfree(vq->packed.desc_state); in vring_del_virtqueue()
2329 kfree(vq->packed.desc_extra); in vring_del_virtqueue()
2331 vring_free_queue(vq->vq.vdev, in vring_del_virtqueue()
2332 vq->split.queue_size_in_bytes, in vring_del_virtqueue()
2333 vq->split.vring.desc, in vring_del_virtqueue()
2334 vq->split.queue_dma_addr); in vring_del_virtqueue()
2337 if (!vq->packed_ring) { in vring_del_virtqueue()
2338 kfree(vq->split.desc_state); in vring_del_virtqueue()
2339 kfree(vq->split.desc_extra); in vring_del_virtqueue()
2341 kfree(vq); in vring_del_virtqueue()
2382 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size() local
2384 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
2390 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken() local
2392 return READ_ONCE(vq->broken); in virtqueue_is_broken()
2406 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device() local
2409 WRITE_ONCE(vq->broken, true); in virtio_break_device()
2417 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr() local
2419 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
2421 if (vq->packed_ring) in virtqueue_get_desc_addr()
2422 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
2424 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
2430 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr() local
2432 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
2434 if (vq->packed_ring) in virtqueue_get_avail_addr()
2435 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
2437 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
2438 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
2444 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr() local
2446 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
2448 if (vq->packed_ring) in virtqueue_get_used_addr()
2449 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
2451 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
2452 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
2457 const struct vring *virtqueue_get_vring(struct virtqueue *vq) in virtqueue_get_vring() argument
2459 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()