Lines matching refs:vq — references to the identifier 'vq' in drivers/vhost/vhost.c (source line numbers shown at left)
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) argument
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) argument
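/* Not a typo: per the virtio split-ring spec the guest publishes
 * used_event in the last slot of the *avail* ring, and the host
 * publishes avail_event in the last slot of the *used* ring; each
 * side only ever writes the ring it owns, hence the crossed names. */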
53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
55 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_big() argument
60 vq->user_be = true; in vhost_enable_cross_endian_big()
63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq) in vhost_enable_cross_endian_little() argument
65 vq->user_be = false; in vhost_enable_cross_endian_little()
68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
72 if (vq->private_data) in vhost_set_vring_endian()
83 vhost_enable_cross_endian_big(vq); in vhost_set_vring_endian()
85 vhost_enable_cross_endian_little(vq); in vhost_set_vring_endian()
90 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
95 .num = vq->user_be in vhost_get_vring_endian()
104 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
111 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
114 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) in vhost_disable_cross_endian() argument
118 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) in vhost_set_vring_endian() argument
123 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, in vhost_get_vring_endian() argument
129 static void vhost_init_is_le(struct vhost_virtqueue *vq) in vhost_init_is_le() argument
131 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) in vhost_init_is_le()
136 static void vhost_reset_is_le(struct vhost_virtqueue *vq) in vhost_reset_is_le() argument
138 vhost_init_is_le(vq); in vhost_reset_is_le()
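The block above covers legacy cross-endian support (CONFIG_VHOST_CROSS_ENDIAN_LEGACY) and its stubs; either way it reduces to one boolean, vq->is_le, consulted by every cpu_to_vhost16()/vhost16_to_cpu() conversion later in the file. A condensed sketch of the decision, equivalent to the vhost_init_is_le() shown above:

static void sketch_init_is_le(struct vhost_virtqueue *vq)
{
        if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
                vq->is_le = true;          /* modern virtio: spec mandates LE */
        else
                vq->is_le = !vq->user_be;  /* legacy: guest's native order,
                                            * overridable per-ring through
                                            * VHOST_SET_VRING_ENDIAN */
}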
285 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) in __vhost_vq_meta_reset() argument
290 vq->meta_iotlb[j] = NULL; in __vhost_vq_meta_reset()
307 bool vhost_vq_is_setup(struct vhost_virtqueue *vq) in vhost_vq_is_setup() argument
309 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq); in vhost_vq_is_setup()
314 struct vhost_virtqueue *vq) in vhost_vq_reset() argument
316 vq->num = 1; in vhost_vq_reset()
317 vq->desc = NULL; in vhost_vq_reset()
318 vq->avail = NULL; in vhost_vq_reset()
319 vq->used = NULL; in vhost_vq_reset()
320 vq->last_avail_idx = 0; in vhost_vq_reset()
321 vq->avail_idx = 0; in vhost_vq_reset()
322 vq->last_used_idx = 0; in vhost_vq_reset()
323 vq->signalled_used = 0; in vhost_vq_reset()
324 vq->signalled_used_valid = false; in vhost_vq_reset()
325 vq->used_flags = 0; in vhost_vq_reset()
326 vq->log_used = false; in vhost_vq_reset()
327 vq->log_addr = -1ull; in vhost_vq_reset()
328 vq->private_data = NULL; in vhost_vq_reset()
329 vq->acked_features = 0; in vhost_vq_reset()
330 vq->acked_backend_features = 0; in vhost_vq_reset()
331 vq->log_base = NULL; in vhost_vq_reset()
332 vq->error_ctx = NULL; in vhost_vq_reset()
333 vq->kick = NULL; in vhost_vq_reset()
334 vq->log_ctx = NULL; in vhost_vq_reset()
335 vhost_disable_cross_endian(vq); in vhost_vq_reset()
336 vhost_reset_is_le(vq); in vhost_vq_reset()
337 vq->busyloop_timeout = 0; in vhost_vq_reset()
338 vq->umem = NULL; in vhost_vq_reset()
339 vq->iotlb = NULL; in vhost_vq_reset()
340 vhost_vring_call_reset(&vq->call_ctx); in vhost_vq_reset()
341 __vhost_vq_meta_reset(vq); in vhost_vq_reset()
382 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) in vhost_vq_free_iovecs() argument
384 kfree(vq->indirect); in vhost_vq_free_iovecs()
385 vq->indirect = NULL; in vhost_vq_free_iovecs()
386 kfree(vq->log); in vhost_vq_free_iovecs()
387 vq->log = NULL; in vhost_vq_free_iovecs()
388 kfree(vq->heads); in vhost_vq_free_iovecs()
389 vq->heads = NULL; in vhost_vq_free_iovecs()
395 struct vhost_virtqueue *vq; in vhost_dev_alloc_iovecs() local
399 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
400 vq->indirect = kmalloc_array(UIO_MAXIOV, in vhost_dev_alloc_iovecs()
401 sizeof(*vq->indirect), in vhost_dev_alloc_iovecs()
403 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), in vhost_dev_alloc_iovecs()
405 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), in vhost_dev_alloc_iovecs()
407 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
426 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, in vhost_exceeds_weight() argument
429 struct vhost_dev *dev = vq->dev; in vhost_exceeds_weight()
433 vhost_poll_queue(&vq->poll); in vhost_exceeds_weight()
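vhost_exceeds_weight() is the fairness valve: once a single kick has consumed its packet/byte budget, the handler requeues itself via vhost_poll_queue() instead of looping on, so one busy virtqueue cannot monopolize the worker thread. A handler typically uses it as its loop condition, roughly as follows (a sketch of the pattern vhost_net follows; the counters are hypothetical):

do {
        /* ... pull one request with vhost_get_vq_desc(), do the I/O,
         * complete it, accumulate total_len ... */
} while (likely(!vhost_exceeds_weight(vq, ++done_pkts, total_len)));
/* once the weight is exceeded the vq has already been requeued;
 * the worker resumes it after servicing the other queues */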
441 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, in vhost_get_avail_size() argument
445 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vhost_get_avail_size()
447 return sizeof(*vq->avail) + in vhost_get_avail_size()
448 sizeof(*vq->avail->ring) * num + event; in vhost_get_avail_size()
451 static size_t vhost_get_used_size(struct vhost_virtqueue *vq, in vhost_get_used_size() argument
455 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; in vhost_get_used_size()
457 return sizeof(*vq->used) + in vhost_get_used_size()
458 sizeof(*vq->used->ring) * num + event; in vhost_get_used_size()
461 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, in vhost_get_desc_size() argument
464 return sizeof(*vq->desc) * num; in vhost_get_desc_size()
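The three size helpers above compute the split-ring footprints from the canonical layouts in <uapi/linux/virtio_ring.h>: 16-byte descriptors, 2-byte avail ring entries, 8-byte used elements, plus 2 trailing bytes for the event field when VIRTIO_RING_F_EVENT_IDX was negotiated. A worked example for num = 256 (a sketch; the arithmetic assumes those standard struct sizes):

/* num = 256, EVENT_IDX negotiated:
 *   desc:  sizeof(struct vring_desc)  * 256       = 16*256     = 4096
 *   avail: sizeof(struct vring_avail) + 2*256 + 2 = 4+512+2    =  518
 *   used:  sizeof(struct vring_used)  + 8*256 + 2 = 4+2048+2   = 2054
 */
static size_t sketch_avail_size(unsigned int num, bool event_idx)
{
        return sizeof(struct vring_avail)            /* flags + idx */
             + sizeof(__virtio16) * num              /* ring[num]   */
             + (event_idx ? sizeof(__virtio16) : 0); /* used_event  */
}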
474 struct vhost_virtqueue *vq; in vhost_dev_init() local
498 vq = dev->vqs[i]; in vhost_dev_init()
499 vq->log = NULL; in vhost_dev_init()
500 vq->indirect = NULL; in vhost_dev_init()
501 vq->heads = NULL; in vhost_dev_init()
502 vq->dev = dev; in vhost_dev_init()
503 mutex_init(&vq->mutex); in vhost_dev_init()
504 vhost_vq_reset(dev, vq); in vhost_dev_init()
505 if (vq->handle_kick) in vhost_dev_init()
506 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
776 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, in vhost_vq_meta_fetch() argument
780 const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; in vhost_vq_meta_fetch()
814 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
817 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, in vhost_copy_to_user() argument
822 if (!vq->iotlb) in vhost_copy_to_user()
831 void __user *uaddr = vhost_vq_meta_fetch(vq, in vhost_copy_to_user()
838 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, in vhost_copy_to_user()
839 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_to_user()
843 iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size); in vhost_copy_to_user()
852 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, in vhost_copy_from_user() argument
857 if (!vq->iotlb) in vhost_copy_from_user()
865 void __user *uaddr = vhost_vq_meta_fetch(vq, in vhost_copy_from_user()
873 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, in vhost_copy_from_user()
874 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_from_user()
877 vq_err(vq, "IOTLB translation failure: uaddr " in vhost_copy_from_user()
882 iov_iter_init(&f, READ, vq->iotlb_iov, ret, size); in vhost_copy_from_user()
892 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, in __vhost_get_user_slow() argument
898 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, in __vhost_get_user_slow()
899 ARRAY_SIZE(vq->iotlb_iov), in __vhost_get_user_slow()
902 vq_err(vq, "IOTLB translation failure: uaddr " in __vhost_get_user_slow()
908 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { in __vhost_get_user_slow()
909 vq_err(vq, "Non atomic userspace memory access: uaddr " in __vhost_get_user_slow()
915 return vq->iotlb_iov[0].iov_base; in __vhost_get_user_slow()
923 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, in __vhost_get_user() argument
927 void __user *uaddr = vhost_vq_meta_fetch(vq, in __vhost_get_user()
932 return __vhost_get_user_slow(vq, addr, size, type); in __vhost_get_user()
935 #define vhost_put_user(vq, x, ptr) \ argument
938 if (!vq->iotlb) { \
942 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
952 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) in vhost_put_avail_event() argument
954 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), in vhost_put_avail_event()
955 vhost_avail_event(vq)); in vhost_put_avail_event()
958 static inline int vhost_put_used(struct vhost_virtqueue *vq, in vhost_put_used() argument
962 return vhost_copy_to_user(vq, vq->used->ring + idx, head, in vhost_put_used()
966 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) in vhost_put_used_flags() argument
969 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), in vhost_put_used_flags()
970 &vq->used->flags); in vhost_put_used_flags()
973 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) in vhost_put_used_idx() argument
976 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), in vhost_put_used_idx()
977 &vq->used->idx); in vhost_put_used_idx()
980 #define vhost_get_user(vq, x, ptr, type) \ argument
983 if (!vq->iotlb) { \
987 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \
998 #define vhost_get_avail(vq, x, ptr) \ argument
999 vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)
1001 #define vhost_get_used(vq, x, ptr) \ argument
1002 vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
1018 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, in vhost_get_avail_idx() argument
1021 return vhost_get_avail(vq, *idx, &vq->avail->idx); in vhost_get_avail_idx()
1024 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, in vhost_get_avail_head() argument
1027 return vhost_get_avail(vq, *head, in vhost_get_avail_head()
1028 &vq->avail->ring[idx & (vq->num - 1)]); in vhost_get_avail_head()
1031 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, in vhost_get_avail_flags() argument
1034 return vhost_get_avail(vq, *flags, &vq->avail->flags); in vhost_get_avail_flags()
1037 static inline int vhost_get_used_event(struct vhost_virtqueue *vq, in vhost_get_used_event() argument
1040 return vhost_get_avail(vq, *event, vhost_used_event(vq)); in vhost_get_used_event()
1043 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, in vhost_get_used_idx() argument
1046 return vhost_get_used(vq, *idx, &vq->used->idx); in vhost_get_used_idx()
1049 static inline int vhost_get_desc(struct vhost_virtqueue *vq, in vhost_get_desc() argument
1052 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); in vhost_get_desc()
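All the vhost_get_*/vhost_put_* accessors above funnel through the vhost_get_user()/vhost_put_user() macros, which split into a fast path (no IOTLB: the ring pointers are plain userspace addresses, so __get_user()/__put_user() apply directly) and a slow path (IOTLB enabled: translate through the per-ring meta_iotlb cache, falling back to __vhost_get_user_slow()). An open-coded sketch of the read side (the wrapper name is hypothetical):

static int sketch_get_avail_idx(struct vhost_virtqueue *vq, __virtio16 *idx)
{
        __virtio16 __user *ptr = &vq->avail->idx;

        if (!vq->iotlb)
                return __get_user(*idx, ptr);   /* fast path */

        /* IOTLB path: map the guest address to a host user address,
         * hitting vq->meta_iotlb[VHOST_ADDR_AVAIL] when warm. */
        ptr = (__virtio16 __user *)__vhost_get_user(vq, ptr,
                                                    sizeof(*ptr),
                                                    VHOST_ADDR_AVAIL);
        if (!ptr)
                return -EFAULT;   /* miss already queued to userspace */
        return __get_user(*idx, ptr);
}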
1067 vhost_poll_queue(&node->vq->poll); in vhost_iotlb_notify_vq()
1271 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access) in vhost_iotlb_miss() argument
1273 struct vhost_dev *dev = vq->dev; in vhost_iotlb_miss()
1276 bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2); in vhost_iotlb_miss()
1278 node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG); in vhost_iotlb_miss()
1298 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, in vq_access_ok() argument
1306 if (vq->iotlb) in vq_access_ok()
1309 return access_ok(desc, vhost_get_desc_size(vq, num)) && in vq_access_ok()
1310 access_ok(avail, vhost_get_avail_size(vq, num)) && in vq_access_ok()
1311 access_ok(used, vhost_get_used_size(vq, num)); in vq_access_ok()
1314 static void vhost_vq_meta_update(struct vhost_virtqueue *vq, in vhost_vq_meta_update() argument
1322 vq->meta_iotlb[type] = map; in vhost_vq_meta_update()
1325 static bool iotlb_access_ok(struct vhost_virtqueue *vq, in iotlb_access_ok() argument
1329 struct vhost_iotlb *umem = vq->iotlb; in iotlb_access_ok()
1332 if (vhost_vq_meta_fetch(vq, addr, len, type)) in iotlb_access_ok()
1338 vhost_iotlb_miss(vq, addr, access); in iotlb_access_ok()
1350 vhost_vq_meta_update(vq, map, type); in iotlb_access_ok()
1359 int vq_meta_prefetch(struct vhost_virtqueue *vq) in vq_meta_prefetch() argument
1361 unsigned int num = vq->num; in vq_meta_prefetch()
1363 if (!vq->iotlb) in vq_meta_prefetch()
1366 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, in vq_meta_prefetch()
1367 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && in vq_meta_prefetch()
1368 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, in vq_meta_prefetch()
1369 vhost_get_avail_size(vq, num), in vq_meta_prefetch()
1371 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, in vq_meta_prefetch()
1372 vhost_get_used_size(vq, num), VHOST_ADDR_USED); in vq_meta_prefetch()
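vq_meta_prefetch() warms the meta_iotlb cache for all three rings in one go, so the per-field fast path above rarely misses mid-batch; it returns false after posting an IOTLB miss, in which case the handler simply backs off until userspace supplies the mapping and the worker is re-kicked. A sketch of the calling pattern (the handler name is hypothetical; vhost_net does this at the top of its TX/RX work):

static void sketch_handle_queue(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq)
{
        mutex_lock(&vq->mutex);
        if (!vq_meta_prefetch(vq))
                goto out;   /* miss queued; we'll be re-kicked later */

        /* ... vhost_get_vq_desc() loop, I/O, completions ... */
out:
        mutex_unlock(&vq->mutex);
}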
1384 static bool vq_log_used_access_ok(struct vhost_virtqueue *vq, in vq_log_used_access_ok() argument
1391 if (vq->iotlb) in vq_log_used_access_ok()
1395 vhost_get_used_size(vq, vq->num)); in vq_log_used_access_ok()
1400 static bool vq_log_access_ok(struct vhost_virtqueue *vq, in vq_log_access_ok() argument
1403 return vq_memory_access_ok(log_base, vq->umem, in vq_log_access_ok()
1404 vhost_has_feature(vq, VHOST_F_LOG_ALL)) && in vq_log_access_ok()
1405 vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); in vq_log_access_ok()
1410 bool vhost_vq_access_ok(struct vhost_virtqueue *vq) in vhost_vq_access_ok() argument
1412 if (!vq_log_access_ok(vq, vq->log_base)) in vhost_vq_access_ok()
1415 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); in vhost_vq_access_ok()
1487 struct vhost_virtqueue *vq, in vhost_vring_set_num() argument
1494 if (vq->private_data) in vhost_vring_set_num()
1502 vq->num = s.num; in vhost_vring_set_num()
1508 struct vhost_virtqueue *vq, in vhost_vring_set_addr() argument
1526 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_set_addr()
1527 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_set_addr()
1536 if (vq->private_data) { in vhost_vring_set_addr()
1537 if (!vq_access_ok(vq, vq->num, in vhost_vring_set_addr()
1544 if (!vq_log_used_access_ok(vq, vq->log_base, in vhost_vring_set_addr()
1550 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_set_addr()
1551 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_set_addr()
1552 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_set_addr()
1553 vq->log_addr = a.log_guest_addr; in vhost_vring_set_addr()
1554 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_set_addr()
1560 struct vhost_virtqueue *vq, in vhost_vring_set_num_addr() argument
1566 mutex_lock(&vq->mutex); in vhost_vring_set_num_addr()
1570 r = vhost_vring_set_num(d, vq, argp); in vhost_vring_set_num_addr()
1573 r = vhost_vring_set_addr(d, vq, argp); in vhost_vring_set_num_addr()
1579 mutex_unlock(&vq->mutex); in vhost_vring_set_num_addr()
1589 struct vhost_virtqueue *vq; in vhost_vring_ioctl() local
1602 vq = d->vqs[idx]; in vhost_vring_ioctl()
1606 return vhost_vring_set_num_addr(d, vq, ioctl, argp); in vhost_vring_ioctl()
1609 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
1615 if (vq->private_data) { in vhost_vring_ioctl()
1627 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
1629 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
1633 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
1647 if (eventfp != vq->kick) { in vhost_vring_ioctl()
1648 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
1649 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
1664 swap(ctx, vq->call_ctx.ctx); in vhost_vring_ioctl()
1676 swap(ctx, vq->error_ctx); in vhost_vring_ioctl()
1679 r = vhost_set_vring_endian(vq, argp); in vhost_vring_ioctl()
1682 r = vhost_get_vring_endian(vq, idx, argp); in vhost_vring_ioctl()
1689 vq->busyloop_timeout = s.num; in vhost_vring_ioctl()
1693 s.num = vq->busyloop_timeout; in vhost_vring_ioctl()
1701 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
1702 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
1709 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
1710 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
1712 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
1714 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
1715 vhost_poll_flush(&vq->poll); in vhost_vring_ioctl()
1733 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_init_device_iotlb() local
1735 mutex_lock(&vq->mutex); in vhost_init_device_iotlb()
1736 vq->iotlb = niotlb; in vhost_init_device_iotlb()
1737 __vhost_vq_meta_reset(vq); in vhost_init_device_iotlb()
1738 mutex_unlock(&vq->mutex); in vhost_init_device_iotlb()
1780 struct vhost_virtqueue *vq; in vhost_dev_ioctl() local
1782 vq = d->vqs[i]; in vhost_dev_ioctl()
1783 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
1785 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
1788 vq->log_base = base; in vhost_dev_ioctl()
1789 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
1868 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) in log_write_hva() argument
1870 struct vhost_iotlb *umem = vq->umem; in log_write_hva()
1888 r = log_write(vq->log_base, in log_write_hva()
1907 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) in log_used() argument
1909 struct iovec *iov = vq->log_iov; in log_used()
1912 if (!vq->iotlb) in log_used()
1913 return log_write(vq->log_base, vq->log_addr + used_offset, len); in log_used()
1915 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, in log_used()
1921 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, in log_used()
1930 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, in vhost_log_write() argument
1938 if (vq->iotlb) { in vhost_log_write()
1940 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, in vhost_log_write()
1950 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
1955 if (vq->log_ctx) in vhost_log_write()
1956 eventfd_signal(vq->log_ctx, 1); in vhost_log_write()
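The logging path records dirty guest pages for live migration: log_write() sets one bit per page in the userspace bitmap at vq->log_base, while log_used() first translates the used-ring offset through the IOTLB when one is active. A sketch of the bitmap arithmetic, assuming vhost.h's VHOST_PAGE_SIZE (0x1000) and the file's set_bit_to_user() helper (the real log_write() also folds the intra-page offset into the length and leaves eventfd signalling to its callers):

static int sketch_log_write(void __user *log_base, u64 addr, u64 len)
{
        u64 page, last;
        int r;

        if (!len)
                return 0;
        last = (addr + len - 1) / VHOST_PAGE_SIZE;
        for (page = addr / VHOST_PAGE_SIZE; page <= last; page++) {
                /* bit 'page' lives at byte page/8, bit index page%8 */
                r = set_bit_to_user(page % 8, log_base + page / 8);
                if (r < 0)
                        return r;
        }
        return 0;
}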
1966 static int vhost_update_used_flags(struct vhost_virtqueue *vq) in vhost_update_used_flags() argument
1969 if (vhost_put_used_flags(vq)) in vhost_update_used_flags()
1971 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
1975 used = &vq->used->flags; in vhost_update_used_flags()
1976 log_used(vq, (used - (void __user *)vq->used), in vhost_update_used_flags()
1977 sizeof vq->used->flags); in vhost_update_used_flags()
1978 if (vq->log_ctx) in vhost_update_used_flags()
1979 eventfd_signal(vq->log_ctx, 1); in vhost_update_used_flags()
1984 static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) in vhost_update_avail_event() argument
1986 if (vhost_put_avail_event(vq)) in vhost_update_avail_event()
1988 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
1993 used = vhost_avail_event(vq); in vhost_update_avail_event()
1994 log_used(vq, (used - (void __user *)vq->used), in vhost_update_avail_event()
1995 sizeof *vhost_avail_event(vq)); in vhost_update_avail_event()
1996 if (vq->log_ctx) in vhost_update_avail_event()
1997 eventfd_signal(vq->log_ctx, 1); in vhost_update_avail_event()
2002 int vhost_vq_init_access(struct vhost_virtqueue *vq) in vhost_vq_init_access() argument
2006 bool is_le = vq->is_le; in vhost_vq_init_access()
2008 if (!vq->private_data) in vhost_vq_init_access()
2011 vhost_init_is_le(vq); in vhost_vq_init_access()
2013 r = vhost_update_used_flags(vq); in vhost_vq_init_access()
2016 vq->signalled_used_valid = false; in vhost_vq_init_access()
2017 if (!vq->iotlb && in vhost_vq_init_access()
2018 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { in vhost_vq_init_access()
2022 r = vhost_get_used_idx(vq, &last_used_idx); in vhost_vq_init_access()
2024 vq_err(vq, "Can't access used idx at %p\n", in vhost_vq_init_access()
2025 &vq->used->idx); in vhost_vq_init_access()
2028 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_vq_init_access()
2032 vq->is_le = is_le; in vhost_vq_init_access()
2037 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, in translate_desc() argument
2041 struct vhost_dev *dev = vq->dev; in translate_desc()
2078 vhost_iotlb_miss(vq, addr, access); in translate_desc()
2085 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) in next_desc() argument
2090 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
2094 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); in next_desc()
2098 static int get_indirect(struct vhost_virtqueue *vq, in get_indirect() argument
2106 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
2112 vq_err(vq, "Invalid length in indirect descriptor: " in get_indirect()
2119 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
2123 vq_err(vq, "Translation failure %d in indirect.\n", ret); in get_indirect()
2126 iov_iter_init(&from, READ, vq->indirect, ret, len); in get_indirect()
2131 vq_err(vq, "Indirect buffer length too big: %d\n", in get_indirect()
2139 vq_err(vq, "Loop detected: last one at %u " in get_indirect()
2145 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", in get_indirect()
2146 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2149 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { in get_indirect()
2150 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", in get_indirect()
2151 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2155 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) in get_indirect()
2160 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in get_indirect()
2161 vhost32_to_cpu(vq, desc.len), iov + iov_count, in get_indirect()
2165 vq_err(vq, "Translation failure %d indirect idx %d\n", in get_indirect()
2173 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in get_indirect()
2174 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in get_indirect()
2181 vq_err(vq, "Indirect descriptor " in get_indirect()
2187 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
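An indirect descriptor's addr/len fields name a separate table of struct vring_desc in guest memory, so one ring slot can describe an arbitrarily long chain; get_indirect() translates the table into vq->indirect, then walks it with iov_iter reads, rejecting nested VRING_DESC_F_INDIRECT entries. Its up-front validation reduces to this sketch:

u32 len = vhost32_to_cpu(vq, indirect->len);
unsigned int count = len / sizeof(struct vring_desc);

/* the table must be a whole number of 16-byte descriptors ... */
if (unlikely(len % sizeof(struct vring_desc)))
        return -EINVAL;   /* "Invalid length in indirect descriptor" */
/* ... and each of the 'count' entries may set NEXT to chain, but may
 * never set INDIRECT again (no nesting) nor chain past 'count'. */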
2199 int vhost_get_vq_desc(struct vhost_virtqueue *vq, in vhost_get_vq_desc() argument
2212 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
2214 if (vq->avail_idx == vq->last_avail_idx) { in vhost_get_vq_desc()
2215 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { in vhost_get_vq_desc()
2216 vq_err(vq, "Failed to access avail idx at %p\n", in vhost_get_vq_desc()
2217 &vq->avail->idx); in vhost_get_vq_desc()
2220 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_get_vq_desc()
2222 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { in vhost_get_vq_desc()
2223 vq_err(vq, "Guest moved used index from %u to %u", in vhost_get_vq_desc()
2224 last_avail_idx, vq->avail_idx); in vhost_get_vq_desc()
2231 if (vq->avail_idx == last_avail_idx) in vhost_get_vq_desc()
2232 return vq->num; in vhost_get_vq_desc()
2242 if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { in vhost_get_vq_desc()
2243 vq_err(vq, "Failed to read head: idx %d address %p\n", in vhost_get_vq_desc()
2245 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
2249 head = vhost16_to_cpu(vq, ring_head); in vhost_get_vq_desc()
2252 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
2253 vq_err(vq, "Guest says index %u > %u is available", in vhost_get_vq_desc()
2254 head, vq->num); in vhost_get_vq_desc()
2266 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
2267 vq_err(vq, "Desc index is %u > %u, head = %u", in vhost_get_vq_desc()
2268 i, vq->num, head); in vhost_get_vq_desc()
2271 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
2272 vq_err(vq, "Loop detected: last one at %u " in vhost_get_vq_desc()
2274 i, vq->num, head); in vhost_get_vq_desc()
2277 ret = vhost_get_desc(vq, &desc, i); in vhost_get_vq_desc()
2279 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", in vhost_get_vq_desc()
2280 i, vq->desc + i); in vhost_get_vq_desc()
2283 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { in vhost_get_vq_desc()
2284 ret = get_indirect(vq, iov, iov_size, in vhost_get_vq_desc()
2289 vq_err(vq, "Failure detected " in vhost_get_vq_desc()
2296 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) in vhost_get_vq_desc()
2300 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), in vhost_get_vq_desc()
2301 vhost32_to_cpu(vq, desc.len), iov + iov_count, in vhost_get_vq_desc()
2305 vq_err(vq, "Translation failure %d descriptor idx %d\n", in vhost_get_vq_desc()
2314 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); in vhost_get_vq_desc()
2315 log[*log_num].len = vhost32_to_cpu(vq, desc.len); in vhost_get_vq_desc()
2322 vq_err(vq, "Descriptor has out after in: " in vhost_get_vq_desc()
2328 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
2331 vq->last_avail_idx++; in vhost_get_vq_desc()
2335 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
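vhost_get_vq_desc() hands back the head index of the next available chain, filling iov[] and splitting the count into device-readable (out_num) and device-writable (in_num) segments; it returns vq->num when the ring is empty and a negative errno on access failure. Putting the pieces together, a backend's consume/complete loop looks roughly like this (a sketch of the canonical pattern, device I/O elided):

static void sketch_process_queue(struct vhost_dev *dev,
                                 struct vhost_virtqueue *vq)
{
        unsigned int out, in;
        int head;

        vhost_disable_notify(dev, vq);   /* we're polling now */
        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (unlikely(head < 0))
                        break;                 /* bad ring, already vq_err()ed */
                if (head == vq->num) {         /* ring drained */
                        if (unlikely(vhost_enable_notify(dev, vq))) {
                                /* raced: buffers arrived after the empty
                                 * check, so poll once more */
                                vhost_disable_notify(dev, vq);
                                continue;
                        }
                        break;                 /* wait for the next kick */
                }
                /* ... perform the I/O in vq->iov[0 .. out + in) ... */
                vhost_add_used_and_signal(dev, vq, head, 0 /* written len */);
        }
}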
2341 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) in vhost_discard_vq_desc() argument
2343 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
2349 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) in vhost_add_used() argument
2352 cpu_to_vhost32(vq, head), in vhost_add_used()
2353 cpu_to_vhost32(vq, len) in vhost_add_used()
2356 return vhost_add_used_n(vq, &heads, 1); in vhost_add_used()
2360 static int __vhost_add_used_n(struct vhost_virtqueue *vq, in __vhost_add_used_n() argument
2368 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
2369 used = vq->used->ring + start; in __vhost_add_used_n()
2370 if (vhost_put_used(vq, heads, start, count)) { in __vhost_add_used_n()
2371 vq_err(vq, "Failed to write used"); in __vhost_add_used_n()
2374 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
2378 log_used(vq, ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
2381 old = vq->last_used_idx; in __vhost_add_used_n()
2382 new = (vq->last_used_idx += count); in __vhost_add_used_n()
2387 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
2388 vq->signalled_used_valid = false; in __vhost_add_used_n()
2394 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, in vhost_add_used_n() argument
2399 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
2400 n = vq->num - start; in vhost_add_used_n()
2402 r = __vhost_add_used_n(vq, heads, n); in vhost_add_used_n()
2408 r = __vhost_add_used_n(vq, heads, count); in vhost_add_used_n()
2412 if (vhost_put_used_idx(vq)) { in vhost_add_used_n()
2413 vq_err(vq, "Failed to increment used idx"); in vhost_add_used_n()
2416 if (unlikely(vq->log_used)) { in vhost_add_used_n()
2420 log_used(vq, offsetof(struct vring_used, idx), in vhost_add_used_n()
2421 sizeof vq->used->idx); in vhost_add_used_n()
2422 if (vq->log_ctx) in vhost_add_used_n()
2423 eventfd_signal(vq->log_ctx, 1); in vhost_add_used_n()
2429 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_notify() argument
2439 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && in vhost_notify()
2440 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
2443 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_notify()
2445 if (vhost_get_avail_flags(vq, &flags)) { in vhost_notify()
2446 vq_err(vq, "Failed to get flags"); in vhost_notify()
2449 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); in vhost_notify()
2451 old = vq->signalled_used; in vhost_notify()
2452 v = vq->signalled_used_valid; in vhost_notify()
2453 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
2454 vq->signalled_used_valid = true; in vhost_notify()
2459 if (vhost_get_used_event(vq, &event)) { in vhost_notify()
2460 vq_err(vq, "Failed to get used event idx"); in vhost_notify()
2463 return vring_need_event(vhost16_to_cpu(vq, event), new, old); in vhost_notify()
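With VIRTIO_RING_F_EVENT_IDX, vhost_notify() suppresses interrupts unless the new used index has crossed the guest's published used_event since the last signalled value. The crossing test is the standard vring_need_event() from <uapi/linux/virtio_ring.h>, whose unsigned wrap-around trick is worth spelling out:

/* True iff event_idx lies in the half-open window (old, new_idx],
 * computed modulo 2^16 so index wrap-around is handled for free. */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
                                   __u16 old)
{
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}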
2467 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_signal() argument
2470 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) in vhost_signal()
2471 eventfd_signal(vq->call_ctx.ctx, 1); in vhost_signal()
2477 struct vhost_virtqueue *vq, in vhost_add_used_and_signal() argument
2480 vhost_add_used(vq, head, len); in vhost_add_used_and_signal()
2481 vhost_signal(dev, vq); in vhost_add_used_and_signal()
2487 struct vhost_virtqueue *vq, in vhost_add_used_and_signal_n() argument
2490 vhost_add_used_n(vq, heads, count); in vhost_add_used_and_signal_n()
2491 vhost_signal(dev, vq); in vhost_add_used_and_signal_n()
2496 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_vq_avail_empty() argument
2501 if (vq->avail_idx != vq->last_avail_idx) in vhost_vq_avail_empty()
2504 r = vhost_get_avail_idx(vq, &avail_idx); in vhost_vq_avail_empty()
2507 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); in vhost_vq_avail_empty()
2509 return vq->avail_idx == vq->last_avail_idx; in vhost_vq_avail_empty()
2514 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_enable_notify() argument
2519 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
2521 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
2522 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_enable_notify()
2523 r = vhost_update_used_flags(vq); in vhost_enable_notify()
2525 vq_err(vq, "Failed to enable notification at %p: %d\n", in vhost_enable_notify()
2526 &vq->used->flags, r); in vhost_enable_notify()
2530 r = vhost_update_avail_event(vq, vq->avail_idx); in vhost_enable_notify()
2532 vq_err(vq, "Failed to update avail event index at %p: %d\n", in vhost_enable_notify()
2533 vhost_avail_event(vq), r); in vhost_enable_notify()
2540 r = vhost_get_avail_idx(vq, &avail_idx); in vhost_enable_notify()
2542 vq_err(vq, "Failed to check avail idx at %p: %d\n", in vhost_enable_notify()
2543 &vq->avail->idx, r); in vhost_enable_notify()
2547 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; in vhost_enable_notify()
2552 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) in vhost_disable_notify() argument
2556 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
2558 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
2559 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { in vhost_disable_notify()
2560 r = vhost_update_used_flags(vq); in vhost_disable_notify()
2562 vq_err(vq, "Failed to disable notification at %p: %d\n", in vhost_disable_notify()
2563 &vq->used->flags, r); in vhost_disable_notify()
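vhost_enable_notify()/vhost_disable_notify() drive kick suppression in the opposite direction (guest-to-host) through one of two negotiated mechanisms, mirroring the branches above. A summary sketch in comment form:

/* Without VIRTIO_RING_F_EVENT_IDX:
 *   toggle VRING_USED_F_NO_NOTIFY in vq->used->flags via
 *   vhost_update_used_flags(); the guest checks the flag before each
 *   kick, so suppression is advisory and a late kick may still land
 *   after disable.
 *
 * With EVENT_IDX:
 *   publish the avail index we want to be woken at through
 *   vhost_update_avail_event(vq, vq->avail_idx); the guest applies
 *   vring_need_event() against it before kicking.
 *
 * Either way, vhost_enable_notify() re-reads avail->idx and returns
 * true when buffers slipped in meanwhile, closing the race shown in
 * the consume-loop sketch above. */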
2569 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) in vhost_new_msg() argument
2577 node->vq = vq; in vhost_new_msg()
2613 struct vhost_virtqueue *vq; in vhost_set_backend_features() local
2618 vq = dev->vqs[i]; in vhost_set_backend_features()
2619 mutex_lock(&vq->mutex); in vhost_set_backend_features()
2620 vq->acked_backend_features = features; in vhost_set_backend_features()
2621 mutex_unlock(&vq->mutex); in vhost_set_backend_features()