/linux/tools/io_uring/

  setup.c
      41  cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);   in io_uring_mmap()
      54  cq->cqes = ptr + p->cq_off.cqes;   in io_uring_mmap()

  io_uring-bench.c
      51  struct io_uring_cqe *cqes;   (member)
     257  cqe = &ring->cqes[head & cq_ring_mask];   in reap_events()
     449  ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),   in setup_ring()
     457  cring->cqes = ptr + p.cq_off.cqes;   in setup_ring()

  queue.c
      31  *cqe_ptr = &cq->cqes[head & mask];   in __io_uring_get_cqe()

  liburing.h
      41  struct io_uring_cqe *cqes;   (member)

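Every occurrence above is the same userspace pattern: mmap() the completion ring at the offsets the kernel reports in struct io_uring_params, locate the CQE array cq_off.cqes bytes into the mapping, and index it with head & ring_mask. A minimal sketch of that pattern, assuming a 5.1+ kernel, with error handling trimmed and no submissions actually queued:

    /* Raw CQ-ring mapping, mirroring setup.c and setup_ring() above. */
    #define _GNU_SOURCE
    #include <linux/io_uring.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring_params p;
        struct io_uring_cqe *cqes;
        unsigned *head, *tail, *mask;
        void *ptr;
        int fd;

        memset(&p, 0, sizeof(p));
        fd = syscall(__NR_io_uring_setup, 4, &p);
        if (fd < 0)
            return 1;

        /* Map the CQ ring; the CQE array sits cq_off.cqes bytes in. */
        ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(*cqes),
                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                   fd, IORING_OFF_CQ_RING);
        if (ptr == MAP_FAILED)
            return 1;

        head = ptr + p.cq_off.head;
        tail = ptr + p.cq_off.tail;
        mask = ptr + p.cq_off.ring_mask;
        cqes = ptr + p.cq_off.cqes;

        /* Reap loop: the same "cqes[head & mask]" indexing as
         * reap_events() and __io_uring_get_cqe(). */
        unsigned h = *head;
        while (h != __atomic_load_n(tail, __ATOMIC_ACQUIRE)) {
            struct io_uring_cqe *cqe = &cqes[h & *mask];

            printf("user_data=%llu res=%d\n",
                   (unsigned long long)cqe->user_data, cqe->res);
            h++;
        }
        __atomic_store_n(head, h, __ATOMIC_RELEASE);

        close(fd);
        return 0;
    }

On newer kernels, IORING_FEAT_SINGLE_MMAP lets the SQ and CQ rings share one mapping; liburing checks p.features before deciding whether to perform a second mmap().
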
/linux/drivers/infiniband/hw/cxgb4/

  restrack.c
     334  struct t4_cqe *cqes)   in fill_hwcqes() (argument)
     339  if (fill_cqe(msg, cqes, idx, "hwcq_idx"))   in fill_hwcqes()
     342  if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))   in fill_hwcqes()
     351  struct t4_cqe *cqes)   in fill_swcqes() (argument)
     359  if (fill_cqe(msg, cqes, idx, "swcq_idx"))   in fill_swcqes()
     364  if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))   in fill_swcqes()

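Both helpers report a snapshot pair: the CQE at the queue's current index and the one after it (cqes and cqes + 1), the restrack debug idiom of capturing the entry about to be consumed together with its successor. A hypothetical userspace analog that treats entries as opaque bytes; this struct t4_cqe layout and dump_two_cqes() are illustrative, not the driver's API:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in: the real struct t4_cqe has typed fields,
     * not a raw byte blob. */
    struct t4_cqe {
        uint8_t raw[64];
    };

    /* Dump the CQE at idx and its successor, mirroring the
     * fill_cqe(msg, cqes, ...) / fill_cqe(msg, cqes + 1, ...) pair. */
    static void dump_two_cqes(const struct t4_cqe *cqes, unsigned int idx,
                              const char *tag)
    {
        for (int e = 0; e < 2; e++) {
            printf("%s=%u cqe[%d]:", tag, idx, e);
            for (size_t i = 0; i < sizeof(cqes[e].raw); i++)
                printf(" %02x", cqes[e].raw[i]);
            printf("\n");
        }
    }

    int main(void)
    {
        struct t4_cqe pair[2] = { { .raw = { 0xde, 0xad } },
                                  { .raw = { 0xbe, 0xef } } };

        dump_two_cqes(pair, 7, "hwcq_idx");
        return 0;
    }
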
/linux/drivers/net/ethernet/broadcom/

  cnic.c
    1434  cqes, num_cqes);   in cnic_reply_bnx2x_kcqes()
    1552  struct kcqe *cqes[1];   in cnic_bnx2x_iscsi_init2() (local)
    1582  cqes[0] = (struct kcqe *) &kcqe;   in cnic_bnx2x_iscsi_init2()
    1881  struct kcqe *cqes[1];   in cnic_bnx2x_iscsi_ofld1() (local)
    1999  struct kcqe *cqes[1];   in cnic_bnx2x_iscsi_destroy() (local)
    2228  struct kcqe *cqes[1];   in cnic_bnx2x_offload_pg() (local)
    2243  struct kcqe *cqes[1];   in cnic_bnx2x_update_pg() (local)
    2353  struct kcqe *cqes[1];   in cnic_bnx2x_fcoe_ofld1() (local)
    2500  struct kcqe *cqes[1];   in cnic_bnx2x_fcoe_destroy() (local)
    2584  struct kcqe *cqes[1];   in cnic_bnx2x_kwqe_err() (local)
    [all …]

  cnic_if.h
     369  void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],

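The shape to notice is the upcall interface in cnic_if.h: the driver hands completed kernel CQEs to the upper-layer protocol (iSCSI or FCoE) through indicate_kcqes(), which takes an array of kcqe pointers plus a count. The many one-element struct kcqe *cqes[1] locals in cnic.c exist so a single, locally synthesized completion can travel through that same array interface. A sketch of the callback shape with stand-in types; only the indicate_kcqes signature mirrors cnic_if.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins: the real struct kcqe and the ULP ops
     * table carry many more fields. */
    struct kcqe {
        uint32_t kcqe_op_flag;
    };

    struct ulp_ops {
        void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
                               uint32_t num_cqes);
    };

    static void iscsi_indicate(void *ulp_ctx, struct kcqe *cqes[],
                               uint32_t num_cqes)
    {
        for (uint32_t i = 0; i < num_cqes; i++)
            printf("kcqe op/flag 0x%x\n", cqes[i]->kcqe_op_flag);
    }

    int main(void)
    {
        struct ulp_ops ops = { .indicate_kcqes = iscsi_indicate };
        struct kcqe kcqe = { .kcqe_op_flag = 0x21 };
        struct kcqe *cqes[1];   /* same one-element idiom as cnic.c */

        cqes[0] = &kcqe;        /* wrap one synthesized completion */
        ops.indicate_kcqes(NULL, cqes, 1);
        return 0;
    }
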
/linux/drivers/net/ethernet/mellanox/mlx5/core/

  en_stats.c
     264  s->tx_xdp_cqes += xdpsq_red_stats->cqes;   in mlx5e_stats_grp_sw_update_stats_xdp_red()
     276  s->rx_xdp_tx_cqe += xdpsq_stats->cqes;   in mlx5e_stats_grp_sw_update_stats_xdpsq()
     287  s->tx_xsk_cqes += xsksq_stats->cqes;   in mlx5e_stats_grp_sw_update_stats_xsksq()
     409  s->tx_cqes += sq_stats->cqes;   in mlx5e_stats_grp_sw_update_stats_sq()
    1838  { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
    1850  { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
    1860  { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
    1891  { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
    1915  { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
    1995  { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },

  en_stats.h
     401  u64 cqes ____cacheline_aligned_in_smp;
     414  u64 cqes ____cacheline_aligned_in_smp;

  en_tx.c
     875  stats->cqes += i;   in mlx5e_poll_tx_cq()

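The pattern: each queue owns a u64 cqes counter padded to its own cache line (____cacheline_aligned_in_smp), so the hot-path increment in mlx5e_poll_tx_cq(), and likewise in mlx5e_poll_xdpsq_cq() under en/ further down, never contends with another CPU's counter; a slow-path pass in en_stats.c then folds the per-queue values into the software totals. A minimal userspace sketch of that layout, assuming a 64-byte cache line; sq_stats, sw_stats and the function names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define CACHELINE 64    /* assumed; the kernel uses L1_CACHE_BYTES */

    /* Per-queue counters, each starting on its own cache line, like
     * "u64 cqes ____cacheline_aligned_in_smp" in en_stats.h. */
    struct sq_stats {
        _Alignas(CACHELINE) uint64_t cqes;
        _Alignas(CACHELINE) uint64_t packets;
    };

    struct sw_stats {
        uint64_t tx_cqes;
        uint64_t tx_packets;
    };

    /* Hot path: only the owning CPU touches its queue's counters. */
    static void poll_tx_cq(struct sq_stats *stats, int completed)
    {
        stats->cqes += completed;   /* cf. "stats->cqes += i" in en_tx.c */
    }

    /* Slow path: fold per-queue counters into the global view,
     * cf. mlx5e_stats_grp_sw_update_stats_sq(). */
    static void update_sw_stats(struct sw_stats *s, struct sq_stats *qs,
                                int nqueues)
    {
        for (int q = 0; q < nqueues; q++) {
            s->tx_cqes += qs[q].cqes;
            s->tx_packets += qs[q].packets;
        }
    }

    int main(void)
    {
        struct sq_stats queues[2] = { 0 };
        struct sw_stats sw = { 0 };

        poll_tx_cq(&queues[0], 8);
        poll_tx_cq(&queues[1], 3);
        update_sw_stats(&sw, queues, 2);
        printf("tx_cqes=%llu\n", (unsigned long long)sw.tx_cqes);
        return 0;
    }
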
/linux/drivers/nvme/target/

  passthru.c
      87  id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);   in nvmet_passthru_override_id_ctrl()

  admin-cmd.c
     426  id->cqes = (0x4 << 4) | 0x4;   in nvmet_execute_identify_ctrl()

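CQES in the Identify Controller data is a packed byte: bits 3:0 give the required (minimum) completion queue entry size and bits 7:4 the maximum, each as log2 of the byte count. (0x4 << 4) | 0x4 therefore advertises exactly 16-byte CQEs, and the min_t() in passthru.c clamps the advertised value so the target never claims more than the underlying controller supports. A small decode helper as a worked example:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the Identify Controller CQES byte: bits 3:0 hold the
     * required (minimum) CQ entry size, bits 7:4 the maximum, both
     * encoded as log2(bytes). */
    static void decode_cqes(uint8_t cqes)
    {
        unsigned int min_sz = 1u << (cqes & 0xf);
        unsigned int max_sz = 1u << (cqes >> 4);

        printf("CQ entry size: min %u bytes, max %u bytes\n",
               min_sz, max_sz);
    }

    int main(void)
    {
        decode_cqes((0x4 << 4) | 0x4);  /* the value admin-cmd.c reports */
        return 0;                       /* prints: min 16, max 16 */
    }
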
/linux/include/uapi/linux/

  io_uring.h
     241  __u32 cqes;   (member)

/linux/drivers/nvme/host/

  pci.c
     195  struct nvme_completion *cqes;   (member)
     995  struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];   in nvme_cqe_pending()
    1019  struct nvme_completion *cqe = &nvmeq->cqes[idx];   in nvme_handle_cqe()
    1387  (void *)nvmeq->cqes, nvmeq->cq_dma_addr);   in nvme_free_queue()
    1527  nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),   in nvme_alloc_queue()
    1529  if (!nvmeq->cqes)   in nvme_alloc_queue()
    1547  dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,   in nvme_alloc_queue()
    1576  memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));   in nvme_init_queue()

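Unlike io_uring's ring, an NVMe completion queue has no tail the host can read: nvme_cqe_pending() instead compares the phase tag (bit 0 of the CQE status word) against the phase the queue currently expects, which flips each time the head wraps. The queue is allocated as zeroed DMA-coherent memory (dma_alloc_coherent() plus the memset() in nvme_init_queue()), so an entry the controller has never written cannot match the initial expected phase of 1. A userspace sketch of just the phase logic, with trimmed stand-in types:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Trimmed completion entry: the real struct nvme_completion also
     * carries result, sq_head, sq_id and command_id. */
    struct completion {
        uint16_t status;    /* bit 0 is the phase tag */
    };

    struct cq {
        struct completion *cqes;
        uint16_t head, depth;
        uint8_t phase;      /* phase value meaning "new entry" */
    };

    /* cf. nvme_cqe_pending(): an entry is new iff its phase tag
     * matches the phase the host currently expects. */
    static int cqe_pending(struct cq *q)
    {
        return (q->cqes[q->head].status & 1) == q->phase;
    }

    /* Advance head; on wraparound the expected phase flips, because
     * the controller writes the opposite phase on its next pass. */
    static void advance(struct cq *q)
    {
        if (++q->head == q->depth) {
            q->head = 0;
            q->phase ^= 1;
        }
    }

    int main(void)
    {
        struct completion ring[4];
        struct cq q = { .cqes = ring, .depth = 4, .phase = 1 };

        memset(ring, 0, sizeof(ring));      /* cf. nvme_init_queue() */
        printf("pending after init: %d\n", cqe_pending(&q)); /* 0 */
        ring[0].status = 1;                 /* "controller" posts one */
        printf("pending after post: %d\n", cqe_pending(&q)); /* 1 */
        advance(&q);
        return 0;
    }
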
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

  xdp.c
     459  sq->stats->cqes += i;   in mlx5e_poll_xdpsq_cq()

/linux/include/linux/

  nvme.h
     300  __u8 cqes;   (member)

/linux/fs/

  io_uring.c
     199  struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;   (member)
    1696  return &rings->cqes[tail & mask];   in io_get_cqe()
    8862  off = struct_size(rings, cqes, cq_entries);   in rings_size()
   10193  struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask];   in __io_uring_show_fdinfo()
   10468  p->cq_off.cqes = offsetof(struct io_rings, cqes);   in io_uring_create()

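This is the kernel side of the mapping the tools/io_uring programs above consume: cqes[] is a flexible array member at the tail of struct io_rings, rings_size() computes the allocation size with the overflow-checked struct_size() helper, and io_uring_create() publishes the array's offset to userspace as cq_off.cqes. A userspace sketch of the same sizing arithmetic, substituting __builtin_mul_overflow for the kernel's struct_size(); the ring and cqe types are trimmed stand-ins:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cqe {
        uint64_t user_data;
        int32_t res;
        uint32_t flags;
    };

    struct rings {
        uint32_t head, tail, mask;
        struct cqe cqes[];  /* flexible array, as in struct io_rings */
    };

    /* Overflow-checked equivalent of struct_size(rings, cqes, n):
     * sizeof(struct rings) + n * sizeof(struct cqe), or SIZE_MAX
     * if the arithmetic would wrap. */
    static size_t rings_size(size_t n)
    {
        size_t array, total;

        if (__builtin_mul_overflow(n, sizeof(struct cqe), &array) ||
            __builtin_add_overflow(sizeof(struct rings), array, &total))
            return SIZE_MAX;
        return total;
    }

    int main(void)
    {
        size_t sz = rings_size(128);
        struct rings *r;

        if (sz == SIZE_MAX || !(r = calloc(1, sz)))
            return 1;
        printf("alloc %zu bytes, cqes at offset %zu\n",
               sz, offsetof(struct rings, cqes)); /* cf. cq_off.cqes */
        free(r);
        return 0;
    }
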