Lines matching refs:sqe in drivers/nvme/host/rdma.c

66 	struct nvme_rdma_qe	sqe;  member of struct nvme_rdma_request
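
For orientation, a sketch of the queue-entry type behind these references. The field layout is inferred from the uses in this listing (cqe.done set at 1728/2090, data allocated at 307, dma mapped at 2058):

	struct nvme_rdma_qe {
		struct ib_cqe	cqe;	/* completion anchor; .done is set before posting */
		void		*data;	/* kzalloc'ed struct nvme_command buffer */
		u64		dma;	/* DMA address from ib_dma_map_single() */
	};
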
294 kfree(req->sqe.data); in nvme_rdma_exit_request()
307 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); in nvme_rdma_init_request()
308 if (!req->sqe.data) in nvme_rdma_init_request()
318 nvme_req(rq)->cmd = req->sqe.data; in nvme_rdma_init_request()
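
Lines 294-318 pair up as the command buffer's lifecycle: .init_request allocates a zeroed struct nvme_command and hands it to the NVMe core, .exit_request frees it. A minimal sketch with the blk-mq callback signatures simplified (the helper names here are illustrative, not the driver's):

	static int init_sqe_data(struct nvme_rdma_request *req, struct request *rq)
	{
		req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
		if (!req->sqe.data)
			return -ENOMEM;
		nvme_req(rq)->cmd = req->sqe.data;	/* NVMe core builds the command here */
		return 0;
	}

	static void exit_sqe_data(struct nvme_rdma_request *req)
	{
		kfree(req->sqe.data);	/* mirrors the kzalloc() above */
	}
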
1630 container_of(qe, struct nvme_rdma_request, sqe); in nvme_rdma_send_done()
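
Line 1630 is the completion side: the ib_cqe handed back by the CQ is the one embedded in req->sqe, so two container_of() steps recover the owning request. A sketch of that walk (body trimmed):

	static void send_done_sketch(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct nvme_rdma_qe *qe =
			container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
		struct nvme_rdma_request *req =
			container_of(qe, struct nvme_rdma_request, sqe);

		/* ... check wc->status and complete the request ... */
	}
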
1715 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event() local
1716 struct nvme_command *cmd = sqe->data; in nvme_rdma_submit_async_event()
1720 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); in nvme_rdma_submit_async_event()
1728 sqe->cqe.done = nvme_rdma_async_done; in nvme_rdma_submit_async_event()
1730 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), in nvme_rdma_submit_async_event()
1733 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL); in nvme_rdma_submit_async_event()
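
Lines 1720-1733 show the CPU/device ownership handoff for the controller-wide async-event SQE: sync to the CPU, write the command, set the completion callback, sync back to the device, post. A condensed sketch assuming the surrounding locals (dev, queue, sge) are set up as in the driver, with the command fields abbreviated:

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
	memset(cmd, 0, sizeof(*cmd));
	cmd->common.opcode = nvme_admin_async_event;	/* build the AER command */
	sqe->cqe.done = nvme_rdma_async_done;		/* runs on send completion */
	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
	ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
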
2044 struct nvme_rdma_qe *sqe = &req->sqe; in nvme_rdma_queue_rq() local
2058 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, in nvme_rdma_queue_rq()
2061 err = ib_dma_mapping_error(dev, req->sqe.dma); in nvme_rdma_queue_rq()
2065 ib_dma_sync_single_for_cpu(dev, sqe->dma, in nvme_rdma_queue_rq()
2090 sqe->cqe.done = nvme_rdma_send_done; in nvme_rdma_queue_rq()
2092 ib_dma_sync_single_for_device(dev, sqe->dma, in nvme_rdma_queue_rq()
2095 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2113 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_queue_rq()
2166 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_complete_rq()
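
Taken together, lines 2058-2166 are the per-I/O mapping lifetime: map once at dispatch, sync around the CPU writes, post, and unmap either on the queue_rq error path (2113) or at final completion (2166). A condensed sketch, with the data-SGL setup and the optional chained MR registration WR omitted:

	req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
					 sizeof(struct nvme_command), DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, req->sqe.dma)))
		return BLK_STS_RESOURCE;

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command),
				   DMA_TO_DEVICE);
	/* ... build the NVMe command and map the data ... */
	sqe->cqe.done = nvme_rdma_send_done;
	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command),
				      DMA_TO_DEVICE);

	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, NULL);
	if (unlikely(err))
		ib_dma_unmap_single(dev, req->sqe.dma,	/* error path (2113) */
				    sizeof(struct nvme_command), DMA_TO_DEVICE);

	/* normal path: nvme_rdma_complete_rq() issues the matching unmap (2166) */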