/linux/tools/io_uring/
  liburing.h
      112  memset(sqe, 0, sizeof(*sqe));  in io_uring_prep_rw()
      113  sqe->opcode = op;  in io_uring_prep_rw()
      114  sqe->fd = fd;  in io_uring_prep_rw()
      115  sqe->off = offset;  in io_uring_prep_rw()
      117  sqe->len = len;  in io_uring_prep_rw()
      151  memset(sqe, 0, sizeof(*sqe));  in io_uring_prep_poll_add()
      153  sqe->fd = fd;  in io_uring_prep_poll_add()
      163  memset(sqe, 0, sizeof(*sqe));  in io_uring_prep_poll_remove()
      171  memset(sqe, 0, sizeof(*sqe));  in io_uring_prep_fsync()
      173  sqe->fd = fd;  in io_uring_prep_fsync()
      [all …]
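The liburing.h hits above are the SQE preparation helpers: each one zeroes the SQE and then fills only the fields its opcode needs, so everything else stays in a defined state. A minimal sketch of such a prep helper, reconstructed from the lines shown; the exact signature and the addr assignment are assumptions, since the listing elides them:

    /* Sketch of an SQE prep helper in the style of the matches above.
     * Signature is reconstructed, not copied from the file. */
    static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe,
                                        int fd, const void *addr,
                                        unsigned len, off_t offset)
    {
            memset(sqe, 0, sizeof(*sqe));   /* clean slate: unused fields stay 0 */
            sqe->opcode = op;
            sqe->fd = fd;
            sqe->off = offset;
            sqe->addr = (unsigned long) addr;   /* assumed; elided in the listing */
            sqe->len = len;
    }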

  io_uring-cp.c
      71   struct io_uring_sqe *sqe;  in queue_prepped() local
      73   sqe = io_uring_get_sqe(ring);  in queue_prepped()
      74   assert(sqe);  in queue_prepped()
      77   io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);  in queue_prepped()
      79   io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);  in queue_prepped()
      81   io_uring_sqe_set_data(sqe, data);  in queue_prepped()
      86   struct io_uring_sqe *sqe;  in queue_read() local
      93   sqe = io_uring_get_sqe(ring);  in queue_read()
      94   if (!sqe) {  in queue_read()
      106  io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);  in queue_read()
      [all …]
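queue_prepped() above shows the usual liburing submission pattern: get an SQE from the ring, prep it as a readv or writev depending on copy direction, and attach a user-data pointer so the completion can be matched back to the request. A sketch, assuming infd/outfd are the tool's global file descriptors and struct io_data carries a read/write flag:

    /* Sketch of the queue_prepped() flow in the matches above. */
    static void queue_prepped(struct io_uring *ring, struct io_data *data)
    {
            struct io_uring_sqe *sqe;

            sqe = io_uring_get_sqe(ring);
            assert(sqe);    /* caller guarantees a free SQE here */

            if (data->read)
                    io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
            else
                    io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);

            /* round-trips through the completion as cqe->user_data */
            io_uring_sqe_set_data(sqe, data);
    }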

  io_uring-bench.c
      151  sqe->opcode = IORING_OP_NOP;  in init_io()
      172  sqe->flags = IOSQE_FIXED_FILE;  in init_io()
      173  sqe->fd = f->fixed_fd;  in init_io()
      175  sqe->flags = 0;  in init_io()
      176  sqe->fd = f->real_fd;  in init_io()
      181  sqe->len = BS;  in init_io()
      182  sqe->buf_index = index;  in init_io()
      186  sqe->len = 1;  in init_io()
      187  sqe->buf_index = 0;  in init_io()
      189  sqe->ioprio = 0;  in init_io()
      [all …]

  queue.c
      145  struct io_uring_sqe *sqe;  in io_uring_get_sqe() local
      153  sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];  in io_uring_get_sqe()
      155  return sqe;  in io_uring_get_sqe()
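queue.c line 153 is the heart of SQE allocation: the submission queue is a power-of-two ring, so the next free slot is simply the tail masked by the ring size. A sketch with the fullness check filled in as an assumption; the listing shows only the indexing and the return:

    /* Sketch of io_uring_get_sqe() based on the matches above. */
    struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
    {
            struct io_uring_sq *sq = &ring->sq;
            struct io_uring_sqe *sqe;

            /* assumed fullness check against the kernel-visible head */
            if (sq->sqe_tail + 1 - sq->sqe_head > *sq->kring_entries)
                    return NULL;

            sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];    /* line 153 */
            sq->sqe_tail++;
            return sqe;
    }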

/linux/drivers/infiniband/sw/siw/
  siw_qp.c
      275   wqe->sqe.flags = 0;  in siw_qp_mpa_rts()
      276   wqe->sqe.num_sge = 1;  in siw_qp_mpa_rts()
      284   wqe->sqe.rkey = 1;  in siw_qp_mpa_rts()
      285   wqe->sqe.raddr = 0;  in siw_qp_mpa_rts()
      871   rreq->id = sqe->id;  in siw_read_to_orq()
      883   struct siw_sqe *sqe;  in siw_activate_tx_from_sq() local
      888   if (!sqe)  in siw_activate_tx_from_sq()
      895   memcpy(&wqe->sqe, sqe, sizeof(*sqe));  in siw_activate_tx_from_sq()
      911   wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];  in siw_activate_tx_from_sq()
      1003  wqe->sqe.flags = 0;  in siw_activate_tx()
      [all …]

  siw_qp_tx.c
      43   struct siw_sge *sge = &wqe->sqe.sge[0];  in siw_try_1seg()
      53   memcpy(paddr, &wqe->sqe.sge[1], bytes);  in siw_try_1seg()
      139  cpu_to_be64(wqe->sqe.sge[0].laddr);  in siw_qp_prepare_tx()
      246  cpu_to_be64(wqe->sqe.raddr);  in siw_qp_prepare_tx()
      650  sge = &wqe->sqe.sge[c_tx->sge_idx];  in siw_tx_hdt()
      807  wqe->sqe.num_sge = 1;  in siw_qp_sq_proc_tx()
      835  wqe->sqe.sge[0].laddr =  in siw_qp_sq_proc_tx()
      836  (u64)(uintptr_t)&wqe->sqe.sge[1];  in siw_qp_sq_proc_tx()
      858  wqe->sqe.id);  in siw_qp_sq_proc_tx()
      965  mem->stag = sqe->rkey;  in siw_fastreg_mr()
      [all …]

  siw_verbs.c
      641  void *kbuf = &sqe->sge[1];  in siw_copy_inline_sgl()
      645  sqe->sge[0].lkey = 0;  in siw_copy_inline_sgl()
      673  struct siw_sqe sqe = {};  in siw_sq_flush_wr() local
      677  sqe.id = wr->wr_id;  in siw_sq_flush_wr()
      678  sqe.opcode = wr->opcode;  in siw_sq_flush_wr()
      783  if (sqe->flags) {  in siw_post_send()
      793  sqe->id = wr->wr_id;  in siw_post_send()
      819  sqe->num_sge = 1;  in siw_post_send()
      848  sqe->num_sge = 1;  in siw_post_send()
      868  sqe->num_sge = 1;  in siw_post_send()
      [all …]

  siw.h
      192  struct siw_sqe sqe;  member
      476  #define tx_type(wqe) ((wqe)->sqe.opcode)
      478  #define tx_flags(wqe) ((wqe)->sqe.flags)
      523  void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
      524  int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
      627  struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];  in siw_sq_empty() local
      629  return READ_ONCE(sqe->flags) == 0;  in siw_sq_empty()
      634  struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];  in sq_get_next() local
      636  if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)  in sq_get_next()
      637  return sqe;  in sq_get_next()
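The siw.h hits at lines 627-637 show how siw polls its send queue without a lock: each slot's flags word doubles as a valid bit, published by the producer and read with READ_ONCE() on the consumer side. The two inline helpers, untangled from the flattened listing (the final return NULL in sq_get_next() is an assumption, since the listing elides it):

    static inline bool siw_sq_empty(struct siw_qp *qp)
    {
            struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

            return READ_ONCE(sqe->flags) == 0;  /* free slot => queue drained */
    }

    static inline struct siw_sqe *sq_get_next(struct siw_qp *qp)
    {
            struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];

            if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
                    return sqe;
            return NULL;    /* assumed fall-through */
    }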

  siw_qp_rx.c
      176  srx->ddp_stag = wqe->sqe.sge[0].lkey;  in siw_rresp_check_ntoh()
      177  srx->ddp_to = wqe->sqe.sge[0].laddr;  in siw_rresp_check_ntoh()
      695  resp = &tx_work->sqe;  in siw_init_rresp()
      758  wqe->sqe.id = orqe->id;  in siw_orqe_start_rx()
      759  wqe->sqe.opcode = orqe->opcode;  in siw_orqe_start_rx()
      760  wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;  in siw_orqe_start_rx()
      761  wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;  in siw_orqe_start_rx()
      763  wqe->sqe.flags = orqe->flags;  in siw_orqe_start_rx()
      764  wqe->sqe.num_sge = 1;  in siw_orqe_start_rx()
      827  sge = wqe->sqe.sge; /* there is only one */  in siw_proc_rresp()
      [all …]

/linux/drivers/crypto/hisilicon/zip/
  zip_crypto.c
      284  sqe->dw9 = val;  in hisi_zip_fill_buf_type()
      293  sqe->dw9 = val;  in hisi_zip_fill_req_type()
      298  sqe->dw13 = req->req_id;  in hisi_zip_fill_tag_v1()
      303  sqe->dw26 = req->req_id;  in hisi_zip_fill_tag_v2()
      312  sqe->dw7 = val;  in hisi_zip_fill_sqe_type()
      322  ops->fill_addr(sqe, req);  in hisi_zip_fill_sqe()
      326  ops->fill_tag(sqe, req);  in hisi_zip_fill_sqe()
      385  return sqe->dw13;  in hisi_zip_get_tag_v1()
      390  return sqe->dw26;  in hisi_zip_get_tag_v2()
      400  return sqe->produced;  in hisi_zip_get_dstlen()
      [all …]

/linux/fs/
  io_uring.c
      3802  if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||  in io_unlinkat_prep()
      3851  if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||  in io_mkdirat_prep()
      3894  if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||  in io_symlinkat_prep()
      4341  if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||  in io_remove_buffers_prep()
      4547  if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)  in io_madvise_prep()
      4582  if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)  in io_fadvise_prep()
      4655  if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||  in io_close_prep()
      5221  if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||  in io_connect_prep()
      5867  if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->addr)  in io_poll_add_prep()
      6103  if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)  in io_timeout_remove_prep()
      [all …]
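Every match in fs/io_uring.c above is the same defensive idiom: an opcode's prep handler returns -EINVAL if the SQE sets any field that opcode does not consume, which keeps those fields reserved for future ABI extensions. A sketch of one handler built around the check at line 4547; the decode step is an assumption:

    /* Sketch of the prep-time validation pattern shown above. */
    static int io_madvise_prep(struct io_kiocb *req,
                               const struct io_uring_sqe *sqe)
    {
            /* reject fields madvise doesn't use, per line 4547 */
            if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
                    return -EINVAL;

            /* ... decode sqe->addr, sqe->len, sqe->fadvise_advice into req ... */
            return 0;
    }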

/linux/include/trace/events/
  io_uring.h
      513  TP_ARGS(sqe, error),
      533  __entry->opcode = sqe->opcode;
      534  __entry->flags = sqe->flags;
      535  __entry->ioprio = sqe->ioprio;
      536  __entry->off = sqe->off;
      537  __entry->addr = sqe->addr;
      538  __entry->len = sqe->len;
      539  __entry->op_flags = sqe->rw_flags;
      540  __entry->user_data = sqe->user_data;
      544  __entry->pad1 = sqe->__pad2[0];
      [all …]

/linux/drivers/net/ethernet/qlogic/qed/
  qed_nvmetcp_fw_funcs.c
      68   if (!task_params->sqe)  in init_sqe()
      71   memset(task_params->sqe, 0, sizeof(*task_params->sqe));  in init_sqe()
      72   task_params->sqe->task_id = cpu_to_le16(task_params->itid);  in init_sqe()
      79   SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
      81   SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,  in init_sqe()
      94   SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);  in init_sqe()
      99   SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,  in init_sqe()
      101  SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
      106  SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,  in init_sqe()
      112  SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,  in init_sqe()
      [all …]
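init_sqe() above packs the firmware SQE with SET_FIELD(), which derives a mask and shift from the field name and ORs the value into a word. A self-contained illustration of the idiom with a hypothetical WQE_TYPE field; the real macro and field definitions live in the qed HSI headers, so this is an illustration of the pattern, not the driver's code:

    #include <stdio.h>

    /* Hypothetical field: 4 bits at offset 0, for illustration only. */
    #define WQE_TYPE_MASK   0xf
    #define WQE_TYPE_SHIFT  0

    /* Sketch of a SET_FIELD-style packing macro. */
    #define SET_FIELD(value, name, val)                                       \
            do {                                                              \
                    (value) &= ~((name ## _MASK) << (name ## _SHIFT));        \
                    (value) |= ((val) & (name ## _MASK)) << (name ## _SHIFT); \
            } while (0)

    int main(void)
    {
            unsigned int flags = 0;

            SET_FIELD(flags, WQE_TYPE, 3);  /* pack the wqe-type bits */
            printf("flags = 0x%x\n", flags);
            return 0;
    }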

/linux/drivers/scsi/qedf/
  drv_fcoe_fw_funcs.c
      13   memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));  in init_common_sqe()
      14   SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,  in init_common_sqe()
      16   task_params->sqe->task_id = task_params->itid;  in init_common_sqe()
      167  task_params->sqe->additional_info_union.burst_length =  in init_initiator_midpath_unsolicited_fcoe_task()
      169  SET_FIELD(task_params->sqe->flags,  in init_initiator_midpath_unsolicited_fcoe_task()
      171  SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,  in init_initiator_midpath_unsolicited_fcoe_task()
      193  task_params->sqe->additional_info_union.seq_rec_updated_offset =  in init_initiator_sequence_recovery_fcoe_task()

  qedf_io.c
      588   struct fcoe_wqe *sqe)  in qedf_init_task() argument
      624   io_req->task_params->sqe = sqe;  in qedf_init_task()
      703   io_req->task_params->sqe = sqe;  in qedf_init_mp_task()
      855   struct fcoe_wqe *sqe;  in qedf_post_io_req() local
      902   sqe = &fcport->sq[sqe_idx];  in qedf_post_io_req()
      1862  struct fcoe_wqe *sqe;  in qedf_initiate_abts() local
      1940  sqe = &fcport->sq[sqe_idx];  in qedf_initiate_abts()
      1942  io_req->task_params->sqe = sqe;  in qedf_initiate_abts()
      2155  struct fcoe_wqe *sqe;  in qedf_initiate_cleanup() local
      2225  io_req->task_params->sqe = sqe;  in qedf_initiate_cleanup()
      [all …]

  qedf_els.c
      23   struct fcoe_wqe *sqe;  in qedf_initiate_els() local
      120  sqe = &fcport->sq[sqe_idx];  in qedf_initiate_els()
      121  memset(sqe, 0, sizeof(struct fcoe_wqe));  in qedf_initiate_els()
      125  qedf_init_mp_task(els_req, task, sqe);  in qedf_initiate_els()
      702  struct fcoe_wqe *sqe;  in qedf_initiate_seq_cleanup() local
      732  sqe = &fcport->sq[sqe_idx];  in qedf_initiate_seq_cleanup()
      733  memset(sqe, 0, sizeof(struct fcoe_wqe));  in qedf_initiate_seq_cleanup()
      734  orig_io_req->task_params->sqe = sqe;  in qedf_initiate_seq_cleanup()

  drv_fcoe_fw_funcs.h
      16  struct fcoe_wqe *sqe;  member

/linux/drivers/scsi/qedi/
  qedi_fw_api.c
      98   if (!task_params->sqe)  in init_sqe()
      101  memset(task_params->sqe, 0, sizeof(*task_params->sqe));  in init_sqe()
      104  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,  in init_sqe()
      115  init_dif_context_flags(&task_params->sqe->prot_flags,  in init_sqe()
      118  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,  in init_sqe()
      134  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,  in init_sqe()
      141  SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
      147  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,  in init_sqe()
      152  SET_FIELD(task_params->sqe->contlen_cdbsize,  in init_sqe()
      178  SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,  in init_sqe()
      [all …]

/linux/drivers/infiniband/hw/bnxt_re/
  qplib_fp.c
      1780  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
      1781  sqe->wqe_size = wqe_sz;  in bnxt_qplib_post_send()
      1798  sqe->wqe_type = wqe->type;  in bnxt_qplib_post_send()
      1799  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
      1800  sqe->wqe_size = wqe_sz;  in bnxt_qplib_post_send()
      1829  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
      1830  sqe->wqe_size = wqe_sz;  in bnxt_qplib_post_send()
      1849  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
      1866  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
      1877  sqe->flags = wqe->flags;  in bnxt_qplib_post_send()
      [all …]

/linux/drivers/dma/
  hisi_dma.c
      85   struct hisi_dma_sqe sqe;  member
      247  desc->sqe.length = cpu_to_le32(len);  in hisi_dma_prep_dma_memcpy()
      248  desc->sqe.src_addr = cpu_to_le64(src);  in hisi_dma_prep_dma_memcpy()
      249  desc->sqe.dst_addr = cpu_to_le64(dst);  in hisi_dma_prep_dma_memcpy()
      263  struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail;  in hisi_dma_start_transfer() local
      278  memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe));  in hisi_dma_start_transfer()
      281  sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));  in hisi_dma_start_transfer()
      282  sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);  in hisi_dma_start_transfer()
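hisi_dma.c above shows a two-phase submit: prep fills a software SQE inside the descriptor (lines 247-249), and start_transfer copies it into the hardware ring slot at sq_tail, then writes dw0 with the opcode and IRQ-enable bit (lines 278-282). A sketch condensing both phases into one hypothetical helper; the field and constant names follow the listing, but the helper itself is not in the driver:

    /* Sketch: fill a memcpy SQE the way the matches above do. All fields
     * are little-endian because the device consumes them directly. */
    static void hisi_dma_fill_memcpy_sqe(struct hisi_dma_sqe *sqe,
                                         dma_addr_t src, dma_addr_t dst,
                                         u32 len)
    {
            sqe->length   = cpu_to_le32(len);
            sqe->src_addr = cpu_to_le64(src);
            sqe->dst_addr = cpu_to_le64(dst);
            /* opcode and IRQ enable go into dw0 last, as at lines 281-282 */
            sqe->dw0  = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M));
            sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN);
    }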

/linux/drivers/infiniband/hw/cxgb4/
  restrack.c
      96   struct t4_swsqe *sqe)  in fill_swsqe() argument
      100  if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))  in fill_swsqe()
      102  if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))  in fill_swsqe()
      104  if (sqe->complete &&  in fill_swsqe()
      105  rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))  in fill_swsqe()
      107  if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))  in fill_swsqe()
      109  if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))  in fill_swsqe()

/linux/drivers/scsi/bnx2i/
  bnx2i.h
      498  struct sqe {  struct
      634  struct sqe *sq_virt;
      638  struct sqe *sq_prod_qe;
      639  struct sqe *sq_cons_qe;
      640  struct sqe *sq_first_qe;
      641  struct sqe *sq_last_qe;

/linux/drivers/nvme/host/
  rdma.c
      66    struct nvme_rdma_qe sqe;  member
      294   kfree(req->sqe.data);  in nvme_rdma_exit_request()
      308   if (!req->sqe.data)  in nvme_rdma_init_request()
      318   nvme_req(rq)->cmd = req->sqe.data;  in nvme_rdma_init_request()
      1716  struct nvme_command *cmd = sqe->data;  in nvme_rdma_submit_async_event()
      1728  sqe->cqe.done = nvme_rdma_async_done;  in nvme_rdma_submit_async_event()
      2044  struct nvme_rdma_qe *sqe = &req->sqe;  in nvme_rdma_queue_rq() local
      2058  req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,  in nvme_rdma_queue_rq()
      2065  ib_dma_sync_single_for_cpu(dev, sqe->dma,  in nvme_rdma_queue_rq()
      2090  sqe->cqe.done = nvme_rdma_send_done;  in nvme_rdma_queue_rq()
      [all …]

  fc.c
      1911  struct nvme_command *sqe = &op->cmd_iu.sqe;  in nvme_fc_fcpio_done() local
      2025  sqe->common.command_id,  in nvme_fc_fcpio_done()
      2132  nvme_req(rq)->cmd = &op->op.cmd_iu.sqe;  in nvme_fc_init_request()
      2141  struct nvme_command *sqe;  in nvme_fc_init_aen_ops() local
      2155  sqe = &cmdiu->sqe;  in nvme_fc_init_aen_ops()
      2167  memset(sqe, 0, sizeof(*sqe));  in nvme_fc_init_aen_ops()
      2556  struct nvme_command *sqe = &cmdiu->sqe;  in nvme_fc_timeout() local
      2566  sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11);  in nvme_fc_timeout()
      2659  struct nvme_command *sqe = &cmdiu->sqe;  in nvme_fc_start_fcp_op() local
      2697  WARN_ON_ONCE(sqe->common.metadata);  in nvme_fc_start_fcp_op()
      [all …]

/linux/drivers/crypto/hisilicon/hpre/
  hpre_crypto.c
      302   struct hpre_sqe *sqe = &req->req;  in hpre_hw_data_clr_all() local
      305   tmp = le64_to_cpu(sqe->in);  in hpre_hw_data_clr_all()
      316   tmp = le64_to_cpu(sqe->out);  in hpre_hw_data_clr_all()
      342   id = (int)le16_to_cpu(sqe->tag);  in hpre_alg_res_post_hf()
      462   struct hpre_sqe *sqe = resp;  in hpre_alg_cb() local
      1423  struct hpre_sqe *sqe = &req->req;  in hpre_ecdh_hw_data_clr_all() local
      1426  dma = le64_to_cpu(sqe->in);  in hpre_ecdh_hw_data_clr_all()
      1433  dma = le64_to_cpu(sqe->out);  in hpre_ecdh_hw_data_clr_all()
      1733  struct hpre_sqe *sqe = &req->req;  in hpre_curve25519_hw_data_clr_all() local
      1736  dma = le64_to_cpu(sqe->in);  in hpre_curve25519_hw_data_clr_all()
      [all …]
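The hpre_crypto.c matches are all on the completion/cleanup side: the HPRE hardware SQE stores DMA addresses and tags as little-endian fields, so the driver converts with le64_to_cpu()/le16_to_cpu() before unmapping or indexing. A minimal sketch of the cleanup step, assuming the buffer came from dma_alloc_coherent(); the helper name and the exact unmap call are assumptions, not the driver's code:

    /* Sketch of the pattern at lines 305/316 and 1426/1433 above. */
    static void hpre_clr_in_buf(struct device *dev, struct hpre_sqe *sqe,
                                void *vaddr, size_t len)
    {
            /* the SQE holds the address as __le64; convert before unmapping */
            dma_addr_t dma = le64_to_cpu(sqe->in);

            if (vaddr)
                    dma_free_coherent(dev, len, vaddr, dma);
    }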