/linux/drivers/media/mc/
  mc-request.c
    76  kfree(req);  in media_request_release()
    277  return req;  in media_request_get_by_fd()
    302  req = kzalloc(sizeof(*req), GFP_KERNEL);  in media_request_alloc()
    303  if (!req)  in media_request_alloc()
    331  snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",  in media_request_alloc()
    346  kfree(req);  in media_request_alloc()
    355  struct media_request *req = obj->req;  in media_request_object_release() local
    396  obj->req = NULL;  in media_request_object_init()
    421  obj->req = req;  in media_request_object_bind()
    440  struct media_request *req = obj->req;  in media_request_object_unbind() local
    [all …]

/linux/drivers/s390/scsi/
  zfcp_fsf.c
    96  mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);  in zfcp_fsf_req_free()
    97  mempool_free(req, req->pool);  in zfcp_fsf_req_free()
    103  kfree(req);  in zfcp_fsf_req_free()
    464  req->handler(req);  in zfcp_fsf_req_complete()
    810  req = kmalloc(sizeof(*req), GFP_ATOMIC);  in zfcp_fsf_alloc()
    815  memset(req, 0, sizeof(*req));  in zfcp_fsf_alloc()
    817  return req;  in zfcp_fsf_alloc()
    868  req->qtcb->prefix.req_id = req->req_id;  in zfcp_fsf_req_create()
    872  req->qtcb->header.req_handle = req->req_id;  in zfcp_fsf_req_create()
    879  return req;  in zfcp_fsf_req_create()
    [all …]

/linux/drivers/block/drbd/
  drbd_req.c
    29  if (!req)  in drbd_req_new()
    31  memset(req, 0, sizeof(*req));  in drbd_req_new()
    34  req->private_bio->bi_private = req;  in drbd_req_new()
    60  return req;  in drbd_req_new()
    113  s, (unsigned long long)req->i.sector, req->i.size);  in drbd_req_destroy()
    251  bio_end_io_acct(req->master_bio, req->start_jif);  in drbd_req_complete()
    820  if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {  in __req_mod()
    1039  req->i.sector, req->i.size)) {  in do_remote_read()
    1134  req->i.sector, req->i.size >> 9, flags);  in drbd_process_discard_or_zeroes_req()
    1343  if (!do_remote_read(req) && !req->private_bio)  in drbd_send_and_submit()
    [all …]

/linux/drivers/nvme/target/
  admin-cmd.c
    45  nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));  in nvmet_execute_get_log_page_noop()
    240  status = nvmet_zero_sgl(req, len, req->transfer_len - len);  in nvmet_execute_get_log_changed_ns()
    340  req->cmd->get_log_page.lid, req->sq->qid);  in nvmet_execute_get_log_page()
    521  cpu_to_le64(req->ns->size >> req->ns->blksize_shift);  in nvmet_execute_identify_ns()
    522  switch (req->port->ana_state[req->ns->anagrpid]) {  in nvmet_execute_identify_ns()
    552  if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {  in nvmet_execute_identify_ns()
    636  if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {  in nvmet_execute_identify_desclist()
    643  if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {  in nvmet_execute_identify_desclist()
    808  nvmet_set_result(req, req->sq->ctrl->kato);  in nvmet_set_feat_kato()
    894  nvmet_set_result(req, req->sq->ctrl->kato * 1000);  in nvmet_get_feat_kato()
    [all …]

  io-cmd-file.c
    134  if (req->f.bvec != req->inline_bvec) {  in nvmet_file_io_done()
    138  mempool_free(req->f.bvec, req->ns->bvec_pool);  in nvmet_file_io_done()
    160  pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;  in nvmet_file_execute_io()
    161  if (unlikely(pos + req->transfer_len > req->ns->size)) {  in nvmet_file_execute_io()
    162  nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));  in nvmet_file_execute_io()
    167  for_each_sg(req->sg, sg, req->sg_cnt, i) {  in nvmet_file_execute_io()
    247  if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))  in nvmet_file_execute_rw()
    259  req->f.bvec = req->inline_bvec;  in nvmet_file_execute_rw()
    263  req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);  in nvmet_file_execute_rw()
    287  nvmet_req_complete(req, nvmet_file_flush(req));  in nvmet_file_flush_work()
    [all …]

  zns.c
    142  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);  in nvmet_bdev_validate_zone_mgmt_recv()
    180  req->error_loc =  in nvmet_bdev_validate_zone_mgmt_recv()
    239  unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);  in nvmet_req_nr_zones_from_slba()
    269  .req = req,  in nvmet_bdev_zone_zmgmt_recv_work()
    392  .req = req,  in nvmet_bdev_zone_mgmt_emulate_all()
    464  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);  in nvmet_bdev_zmgmt_send_work()
    518  nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));  in nvmet_bdev_zone_append_bio_done()
    524  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);  in nvmet_bdev_execute_zone_append()
    532  if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))  in nvmet_bdev_execute_zone_append()
    554  bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));  in nvmet_bdev_execute_zone_append()
    [all …]

  io-cmd-bdev.c
    159  req->error_slba = le64_to_cpu(req->cmd->rw.slba);  in blk_to_nvme_status()
    162  req->error_slba =  in blk_to_nvme_status()
    175  nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));  in nvmet_bio_done()
    266  sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);  in nvmet_bdev_execute_rw()
    270  bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));  in nvmet_bdev_execute_rw()
    285  for_each_sg(req->sg, sg, req->sg_cnt, i) {  in nvmet_bdev_execute_rw()
    331  bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));  in nvmet_bdev_execute_flush()
    396  if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))  in nvmet_bdev_execute_dsm()
    434  nvmet_req_complete(req, errno_to_nvme_status(req, ret));  in nvmet_bdev_execute_write_zeroes()
    444  if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))  in nvmet_bdev_parse_io_cmd()
    [all …]

  fabrics-cmd.c
    18  if (req->cmd->prop_set.attrib & 1) {  in nvmet_execute_prop_set()
    19  req->error_loc =  in nvmet_execute_prop_set()
    30  req->error_loc =  in nvmet_execute_prop_set()
    35  nvmet_req_complete(req, status);  in nvmet_execute_prop_set()
    74  req->error_loc =  in nvmet_execute_prop_get()
    77  req->error_loc =  in nvmet_execute_prop_get()
    82  nvmet_req_complete(req, status);  in nvmet_execute_prop_get()
    149  req->sq->sqhd_disabled = true;  in nvmet_install_queue()
    166  req->sq->ctrl = NULL;  in nvmet_install_queue()
    191  req->cqe->result.u32 = 0;  in nvmet_execute_admin_connect()
    [all …]

  passthru.c
    96  if (req->port->inline_data_size)  in nvmet_passthru_override_id_ctrl()
    165  struct request *rq = req->p.rq;  in nvmet_passthru_execute_cmd_work()
    208  bio = &req->p.inline_bio;  in nvmet_passthru_map_sg()
    209  bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));  in nvmet_passthru_map_sg()
    216  for_each_sg(req->sg, sg, req->sg_cnt, i) {  in nvmet_passthru_map_sg()
    219  nvmet_req_bio_put(req, bio);  in nvmet_passthru_map_sg()
    265  if (req->sg_cnt) {  in nvmet_passthru_execute_cmd()
    283  req->p.rq = rq;  in nvmet_passthru_execute_cmd()
    284  schedule_work(&req->p.work);  in nvmet_passthru_execute_cmd()
    286  rq->end_io_data = req;  in nvmet_passthru_execute_cmd()
    [all …]

/linux/drivers/staging/greybus/
  audio_apbridgea.c
    24  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_config()
    45  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_register_cport()
    62  ret = gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_unregister_cport()
    80  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_tx_data_size()
    93  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_prepare_tx()
    107  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_start_tx()
    119  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_stop_tx()
    132  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_shutdown_tx()
    146  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_rx_data_size()
    159  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_prepare_rx()
    [all …]

  audio_gb.c
    58  &req, sizeof(req), &resp, sizeof(resp));  in gb_audio_gb_get_control()
    79  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_control()
    91  &req, sizeof(req), NULL, 0);  in gb_audio_gb_enable_widget()
    103  &req, sizeof(req), NULL, 0);  in gb_audio_gb_disable_widget()
    144  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_pcm()
    157  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_tx_data_size()
    169  &req, sizeof(req), NULL, 0);  in gb_audio_gb_activate_tx()
    181  &req, sizeof(req), NULL, 0);  in gb_audio_gb_deactivate_tx()
    194  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_rx_data_size()
    206  &req, sizeof(req), NULL, 0);  in gb_audio_gb_activate_rx()
    [all …]

/linux/drivers/infiniband/hw/hfi1/
  user_sdma.c
    629  req->tids[req->tididx]) {  in compute_data_length()
    642  len = min(req->data_len - req->sent, (u32)req->info.fragsize);  in compute_data_length()
    790  tx->req = req;  in user_sdma_send_pkts()
    807  iovec = &req->iovs[req->iov_idx];  in user_sdma_send_pkts()
    1187  tidval = req->tids[req->tididx];  in set_txreq_header()
    1200  !req->tids[req->tididx]) {  in set_txreq_header()
    1203  tidval = req->tids[req->tididx];  in set_txreq_header()
    1289  tidval = req->tids[req->tididx];  in set_txreq_header_ahg()
    1303  !req->tids[req->tididx])  in set_txreq_header_ahg()
    1305  tidval = req->tids[req->tididx];  in set_txreq_header_ahg()
    [all …]

/linux/net/sunrpc/
  backchannel_rqst.c
    64  kfree(req);  in xprt_free_allocation()
    84  req = kzalloc(sizeof(*req), gfp_flags);  in xprt_alloc_bc_req()
    85  if (req == NULL)  in xprt_alloc_bc_req()
    103  return req;  in xprt_alloc_bc_req()
    258  memcpy(&req->rq_private_buf, &req->rq_rcv_buf,  in xprt_get_bc_request()
    260  req->rq_xid = xid;  in xprt_get_bc_request()
    264  return req;  in xprt_get_bc_request()
    298  req = NULL;  in xprt_free_bc_rqst()
    341  if (req != new)  in xprt_lookup_bc_request()
    344  } else if (req)  in xprt_lookup_bc_request()
    [all …]

/linux/crypto/
  chacha20poly1305.c
    77  err = cont(req);  in async_done_continue()
    136  src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);  in chacha_decrypt()
    138  if (req->src != req->dst)  in chacha_decrypt()
    139  dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);  in chacha_decrypt()
    220  return poly_tail(req);  in poly_cipherpad()
    237  crypt = req->dst;  in poly_cipher()
    297  ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);  in poly_ad()
    329  return poly_ad(req);  in poly_setkey()
    412  src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);  in chacha_encrypt()
    414  if (req->src != req->dst)  in chacha_encrypt()
    [all …]

  seqiv.c
    66  info = req->iv;  in seqiv_aead_encrypt()
    68  if (req->src != req->dst) {  in seqiv_aead_encrypt()
    74  skcipher_request_set_crypt(nreq, req->src, req->dst,  in seqiv_aead_encrypt()
    75  req->assoclen + req->cryptlen,  in seqiv_aead_encrypt()
    85  info = kmemdup(req->iv, ivsize, req->base.flags &  in seqiv_aead_encrypt()
    92  data = req;  in seqiv_aead_encrypt()
    96  aead_request_set_crypt(subreq, req->dst, req->dst,  in seqiv_aead_encrypt()
    101  scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);  in seqiv_aead_encrypt()
    127  aead_request_set_crypt(subreq, req->src, req->dst,  in seqiv_aead_decrypt()
    128  req->cryptlen - ivsize, req->iv);  in seqiv_aead_decrypt()
    [all …]

  gcm.c
    168  if (req->src != req->dst) {  in crypto_gcm_init_common()
    186  dst = req->src == req->dst ? pctx->src : pctx->dst;  in crypto_gcm_init_crypt()
    377  req->src, req->assoclen, flags) ?:  in gcm_hash_init_continue()
    419  req->assoclen + req->cryptlen,  in gcm_enc_copy_hash()
    458  crypto_gcm_init_crypt(req, req->cryptlen);  in crypto_gcm_encrypt()
    744  if (req->src != req->dst) {  in crypto_rfc4106_crypt()
    771  req = crypto_rfc4106_crypt(req);  in crypto_rfc4106_encrypt()
    784  req = crypto_rfc4106_crypt(req);  in crypto_rfc4106_decrypt()
    939  if (req->src != req->dst) {  in crypto_rfc4543_crypt()
    950  req->base.complete, req->base.data);  in crypto_rfc4543_crypt()
    [all …]

/linux/drivers/clk/sunxi/
  clk-sunxi.c
    137  req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)  in sun6i_a31_get_pll1_factors()
    145  req->n = (req->n + 1) / 2 - 1;  in sun6i_a31_get_pll1_factors()
    146  req->m = (req->m + 1) / 2 - 1;  in sun6i_a31_get_pll1_factors()
    255  req->rate = req->parent_rate;  in sun5i_a13_get_ahb_factors()
    296  if (req->parent_rate && req->rate > req->parent_rate)  in sun6i_get_ahb1_factors()
    297  req->rate = req->parent_rate;  in sun6i_get_ahb1_factors()
    329  req->rate = req->parent_rate;  in sun6i_ahb1_recalc()
    333  req->rate /= req->m + 1;  in sun6i_ahb1_recalc()
    336  req->rate >>= req->p;  in sun6i_ahb1_recalc()
    351  req->rate = req->parent_rate;  in sun4i_get_apb1_factors()
    [all …]

/linux/drivers/crypto/inside-secure/
  safexcel_hash.c
    72  return req->len - req->processed;  in safexcel_queued_len()
    129  if (!req->finish && req->xcbcmac)  in safexcel_context_control()
    167  req->hmac_zlen || (req->processed != req->block_sz)) {  in safexcel_context_control()
    846  req->cache[req->block_sz-8] = (req->block_sz << 3) &  in safexcel_ahash_final()
    848  req->cache[req->block_sz-7] = (req->block_sz >> 5);  in safexcel_ahash_final()
    851  req->cache[req->block_sz-2] = (req->block_sz >> 5);  in safexcel_ahash_final()
    852  req->cache[req->block_sz-1] = (req->block_sz << 3) &  in safexcel_ahash_final()
    940  memset(req, 0, sizeof(*req));  in safexcel_sha1_init()
    1017  memset(req, 0, sizeof(*req));  in safexcel_hmac_sha1_init()
    1269  memset(req, 0, sizeof(*req));  in safexcel_sha256_init()
    [all …]

/linux/arch/powerpc/platforms/52xx/
  mpc52xx_lpbfifo.c
    163  req->offset + req->pos);  in mpc52xx_lpbfifo_kick()
    234  req = lpbfifo.req;  in mpc52xx_lpbfifo_irq()
    278  if (req->size - req->pos)  in mpc52xx_lpbfifo_irq()
    300  req->last_byte = ((u8 *)req->data)[req->size - 1];  in mpc52xx_lpbfifo_irq()
    315  req->callback(req);  in mpc52xx_lpbfifo_irq()
    335  req = lpbfifo.req;  in mpc52xx_lpbfifo_bcom_irq()
    356  req->last_byte = ((u8 *)req->data)[req->size - 1];  in mpc52xx_lpbfifo_bcom_irq()
    368  req->callback(req);  in mpc52xx_lpbfifo_bcom_irq()
    412  lpbfifo.req = req;  in mpc52xx_lpbfifo_submit()
    446  if (lpbfifo.req && lpbfifo.req == req &&  in mpc52xx_lpbfifo_start_xfer()
    [all …]

/linux/drivers/s390/cio/
  ccwreq.c
    43  struct ccw_request *req = &cdev->private->req;  in ccwreq_next_path() local
    49  req->retries = req->maxretries;  in ccwreq_next_path()
    50  req->mask = lpm_adjust(req->mask >> 1, req->lpm);  in ccwreq_next_path()
    60  struct ccw_request *req = &cdev->private->req;  in ccwreq_stop() local
    69  req->callback(cdev, req->data, rc);  in ccwreq_stop()
    77  struct ccw_request *req = &cdev->private->req;  in ccwreq_do() local
    122  struct ccw_request *req = &cdev->private->req;  in ccw_request_start() local
    128  req->mask = req->lpm;  in ccw_request_start()
    130  req->retries = req->maxretries;  in ccw_request_start()
    131  req->mask = lpm_adjust(req->mask, req->lpm);  in ccw_request_start()
    [all …]

/linux/drivers/macintosh/
  via-pmu.c
    728  vb = (req->reply[1] << 8) | req->reply[2];  in done_battery_state_ohare()
    740  pcharge = (req->reply[6] << 8) + req->reply[7];  in done_battery_state_ohare()
    812  max = (req->reply[4] << 8) | req->reply[5];  in done_battery_state_smart()
    997  req->data[i] = req->data[i+1];  in pmu_send_request()
    1025  req->data[i] = req->data[i+1];  in pmu_send_request()
    1038  req->data[i+2] = req->data[i];  in pmu_send_request()
    1039  req->data[3] = req->nbytes - 2;  in pmu_send_request()
    1520  reply_ptr = req->reply + req->reply_len;  in pmu_sr_intr()
    1749  (req.reply[2] << 8) + req.reply[3];  in pmu_get_time()
    2567  (*req->done)(req);  in pmu_polled_request()
    [all …]

/linux/fs/nfs/
  pagelist.c
    296  if (ret || req->wb_head == req)  in nfs_page_group_lock()
    308  if (req != req->wb_head)  in nfs_page_group_unlock()
    335  tmp = req;  in nfs_page_group_sync_on_bit_locked()
    375  req->wb_head = req;  in nfs_page_group_init()
    376  req->wb_this_page = req;  in nfs_page_group_init()
    418  tmp = req;  in nfs_page_group_destroy()
    465  return req;  in __nfs_create_request()
    581  WARN_ON_ONCE(req->wb_this_page != req);  in nfs_free_request()
    703  struct nfs_page *req = hdr->req;  in nfs_pgio_rpcsetup() local
    1166  subreq = req;  in __nfs_pageio_add_request()
    [all …]

/linux/fs/
  io_uring.c
    1584  &req->work, req->flags);  in io_queue_async_work()
    4194  ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,  in io_fallocate()
    4744  ret = sync_file_range(req->file, req->sync.off, req->sync.len,  in io_sync_file_range()
    5500  struct io_kiocb *req = pt->req;  in __io_queue_proc() local
    5563  trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);  in io_async_task_func()
    5617  ipt->req = req;  in __io_arm_poll_handler()
    5693  trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,  in io_arm_poll_handler()
    6197  data->req = req;  in io_timeout_prep()
    6534  io_for_each_link(req, req)  in io_get_sequence()
    6575  de->req = req;  in io_drain_req()
    [all …]

/linux/tools/testing/selftests/net/
  ipsec.c
    281  memset(&req, 0, sizeof(req));  in veth_add()
    329  memset(&req, 0, sizeof(req));  in ip4_addr_set()
    369  memset(&req, 0, sizeof(req));  in link_set_up()
    397  memset(&req, 0, sizeof(req));  in ip4_route_set()
    939  memset(&req, 0, sizeof(req));  in xfrm_state_add()
    1038  memset(&req, 0, sizeof(req));  in xfrm_state_check()
    1122  memset(&req, 0, sizeof(req));  in xfrm_policy_add()
    1196  memset(&req, 0, sizeof(req));  in xfrm_policy_del()
    1247  memset(&req, 0, sizeof(req));  in xfrm_state_del()
    1303  memset(&req, 0, sizeof(req));  in xfrm_state_allocspi()
    [all …]

/linux/fs/ksmbd/
  transport_ipc.c
    309  req->ifc_list_sz);  in ipc_server_config_on_startup()
    312  req->netbios_name, req->server_string,  in ipc_server_config_on_startup()
    570  snprintf(req->peer_addr, sizeof(req->peer_addr), "%pIS", peer_addr);  in ksmbd_ipc_tree_connect_request()
    664  req->handle = handle;  in ksmbd_rpc_open()
    667  req->payload_sz = 0;  in ksmbd_rpc_open()
    686  req->handle = handle;  in ksmbd_rpc_close()
    689  req->payload_sz = 0;  in ksmbd_rpc_close()
    709  req->handle = handle;  in ksmbd_rpc_write()
    733  req->handle = handle;  in ksmbd_rpc_read()
    737  req->payload_sz = 0;  in ksmbd_rpc_read()
    [all …]