/linux/net/sunrpc/
  xdr.c
    186  head->iov_len = offset;  in xdr_inline_pages()
    513  unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;  in xdr_buf_try_expand()
    617  talen = tail->iov_len > tato ? tail->iov_len - tato : 0;  in xdr_buf_head_copy_right()
    802  head->iov_len = len;  in xdr_shrink_bufhead()
    915  iov->iov_len += len;  in xdr_init_encode()
    1103  if (tail->iov_len) {  in xdr_truncate_encode()
    1129  head->iov_len = len;  in xdr_truncate_encode()
    1183  iov->iov_len = 0;  in xdr_write_pages()
    1191  iov->iov_len += pad;  in xdr_write_pages()
    1204  len = iov->iov_len;  in xdr_set_iov()
    [all …]
  socklib.c
    96  len = xdr->head[0].iov_len;  in xdr_partial_copy_from_skb()
    154  len = xdr->tail[0].iov_len;  in xdr_partial_copy_from_skb()
    216  iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);  in xprt_send_kvec()
    246  .iov_len = sizeof(marker)  in xprt_send_rm_and_kvec()
    250  size_t len = iov[0].iov_len + iov[1].iov_len;  in xprt_send_rm_and_kvec()
    284  want = xdr->head[0].iov_len + rmsize;  in xprt_sock_sendmsg()
    319  if (base >= xdr->tail[0].iov_len)  in xprt_sock_sendmsg()
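The xprt_send_kvec() hit above shows the common kernel send pattern: describe a buffer with a struct kvec, point the msghdr's iterator at it, and let the socket layer walk iov_len. A minimal sketch of that pattern, using the same iov_iter_kvec() direction flag that appears in the hit at line 216; the function name and MSG_DONTWAIT flag below are illustrative, not from the tree:

```c
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative helper, not an in-tree function. */
static int demo_send_kvec(struct socket *sock, void *hdr, size_t hdrlen)
{
	struct kvec vec = {
		.iov_base = hdr,
		.iov_len  = hdrlen,	/* bytes the socket may pull from this segment */
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	/* Attach the single-segment kvec; the total equals vec.iov_len. */
	iov_iter_kvec(&msg.msg_iter, WRITE, &vec, 1, vec.iov_len);
	return sock_sendmsg(sock, &msg);
}
```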
  svc.c
    1260  if (argv->iov_len < 6*4)  in svc_process_common()
    1346  statp = resv->iov_base +resv->iov_len;  in svc_process_common()
    1391  argv->iov_len);  in svc_process_common()
    1469  resv->iov_len = 0;  in svc_process()
    1476  rqstp->rq_res.tail[0].iov_len = 0;  in svc_process()
    1532  rqstp->rq_arg.head[0].iov_len;  in bc_svc_process()
    1538  resv->iov_len = 0;  in bc_svc_process()
    1641  if (first->iov_len) {  in svc_fill_write_vector()
    1643  vec[i].iov_len = min_t(size_t, total, first->iov_len);  in svc_fill_write_vector()
    1644  total -= vec[i].iov_len;  in svc_fill_write_vector()
    [all …]
/linux/include/linux/sunrpc/
  svc.h
    191  iov->iov_len -= sizeof(__be32);  in svc_getnl()
    197  __be32 *vp = iov->iov_base + iov->iov_len;  in svc_putnl()
    199  iov->iov_len += sizeof(__be32);  in svc_putnl()
    208  iov->iov_len -= sizeof(__be32);  in svc_getu32()
    216  iov->iov_len += sizeof(*vp);  in svc_ungetu32()
    221  __be32 *vp = iov->iov_base + iov->iov_len;  in svc_putu32()
    223  iov->iov_len += sizeof(__be32);  in svc_putu32()
    364  vec->iov_len = cp - (char*)vec->iov_base;  in xdr_ressize_check()
    366  return vec->iov_len <= PAGE_SIZE;  in xdr_ressize_check()
    595  xdr->p = resv->iov_base + resv->iov_len;  in svcxdr_init_encode()
    [all …]
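svc_getnl() and svc_putnl() above treat the kvec as a cursor: reads consume from iov_base and shrink iov_len, while writes land at iov_base + iov_len and grow it. A sketch of that pattern with illustrative helper names (the real helpers live in svc.h, as the hits show); it assumes the caller has reserved space before appending:

```c
#include <linux/types.h>
#include <linux/uio.h>
#include <asm/byteorder.h>

/* Append one XDR word at the current end of the reply kvec. */
static inline void demo_put_nl(struct kvec *iov, u32 val)
{
	__be32 *vp = iov->iov_base + iov->iov_len;

	*vp = cpu_to_be32(val);
	iov->iov_len += sizeof(__be32);
}

/* Consume one XDR word from the front of the argument kvec. */
static inline u32 demo_get_nl(struct kvec *iov)
{
	__be32 val = *(__be32 *)iov->iov_base;

	iov->iov_base = (__be32 *)iov->iov_base + 1;
	iov->iov_len -= sizeof(__be32);
	return be32_to_cpu(val);
}
```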
/linux/tools/testing/selftests/powerpc/ptrace/
  ptrace.h
    110  iov.iov_len = n * sizeof(unsigned long);  in ptrace_read_regs()
    152  iov.iov_len = sizeof(unsigned long);  in show_tar_registers()
    199  iov.iov_len = sizeof(unsigned long);  in write_tar_registers()
    242  iov.iov_len = sizeof(unsigned long);  in show_tm_checkpointed_state()
    290  iov.iov_len = sizeof(unsigned long);  in write_ckpt_tar_registers()
    481  iov.iov_len = sizeof(struct pt_regs);  in show_ckpt_gpr()
    509  iov.iov_len = sizeof(struct pt_regs);  in write_ckpt_gpr()
    548  iov.iov_len = sizeof(regs);  in show_vmx_ckpt()
    579  iov.iov_len = sizeof(regs);  in write_vmx_ckpt()
    608  iov.iov_len = sizeof(regs);  in show_vsx_ckpt()
    [all …]
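In these selftests the iovec is the PTRACE_GETREGSET/SETREGSET transfer descriptor: iov_len tells the kernel how much register data the buffer can hold, and the kernel trims it to what it actually filled in. A minimal userspace sketch of the same idea, using the generic NT_PRSTATUS regset rather than the powerpc-specific ones above:

```c
#include <elf.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

int read_gprs(pid_t pid)
{
	struct user_regs_struct regs;
	struct iovec iov = {
		.iov_base = &regs,
		.iov_len  = sizeof(regs),	/* capacity offered to the kernel */
	};

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return -1;
	}
	/* The kernel shrinks iov_len to the number of bytes it wrote. */
	printf("got %zu bytes of GPRs\n", iov.iov_len);
	return 0;
}
```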
/linux/fs/cifs/
  smb2transport.c
    269  iov[0].iov_len);  in smb2_calc_signature()
    481  d->label.iov_len = 12;  in generate_smb30signingkey()
    483  d->context.iov_len = 8;  in generate_smb30signingkey()
    487  d->label.iov_len = 11;  in generate_smb30signingkey()
    489  d->context.iov_len = 10;  in generate_smb30signingkey()
    493  d->label.iov_len = 11;  in generate_smb30signingkey()
    509  d->label.iov_len = 14;  in generate_smb311signingkey()
    515  d->label.iov_len = 16;  in generate_smb311signingkey()
    521  d->label.iov_len = 16;  in generate_smb311signingkey()
    588  iov[0].iov_len);  in smb3_calc_signature()
    [all …]
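smb2_calc_signature() and smb3_calc_signature() walk the request's kvec array and feed each (iov_base, iov_len) pair into the signing transform, while the label/context iov_len assignments above size the inputs to the key derivation. A sketch of the hashing loop only, assuming the shash descriptor has already been allocated and keyed; the helper name is illustrative:

```c
#include <crypto/hash.h>
#include <linux/uio.h>

/* Hash every segment of an iov array; returns 0 or a crypto error. */
static int demo_hash_iov(struct shash_desc *desc,
			 const struct kvec *iov, int n_vec)
{
	int i, rc = 0;

	for (i = 0; i < n_vec && !rc; i++)
		rc = crypto_shash_update(desc, iov[i].iov_base,
					 iov[i].iov_len);
	return rc;
}
```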
  transport.c
    277  buflen += iov[i].iov_len;  in smb_rqst_len()
    354  .iov_len = 4  in __smb_send_rqst()
    374  size += iov[i].iov_len;  in __smb_send_rqst()
    478  iov.iov_len = sizeof(*tr_hdr);  in smb_send_rqst()
    503  iov[0].iov_len = 4;  in smb_send()
    505  iov[1].iov_len = smb_buf_length;  in smb_send()
    969  iov[0].iov_len = 4;  in cifs_check_receive()
    971  iov[1].iov_len = len - 4;  in cifs_check_receive()
    1262  .iov_len = resp_iov[0].iov_len  in compound_send_recv()
    1318  new_iov[0].iov_len = 4;  in SendReceive2()
    [all …]
/linux/net/sunrpc/auth_gss/
  svcauth_gss.c
    680  if (argv->iov_len < 4)  in svc_safe_getnetobj()
    684  if (argv->iov_len < l)  in svc_safe_getnetobj()
    688  argv->iov_len -= l;  in svc_safe_getnetobj()
    731  if (argv->iov_len < 4)  in gss_verify_header()
    788  iov.iov_len = 4;  in gss_write_verf()
    954  return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;  in total_buf_len()
    964  buf->head[0].iov_len -= pad;  in fix_priv_head()
    1091  if (argv->iov_len < 2 * 4)  in gss_read_common_verf()
    1567  if (argv->iov_len < 5 * 4)  in svcauth_gss_accept()
    1796  if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len  in svcauth_gss_wrap_resp_priv()
    [all …]
  gss_krb5_wrap.c
    55  if (buf->page_len || buf->tail[0].iov_len)  in gss_krb5_add_padding()
    59  p = iov->iov_base + iov->iov_len;  in gss_krb5_add_padding()
    60  iov->iov_len += padding;  in gss_krb5_add_padding()
    72  if (len <= buf->head[0].iov_len) {  in gss_krb5_remove_padding()
    74  if (pad > buf->head[0].iov_len)  in gss_krb5_remove_padding()
    76  buf->head[0].iov_len -= pad;  in gss_krb5_remove_padding()
    79  len -= buf->head[0].iov_len;  in gss_krb5_remove_padding()
    91  BUG_ON(len > buf->tail[0].iov_len);  in gss_krb5_remove_padding()
    345  buf->head[0].iov_len -= (data_start - orig_start);  in gss_unwrap_kerberos_v1()
    547  buf->head[0].iov_len);  in gss_unwrap_kerberos_v2()
    [all …]
/linux/net/rxrpc/
  output.c
    230  iov[1].iov_len = sizeof(pkt->ackinfo);  in rxrpc_send_ack_packet()
    231  len = iov[0].iov_len + iov[1].iov_len;  in rxrpc_send_ack_packet()
    325  iov[0].iov_len = sizeof(pkt);  in rxrpc_send_abort_packet()
    386  iov[0].iov_len = sizeof(whdr);  in rxrpc_send_data_packet()
    388  iov[1].iov_len = skb->len;  in rxrpc_send_data_packet()
    389  len = iov[0].iov_len + iov[1].iov_len;  in rxrpc_send_data_packet()
    430  if (iov[1].iov_len >= call->peer->maxdata)  in rxrpc_send_data_packet()
    566  iov[0].iov_len = sizeof(whdr);  in rxrpc_reject_packets()
    568  iov[1].iov_len = sizeof(code);  in rxrpc_reject_packets()
    657  iov[0].iov_len = sizeof(whdr);  in rxrpc_send_keepalive()
    [all …]
  local_event.c
    60  iov[0].iov_len = sizeof(whdr);  in rxrpc_send_version_request()
    62  iov[1].iov_len = sizeof(rxrpc_version_string);  in rxrpc_send_version_request()
    64  len = iov[0].iov_len + iov[1].iov_len;  in rxrpc_send_version_request()
  conn_event.c
    62  iov[0].iov_len = sizeof(pkt.whdr);  in rxrpc_conn_retransmit_call()
    64  iov[1].iov_len = 3;  in rxrpc_conn_retransmit_call()
    66  iov[2].iov_len = sizeof(ack_info);  in rxrpc_conn_retransmit_call()
    83  iov[0].iov_len += sizeof(pkt.abort_code);  in rxrpc_conn_retransmit_call()
    104  iov[0].iov_len += sizeof(pkt.ack);  in rxrpc_conn_retransmit_call()
    236  iov[0].iov_len = sizeof(whdr);  in rxrpc_abort_connection()
    238  iov[1].iov_len = sizeof(word);  in rxrpc_abort_connection()
    240  len = iov[0].iov_len + iov[1].iov_len;  in rxrpc_abort_connection()
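All three rxrpc files above use the same send pattern: a small kvec array (wire header first, then the ACK/abort/payload data), the iov_len fields summed into the total, and the array handed to the kernel socket layer. A reduced sketch of that pattern; the names below are illustrative, not from the tree:

```c
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int demo_send_hdr_and_body(struct socket *sock,
				  void *hdr, size_t hdrlen,
				  void *body, size_t bodylen)
{
	struct kvec iov[2] = {
		{ .iov_base = hdr,  .iov_len = hdrlen },
		{ .iov_base = body, .iov_len = bodylen },
	};
	struct msghdr msg = { .msg_flags = 0 };
	size_t len = iov[0].iov_len + iov[1].iov_len;

	/* kernel_sendmsg() wires the kvec array into msg and sends len bytes. */
	return kernel_sendmsg(sock, &msg, iov, 2, len);
}
```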
/linux/fs/nfsd/
  nfscache.c
    124  nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);  in nfsd_reply_cache_free_locked()
    322  size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,  in nfsd_cache_csum()
    324  size_t len = min(buf->head[0].iov_len, csum_len);  in nfsd_cache_csum()
    542  len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);  in nfsd_cache_update()
    565  cachv->iov_len = bufsize;  in nfsd_cache_update()
    592  if (vec->iov_len + data->iov_len > PAGE_SIZE) {  in nfsd_cache_append()
    594  data->iov_len);  in nfsd_cache_append()
    597  memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);  in nfsd_cache_append()
    598  vec->iov_len += data->iov_len;  in nfsd_cache_append()
/linux/fs/ksmbd/
  auth.c
    562  iov[i].iov_len);  in ksmbd_sign_smb2_pdu()
    613  iov[i].iov_len);  in ksmbd_sign_smb3_pdu()
    675  label.iov_len);  in generate_key()
    689  context.iov_len);  in generate_key()
    759  d.label.iov_len = 12;  in ksmbd_gen_smb30_signingkey()
    761  d.context.iov_len = 8;  in ksmbd_gen_smb30_signingkey()
    773  d.label.iov_len = 14;  in ksmbd_gen_smb311_signingkey()
    839  d->label.iov_len = 11;  in ksmbd_gen_smb30_encryptionkey()
    845  d->label.iov_len = 11;  in ksmbd_gen_smb30_encryptionkey()
    859  d->label.iov_len = 16;  in ksmbd_gen_smb311_encryptionkey()
    [all …]
  connection.c
    175  len += iov[iov_idx++].iov_len;  in ksmbd_conn_write()
    180  len += iov[iov_idx++].iov_len;  in ksmbd_conn_write()
    182  len += iov[iov_idx++].iov_len;  in ksmbd_conn_write()
    185  iov[iov_idx].iov_len = work->resp_hdr_sz;  in ksmbd_conn_write()
    187  iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;  in ksmbd_conn_write()
    189  len += iov[iov_idx++].iov_len;  in ksmbd_conn_write()
/linux/mm/
  process_vm_access.c
    163  ssize_t iov_len;  in process_vm_rw_core() (local)
    171  iov_len = rvec[i].iov_len;  in process_vm_rw_core()
    172  if (iov_len > 0) {  in process_vm_rw_core()
    174  + iov_len)  in process_vm_rw_core()
    216  (unsigned long)rvec[i].iov_base, rvec[i].iov_len,  in process_vm_rw_core()
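process_vm_rw_core() is the kernel side of the process_vm_readv()/process_vm_writev() syscalls; the hits above are the per-segment checks it applies to each remote iov_len. A userspace sketch of the corresponding call (the helper name and single-segment layout are illustrative):

```c
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* Copy len bytes from remote_addr in the target process into buf. */
ssize_t peek_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
	struct iovec local  = { .iov_base = buf,         .iov_len = len };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

	/* Returns the number of bytes actually transferred, or -1 on error. */
	return process_vm_readv(pid, &local, 1, &remote, 1, 0);
}
```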
/linux/arch/arm64/kernel/
  mte.c
    337  size_t len = kiov->iov_len;  in __access_remote_tags()
    395  kiov->iov_len = buf - kiov->iov_base;  in __access_remote_tags()
    396  if (!kiov->iov_len) {  in __access_remote_tags()
    446  get_user(kiov.iov_len, &uiov->iov_len))  in mte_ptrace_copy_tags()
    457  ret = put_user(kiov.iov_len, &uiov->iov_len);  in mte_ptrace_copy_tags()
/linux/net/sunrpc/xprtrdma/
  rpc_rdma.c
    178  return (buf->head[0].iov_len + buf->tail[0].iov_len) <  in rpcrdma_nonpayload_inline()
    218  seg->mr_len = vec->iov_len;  in rpcrdma_convert_kvec()
    261  if (xdrbuf->tail[0].iov_len)  in rpcrdma_convert_iovs()
    351  pos = rqst->rq_snd_buf.head[0].iov_len;  in rpcrdma_encode_read_list()
    411  rqst->rq_rcv_buf.head[0].iov_len,  in rpcrdma_encode_write_list()
    689  dst += xdr->head[0].iov_len;  in rpcrdma_pullup_pagelist()
    720  if (unlikely(xdr->tail[0].iov_len))  in rpcrdma_prepare_noch_pullup()
    741  if (tail->iov_len)  in rpcrdma_prepare_noch_mapped()
    744  tail->iov_len))  in rpcrdma_prepare_noch_mapped()
    764  if (xdr->tail[0].iov_len > 3) {  in rpcrdma_prepare_readch()
    [all …]
  svc_rdma_sendto.c
    557  if (!iov->iov_len)  in svc_rdma_iov_dma_map()
    561  iov->iov_len);  in svc_rdma_iov_dma_map()
    629  if (xdr->head[0].iov_len)  in svc_rdma_xb_count_sges()
    640  if (xdr->tail[0].iov_len)  in svc_rdma_xb_count_sges()
    696  if (xdr->head[0].iov_len) {  in svc_rdma_xb_linearize()
    697  memcpy(args->pd_dest, xdr->head[0].iov_base, xdr->head[0].iov_len);  in svc_rdma_xb_linearize()
    698  args->pd_dest += xdr->head[0].iov_len;  in svc_rdma_xb_linearize()
    713  if (xdr->tail[0].iov_len) {  in svc_rdma_xb_linearize()
    714  memcpy(args->pd_dest, xdr->tail[0].iov_base, xdr->tail[0].iov_len);  in svc_rdma_xb_linearize()
    715  args->pd_dest += xdr->tail[0].iov_len;  in svc_rdma_xb_linearize()
/linux/lib/
  iov_iter.c
    24  len = min(n, __p->iov_len - skip); \
    31  if (skip < __p->iov_len) \
    246  if (skip == iov->iov_len) {  in copy_page_to_iter_iovec()
    330  if (skip == iov->iov_len) {  in copy_page_from_iter_iovec()
    1057  size -= iov->iov_len;  in iov_iter_iovec_advance()
    1144  size_t n = (--iov)->iov_len;  in iov_iter_revert()
    1349  if (i->iov[k].iov_len) {  in iov_iter_gap_alignment()
    1356  size -= i->iov[k].iov_len;  in iov_iter_gap_alignment()
    1881  iov[i].iov_len = len;  in copy_compat_iovec_from_user()
    1974  iov[seg].iov_len = len;  in __import_iovec()
    [all …]
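lib/iov_iter.c advances through segments by comparing the running offset against each iov_len (the `skip == iov->iov_len` hits above). Callers normally never touch that bookkeeping; they build an iterator and use copy_to_iter()/copy_from_iter(). A sketch under the direction convention this tree uses (READ marks the kvec as the destination side); the helper name is illustrative:

```c
#include <linux/kernel.h>
#include <linux/uio.h>

/* Copy srclen bytes into dst via an iov_iter; returns bytes copied. */
static size_t demo_fill_buffer(void *dst, size_t dstlen,
			       const void *src, size_t srclen)
{
	struct kvec kv = { .iov_base = dst, .iov_len = dstlen };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kv, 1, kv.iov_len);
	return copy_to_iter(src, srclen, &iter);
}
```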
/linux/drivers/usb/usbip/
  stub_tx.c
    218  iov[iovnum].iov_len = sizeof(pdu_header);  in stub_send_ret_submit()
    232  iov[iovnum].iov_len =  in stub_send_ret_submit()
    254  iov[iovnum].iov_len = size;  in stub_send_ret_submit()
    261  iov[iovnum].iov_len = urb->actual_length;  in stub_send_ret_submit()
    280  iov[iovnum].iov_len =  in stub_send_ret_submit()
    311  iov[iovnum].iov_len = len;  in stub_send_ret_submit()
    388  iov[0].iov_len = sizeof(pdu_header);  in stub_send_ret_unlink()
  vhci_tx.c
    100  iov[iovnum].iov_len = sizeof(pdu_header);  in vhci_send_cmd_submit()
    110  iov[iovnum].iov_len = sg->length;  in vhci_send_cmd_submit()
    115  iov[iovnum].iov_len =  in vhci_send_cmd_submit()
    134  iov[iovnum].iov_len = len;  in vhci_send_cmd_submit()
    216  iov.iov_len = sizeof(pdu_header);  in vhci_send_cmd_unlink()
/linux/net/smc/
  smc_clc.c
    734  vec.iov_len = SMC_CLC_RECV_BUF_LEN;  in smc_clc_wait_msg()
    795  vec.iov_len = send_len;  in smc_clc_send_decline()
    934  vec[i++].iov_len = sizeof(*pclc_base);  in smc_clc_send_proposal()
    936  vec[i++].iov_len = sizeof(*pclc_smcd);  in smc_clc_send_proposal()
    939  vec[i++].iov_len = sizeof(*pclc_prfx);  in smc_clc_send_proposal()
    948  vec[i++].iov_len = sizeof(*v2_ext) +  in smc_clc_send_proposal()
    961  vec[i++].iov_len = sizeof(*trl);  in smc_clc_send_proposal()
    1089  vec[i++].iov_len = sizeof(fce);  in smc_clc_send_confirm_accept()
    1093  vec[i++].iov_len = sizeof(gle);  in smc_clc_send_confirm_accept()
    1095  vec[i++].iov_len = gle.gid_cnt *  in smc_clc_send_confirm_accept()
    [all …]
/linux/drivers/net/ethernet/google/gve/
  gve_tx.c
    97  iov[0].iov_len = bytes;  in gve_tx_alloc_fifo()
    106  iov[0].iov_len -= overflow;  in gve_tx_alloc_fifo()
    108  iov[1].iov_len = overflow;  in gve_tx_alloc_fifo()
    414  u64 iov_offset, u64 iov_len)  in gve_dma_sync_for_device() (argument)
    416  u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;  in gve_dma_sync_for_device()
    468  info->iov[hdr_nfrags - 1].iov_len);  in gve_tx_add_skb_copy()
    476  info->iov[i].iov_len,  in gve_tx_add_skb_copy()
    481  info->iov[i].iov_len);  in gve_tx_add_skb_copy()
    484  info->iov[i].iov_len);  in gve_tx_add_skb_copy()
    485  copy_offset += info->iov[i].iov_len;  in gve_tx_add_skb_copy()
    [all …]
/linux/tools/testing/selftests/net/
  tls.c
    149  vec.iov_len = len;  in tls_send_cmsg()
    178  vec.iov_len = len;  in tls_recv_cmsg()
    474  vec.iov_len = send_len;  in TEST_F()
    496  vec[i].iov_len = SEND_LEN;  in TEST_F()
    548  int iov_len = 5;  in TEST_F() (local)
    561  msg.msg_iovlen = iov_len;  in TEST_F()
    582  int iov_len = 1024;  in TEST_F() (local)
    598  msg.msg_iovlen = iov_len;  in TEST_F()
    778  vec.iov_len = send_len;  in TEST_F()
    810  unsigned int iov_len = 16;  in TEST_F() (local)
    [all …]
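The TLS selftests above drive the msg_iov/msg_iovlen path: several iovecs whose iov_len values add up to one record-sized send. A minimal userspace sketch of that scatter-gather send; the two-chunk split and function name are arbitrary:

```c
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

ssize_t send_two_chunks(int fd, const void *a, size_t alen,
			const void *b, size_t blen)
{
	struct iovec vec[2] = {
		{ .iov_base = (void *)a, .iov_len = alen },
		{ .iov_base = (void *)b, .iov_len = blen },
	};
	struct msghdr msg = { 0 };

	msg.msg_iov = vec;
	msg.msg_iovlen = 2;	/* the kernel sends vec[0].iov_len + vec[1].iov_len bytes */
	return sendmsg(fd, &msg, 0);
}
```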