/linux/include/linux/

bvec.h
     34  unsigned int bv_len;    member
     78  .bv_len = mp_bvec_iter_len((bvec), (iter)), \
     97  .bv_len = bvec_iter_len((bvec), (iter)), \
    115  while (bytes && bytes >= bv[idx].bv_len) {    in bvec_iter_advance()
    116  bytes -= bv[idx].bv_len;    in bvec_iter_advance()
    134  if (done == bv[iter->bi_idx].bv_len) {    in bvec_iter_advance_single()
    177  bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,    in bvec_advance()
    178  bvec->bv_len - iter_all->done);    in bvec_advance()
    179  iter_all->done += bv->bv_len;    in bvec_advance()
    181  if (iter_all->done == bvec->bv_len) {    in bvec_advance()
    [all …]
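The bvec.h hits above are the core iterator plumbing: bv_len is the byte count of a single bio_vec, and the iterators consume it to walk a bio. A minimal sketch of that consumption pattern, not code from the tree:

/*
 * Minimal sketch (illustrative only): summing bv_len across
 * bio_for_each_segment() recovers the bytes still covered by the
 * bio's iterator, i.e. bio->bi_iter.bi_size.
 */
#include <linux/bio.h>

static unsigned int count_remaining_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;	/* bytes covered by this segment */

	return bytes;
}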
/linux/drivers/block/

n64cart.c
     69  (bv->bv_len & (MIN_ALIGNMENT - 1)));    in n64cart_do_bvec()
     79  n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);    in n64cart_do_bvec()
     83  dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE);    in n64cart_do_bvec()
     99  pos += bvec.bv_len;    in n64cart_submit_bio()
/linux/drivers/md/

dm-ebs-target.c
     69  unsigned int bv_len = bv->bv_len;    in __ebs_rw_bvec() local
     74  if (unlikely(!bv->bv_page || !bv_len))    in __ebs_rw_bvec()
     80  while (bv_len) {    in __ebs_rw_bvec()
     81  cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len);    in __ebs_rw_bvec()
     84  if (rw == READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))    in __ebs_rw_bvec()
    111  bv_len -= cur_len;    in __ebs_rw_bvec()

dm-log-writes.c
    384  block->vecs[i].bv_len, 0);    in log_one_block()
    385  if (ret != block->vecs[i].bv_len) {    in log_one_block()
    402  block->vecs[i].bv_len, 0);    in log_one_block()
    403  if (ret != block->vecs[i].bv_len) {    in log_one_block()
    409  sector += block->vecs[i].bv_len >> SECTOR_SHIFT;    in log_one_block()
    772  block->vecs[i].bv_len = bv.bv_len;    in log_writes_map()
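__ebs_rw_bvec() above carves one bio_vec into dm-bufio block-sized pieces, shrinking a local bv_len copy as it goes. A hedged sketch of that carving loop; block_size, buf_off and process_chunk() are illustrative stand-ins, not dm APIs:

/*
 * Hedged sketch of the chunking loop in __ebs_rw_bvec() above.
 * Assumes buf_off < block_size on entry; process_chunk() is a stand-in
 * for the per-chunk buffer work.
 */
#include <linux/kernel.h>
#include <linux/bvec.h>

static void carve_bvec(const struct bio_vec *bv, unsigned int block_size,
		       unsigned int buf_off,
		       void (*process_chunk)(unsigned int len))
{
	unsigned int bv_len = bv->bv_len;

	while (bv_len) {
		/* never cross a block boundary in one step */
		unsigned int cur_len = min(block_size - buf_off, bv_len);

		process_chunk(cur_len);

		bv_len -= cur_len;
		buf_off = 0;	/* only the first chunk can start mid-block */
	}
}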
/linux/block/

blk-integrity.c
     40  if (seg_size + iv.bv_len > queue_max_segment_size(q))    in blk_rq_count_integrity_sg()
     43  seg_size += iv.bv_len;    in blk_rq_count_integrity_sg()
     47  seg_size = iv.bv_len;    in blk_rq_count_integrity_sg()
     82  if (sg->length + iv.bv_len > queue_max_segment_size(q))    in blk_rq_map_integrity_sg()
     85  sg->length += iv.bv_len;    in blk_rq_map_integrity_sg()
     95  sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);    in blk_rq_map_integrity_sg()

blk-merge.c
     29  if (bv->bv_len == bio->bi_iter.bi_size)    in bio_get_last_bvec()
     46  bv->bv_len = iter.bi_bvec_done;    in bio_get_last_bvec()
    236  unsigned len = min(bv->bv_len, max_len);    in bvec_split_segs()
    256  return len > 0 || bv->bv_len > max_len;    in bvec_split_segs()
    298  sectors + (bv.bv_len >> 9) <= max_sectors &&    in blk_bio_segment_split()
    299  bv.bv_offset + bv.bv_len <= PAGE_SIZE) {    in blk_bio_segment_split()
    301  sectors += bv.bv_len >> 9;    in blk_bio_segment_split()
    448  unsigned nbytes = bvec->bv_len;    in blk_bvec_map_sg()
    483  sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);    in __blk_bvec_map_sg()
    493  int nbytes = bvec->bv_len;    in __blk_segment_map_sg_merge()
    [all …]

blk-map.c
     56  bvec->bv_len,    in bio_copy_from_iter()
     62  if (ret < bvec->bv_len)    in bio_copy_from_iter()
     87  bvec->bv_len,    in bio_copy_to_iter()
     93  if (ret < bvec->bv_len)    in bio_copy_to_iter()
    317  len += bio->bi_io_vec[i].bv_len;    in bio_invalidate_vmalloc_pages()
    404  p += bvec->bv_len;    in bio_copy_kern_endio_read()

bounce.c
     98  bio_advance_iter(from, &from_iter, tovec.bv_len);    in copy_to_high_bio_irq()
    118  bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);    in bounce_end_io()
    222  sectors += from.bv_len >> 9;    in __blk_queue_bounce()

bio.c
    565  if (done + bv.bv_len > new_size) {    in bio_truncate()
    572  zero_user(bv.bv_page, offset, bv.bv_len - offset);    in bio_truncate()
    575  done += bv.bv_len;    in bio_truncate()
    798  size_t bv_end = bv->bv_offset + bv->bv_len;    in page_is_mergeable()
    843  bv->bv_len += len;    in __bio_try_merge_page()
    867  if (bv->bv_len + len > queue_max_segment_size(q))    in bio_try_merge_hw_seg()
    918  bvec->bv_len = len;    in bio_add_hw_page()
   1002  bv->bv_len = len;    in __bio_add_page()
   1285  unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);    in bio_copy_data_iter()

blk.h
    106  if (addr1 + vec1->bv_len != addr2)    in biovec_phys_mergeable()
    110  if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))    in biovec_phys_mergeable()
    119  ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));    in __bvec_gap_to_prev()
    326  bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;    in blk_may_split()

t10-pi.c
    154  for (j = 0; j < iv.bv_len; j += tuple_sz) {    in t10_pi_type1_prepare()
    200  for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {    in t10_pi_type1_complete()

blk-crypto-fallback.c
    219  num_sectors += bv.bv_len >> SECTOR_SHIFT;    in blk_crypto_fallback_split_bio_if_needed()
    335  for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {    in blk_crypto_fallback_encrypt_bio()
    423  for (i = 0; i < bv.bv_len; i += data_unit_size) {    in blk_crypto_fallback_decrypt_bio()

bio-integrity.c
    142  iv->bv_len = len;    in bio_integrity_add_page()
    175  iter.data_size = bv.bv_len;    in bio_integrity_process()
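Across blk-merge.c and blk-integrity.c the recurring pattern is that each segment's bv_len becomes either a scatterlist element length (sg_set_page) or a 512-byte sector count (bv_len >> 9). A hedged sketch of the simple, no-merging form of that mapping; map_bio_to_sg() is illustrative, not a block-layer API, and real code also honours queue limits and merges adjacent segments:

/*
 * Hedged sketch of the bv_len -> scatterlist pattern above
 * (no segment merging, no queue limits).
 */
#include <linux/bio.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_bio_to_sg(struct bio *bio, struct scatterlist *sgl, int max_ents)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	struct scatterlist *sg = sgl, *last = NULL;
	int nents = 0;

	sg_init_table(sgl, max_ents);

	bio_for_each_segment(bv, bio, iter) {
		if (nents == max_ents)
			return -EINVAL;	/* not enough sg entries */
		sg_set_page(sg, bv.bv_page, bv.bv_len, bv.bv_offset);
		last = sg;
		sg = sg_next(sg);
		nents++;
	}

	if (last)
		sg_mark_end(last);	/* trim the table to what was used */
	return nents;
}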
/linux/drivers/md/bcache/

util.c
    244  start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,    in bch_bio_map()
    251  base += bv->bv_len;    in bch_bio_map()
    254  size -= bv->bv_len;    in bch_bio_map()

debug.c
    136  cache_set_err_on(memcmp(p1, p2, bv.bv_len),    in bch_data_verify()
    144  bio_advance_iter(check, &citer, bv.bv_len);    in bch_data_verify()
/linux/fs/squashfs/

block.c
     47  int bytes_to_copy = min_t(int, bvec->bv_len - offset,    in copy_bio_to_actor()
     65  if (offset >= bvec->bv_len) {    in copy_bio_to_actor()
    181  if (offset < bvec->bv_len - 1) {    in squashfs_read_data()

zlib_wrapper.c
     78  avail = min(length, ((int)bvec->bv_len) - offset);    in zlib_uncompress()
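The squashfs hits read compressed data out of a bio one segment at a time, clamping each copy to what is left of bv_len. A hedged, self-contained sketch of that copy-out pattern; copy_bio_to_buf() is illustrative, not a squashfs helper:

/*
 * Hedged sketch: copy the first @len bytes of a bio's data into a
 * linear buffer, kmapping each segment and honouring bv_len/bv_offset.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

static size_t copy_bio_to_buf(struct bio *bio, void *dst, size_t len)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	size_t copied = 0;

	bio_for_each_segment(bv, bio, iter) {
		size_t n = min_t(size_t, bv.bv_len, len - copied);
		void *src = kmap_atomic(bv.bv_page);

		memcpy(dst + copied, src + bv.bv_offset, n);
		kunmap_atomic(src);

		copied += n;
		if (copied == len)
			break;
	}
	return copied;
}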
/linux/drivers/s390/block/

dasd_fba.c
    462  if (bv.bv_len & (blksize - 1))    in dasd_fba_build_cp_regular()
    465  count += bv.bv_len >> (block->s2b_shift + 9);    in dasd_fba_build_cp_regular()
    466  if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))    in dasd_fba_build_cp_regular()
    467  cidaw += bv.bv_len / blksize;    in dasd_fba_build_cp_regular()
    509  memcpy(copy + bv.bv_offset, dst, bv.bv_len);    in dasd_fba_build_cp_regular()
    513  for (off = 0; off < bv.bv_len; off += blksize) {    in dasd_fba_build_cp_regular()
    587  for (off = 0; off < bv.bv_len; off += blksize) {    in dasd_fba_free_cp()
    598  memcpy(dst, cda, bv.bv_len);    in dasd_fba_free_cp()

dasd_diag.c
    536  if (bv.bv_len & (blksize - 1))    in dasd_diag_build_cp()
    539  count += bv.bv_len >> (block->s2b_shift + 9);    in dasd_diag_build_cp()
    556  for (off = 0; off < bv.bv_len; off += blksize) {    in dasd_diag_build_cp()
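Both DASD drivers reject segments whose bv_len is not a whole number of device blocks and then convert bv_len into a device-block count. A small sketch of that arithmetic only; the function name is illustrative, and s2b_shift is read here as log2(blksize / 512), which is how the listed "bv_len >> (s2b_shift + 9)" expression works out:

/*
 * Sketch of the DASD-style alignment check and block accounting above
 * (illustrative names; blksize must be a power of two).
 */
#include <linux/bvec.h>
#include <linux/errno.h>

static int count_device_blocks(const struct bio_vec *bv,
			       unsigned int blksize, unsigned int s2b_shift)
{
	/* every segment must cover whole device blocks */
	if (bv->bv_len & (blksize - 1))
		return -EINVAL;

	/* bv_len / blksize, with blksize == 512 << s2b_shift */
	return bv->bv_len >> (s2b_shift + 9);
}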
/linux/drivers/block/zram/

zram_drv.c
    151  return bvec->bv_len != PAGE_SIZE;    in is_partial_io()
    186  *index += (*offset + bvec->bv_len) / PAGE_SIZE;    in update_position()
    187  *offset = (*offset + bvec->bv_len) % PAGE_SIZE;    in update_position()
    700  bvec.bv_len = PAGE_SIZE;    in writeback_store()
    755  bio_add_page(&bio, bvec.bv_page, bvec.bv_len,    in writeback_store()
   1273  bvec.bv_len = PAGE_SIZE;    in __zram_bvec_read()
   1500  vec.bv_len = PAGE_SIZE;    in zram_bvec_write()
   1607  unsigned int unwritten = bvec.bv_len;    in __zram_make_request()
   1618  bv.bv_offset += bv.bv_len;    in __zram_make_request()
   1619  unwritten -= bv.bv_len;    in __zram_make_request()
    [all …]
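zram works in whole pages, so it treats any bio_vec with bv_len != PAGE_SIZE as partial I/O and advances its (page index, intra-page offset) position by bv_len. A sketch mirroring the two helpers listed above; the helper names here are illustrative:

/*
 * Sketch mirroring is_partial_io()/update_position() above.
 */
#include <linux/bvec.h>
#include <linux/mm.h>

static inline bool bvec_is_partial_page(const struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;	/* less than a full page */
}

static inline void advance_page_position(u32 *index, unsigned int *offset,
					 const struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}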
/linux/drivers/block/rsxx/

dma.c
    674  unsigned int bv_len;    in rsxx_dma_queue_bio() local
    692  bv_len = bio->bi_iter.bi_size;    in rsxx_dma_queue_bio()
    694  while (bv_len > 0) {    in rsxx_dma_queue_bio()
    706  bv_len -= RSXX_HW_BLK_SIZE;    in rsxx_dma_queue_bio()
    710  bv_len = bvec.bv_len;    in rsxx_dma_queue_bio()
    713  while (bv_len > 0) {    in rsxx_dma_queue_bio()
    717  dma_len = min(bv_len,    in rsxx_dma_queue_bio()
    732  bv_len -= dma_len;    in rsxx_dma_queue_bio()
/linux/net/ceph/

messenger_v2.c
    153  bv.bv_len = min(iov_iter_count(it),    in do_try_sendpage()
    154  it->bvec->bv_len - it->iov_offset);    in do_try_sendpage()
    167  bv.bv_offset, bv.bv_len,    in do_try_sendpage()
    280  con->v2.out_bvec.bv_len);    in set_out_bvec()
    293  con->v2.out_bvec.bv_len);    in set_out_bvec_zero()
    868  bv->bv_len = len;    in get_bvec_at()
    903  ceph_msg_data_advance(cursor, bv.bv_len);    in calc_sg_cnt_cursor()
   1758  con->v2.in_bvec.bv_len);    in prepare_read_data_cont()
   2906  con->v2.out_enc_resid -= bv.bv_len;    in queue_enc_page()
   3224  sent = con->v2.out_bvec.bv_len - resid;    in revoke_at_queue_data_cont()
    [all …]
/linux/drivers/xen/

biomerge.c
     15  return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;    in xen_biovec_phys_mergeable()
/linux/drivers/nvme/target/

io-cmd-file.c
     99  bv->bv_len = sg->length;    in nvmet_file_init_bvec()
    169  len += req->f.bvec[bv_cnt].bv_len;    in nvmet_file_execute_io()
    170  total_len += req->f.bvec[bv_cnt].bv_len;    in nvmet_file_execute_io()
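nvmet's file backend copies each scatterlist length into bv_len and then sums bv_len to size the I/O it hands to the VFS. A hedged sketch of that style of hand-off via an ITER_BVEC iterator; build_bvec_iter() is illustrative, while iov_iter_bvec() is the real kernel helper:

/*
 * Hedged sketch of a bio_vec -> iov_iter hand-off; the iterator size is
 * the sum of the elements' bv_len.  direction is READ or WRITE.
 */
#include <linux/bvec.h>
#include <linux/uio.h>

static void build_bvec_iter(struct iov_iter *iter, struct bio_vec *bvec,
			    unsigned long nr_segs, unsigned int direction)
{
	size_t total = 0;
	unsigned long i;

	for (i = 0; i < nr_segs; i++)
		total += bvec[i].bv_len;	/* total bytes described */

	iov_iter_bvec(iter, direction, bvec, nr_segs, total);
}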
/linux/drivers/nvdimm/

blk.c
     91  cur_len = min(len, bv.bv_len);    in nd_blk_rw_integrity()
    184  unsigned int len = bvec.bv_len;    in nd_blk_submit_bio()