/linux/drivers/gpu/drm/i915/gt/

  gen7_renderclear.c
      51  return bv->max_threads;   in num_primitives()
      61  bv->max_threads = 70;   in batch_get_defaults()
      64  bv->max_threads = 140;   in batch_get_defaults()
      67  bv->max_threads = 280;   in batch_get_defaults()
      76  bv->max_threads = 36;   in batch_get_defaults()
      85  bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);   in batch_get_defaults()
      86  bv->surface_start = bv->state_start + SZ_4K;   in batch_get_defaults()
      87  bv->size = bv->surface_start + bv->surface_height * bv->surface_width;   in batch_get_defaults()
     178  gen7_fill_surface_state(state, bv->surface_start, bv);   in gen7_fill_binding_table()
     438  return bv.size;   in gen7_setup_clear_gpr_bb()
    [all …]

/linux/include/linux/

  bvec.h
      50  struct bio_vec bv;   member
     115  while (bytes && bytes >= bv[idx].bv_len) {   in bvec_iter_advance()
     116  bytes -= bv[idx].bv_len;   in bvec_iter_advance()
     134  if (done == bv[iter->bi_idx].bv_len) {   in bvec_iter_advance_single()
     162  return &iter_all->bv;   in bvec_init_iter_all()
     168  struct bio_vec *bv = &iter_all->bv;   in bvec_advance() local
     171  bv->bv_page++;   in bvec_advance()
     172  bv->bv_offset = 0;   in bvec_advance()
     175  bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;   in bvec_advance()
     177  bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,   in bvec_advance()
    [all …]

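The bvec_iter_advance() hits above show how the block layer walks a bio_vec array without touching it: the iterator carries a remaining byte count (bi_size), an index into the array (bi_idx), and a byte offset into the current vector (bi_bvec_done). A minimal sketch of that advance step against the in-kernel types, assuming the caller never advances past the end of the bio (the function name is mine, not a kernel symbol):

    #include <linux/bvec.h>

    /*
     * Sketch of the advance step shown above: consume 'bytes' from the iterator
     * without modifying the bio_vec array itself.  The real helper additionally
     * WARNs and clamps if the caller tries to overrun the bio.
     */
    static void bvec_iter_advance_sketch(const struct bio_vec *bv,
                                         struct bvec_iter *iter, unsigned int bytes)
    {
            iter->bi_size -= bytes;
            bytes += iter->bi_bvec_done;            /* fold in the partially consumed vector */

            while (bytes && bytes >= bv[iter->bi_idx].bv_len) {
                    bytes -= bv[iter->bi_idx].bv_len;   /* this vector is fully used up */
                    iter->bi_idx++;
            }
            iter->bi_bvec_done = bytes;             /* leftover inside the new current vector */
    }
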
/linux/drivers/md/bcache/

  util.c
     234  struct bio_vec *bv = bio->bi_io_vec;   in bch_bio_map() local
     242  for (; size; bio->bi_vcnt++, bv++) {   in bch_bio_map()
     243  bv->bv_offset = 0;   in bch_bio_map()
     244  start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,   in bch_bio_map()
     251  base += bv->bv_len;   in bch_bio_map()
     254  size -= bv->bv_len;   in bch_bio_map()
     271  struct bio_vec *bv;   in bch_bio_alloc_pages() local
     277  for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) {   in bch_bio_alloc_pages()
     279  if (!bv->bv_page) {   in bch_bio_alloc_pages()
     280  while (--bv >= bio->bi_io_vec)   in bch_bio_alloc_pages()
    [all …]

  debug.c
     111  struct bio_vec bv, cbv;   in bch_data_verify() local
     129  bio_for_each_segment(bv, bio, iter) {   in bch_data_verify()
     130  void *p1 = bvec_kmap_local(&bv);   in bch_data_verify()
     136  cache_set_err_on(memcmp(p1, p2, bv.bv_len),   in bch_data_verify()
     144  bio_advance_iter(check, &citer, bv.bv_len);   in bch_data_verify()

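bch_data_verify() above is a typical reader of bio payloads: bio_for_each_segment() yields one single-page struct bio_vec per chunk, bvec_kmap_local() maps it for CPU access, and kunmap_local() releases the mapping. A hedged sketch of that pattern; the checksum is only a placeholder for whatever the caller does with the bytes, and the function name is mine:

    #include <linux/bio.h>
    #include <linux/bvec.h>
    #include <linux/highmem.h>

    /* Sketch: walk every single-page segment of a bio and fold the payload into a checksum. */
    static u8 bio_xor_all_bytes_sketch(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bv;
            u8 csum = 0;
            size_t i;

            bio_for_each_segment(bv, bio, iter) {
                    u8 *p = bvec_kmap_local(&bv);   /* maps exactly one page */

                    for (i = 0; i < bv.bv_len; i++)
                            csum ^= p[i];
                    kunmap_local(p);
            }
            return csum;
    }
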
/linux/arch/parisc/kernel/

  entry.S
     142  bv,n 0(%r3)
     756  bv %r0(%r2)
     765  bv %r0(%r2)
     924  bv %r0(%r20)
     953  bv %r0(%r1)
    1738  bv %r0(%r2)
    1965  bv %r0(%rp)
    2033  bv,n (%r1)
    2129  bv,n (%r1)
    2174  bv %r0(%rp)
    [all …]

  real2.S
      92  bv 0(%r31)
     114  bv 0(%rp)
     134  bv 0(%r2)
     149  bv 0(%r2)
     190  bv 0(%r2)
     227  bv 0(%r2)
     274  bv 0(%r31)
     287  bv 0(%rp)
     302  bv %r0(%r2)

  hpmc.S
     156  bv (r3) /* call pdce_proc */
     171  bv (%r3) /* call pdce_proc */
     201  bv (%r3) /* call pdce_proc */
     223  bv (%r5)
     271  bv (%r3) /* call pdce_proc */

  pacache.S
     184  bv,n %r0(%r2)
     245  bv %r0(%r2)
     307  bv %r0(%r2)
     370  bv %r0(%r2)
     481  bv %r0(%r2)
     682  bv %r0(%r2)
     760  bv %r0(%r2)
     819  bv %r0(%r2)
     878  bv %r0(%r2)
     947  bv %r0(%r2)
    [all …]

/linux/drivers/net/ethernet/netronome/nfp/bpf/

  main.c
      66  struct nfp_bpf_vnic *bv;   in nfp_bpf_vnic_alloc() local
      79  bv = kzalloc(sizeof(*bv), GFP_KERNEL);   in nfp_bpf_vnic_alloc()
      80  if (!bv)   in nfp_bpf_vnic_alloc()
      82  nn->app_priv = bv;   in nfp_bpf_vnic_alloc()
     101  WARN_ON(bv->tc_prog);   in nfp_bpf_vnic_free()
     102  kfree(bv);   in nfp_bpf_vnic_free()
     111  struct nfp_bpf_vnic *bv;   in nfp_bpf_setup_tc_block_cb() local
     143  bv = nn->app_priv;   in nfp_bpf_setup_tc_block_cb()
     185  struct nfp_bpf_vnic *bv;   in nfp_bpf_check_mtu() local
     194  bv = nn->app_priv;   in nfp_bpf_check_mtu()
    [all …]

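Here bv is not a bio_vec but the driver's per-vNIC BPF state: kzalloc() it in the alloc callback, stash it in nn->app_priv, fetch it back wherever it is needed, and kfree() it on teardown after checking that no TC program is still attached. A minimal sketch of that lifecycle; struct my_vnic_priv and the function names are illustrative stand-ins, not the driver's real symbols:

    #include <linux/bpf.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* Illustrative stand-in for the driver's per-vNIC state (cf. struct nfp_bpf_vnic). */
    struct my_vnic_priv {
            struct bpf_prog *tc_prog;       /* currently attached TC program, if any */
    };

    /* Allocate the private state and park it in an opaque per-device slot. */
    static int my_vnic_alloc(void **app_priv)
    {
            struct my_vnic_priv *bv;

            bv = kzalloc(sizeof(*bv), GFP_KERNEL);
            if (!bv)
                    return -ENOMEM;
            *app_priv = bv;
            return 0;
    }

    /* Release it again; by this point nothing should still be attached. */
    static void my_vnic_free(void *app_priv)
    {
            struct my_vnic_priv *bv = app_priv;

            WARN_ON(bv->tc_prog);
            kfree(bv);
    }
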
/linux/block/

  blk-merge.c
      28  bio_get_first_bvec(bio, bv);   in bio_get_last_bvec()
      29  if (bv->bv_len == bio->bi_iter.bi_size)   in bio_get_last_bvec()
      39  *bv = bio->bi_io_vec[idx];   in bio_get_last_bvec()
      46  bv->bv_len = iter.bi_bvec_done;   in bio_get_last_bvec()
     242  bv->bv_offset + total_len);   in bvec_split_segs()
     299  bv.bv_offset + bv.bv_len <= PAGE_SIZE) {   in blk_bio_segment_split()
     301  sectors += bv.bv_len >> 9;   in blk_bio_segment_split()
     307  bvprv = bv;   in blk_bio_segment_split()
     399  struct bio_vec bv;   in blk_recalc_rq_segments() local
     421  rq_for_each_bvec(bv, rq, iter)   in blk_recalc_rq_segments()
    [all …]

  bio.c
     533  struct bio_vec bv;   in zero_fill_bio() local
     537  memzero_bvec(&bv);   in zero_fill_bio()
     553  struct bio_vec bv;   in bio_truncate() local
     572  zero_user(bv.bv_page, offset, bv.bv_len - offset);   in bio_truncate()
     575  done += bv.bv_len;   in bio_truncate()
     798  size_t bv_end = bv->bv_offset + bv->bv_len;   in page_is_mergeable()
     843  bv->bv_len += len;   in __bio_try_merge_page()
     862  phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;   in bio_try_merge_hw_seg()
    1000  bv->bv_page = page;   in __bio_add_page()
    1001  bv->bv_offset = off;   in __bio_add_page()
    [all …]

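The page_is_mergeable()/bio_try_merge_hw_seg() hits circle around one question: does a new (page, offset) start exactly where the bio's last bvec ends in physical memory, so the existing bvec can simply grow instead of a new entry being added? A hedged, simplified sketch of that contiguity test (the real helpers also handle the same-page case and queue segment limits; the function name is mine):

    #include <linux/bvec.h>
    #include <linux/mm.h>
    #include <asm/io.h>             /* page_to_phys() */

    /*
     * Sketch: true if (page, off) is physically contiguous with the end of 'bv',
     * i.e. the existing vector could be extended by the new bytes.
     */
    static bool bvec_can_extend_sketch(const struct bio_vec *bv,
                                       struct page *page, unsigned int off)
    {
            phys_addr_t vec_end = page_to_phys(bv->bv_page) +
                                  bv->bv_offset + bv->bv_len;

            return vec_end == page_to_phys(page) + off;
    }
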
  blk-crypto-fallback.c
     162  struct bio_vec bv;   in blk_crypto_fallback_clone_bio() local
     177  bio_for_each_segment(bv, bio_src, iter)   in blk_crypto_fallback_clone_bio()
     178  bio->bi_io_vec[bio->bi_vcnt++] = bv;   in blk_crypto_fallback_clone_bio()
     215  struct bio_vec bv;   in blk_crypto_fallback_split_bio_if_needed() local
     218  bio_for_each_segment(bv, bio, iter) {   in blk_crypto_fallback_split_bio_if_needed()
     219  num_sectors += bv.bv_len >> SECTOR_SHIFT;   in blk_crypto_fallback_split_bio_if_needed()
     388  struct bio_vec bv;   in blk_crypto_fallback_decrypt_bio()
     417  __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {   in blk_crypto_fallback_decrypt_bio()
     418  struct page *page = bv.bv_page;   in blk_crypto_fallback_decrypt_bio()
     420  sg_set_page(&sg, page, data_unit_size, bv.bv_offset);   in blk_crypto_fallback_decrypt_bio()
    [all …]

  blk-crypto.c
     209  struct bio_vec bv;   in bio_crypt_check_alignment() local
     211  bio_for_each_segment(bv, bio, iter) {   in bio_crypt_check_alignment()
     212  if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))   in bio_crypt_check_alignment()

/linux/drivers/block/

  n64cart.c
      62  static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)   in n64cart_do_bvec() argument
      68  WARN_ON_ONCE((bv->bv_offset & (MIN_ALIGNMENT - 1)) ||   in n64cart_do_bvec()
      69  (bv->bv_len & (MIN_ALIGNMENT - 1)));   in n64cart_do_bvec()
      71  dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0);   in n64cart_do_bvec()
      79  n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1);   in n64cart_do_bvec()
      83  dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE);   in n64cart_do_bvec()

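n64cart_do_bvec() brackets the hardware transfer with dma_map_bvec()/dma_unmap_page(): the bio_vec is mapped as a single DMA segment, the controller is pointed at the resulting bus address, and the mapping is torn down afterwards. A hedged sketch of that bracket, with the device-specific register writes reduced to a placeholder comment and the function name being mine:

    #include <linux/bvec.h>
    #include <linux/dma-mapping.h>

    /* Sketch: DMA-map one bio_vec, let the device fill it, then unmap it again. */
    static bool do_bvec_dma_sketch(struct device *dev, struct bio_vec *bv)
    {
            dma_addr_t dma_addr;

            dma_addr = dma_map_bvec(dev, bv, DMA_FROM_DEVICE, 0);
            if (dma_mapping_error(dev, dma_addr))
                    return false;

            /* ... program the controller with dma_addr / bv->bv_len and wait ... */

            dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE);
            return true;
    }
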
/linux/drivers/s390/block/

  dasd_fba.c
     440  struct bio_vec bv;   in dasd_fba_build_cp_regular() local
     461  rq_for_each_segment(bv, req, iter) {   in dasd_fba_build_cp_regular()
     462  if (bv.bv_len & (blksize - 1))   in dasd_fba_build_cp_regular()
     466  if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))   in dasd_fba_build_cp_regular()
     467  cidaw += bv.bv_len / blksize;   in dasd_fba_build_cp_regular()
     504  dst = bvec_virt(&bv);   in dasd_fba_build_cp_regular()
     509  memcpy(copy + bv.bv_offset, dst, bv.bv_len);   in dasd_fba_build_cp_regular()
     511  dst = copy + bv.bv_offset;   in dasd_fba_build_cp_regular()
     572  struct bio_vec bv;   in dasd_fba_free_cp() local
     586  dst = bvec_virt(&bv);   in dasd_fba_free_cp()
    [all …]

  dasd_diag.c
     515  struct bio_vec bv;   in dasd_diag_build_cp() local
     535  rq_for_each_segment(bv, req, iter) {   in dasd_diag_build_cp()
     536  if (bv.bv_len & (blksize - 1))   in dasd_diag_build_cp()
     539  count += bv.bv_len >> (block->s2b_shift + 9);   in dasd_diag_build_cp()
     554  rq_for_each_segment(bv, req, iter) {   in dasd_diag_build_cp()
     555  dst = bvec_virt(&bv);   in dasd_diag_build_cp()
     556  for (off = 0; off < bv.bv_len; off += blksize) {   in dasd_diag_build_cp()

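Both DASD variants iterate at the request level: rq_for_each_segment() walks every bio_vec queued in the struct request, bvec_virt() turns a vector into a kernel virtual address (safe here because the payload sits in lowmem), and each segment is checked to be a whole number of device blocks. A hedged sketch of that loop shape, with the channel-program construction replaced by a plain copy into a caller-supplied buffer (names are mine):

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>
    #include <linux/bvec.h>
    #include <linux/string.h>

    /* Sketch: copy a request's payload into 'buf', rejecting partial device blocks. */
    static int copy_request_payload_sketch(struct request *req, unsigned int blksize,
                                           u8 *buf, size_t buflen)
    {
            struct req_iterator iter;
            struct bio_vec bv;
            size_t done = 0;

            rq_for_each_segment(bv, req, iter) {
                    if (bv.bv_len & (blksize - 1))
                            return -EINVAL;         /* segment is not block aligned */
                    if (done + bv.bv_len > buflen)
                            return -ENOSPC;
                    memcpy(buf + done, bvec_virt(&bv), bv.bv_len);
                    done += bv.bv_len;
            }
            return 0;
    }
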
/linux/fs/crypto/

  bio.c
      33  struct bio_vec *bv;   in fscrypt_decrypt_bio() local
      36  bio_for_each_segment_all(bv, bio, iter_all) {   in fscrypt_decrypt_bio()
      37  struct page *page = bv->bv_page;   in fscrypt_decrypt_bio()
      38  int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,   in fscrypt_decrypt_bio()
      39  bv->bv_offset);   in fscrypt_decrypt_bio()

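fscrypt_decrypt_bio() uses the other iterator family: bio_for_each_segment_all() hands out pointers straight into the bio's bvec table, one page at a time, with no struct bvec_iter involved; that is only legal for a bio the caller owns outright, typically in a read-completion path. A hedged sketch of such a post-read pass; process_page() is a hypothetical per-page hook standing in for the decrypt call, not an fscrypt function:

    #include <linux/bio.h>
    #include <linux/mm.h>

    /* Hypothetical per-page hook, standing in for a decrypt or verification step. */
    static int process_page(struct page *page, unsigned int len, unsigned int off)
    {
            return 0;       /* pretend every page checks out */
    }

    /* Sketch: after a read bio completes, run a per-page fixup over every bvec. */
    static void post_read_fixup_sketch(struct bio *bio)
    {
            struct bvec_iter_all iter_all;
            struct bio_vec *bv;

            bio_for_each_segment_all(bv, bio, iter_all) {
                    if (process_page(bv->bv_page, bv->bv_len, bv->bv_offset) < 0)
                            SetPageError(bv->bv_page);
            }
    }
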
/linux/fs/cifs/

  misc.c
     963  if (ctx->bv) {   in cifs_aio_ctx_release()
     969  put_page(ctx->bv[i].bv_page);   in cifs_aio_ctx_release()
     971  kvfree(ctx->bv);   in cifs_aio_ctx_release()
     992  struct bio_vec *bv = NULL;   in setup_aio_ctx_iter() local
    1002  bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);   in setup_aio_ctx_iter()
    1004  if (!bv) {   in setup_aio_ctx_iter()
    1005  bv = vmalloc(array_size(max_pages, sizeof(*bv)));   in setup_aio_ctx_iter()
    1006  if (!bv)   in setup_aio_ctx_iter()
    1016  kvfree(bv);   in setup_aio_ctx_iter()
    1050  bv[npages + i].bv_offset = start;   in setup_aio_ctx_iter()
    [all …]

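setup_aio_ctx_iter() shows the classic large-array allocation fallback: try kmalloc_array() first, drop back to vmalloc(array_size()) when physically contiguous memory is not available, and release either result with kvfree(), exactly as cifs_aio_ctx_release() does. A minimal sketch of that pattern for a bio_vec array (newer code could simply call kvmalloc_array(); the function name is mine):

    #include <linux/bvec.h>
    #include <linux/overflow.h>     /* array_size() */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Sketch: allocate 'max_pages' bio_vecs, preferring kmalloc, tolerating vmalloc. */
    static struct bio_vec *alloc_bvec_array_sketch(unsigned int max_pages)
    {
            struct bio_vec *bv;

            bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
            if (!bv)
                    bv = vmalloc(array_size(max_pages, sizeof(*bv)));
            return bv;      /* caller frees with kvfree(bv), whichever branch won */
    }
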
/linux/fs/orangefs/

  inode.c
      25  struct bio_vec bv;   in orangefs_writepage_locked() local
      52  bv.bv_page = page;   in orangefs_writepage_locked()
      53  bv.bv_len = wlen;   in orangefs_writepage_locked()
      87  struct bio_vec *bv;   member
     113  ow->bv[i].bv_offset = 0;   in orangefs_writepages_work()
     230  if (!ow->bv) {   in orangefs_writepages()
     241  kfree(ow->bv);   in orangefs_writepages()
     295  struct bio_vec bv;   in orangefs_readpage() local
     303  bv.bv_page = page;   in orangefs_readpage()
     304  bv.bv_len = PAGE_SIZE;   in orangefs_readpage()
    [all …]

/linux/drivers/md/

  dm-ebs-target.c
      64  static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter)   in __ebs_rw_bvec() argument
      69  unsigned int bv_len = bv->bv_len;   in __ebs_rw_bvec()
      74  if (unlikely(!bv->bv_page || !bv_len))   in __ebs_rw_bvec()
      77  pa = bvec_virt(bv);   in __ebs_rw_bvec()
     100  flush_dcache_page(bv->bv_page);   in __ebs_rw_bvec()
     102  flush_dcache_page(bv->bv_page);   in __ebs_rw_bvec()
     123  struct bio_vec bv;   in __ebs_rw_bio() local
     126  bio_for_each_bvec(bv, bio, iter) {   in __ebs_rw_bio()
     127  rr = __ebs_rw_bvec(ec, rw, &bv, &iter);   in __ebs_rw_bio()

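__ebs_rw_bio() uses yet another iterator, bio_for_each_bvec(), which yields multi-page bvecs: a single struct bio_vec here can describe several physically contiguous pages, and the per-bvec helper then resolves it with bvec_virt() and flushes the data cache around the copy. A hedged sketch of dispatching each multi-page bvec to a worker; handle_bvec_sketch() is illustrative, not the target's real helper:

    #include <linux/bio.h>
    #include <linux/bvec.h>
    #include <linux/errno.h>

    /* Illustrative worker: validate the bvec; a real one would copy via bvec_virt(). */
    static int handle_bvec_sketch(struct bio_vec *bv)
    {
            if (unlikely(!bv->bv_page || !bv->bv_len))
                    return -EIO;
            return 0;
    }

    /* Sketch: walk a bio in (possibly multi-page) bvecs, stopping at the first error. */
    static int rw_bio_sketch(struct bio *bio)
    {
            struct bvec_iter iter;
            struct bio_vec bv;
            int r = 0, rr;

            bio_for_each_bvec(bv, bio, iter) {
                    rr = handle_bvec_sketch(&bv);
                    if (rr) {
                            r = rr;
                            break;
                    }
            }
            return r;
    }
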
/linux/net/ceph/

  messenger_v2.c
     143  struct bio_vec bv;   in do_try_sendpage() local
     167  bv.bv_offset, bv.bv_len,   in do_try_sendpage()
     170  iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len);   in do_try_sendpage()
     866  bv->bv_page = page;   in get_bvec_at()
     868  bv->bv_len = len;   in get_bvec_at()
     892  struct bio_vec bv;   in calc_sg_cnt_cursor() local
     944  struct bio_vec bv;   in init_sgs_cursor() local
     951  sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);   in init_sgs_cursor()
    1738  struct bio_vec bv;   in prepare_read_data() local
    1752  struct bio_vec bv;   in prepare_read_data_cont() local
    [all …]

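do_try_sendpage() hands a single bio_vec straight to the networking stack by wrapping it in a bvec-backed iov_iter, avoiding any intermediate copy. A hedged sketch of that hand-off (the function name is mine, error handling is trimmed, and on kernels newer than this listing the direction constant is spelled ITER_SOURCE instead of WRITE):

    #include <linux/bvec.h>
    #include <linux/net.h>
    #include <linux/socket.h>
    #include <linux/uio.h>

    /* Sketch: send the bytes described by one bio_vec over a connected kernel socket. */
    static int send_bvec_sketch(struct socket *sock, struct bio_vec *bv)
    {
            struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };

            iov_iter_bvec(&msg.msg_iter, WRITE, bv, 1, bv->bv_len);
            return sock_sendmsg(sock, &msg);
    }
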
/linux/drivers/nvdimm/

  blk.c
      81  struct bio_vec bv;   in nd_blk_rw_integrity() local
      84  bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);   in nd_blk_rw_integrity()
      91  cur_len = min(len, bv.bv_len);   in nd_blk_rw_integrity()
      92  iobuf = kmap_atomic(bv.bv_page);   in nd_blk_rw_integrity()
      93  err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,   in nd_blk_rw_integrity()

/linux/fs/verity/

  verify.c
     229  struct bio_vec *bv;   in fsverity_verify_bio() local
     246  bio_for_each_segment_all(bv, bio, iter_all)   in fsverity_verify_bio()
     251  bio_for_each_segment_all(bv, bio, iter_all) {   in fsverity_verify_bio()
     252  struct page *page = bv->bv_page;   in fsverity_verify_bio()

/linux/drivers/gpu/drm/panel/

  panel-sony-acx565akm.c
     271  u8 bv;   in acx565akm_get_actual_brightness() local
     273  acx565akm_read(lcd, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, &bv, 1);   in acx565akm_get_actual_brightness()
     275  return bv;   in acx565akm_get_actual_brightness()
     281  int bv;   in acx565akm_set_brightness() local
     283  bv = level | (1 << 8);   in acx565akm_set_brightness()
     284  acx565akm_write(lcd, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, (u8 *)&bv, 2);   in acx565akm_set_brightness()

/linux/drivers/gpu/drm/i915/display/

  intel_tv.c
      63  u16 rv, gv, bv, av;   member
     191  .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
     201  .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
     211  .rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
     221  .rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
     231  .rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
     241  .rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
     251  .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
     261  .rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
     271  .rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
    [all …]