Searched refs:bi_opf (Results 1 – 25 of 103) sorted by relevance


/linux/fs/xfs/
xfs_bio_io.c
41 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; in xfs_flush_bdev_async()
67 bio->bi_opf = op | REQ_META | REQ_SYNC; in xfs_rw_bdev()
80 bio->bi_opf = prev->bi_opf; in xfs_rw_bdev()
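
The xfs_flush_bdev_async() hit is the canonical "empty flush" pattern: a data-less write bio whose only effect is the cache flush requested by REQ_PREFLUSH. A minimal sketch of that pattern, assuming the pre-5.18 bio_alloc(gfp, nr_iovecs) signature this tree uses (the example_* name is invented for illustration):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Issue a data-less flush to bdev; completion is reported through end_io. */
static void example_flush_bdev_async(struct block_device *bdev,
                                     bio_end_io_t *end_io, void *private)
{
        struct bio *bio = bio_alloc(GFP_NOFS, 0);       /* zero bvecs: no payload */

        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
        bio->bi_end_io = end_io;
        bio->bi_private = private;
        submit_bio(bio);
}
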
/linux/block/
fops.c
92 bio.bi_opf = REQ_OP_READ; in __blkdev_direct_IO_simple()
96 bio.bi_opf = dio_bio_write_op(iocb); in __blkdev_direct_IO_simple()
100 bio.bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO_simple()
241 bio->bi_opf = REQ_OP_READ; in __blkdev_direct_IO()
245 bio->bi_opf = dio_bio_write_op(iocb); in __blkdev_direct_IO()
249 bio->bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO()
351 bio->bi_opf = REQ_OP_READ; in __blkdev_direct_IO_async()
357 bio->bi_opf = dio_bio_write_op(iocb); in __blkdev_direct_IO_async()
362 bio->bi_opf |= REQ_POLLED | REQ_NOWAIT; in __blkdev_direct_IO_async()
367 bio->bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO_async()
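
The block/fops.c hits above all derive bi_opf from the iocb that drove the direct I/O. A hedged sketch of that derivation (example_* is invented; REQ_POLLED is the 5.16+ spelling of the old REQ_HIPRI, and the exact write flags may differ from dio_bio_write_op()):

#include <linux/bio.h>
#include <linux/fs.h>

/* Derive an opf for a block-device direct-I/O bio from the iocb flags. */
static void example_dio_set_opf(struct bio *bio, struct kiocb *iocb, bool is_write)
{
        bio->bi_opf = is_write ? (REQ_OP_WRITE | REQ_SYNC | REQ_IDLE)
                               : REQ_OP_READ;
        if (iocb->ki_flags & IOCB_NOWAIT)       /* fail fast instead of blocking */
                bio->bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)        /* completion will be polled */
                bio->bi_opf |= REQ_POLLED;
}
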
blk-core.c
428 if (bio->bi_opf & REQ_NOWAIT) { in __bio_queue_enter()
583 bio_devname(bio, b), bio->bi_opf, in handle_bad_sector()
627 if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) in bio_check_ro()
716 bio->bi_opf |= REQ_NOMERGE; in blk_check_zone_append()
732 bio->bi_opf |= REQ_NOWAIT; in submit_bio_checks()
738 if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q)) in submit_bio_checks()
756 if (op_is_flush(bio->bi_opf) && in submit_bio_checks()
758 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA); in submit_bio_checks()
1209 if ((bio->bi_opf & ff) != ff) in blk_rq_err_bytes()
1641 sizeof_field(struct bio, bi_opf)); in blk_dev_init()
blk-map.c
158 bio->bi_opf |= req_op(rq); in bio_copy_user_iov()
247 bio->bi_opf |= req_op(rq); in bio_map_user_iov()
647 bio->bi_opf &= ~REQ_OP_MASK; in blk_rq_map_kern()
648 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
blk-merge.c
363 split->bi_opf |= REQ_NOMERGE; in __blk_queue_split()
731 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
732 (bio->bi_opf & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
733 bio->bi_opf |= ff; in blk_rq_set_mixed_merge()
966 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
990 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_front_merge()
bounce.c
173 bio->bi_opf = bio_src->bi_opf; in bounce_clone_bio()
blk-zoned.c
220 bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC; in blkdev_zone_reset_all_emulated()
244 bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC; in blkdev_zone_reset_all()
311 bio->bi_opf = op | REQ_SYNC; in blkdev_zone_mgmt()
blk-lib.c
271 bio->bi_opf = REQ_OP_WRITE_ZEROES; in __blkdev_issue_write_zeroes()
273 bio->bi_opf |= REQ_NOUNMAP; in __blkdev_issue_write_zeroes()
bio-integrity.c
85 bio->bi_opf |= REQ_INTEGRITY; in bio_integrity_alloc()
111 bio->bi_opf &= ~REQ_INTEGRITY; in bio_integrity_free()
blk-mq-sched.h
42 return !(bio->bi_opf & REQ_NOMERGE_FLAGS); in bio_mergeable()
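
Most of the block-core hits above read bi_opf rather than set it: the low REQ_OP_MASK bits are the operation (what bio_op()/req_op() return) and the remaining bits are REQ_* modifier flags tested through helpers such as op_is_flush(). A small sketch of those two kinds of test (example_* names are illustrative only):

#include <linux/bio.h>

/* A bio that asks for flush semantics but carries no data sectors. */
static bool example_is_empty_flush(struct bio *bio)
{
        return op_is_flush(bio->bi_opf) &&      /* REQ_PREFLUSH or REQ_FUA set */
               !bio_sectors(bio);               /* ...but nothing to transfer */
}

/* Only the low bits decide what the operation actually is. */
static bool example_is_write_like(struct bio *bio)
{
        return op_is_write(bio_op(bio));
}
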
/linux/include/linux/
blk_types.h
240 unsigned int bi_opf; /* bottom bits req flags, … (member)
455 ((bio)->bi_opf & REQ_OP_MASK)
461 bio->bi_opf = op | op_flags; in bio_set_op_attrs()
bio.h
295 if (bio->bi_opf & REQ_INTEGRITY) in bio_integrity()
736 bio->bi_opf |= REQ_POLLED; in bio_set_polled()
738 bio->bi_opf |= REQ_NOWAIT; in bio_set_polled()
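
blk_types.h and bio.h are where the encoding is defined: one REQ_OP_* value in the low bits, OR'ed with REQ_* flag bits above it, with bio_op() as the accessor and bio_set_op_attrs() as the (now obsolete) setter. A sketch of composing the field and reading it back (example_* is an invented name):

#include <linux/bio.h>

static bool example_compose_and_check(struct bio *bio)
{
        /* same effect as the obsolete bio_set_op_attrs(bio, REQ_OP_READ, REQ_RAHEAD) */
        bio->bi_opf = REQ_OP_READ | REQ_RAHEAD;

        return bio_op(bio) == REQ_OP_READ &&    /* low bits: the operation */
               (bio->bi_opf & REQ_RAHEAD);      /* upper bits: modifier flags */
}
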
/linux/drivers/md/bcache/
writeback.h
123 return (op_is_sync(bio->bi_opf) || in should_writeback()
124 bio->bi_opf & (REQ_META|REQ_PRIO) || in should_writeback()
request.c
202 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA); in bch_data_insert_start()
391 if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) { in check_should_bypass()
392 if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) && in check_should_bypass()
653 bio->bi_opf & REQ_PREFLUSH)) { in backing_request_endio()
745 s->iop.flush_journal = op_is_flush(bio->bi_opf); in search_alloc()
1021 if (bio->bi_opf & REQ_PREFLUSH) { in cached_dev_write()
1037 flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; in cached_dev_write()
debug.c
56 bio->bi_opf = REQ_OP_READ | REQ_META; in bch_btree_verify()
118 check->bi_opf = REQ_OP_READ; in bch_data_verify()
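
The bcache hits mostly use bi_opf for policy: synchronous, metadata or high-priority requests are worth caching or writing back, while readahead and background I/O may bypass the cache. A rough sketch of that kind of decision, loosely modelled on the should_writeback()/check_should_bypass() hits (example_* is invented):

#include <linux/bio.h>

static bool example_prefer_cache(struct bio *bio)
{
        if (bio->bi_opf & (REQ_RAHEAD | REQ_BACKGROUND))
                return false;                   /* opportunistic I/O: fine to bypass */

        return op_is_sync(bio->bi_opf) ||
               (bio->bi_opf & (REQ_META | REQ_PRIO));
}
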
/linux/include/trace/events/
block.h
252 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
279 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
436 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
477 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
bcache.h
31 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
105 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
140 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
171 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
241 blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
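
Every one of these tracepoints passes bi_opf to blk_fill_rwbs(), which turns it into the compact "rwbs" string seen in blktrace/blkparse output. A simplified, illustrative decoder in the same spirit (the real implementation lives in kernel/trace/blktrace.c and covers more opcodes):

#include <linux/blk_types.h>

static void example_fill_rwbs(char *rwbs, unsigned int opf)
{
        int i = 0;

        if (opf & REQ_PREFLUSH)
                rwbs[i++] = 'F';

        switch (opf & REQ_OP_MASK) {            /* the operation itself */
        case REQ_OP_WRITE:
                rwbs[i++] = 'W';
                break;
        case REQ_OP_DISCARD:
                rwbs[i++] = 'D';
                break;
        case REQ_OP_READ:
                rwbs[i++] = 'R';
                break;
        default:
                rwbs[i++] = 'N';
        }

        if (opf & REQ_FUA)                      /* modifier flags follow */
                rwbs[i++] = 'F';
        if (opf & REQ_RAHEAD)
                rwbs[i++] = 'A';
        if (opf & REQ_SYNC)
                rwbs[i++] = 'S';
        if (opf & REQ_META)
                rwbs[i++] = 'M';

        rwbs[i] = '\0';
}
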
/linux/drivers/md/
md-multipath.c
86 else if (!(bio->bi_opf & REQ_RAHEAD)) { in multipath_end_request()
107 if (unlikely(bio->bi_opf & REQ_PREFLUSH) in multipath_make_request()
129 mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT; in multipath_make_request()
326 bio->bi_opf |= REQ_FAILFAST_TRANSPORT; in multipathd()
dm-zone.c
135 return !op_is_flush(bio->bi_opf) && bio_sectors(bio); in dm_is_zone_write()
405 clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE | in dm_zone_map_bio_begin()
406 (orig_bio->bi_opf & (~REQ_OP_MASK)); in dm_zone_map_bio_begin()
502 if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio)) in dm_need_zone_wp_tracking()
dm-zero.c
40 if (bio->bi_opf & REQ_RAHEAD) { in zero_map()
dm-raid1.c
653 .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH), in do_write()
700 if ((bio->bi_opf & REQ_PREFLUSH) || in do_writes()
1210 if (bio->bi_opf & REQ_RAHEAD) in mirror_map()
1247 if (!(bio->bi_opf & REQ_PREFLUSH) && in mirror_end_io()
1256 if (bio->bi_opf & REQ_RAHEAD) in mirror_end_io()
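
dm and md targets inspect bi_opf to special-case flushes and to degrade readahead gracefully, as in the dm-zero and dm-raid1 hits. A sketch of a trivial device-mapper map callback in that spirit, assuming the usual target interface from include/linux/device-mapper.h (example_zero_map is an invented name, not the real zero_map()):

#include <linux/device-mapper.h>
#include <linux/bio.h>

static int example_zero_map(struct dm_target *ti, struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_READ:
                if (bio->bi_opf & REQ_RAHEAD)   /* readahead: not worth satisfying */
                        return DM_MAPIO_KILL;
                zero_fill_bio(bio);
                break;
        case REQ_OP_WRITE:
                break;                          /* silently discard writes */
        default:
                return DM_MAPIO_KILL;
        }

        bio_endio(bio);
        return DM_MAPIO_SUBMITTED;              /* bio handled, nothing remapped */
}
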
/linux/mm/
page_io.c
343 bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc); in __swap_writepage()
406 bio->bi_opf = REQ_OP_READ; in swap_readpage()
415 bio->bi_opf |= REQ_POLLED; in swap_readpage()
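
The swap paths assemble bi_opf from a writeback_control on the write side and optionally mark the read side for polling. A hedged sketch of both (example_* names are invented; wbc_to_write_flags() is the real helper from include/linux/writeback.h):

#include <linux/bio.h>
#include <linux/writeback.h>

static void example_swap_write_opf(struct bio *bio, struct writeback_control *wbc)
{
        /* REQ_SWAP tags the I/O as swap traffic; wbc adds REQ_SYNC/REQ_BACKGROUND */
        bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
}

static void example_swap_read_opf(struct bio *bio, bool polled)
{
        bio->bi_opf = REQ_OP_READ;
        if (polled)                             /* caller will poll for completion */
                bio->bi_opf |= REQ_POLLED;
}
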
/linux/drivers/nvme/target/
io-cmd-bdev.c
278 bio->bi_opf = op; in nvmet_bdev_execute_rw()
302 bio->bi_opf = op; in nvmet_bdev_execute_rw()
335 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; in nvmet_bdev_execute_flush()
zns.c
416 bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC; in nvmet_bdev_zone_mgmt_emulate_all()
559 bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE; in nvmet_bdev_execute_zone_append()
565 bio->bi_opf |= REQ_FUA; in nvmet_bdev_execute_zone_append()
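
The NVMe target translates fields of the incoming NVMe command into bi_opf bits, e.g. mapping the command's FUA bit onto REQ_FUA as in the hits above. A sketch of that translation for a write (example_* is invented; struct nvme_rw_command and NVME_RW_FUA are the real definitions from include/linux/nvme.h):

#include <linux/bio.h>
#include <linux/nvme.h>

static unsigned int example_nvme_write_opf(struct nvme_rw_command *rw)
{
        unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        if (le16_to_cpu(rw->control) & NVME_RW_FUA)     /* force unit access */
                opf |= REQ_FUA;
        return opf;
}
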
/linux/fs/iomap/
buffered-io.c
308 ctx->bio->bi_opf = REQ_OP_READ; in iomap_readpage_iter()
310 ctx->bio->bi_opf |= REQ_RAHEAD; in iomap_readpage_iter()
543 bio.bi_opf = REQ_OP_READ; in iomap_read_page_sync()
1192 bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc); in iomap_alloc_ioend()
1222 new->bi_opf = prev->bi_opf; in iomap_chain_bio()
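
iomap_chain_bio() (like xfs_rw_bdev() earlier) shows the chaining idiom: when one bio fills up, a continuation bio inherits the parent's bi_opf so the whole chain carries the same operation and flags. A sketch of that idiom, assuming the pre-5.18 bio_alloc()/bio_copy_dev() interfaces used in this tree (example_* is invented):

#include <linux/bio.h>

static struct bio *example_chain_bio(struct bio *prev)
{
        struct bio *new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);

        bio_copy_dev(new, prev);                /* same target device */
        new->bi_opf = prev->bi_opf;             /* same operation and flags */

        bio_chain(prev, new);                   /* new's completion waits for prev */
        submit_bio(prev);
        return new;
}
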
