/linux/block/
  blk-throttle.c
      479  tg->bps[WRITE][LIMIT_LOW] = 0;   in throtl_pd_offline()
      481  tg->iops[WRITE][LIMIT_LOW] = 0;   in throtl_pd_offline()
     1473  tg->bps_conf[WRITE][off]);   in tg_prfill_limit()
     1479  tg->iops_conf[WRITE][off]);   in tg_prfill_limit()
     1584  tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],   in tg_set_limit()
     1588  tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],   in tg_set_limit()
     1595  tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||   in tg_set_limit()
     1667  if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])   in __tg_last_low_overflow_time()
     1736  write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];   in throtl_tg_can_upgrade()
     1926  tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);   in throtl_downgrade_check()
      [all …]
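The blk-throttle hits above all index per-cgroup limits by I/O direction first and limit level second. A minimal standalone C sketch of that [direction][limit] convention; the struct and constants below are local stand-ins for illustration, not the kernel's own definitions:

/* Standalone sketch (not kernel code) of the [READ|WRITE][limit level]
 * indexing used by blk-throttle. */
#include <stdint.h>
#include <stdio.h>

#define READ  0
#define WRITE 1

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

struct tg_sketch {
    uint64_t bps[2][LIMIT_CNT];      /* bytes per second, [dir][limit] */
    unsigned int iops[2][LIMIT_CNT]; /* I/Os per second, same indexing */
};

int main(void)
{
    struct tg_sketch tg = {0};

    tg.bps[WRITE][LIMIT_LOW] = 10ULL << 20;  /* soft write limit: 10 MiB/s  */
    tg.bps[WRITE][LIMIT_MAX] = 100ULL << 20; /* hard write limit: 100 MiB/s */

    /* Dropping the low limit, as throtl_pd_offline() does on teardown. */
    tg.bps[WRITE][LIMIT_LOW] = 0;
    tg.iops[WRITE][LIMIT_LOW] = 0;

    printf("write cap: %llu bytes/s\n",
           (unsigned long long)tg.bps[WRITE][LIMIT_MAX]);
    return 0;
}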
/linux/drivers/md/
  dm-stats.c
      683  shared->tmp.ios[WRITE] = 0;   in __dm_stat_init_temporary_percpu_totals()
      699  shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);   in __dm_stat_init_temporary_percpu_totals()
      701  shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);   in __dm_stat_init_temporary_percpu_totals()
      703  shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);   in __dm_stat_init_temporary_percpu_totals()
      705  shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);   in __dm_stat_init_temporary_percpu_totals()
      707  shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);   in __dm_stat_init_temporary_percpu_totals()
      732  p->sectors[WRITE] -= shared->tmp.sectors[WRITE];   in __dm_stat_clear()
      734  p->ios[WRITE] -= shared->tmp.ios[WRITE];   in __dm_stat_clear()
      736  p->merges[WRITE] -= shared->tmp.merges[WRITE];   in __dm_stat_clear()
      738  p->ticks[WRITE] -= shared->tmp.ticks[WRITE];   in __dm_stat_clear()
      [all …]
  dm-flakey.c
      128  fc->corrupt_bio_rw = WRITE;   in parse_features()
      160  if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {   in parse_features()
      164  } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) {   in parse_features()
      310  (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,   in corrupt_bio_data()
      362  if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {   in flakey_map()
      439  (fc->corrupt_bio_rw == WRITE) ? 'w' : 'r',   in flakey_status()
  md-faulty.c
      109  case WritePersistent*2+WRITE: return 1;   in check_sector()
      112  case ReadFixable*2+WRITE:   in check_sector()
      116  case AllPersist*2+WRITE: return 1;   in check_sector()
      169  if (bio_data_dir(bio) == WRITE) {   in faulty_make_request()
      180  bio_end_sector(bio), WRITE))   in faulty_make_request()
  dm-crypt.c
      532  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_lmk_gen()
      550  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)   in crypt_iv_lmk_post()
      682  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {   in crypt_iv_tcw_gen()
      705  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)   in crypt_iv_tcw_post()
      981  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_elephant()
      988  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {   in crypt_iv_elephant()
      998  if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_elephant()
     1030  if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)   in crypt_iv_elephant_post()
     1353  if (bio_data_dir(ctx->bio_in) == WRITE) {   in crypt_convert_block_aead()
     1448  if (bio_data_dir(ctx->bio_in) == WRITE)   in crypt_convert_block_skcipher()
      [all …]
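dm-flakey, md-faulty and dm-crypt all branch on bio_data_dir(), which collapses a bio's operation to READ or WRITE, so WRITE here acts as a direction value rather than an opcode. A hedged kernel-style sketch of that pattern, assuming it is built in-tree; handle_bio() is an invented helper, not a function from these drivers:

#include <linux/bio.h>
#include <linux/kernel.h>

/* Illustrative only: branch on the data direction of a bio. */
static void handle_bio(struct bio *bio)
{
    if (bio_data_dir(bio) == WRITE) {
        /* payload flows from memory to the device */
        pr_debug("write bio: %u bytes at sector %llu\n",
                 bio->bi_iter.bi_size,
                 (unsigned long long)bio->bi_iter.bi_sector);
    } else {
        /* READ: payload flows from the device to memory */
        pr_debug("read bio: %u bytes\n", bio->bi_iter.bi_size);
    }
}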
/linux/drivers/s390/net/
  ctcm_sysfs.c
       95  priv->channel[WRITE]->prof.maxmulti);   in ctcm_print_statistics()
       97  priv->channel[WRITE]->prof.maxcqueue);   in ctcm_print_statistics()
       99  priv->channel[WRITE]->prof.doios_single);   in ctcm_print_statistics()
      101  priv->channel[WRITE]->prof.doios_multi);   in ctcm_print_statistics()
      103  priv->channel[WRITE]->prof.txlen);   in ctcm_print_statistics()
      105  jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time));   in ctcm_print_statistics()
      132  memset(&priv->channel[WRITE]->prof, 0,   in stats_write()
/linux/drivers/infiniband/ulp/rtrs/
  rtrs-clt-stats.c
       98  sum.dir[WRITE].cnt += r->dir[WRITE].cnt;   in rtrs_clt_stats_rdma_to_str()
       99  sum.dir[WRITE].size_total += r->dir[WRITE].size_total;   in rtrs_clt_stats_rdma_to_str()
      105  sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,   in rtrs_clt_stats_rdma_to_str()
  rtrs-srv-stats.c
       33  (s64)atomic64_read(&r->dir[WRITE].cnt),   in rtrs_srv_stats_rdma_to_str()
       34  (s64)atomic64_read(&r->dir[WRITE].size_total), 0);   in rtrs_srv_stats_rdma_to_str()
/linux/Documentation/admin-guide/device-mapper/
  log-writes.rst
        8  There is a log_write_entry written for every WRITE request and the target is
       10  that is in the WRITE requests is copied into the log to make the replay happen
       17  cache. This means that normal WRITE requests are not actually logged until the
       22  This works by attaching all WRITE requests to a list once the write completes.
       42  Any REQ_OP_DISCARD requests are treated like WRITE requests. Otherwise we would
       43  have all the DISCARD requests, and then the WRITE requests and then the FLUSH
       46  WRITE block 1, DISCARD block 1, FLUSH
       50  DISCARD 1, WRITE 1, FLUSH
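The log-writes excerpts describe deferring log entries until a write has completed, and sending DISCARDs down the same path as WRITEs so that replay keeps the submitted ordering (WRITE block 1, DISCARD block 1, FLUSH rather than DISCARD 1, WRITE 1, FLUSH). A standalone conceptual sketch of that idea, not the dm-log-writes implementation; every name below is invented:

#include <stdio.h>

enum entry_kind { ENTRY_WRITE, ENTRY_DISCARD };

struct log_entry {
    enum entry_kind kind;
    unsigned long long block;
};

/* Entries completed since the last flush, kept in completion order. */
static struct log_entry done[64];
static int ndone;

/* Called from the completion path only, never at submission time,
 * so a write that never finished can never show up in the log. */
static void log_on_complete(struct log_entry e)
{
    done[ndone++] = e;
}

/* On FLUSH, everything that has already completed is committed. */
static void log_on_flush(void)
{
    for (int i = 0; i < ndone; i++)
        printf("log: %s block %llu\n",
               done[i].kind == ENTRY_DISCARD ? "DISCARD" : "WRITE",
               done[i].block);
    ndone = 0;
}

int main(void)
{
    log_on_complete((struct log_entry){ ENTRY_WRITE, 1 });
    /* DISCARD is treated like a WRITE and uses the same path ... */
    log_on_complete((struct log_entry){ ENTRY_DISCARD, 1 });
    log_on_flush(); /* ... so replay sees WRITE 1, then DISCARD 1 */
    return 0;
}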
/linux/tools/testing/selftests/resctrl/
  resctrl_val.c
       21  #define WRITE 1   (macro)
      119  imc_counters_config[count][WRITE].event =   in get_event_and_umask()
      127  imc_counters_config[count][WRITE].umask =   in get_event_and_umask()
      171  imc_counters_config[count][WRITE].type =   in read_from_imc_dir()
      208  get_event_and_umask(cas_count_cfg, count, WRITE);   in read_from_imc_dir()
      338  &imc_counters_config[imc][WRITE];   in get_mem_bw_imc()
      373  close(imc_counters_config[imc][WRITE].fd);   in get_mem_bw_imc()
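resctrl_val.c defines its own READ/WRITE macros (0 and 1) and keeps one perf counter configuration per direction for every integrated memory controller, so [imc][WRITE] always selects the write-bandwidth counter. A standalone sketch of that layout; struct imc_cfg and the values below are invented placeholders, not the real perf event encodings:

#include <stdio.h>

#define READ  0
#define WRITE 1
#define MAX_IMCS 32

struct imc_cfg {
    unsigned int event;   /* perf event code for this direction   */
    unsigned int umask;   /* perf umask for this direction        */
    int fd;               /* perf event fd, once it is opened     */
};

/* One READ and one WRITE counter configuration per memory controller. */
static struct imc_cfg imc_counters_cfg[MAX_IMCS][2];

int main(void)
{
    imc_counters_cfg[0][READ].event  = 0x1;  /* placeholder value */
    imc_counters_cfg[0][WRITE].event = 0x2;  /* placeholder value */

    printf("imc0 write event: 0x%x\n", imc_counters_cfg[0][WRITE].event);
    return 0;
}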
/linux/Documentation/PCI/endpoint/
  pci-test-howto.rst
      223  WRITE ( 1 bytes): OKAY
      224  WRITE ( 1024 bytes): OKAY
      225  WRITE ( 1025 bytes): OKAY
      226  WRITE (1024000 bytes): OKAY
      227  WRITE (1024001 bytes): OKAY
  pci-test-function.rst
       78  the COPY/WRITE command.
       83  for the READ/WRITE/COPY and raise IRQ (Legacy/MSI) commands.
/linux/drivers/s390/block/
  dasd_fba.c
       88  if (rw == WRITE)   in define_extent()
      108  if (rw == WRITE)   in locate_record()
      358  define_extent(ccw++, cqr->data, WRITE, blksize, first_rec, count);   in dasd_fba_build_cp_discard()
      370  locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);   in dasd_fba_build_cp_discard()
      390  locate_record(ccw++, LO_data++, WRITE, cur_pos, d_count);   in dasd_fba_build_cp_discard()
      408  locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);   in dasd_fba_build_cp_discard()
      449  } else if (rq_data_dir(req) == WRITE) {   in dasd_fba_build_cp_regular()
      508  if (copy && rq_data_dir(req) == WRITE)   in dasd_fba_build_cp_regular()
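dasd_fba (and swim3 further down) use the request-level counterpart of bio_data_dir(): rq_data_dir() returns READ or WRITE for a struct request. A short hedged kernel-style sketch of that branch, assuming a kernel of roughly the vintage shown in this listing; build_request() is an invented helper:

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Illustrative only: branch on the data direction of a request. */
static void build_request(struct request *req)
{
    if (rq_data_dir(req) == WRITE) {
        /* payload moves from memory to the device */
        pr_debug("write request: %u bytes\n", blk_rq_bytes(req));
    } else {
        /* READ: payload moves from the device to memory */
        pr_debug("read request: %u bytes\n", blk_rq_bytes(req));
    }
}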
/linux/net/sunrpc/
  socklib.c
      216  iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);   in xprt_send_kvec()
      229  iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),   in xprt_send_pagedata()
      252  iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);   in xprt_send_rm_and_kvec()
/linux/tools/testing/selftests/powerpc/benchmarks/
  context_switch.c
      171  #define WRITE 1   (macro)
      191  assert(write(pipe_fd2[WRITE], &c, 1) == 1);   in pipe_thread1()
      203  assert(write(pipe_fd1[WRITE], &c, 1) == 1);   in pipe_thread2()
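context_switch.c uses READ/WRITE as indices into the two-element fd array that pipe() fills in: index 0 is the read end and index 1 is the write end. A standalone sketch of the same convention:

#include <assert.h>
#include <unistd.h>

#define READ  0
#define WRITE 1

int main(void)
{
    int pipe_fd[2];
    char c = 'x';

    assert(pipe(pipe_fd) == 0);
    assert(write(pipe_fd[WRITE], &c, 1) == 1);  /* send one byte      */
    assert(read(pipe_fd[READ], &c, 1) == 1);    /* receive it back    */

    close(pipe_fd[READ]);
    close(pipe_fd[WRITE]);
    return 0;
}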
/linux/fs/f2fs/
  file.c
      972  up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_setattr()
     1367  up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_do_collapse()
     1620  up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_insert_range()
     2041  up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_ioc_start_atomic_write()
     2817  down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);   in f2fs_move_file_range()
     2838  up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);   in f2fs_move_file_range()
     2840  up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);   in f2fs_move_file_range()
     3212  down_write(&fi->i_gc_rwsem[WRITE]);   in f2fs_precache_extents()
     3214  up_write(&fi->i_gc_rwsem[WRITE]);   in f2fs_precache_extents()
     3496  up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_release_compress_blocks()
      [all …]
  verity.c
      211  down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_end_enable_verity()
      219  up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);   in f2fs_end_enable_verity()
/linux/fs/
  read_write.c
      501  iov_iter_init(&iter, WRITE, &iov, 1, len);   in new_sync_write()
      534  iov_iter_kvec(&iter, WRITE, &iov, 1, iov.iov_len);   in __kernel_write()
      559  ret = rw_verify_area(WRITE, file, pos, count);   in kernel_write()
      581  ret = rw_verify_area(WRITE, file, pos, count);   in vfs_write()
      846  ret = rw_verify_area(WRITE, file, pos, tot_len);   in do_iter_write()
      851  ret = do_iter_readv_writev(file, iter, pos, WRITE, flags);   in do_iter_write()
      853  ret = do_loop_readv_writev(file, iter, pos, WRITE, flags);   in do_iter_write()
      875  ret = rw_verify_area(WRITE, file, &iocb->ki_pos, tot_len);   in vfs_iocb_iter_write()
      921  ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);   in vfs_writev()
     1241  retval = rw_verify_area(WRITE, out.file, &out_pos, count);   in do_sendfile()
      [all …]
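In read_write.c (and in net/sunrpc/socklib.c above) WRITE also serves as the iov_iter data direction: the iterator describes the source buffer for data going out. A hedged kernel-style sketch modelled loosely on __kernel_write(), assuming a kernel of the vintage shown here, before the direction constants became ITER_SOURCE/ITER_DEST; kernel_write_buf() is an invented wrapper, not a VFS API:

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t kernel_write_buf(struct file *file, const void *buf,
                                size_t count, loff_t *pos)
{
    struct kvec iov = { .iov_base = (void *)buf, .iov_len = count };
    struct kiocb kiocb;
    struct iov_iter iter;
    ssize_t ret;

    init_sync_kiocb(&kiocb, file);
    kiocb.ki_pos = *pos;
    /* WRITE: the iterator is the source of the data being written. */
    iov_iter_kvec(&iter, WRITE, &iov, 1, count);

    /* The real helpers also call rw_verify_area(WRITE, ...) first. */
    ret = call_write_iter(file, &kiocb, &iter);
    if (ret > 0)
        *pos = kiocb.ki_pos;
    return ret;
}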
/linux/drivers/gpu/drm/nouveau/dispnv50/
  core507d.c
       44  NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |   in core507d_update()
       91  NVDEF(NV507D, SET_NOTIFIER_CONTROL, MODE, WRITE) |   in core507d_read_caps()
/linux/drivers/block/drbd/
  drbd_req.c
       37  req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)   in drbd_req_new()
      809  if (bio_data_dir(req->master_bio) == WRITE)   in __req_mod()
     1224  if (rw == WRITE && req->private_bio && req->i.size   in drbd_request_prepare()
     1315  if (rw == WRITE) {   in drbd_send_and_submit()
     1342  if (rw != WRITE) {   in drbd_send_and_submit()
     1353  if (rw == WRITE)   in drbd_send_and_submit()
     1359  if (rw == WRITE) {   in drbd_send_and_submit()
     1388  &device->pending_master_completion[rw == WRITE]);   in drbd_send_and_submit()
     1393  &device->pending_completion[rw == WRITE]);   in drbd_send_and_submit()
     1439  if (rw == WRITE /* rw != WRITE should not even end up here! */   in submit_fast_path()
/linux/drivers/scsi/ufs/
  ufs-sysfs.c
      414  return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);   in write_total_sectors_show()
      423  ktime_to_us(hba->monitor.total_busy[WRITE]));   in write_total_busy_show()
      431  return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);   in write_nr_requests_show()
      441  return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),   in write_req_latency_avg_show()
      442  m->nr_req[WRITE]));   in write_req_latency_avg_show()
      452  ktime_to_us(hba->monitor.lat_max[WRITE]));   in write_req_latency_max_show()
      462  ktime_to_us(hba->monitor.lat_min[WRITE]));   in write_req_latency_min_show()
      472  ktime_to_us(hba->monitor.lat_sum[WRITE]));   in write_req_latency_sum_show()
/linux/drivers/block/
  swim3.c
      333  if (rq_data_dir(req) == WRITE) {   in swim3_queue_rq()
      444  if (rq_data_dir(req) == WRITE)   in setup_transfer()
      461  if (rq_data_dir(req) == WRITE) {   in setup_transfer()
      478  if (rq_data_dir(req) == WRITE)   in setup_transfer()
      649  (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),   in xfer_timeout()
      734  if (rq_data_dir(req) == WRITE)   in swim3_interrupt()
      767  rq_data_dir(req) == WRITE? "writ": "read",   in swim3_interrupt()
/linux/fs/9p/
  vfs_addr.c
      179  iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);   in v9fs_vfs_write_folio_locked()
      259  if (iov_iter_rw(iter) == WRITE) {   in v9fs_direct_IO()
/linux/tools/perf/trace/beauty/
  flock.c
       44  P_CMD(WRITE);   in syscall_arg__scnprintf_flock()
/linux/Documentation/ABI/testing/
  sysfs-devices-platform-_UDC_-gadget
       17  the SCSI WRITE(10,12) commands when a gadget in USB Mass
|