Lines matching refs:segbuf (identifier cross-reference for "segbuf", apparently from fs/nilfs2/segbuf.c, the NILFS2 segment-buffer code in the Linux kernel). Each entry shows the source line number, the matching code, and the containing function; "local" and "argument" indicate how segbuf is declared at that point.

30 static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
32 static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
36 struct nilfs_segment_buffer *segbuf; in nilfs_segbuf_new() local
38 segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS); in nilfs_segbuf_new()
39 if (unlikely(!segbuf)) in nilfs_segbuf_new()
42 segbuf->sb_super = sb; in nilfs_segbuf_new()
43 INIT_LIST_HEAD(&segbuf->sb_list); in nilfs_segbuf_new()
44 INIT_LIST_HEAD(&segbuf->sb_segsum_buffers); in nilfs_segbuf_new()
45 INIT_LIST_HEAD(&segbuf->sb_payload_buffers); in nilfs_segbuf_new()
46 segbuf->sb_super_root = NULL; in nilfs_segbuf_new()
48 init_completion(&segbuf->sb_bio_event); in nilfs_segbuf_new()
49 atomic_set(&segbuf->sb_err, 0); in nilfs_segbuf_new()
50 segbuf->sb_nbio = 0; in nilfs_segbuf_new()
52 return segbuf; in nilfs_segbuf_new()
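
Taken together, the entries at source lines 36-52 cover essentially the whole constructor; only the signature and the failure return are not listed. A hedged reconstruction follows (signature inferred from the body, the NULL return on allocation failure assumed from the if (unlikely(!segbuf)) guard); nilfs_segbuf_free(), listed just below, simply returns the object to the slab cache.

    struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
    {
            struct nilfs_segment_buffer *segbuf;

            segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
            if (unlikely(!segbuf))
                    return NULL;    /* assumed: failure path not in the listing */

            segbuf->sb_super = sb;
            INIT_LIST_HEAD(&segbuf->sb_list);
            INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
            INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
            segbuf->sb_super_root = NULL;

            init_completion(&segbuf->sb_bio_event);
            atomic_set(&segbuf->sb_err, 0);
            segbuf->sb_nbio = 0;

            return segbuf;
    }
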
55 void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_free() argument
57 kmem_cache_free(nilfs_segbuf_cachep, segbuf); in nilfs_segbuf_free()
60 void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum, in nilfs_segbuf_map() argument
63 segbuf->sb_segnum = segnum; in nilfs_segbuf_map()
64 nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start, in nilfs_segbuf_map()
65 &segbuf->sb_fseg_end); in nilfs_segbuf_map()
67 segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset; in nilfs_segbuf_map()
68 segbuf->sb_rest_blocks = in nilfs_segbuf_map()
69 segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; in nilfs_segbuf_map()
77 void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_map_cont() argument
80 segbuf->sb_segnum = prev->sb_segnum; in nilfs_segbuf_map_cont()
81 segbuf->sb_fseg_start = prev->sb_fseg_start; in nilfs_segbuf_map_cont()
82 segbuf->sb_fseg_end = prev->sb_fseg_end; in nilfs_segbuf_map_cont()
83 segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks; in nilfs_segbuf_map_cont()
84 segbuf->sb_rest_blocks = in nilfs_segbuf_map_cont()
85 segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1; in nilfs_segbuf_map_cont()
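
The two mapping routines differ only in where the partial segment starts: nilfs_segbuf_map() places it offset blocks into segment segnum, while nilfs_segbuf_map_cont() continues immediately after the blocks already accounted to the previous buffer. A sketch of the arithmetic, with the unlisted tail of each signature (offset, nilfs, prev) inferred from the bodies above:

    void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
                          unsigned long offset, struct the_nilfs *nilfs)
    {
            segbuf->sb_segnum = segnum;
            nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
                                    &segbuf->sb_fseg_end);

            /* partial segment starts "offset" blocks into the full segment */
            segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
            segbuf->sb_rest_blocks =
                    segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
    }

    /* continuation: start right behind the blocks accounted to prev */
    void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
                               struct nilfs_segment_buffer *prev)
    {
            segbuf->sb_segnum = prev->sb_segnum;
            segbuf->sb_fseg_start = prev->sb_fseg_start;
            segbuf->sb_fseg_end = prev->sb_fseg_end;
            segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
            segbuf->sb_rest_blocks =
                    segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
    }
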
88 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_set_next_segnum() argument
91 segbuf->sb_nextnum = nextnum; in nilfs_segbuf_set_next_segnum()
92 segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum); in nilfs_segbuf_set_next_segnum()
95 int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_extend_segsum() argument
99 bh = sb_getblk(segbuf->sb_super, in nilfs_segbuf_extend_segsum()
100 segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk); in nilfs_segbuf_extend_segsum()
104 nilfs_segbuf_add_segsum_buffer(segbuf, bh); in nilfs_segbuf_extend_segsum()
108 int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_extend_payload() argument
113 bh = sb_getblk(segbuf->sb_super, in nilfs_segbuf_extend_payload()
114 segbuf->sb_pseg_start + segbuf->sb_sum.nblocks); in nilfs_segbuf_extend_payload()
118 nilfs_segbuf_add_payload_buffer(segbuf, bh); in nilfs_segbuf_extend_payload()
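
Both extend helpers grab the next free block of the partial segment with sb_getblk() and append it to the corresponding buffer list; only the block index differs (sb_sum.nsumblk for summary blocks, sb_sum.nblocks for payload). A sketch of the summary variant, with the error path assumed; nilfs_segbuf_extend_payload() follows the same pattern via nilfs_segbuf_add_payload_buffer().

    int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
    {
            struct buffer_head *bh;

            /* next block after the summary blocks already allocated */
            bh = sb_getblk(segbuf->sb_super,
                           segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
            if (unlikely(!bh))
                    return -ENOMEM;         /* assumed: not in the listing */

            nilfs_segbuf_add_segsum_buffer(segbuf, bh);
            return 0;
    }
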
123 int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags, in nilfs_segbuf_reset() argument
128 segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0; in nilfs_segbuf_reset()
129 err = nilfs_segbuf_extend_segsum(segbuf); in nilfs_segbuf_reset()
133 segbuf->sb_sum.flags = flags; in nilfs_segbuf_reset()
134 segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary); in nilfs_segbuf_reset()
135 segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0; in nilfs_segbuf_reset()
136 segbuf->sb_sum.ctime = ctime; in nilfs_segbuf_reset()
137 segbuf->sb_sum.cno = cno; in nilfs_segbuf_reset()
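
nilfs_segbuf_reset() rewinds the per-log accounting in sb_sum, allocates the first summary block, and stamps the header fields for the new log. Roughly as follows; the parameter types for ctime/cno and the error return are inferred, not listed:

    int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags,
                           time64_t ctime, __u64 cno)
    {
            int err;

            segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
            err = nilfs_segbuf_extend_segsum(segbuf);
            if (unlikely(err))
                    return err;             /* assumed */

            segbuf->sb_sum.flags = flags;
            segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
            segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
            segbuf->sb_sum.ctime = ctime;
            segbuf->sb_sum.cno = cno;
            return 0;
    }
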
144 void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_fill_in_segsum() argument
149 bh_sum = list_entry(segbuf->sb_segsum_buffers.next, in nilfs_segbuf_fill_in_segsum()
155 raw_sum->ss_flags = cpu_to_le16(segbuf->sb_sum.flags); in nilfs_segbuf_fill_in_segsum()
156 raw_sum->ss_seq = cpu_to_le64(segbuf->sb_sum.seg_seq); in nilfs_segbuf_fill_in_segsum()
157 raw_sum->ss_create = cpu_to_le64(segbuf->sb_sum.ctime); in nilfs_segbuf_fill_in_segsum()
158 raw_sum->ss_next = cpu_to_le64(segbuf->sb_sum.next); in nilfs_segbuf_fill_in_segsum()
159 raw_sum->ss_nblocks = cpu_to_le32(segbuf->sb_sum.nblocks); in nilfs_segbuf_fill_in_segsum()
160 raw_sum->ss_nfinfo = cpu_to_le32(segbuf->sb_sum.nfinfo); in nilfs_segbuf_fill_in_segsum()
161 raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes); in nilfs_segbuf_fill_in_segsum()
163 raw_sum->ss_cno = cpu_to_le64(segbuf->sb_sum.cno); in nilfs_segbuf_fill_in_segsum()
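
The fill-in step copies the in-memory summary (sb_sum) into the on-disk struct nilfs_segment_summary at the head of the first summary buffer, converting each field to little-endian. A sketch; assignments that do not mention segbuf (magic number, header size, padding) are not in the listing and are only noted as a comment:

    void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
    {
            struct nilfs_segment_summary *raw_sum;
            struct buffer_head *bh_sum;

            bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
                                struct buffer_head, b_assoc_buffers);
            raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

            /* assumed: magic, header size and padding are set in elided lines */
            raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
            raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
            raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
            raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
            raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
            raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
            raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
            raw_sum->ss_cno      = cpu_to_le64(segbuf->sb_sum.cno);
    }
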
170 nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed) in nilfs_segbuf_fill_in_segsum_crc() argument
174 unsigned long size, bytes = segbuf->sb_sum.sumbytes; in nilfs_segbuf_fill_in_segsum_crc()
177 bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, in nilfs_segbuf_fill_in_segsum_crc()
188 list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers, in nilfs_segbuf_fill_in_segsum_crc()
197 static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_fill_in_data_crc() argument
205 bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head, in nilfs_segbuf_fill_in_data_crc()
212 list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers, in nilfs_segbuf_fill_in_data_crc()
216 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { in nilfs_segbuf_fill_in_data_crc()
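
Both checksum helpers walk the buffer lists with crc32_le(): the summary checksum covers sb_sum.sumbytes of summary data, while the data checksum additionally folds in every payload buffer. Below is a simplified sketch of the tail of the data-CRC walk; the exact byte offsets that skip the checksum fields themselves, and the highmem mapping flavour, vary by kernel version and are only approximated:

            /* remaining summary blocks after the first one ... */
            list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
                                         b_assoc_buffers)
                    crc = crc32_le(crc, bh->b_data, bh->b_size);

            /* ... then every payload block, in log order */
            list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                    void *kaddr = kmap_atomic(bh->b_page);

                    crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
                    kunmap_atomic(kaddr);
            }
            raw_sum->ss_datasum = cpu_to_le32(crc);
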
225 nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_fill_in_super_root_crc() argument
229 struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info; in nilfs_segbuf_fill_in_super_root_crc()
233 raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data; in nilfs_segbuf_fill_in_super_root_crc()
251 static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_clear() argument
253 nilfs_release_buffers(&segbuf->sb_segsum_buffers); in nilfs_segbuf_clear()
254 nilfs_release_buffers(&segbuf->sb_payload_buffers); in nilfs_segbuf_clear()
255 segbuf->sb_super_root = NULL; in nilfs_segbuf_clear()
263 struct nilfs_segment_buffer *segbuf; in nilfs_clear_logs() local
265 list_for_each_entry(segbuf, logs, sb_list) in nilfs_clear_logs()
266 nilfs_segbuf_clear(segbuf); in nilfs_clear_logs()
272 struct nilfs_segment_buffer *n, *segbuf; in nilfs_truncate_logs() local
274 segbuf = list_prepare_entry(last, logs, sb_list); in nilfs_truncate_logs()
275 list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) { in nilfs_truncate_logs()
276 list_del_init(&segbuf->sb_list); in nilfs_truncate_logs()
277 nilfs_segbuf_clear(segbuf); in nilfs_truncate_logs()
278 nilfs_segbuf_free(segbuf); in nilfs_truncate_logs()
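
nilfs_clear_logs() drops the buffers of every log on the list; nilfs_truncate_logs() additionally unlinks and frees every segment buffer that follows last. A sketch with the signatures inferred from the loop bodies:

    void nilfs_clear_logs(struct list_head *logs)
    {
            struct nilfs_segment_buffer *segbuf;

            list_for_each_entry(segbuf, logs, sb_list)
                    nilfs_segbuf_clear(segbuf);
    }

    void nilfs_truncate_logs(struct list_head *logs,
                             struct nilfs_segment_buffer *last)
    {
            struct nilfs_segment_buffer *n, *segbuf;

            /* iterate over the entries after last, or the whole list when
             * last is NULL */
            segbuf = list_prepare_entry(last, logs, sb_list);
            list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
                    list_del_init(&segbuf->sb_list);
                    nilfs_segbuf_clear(segbuf);
                    nilfs_segbuf_free(segbuf);
            }
    }
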
284 struct nilfs_segment_buffer *segbuf; in nilfs_write_logs() local
287 list_for_each_entry(segbuf, logs, sb_list) { in nilfs_write_logs()
288 ret = nilfs_segbuf_write(segbuf, nilfs); in nilfs_write_logs()
297 struct nilfs_segment_buffer *segbuf; in nilfs_wait_on_logs() local
300 list_for_each_entry(segbuf, logs, sb_list) { in nilfs_wait_on_logs()
301 err = nilfs_segbuf_wait(segbuf); in nilfs_wait_on_logs()
315 struct nilfs_segment_buffer *segbuf; in nilfs_add_checksums_on_logs() local
317 list_for_each_entry(segbuf, logs, sb_list) { in nilfs_add_checksums_on_logs()
318 if (segbuf->sb_super_root) in nilfs_add_checksums_on_logs()
319 nilfs_segbuf_fill_in_super_root_crc(segbuf, seed); in nilfs_add_checksums_on_logs()
320 nilfs_segbuf_fill_in_segsum_crc(segbuf, seed); in nilfs_add_checksums_on_logs()
321 nilfs_segbuf_fill_in_data_crc(segbuf, seed); in nilfs_add_checksums_on_logs()
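
The checksum pass visits each queued log once: the super-root CRC only for the log that carries a super root block, then the segment-summary CRC and the data CRC. Approximately:

    void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
    {
            struct nilfs_segment_buffer *segbuf;

            list_for_each_entry(segbuf, logs, sb_list) {
                    if (segbuf->sb_super_root)
                            nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
                    nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
                    nilfs_segbuf_fill_in_data_crc(segbuf, seed);
            }
    }
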
330 struct nilfs_segment_buffer *segbuf = bio->bi_private; in nilfs_end_bio_write() local
333 atomic_inc(&segbuf->sb_err); in nilfs_end_bio_write()
336 complete(&segbuf->sb_bio_event); in nilfs_end_bio_write()
339 static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_submit_bio() argument
346 if (segbuf->sb_nbio > 0 && in nilfs_segbuf_submit_bio()
347 bdi_write_congested(segbuf->sb_super->s_bdi)) { in nilfs_segbuf_submit_bio()
348 wait_for_completion(&segbuf->sb_bio_event); in nilfs_segbuf_submit_bio()
349 segbuf->sb_nbio--; in nilfs_segbuf_submit_bio()
350 if (unlikely(atomic_read(&segbuf->sb_err))) { in nilfs_segbuf_submit_bio()
358 bio->bi_private = segbuf; in nilfs_segbuf_submit_bio()
361 segbuf->sb_nbio++; in nilfs_segbuf_submit_bio()
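
The completion handler and the submit path implement a small in-flight counter: every submitted bio increments sb_nbio, completions signal sb_bio_event, and write errors are tallied in sb_err. When the backing device reports write congestion, the submitter reaps one completion before issuing the next bio. A sketch of that throttling step (surrounding bio construction omitted, error unwinding assumed; note that bdi_write_congested() was removed from later kernels):

            /* throttle: with at least one bio in flight and the bdi congested,
             * reap one completion before submitting more */
            if (segbuf->sb_nbio > 0 &&
                bdi_write_congested(segbuf->sb_super->s_bdi)) {
                    wait_for_completion(&segbuf->sb_bio_event);
                    segbuf->sb_nbio--;
                    if (unlikely(atomic_read(&segbuf->sb_err))) {
                            bio_put(bio);           /* assumed unwinding */
                            return -EIO;            /* assumed */
                    }
            }

            bio->bi_end_io = nilfs_end_bio_write;   /* assumed wiring */
            bio->bi_private = segbuf;
            submit_bio(bio);
            segbuf->sb_nbio++;
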
397 static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_prepare_write() argument
401 wi->rest_blocks = segbuf->sb_sum.nblocks; in nilfs_segbuf_prepare_write()
405 wi->blocknr = segbuf->sb_pseg_start; in nilfs_segbuf_prepare_write()
408 static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_submit_bh() argument
429 err = nilfs_segbuf_submit_bio(segbuf, wi, mode, 0); in nilfs_segbuf_submit_bh()
448 static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, in nilfs_segbuf_write() argument
456 nilfs_segbuf_prepare_write(segbuf, &wi); in nilfs_segbuf_write()
458 list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) { in nilfs_segbuf_write()
459 res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE); in nilfs_segbuf_write()
464 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { in nilfs_segbuf_write()
465 res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE); in nilfs_segbuf_write()
475 res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE, in nilfs_segbuf_write()
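
nilfs_segbuf_write() streams one log to disk in on-disk order: every summary buffer first, then every payload buffer, batching buffers into bios through nilfs_segbuf_submit_bh() and flushing the final partial bio with nilfs_segbuf_submit_bio(). In outline; the error labels, the nilfs_write_info glue, and the flag on the last submission (truncated at line 475) are assumed:

    static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                                  struct the_nilfs *nilfs)
    {
            struct nilfs_write_info wi;
            struct buffer_head *bh;
            int res = 0;

            wi.nilfs = nilfs;                       /* assumed glue */
            nilfs_segbuf_prepare_write(segbuf, &wi);

            /* summary blocks first ... */
            list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
                    res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
                    if (unlikely(res))
                            goto out;               /* assumed error path */
            }

            /* ... then payload blocks, in log order */
            list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
                    res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
                    if (unlikely(res))
                            goto out;               /* assumed error path */
            }

            /* flush whatever remains in the last, partially filled bio;
             * the guard and the extra flag are assumed */
            res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE, REQ_SYNC);
    out:
            return res;
    }
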
492 static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) in nilfs_segbuf_wait() argument
496 if (!segbuf->sb_nbio) in nilfs_segbuf_wait()
500 wait_for_completion(&segbuf->sb_bio_event); in nilfs_segbuf_wait()
501 } while (--segbuf->sb_nbio > 0); in nilfs_segbuf_wait()
503 if (unlikely(atomic_read(&segbuf->sb_err) > 0)) { in nilfs_segbuf_wait()
504 nilfs_err(segbuf->sb_super, in nilfs_segbuf_wait()
506 (unsigned long long)segbuf->sb_pseg_start, in nilfs_segbuf_wait()
507 segbuf->sb_sum.nblocks, in nilfs_segbuf_wait()
508 (unsigned long long)segbuf->sb_segnum); in nilfs_segbuf_wait()
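
nilfs_segbuf_wait() reaps exactly sb_nbio completions and then reports a single -EIO if any bio recorded an error, logging the block range that was being written. In outline (message text and return values approximated where not listed):

    static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
    {
            int err = 0;

            if (!segbuf->sb_nbio)
                    return 0;                       /* nothing was submitted */

            do {
                    wait_for_completion(&segbuf->sb_bio_event);
            } while (--segbuf->sb_nbio > 0);

            if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
                    nilfs_err(segbuf->sb_super,
                              "I/O error writing log (start blocknr %llu, %lu blocks, segment %llu)",
                              (unsigned long long)segbuf->sb_pseg_start,
                              segbuf->sb_sum.nblocks,
                              (unsigned long long)segbuf->sb_segnum);
                    err = -EIO;                     /* assumed */
            }
            return err;
    }
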