/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks should be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE	(PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set, because that causes
 * a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
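
/*
 * For illustration, BUFFER_FNS(Uptodate, uptodate) expands (roughly,
 * modulo whitespace) to:
 *
 *	static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Uptodate, &(bh)->b_state))
 *			set_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_uptodate(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 */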

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
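
/*
 * A sketch of the test-and-modify forms in action: this is essentially
 * the pattern sync_dirty_buffer() (declared below) uses to write a
 * buffer out only if it was actually dirty.
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE, 0, bh);
 *	} else
 *		unlock_buffer(bh);
 */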

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
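
/*
 * A minimal sketch of walking a page's buffers: b_this_page links them
 * into a circular list, so iteration stops once it wraps back around to
 * the head.
 *
 *	struct buffer_head *head = page_buffers(page);
 *	struct buffer_head *bh = head;
 *
 *	do {
 *		if (buffer_dirty(bh))
 *			dirty = true;
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 */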

void buffer_check_dirty_writeback(struct page *page,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		 struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry);
void create_empty_buffers(struct page *, unsigned long,
			  unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
		      gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block,
			    struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      unsigned flags, struct page **pagep,
		      get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
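
/*
 * A sketch of the intended use, in a hypothetical filesystem's
 * ->page_mkwrite() handler (foofs_get_block is a placeholder for the
 * filesystem's get_block_t implementation):
 *
 *	static vm_fault_t foofs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf,
 *					     foofs_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */
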
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
		     struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
		   loff_t, unsigned, unsigned,
		   struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
		   struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
	__breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
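
/*
 * A sketch of how a get_block_t implementation typically uses map_bh(),
 * for a hypothetical filesystem. foofs_lookup_block is a placeholder
 * for the logical-to-physical block translation; block allocation is
 * omitted and 0 is treated as a hole, purely for brevity.
 *
 *	static int foofs_get_block(struct inode *inode, sector_t iblock,
 *				   struct buffer_head *bh, int create)
 *	{
 *		sector_t phys = foofs_lookup_block(inode, iblock);
 *
 *		if (phys)
 *			map_bh(bh, inode->i_sb, phys);
 *		return 0;
 *	}
 */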

static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}
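
/*
 * A sketch of the synchronous-read pattern that bh_submit_read()
 * (declared above) implements: lock the buffer, submit it for read if
 * it is not already uptodate, then wait for the I/O to complete.
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh)) {
 *		unlock_buffer(bh);
 *		return 0;
 *	}
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(REQ_OP_READ, 0, bh);
 *	wait_on_buffer(bh);
 *	return buffer_uptodate(bh) ? 0 : -EIO;
 */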

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
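
/*
 * One common lock/modify/unlock sequence, sketched here for
 * initialising a freshly allocated metadata buffer:
 *
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 */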

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
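
/*
 * A sketch of the usual __bread()/brelse() pairing (dst is a
 * placeholder destination buffer):
 *
 *	struct buffer_head *bh = __bread(bdev, block, size);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(dst, bh->b_data, bh->b_size);
 *	brelse(bh);
 */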

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */