/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   more. Once (if ever) freed, PG_reserved is cleared and they will be
 *   given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does not generally hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). Private allocations may
 * also use it for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they can simply be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */
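
/*
 * Illustrative sketch only (not part of this header): how a read path might
 * drive the PG_locked/PG_uptodate lifecycle described above. The helper
 * my_read_into_page() is hypothetical; lock_page(), PageUptodate(),
 * SetPageUptodate() and unlock_page() are the real interfaces.
 *
 *	lock_page(page);			// sets PG_locked, pins the page
 *	if (!PageUptodate(page)) {
 *		int err = my_read_into_page(page);	// issue + wait for I/O
 *		if (!err)
 *			SetPageUptodate(page);	// contents now valid
 *	}
 *	unlock_page(page);			// clears PG_locked, wakes waiters
 */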

/*
 * Don't use the pageflags directly. Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
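
/*
 * A minimal sketch of how the fields area is consumed, assuming the usual
 * shift/mask definitions from <linux/mm.h> (the exact layout varies with
 * the config):
 *
 *	zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	nid  = (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 *
 * Everything below NR_PAGEFLAGS remains available for the flag bits of
 * enum pageflags below.
 */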
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_mappedtodisk,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	/* Bit 0 set means this is a tail page; the rest points at the head. */
	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page;
}

#define compound_head(page)	((typeof(page))_compound_head(page))
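
/*
 * Minimal usage sketch (assumption: "page" may be any small, head or tail
 * page): compound_head() is the canonical way to reach the head page, and
 * the typeof() cast above preserves constness.
 *
 *	struct page *head = compound_head(page);
 *	const struct page *chead = compound_head((const struct page *)page);
 */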

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio. This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p. If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
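
/*
 * Sketch, not a real helper in this header: zeroing every page of a folio,
 * assuming folio_nr_pages() from <linux/mm.h> and clear_highpage() from
 * <linux/highmem.h> are available to the caller.
 *
 *	long i, n = folio_nr_pages(folio);
 *
 *	for (i = 0; i < n; i++)
 *		clear_highpage(folio_page(folio, i));
 */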

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     checks whether this struct page is poisoned/uninitialized.
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages;
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)	\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)	\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)			\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)
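
/*
 * For orientation, a sketch of what one invocation provides (derived
 * directly from the macros above):
 *
 * PAGEFLAG(Dirty, dirty, PF_HEAD) generates
 *	folio_test_dirty(folio)  / PageDirty(page)	- test PG_dirty
 *	folio_set_dirty(folio)   / SetPageDirty(page)	- set PG_dirty
 *	folio_clear_dirty(folio) / ClearPageDirty(page)	- clear PG_dirty
 *
 * with the PF_HEAD policy redirecting the page variants to the head page
 * of a compound page.
 */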

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return 0; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }	\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname, lname)				\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return 0; }								\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return 0; }								\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback. The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page; page->mapping then points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
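
/*
 * A sketch of how these low bits are consumed (the authoritative decoding
 * lives in mm/rmap.c and mm/util.c): mask the flag bits off page->mapping
 * before dereferencing it.
 *
 *	unsigned long m = (unsigned long)page->mapping;
 *
 *	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(m & ~PAGE_MAPPING_FLAGS);
 */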

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}
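
/*
 * The barriers above pair as follows (illustrative only):
 *
 *	writer:					reader:
 *	  ...store folio contents...		  if (folio_test_uptodate(folio))
 *	  folio_mark_uptodate(folio);			...load folio contents...
 *
 * The smp_wmb() before set_bit() orders the content stores before the flag
 * store; the smp_rmb() after a successful test_bit() orders the flag load
 * before the content loads, so the reader is guaranteed to see the writer's
 * stores.
 */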

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)	\
	__folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
	folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
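
/*
 * Sketch of the encoding used by set_compound_head(): struct page is always
 * at least word-aligned, so bit 0 of a page pointer is free. Storing
 * head + 1 therefore both marks the page as a tail (PageTail() checks bit 0)
 * and records the head, which _compound_head() recovers by subtracting 1.
 *
 *	set_compound_head(tail, head);	// tail->compound_head == head | 1
 *	PageTail(tail);			// true
 *	compound_head(tail);		// == head
 */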

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge pages
 * and hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For pages with PageDoubleMap set, ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
PAGEFLAG_FALSE(DoubleMap, double_map)
	TESTSCFLAG_FALSE(DoubleMap, double_map)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve 0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
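
/*
 * Worked example of the inverted encoding (the values follow directly from
 * the definitions above):
 *
 *	page->page_type == 0xffffffff	// initial -1: no type set
 *	__SetPageBuddy(page);		// clears PG_buddy -> 0xffffff7f
 *	PageBuddy(page);		// (0xffffff7f & 0xf0000080) == 0xf0000000 -> true
 *	__ClearPageBuddy(page);	// sets PG_buddy back -> 0xffffffff
 *
 * A value whose high bits are not PAGE_TYPE_BASE (e.g. a mapcount that has
 * under- or overflowed into the reserved range) fails the PageType()
 * comparison, which is what keeps mapcounts and page types apart.
 */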

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined. A
 * driver that relies on this feature must be aware that re-onlining the
 * memory block will require marking the pages PageOffline() again and not
 * giving them to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

/*
 * A version of PageSlabPfmemalloc() for opportunistic checks where the page
 * might have been freed under us and not be a PageSlab anymore.
 */
static inline int __PageSlabPfmemalloc(struct page *page)
{
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)
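
/*
 * Sketch of the intended use (the authoritative checks live in
 * mm/page_alloc.c on the free path; the reason string here is made up):
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page, "flags set on free");
 */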

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(PAGEFLAGS_MASK & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */