/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/huge_mm.h>
#include <linux/swap.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

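/* Legacy page-based wrapper: resolves the page to its folio and tests that. */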
static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

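/*
 * Adjust the LRU size accounting by @nr_pages pages (negative to shrink):
 * the per-node vmstat counter, the per-zone counter for @zid and, with
 * CONFIG_MEMCG, the memcg's per-zone LRU size for this lruvec.
 */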
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the LRU flags of a folio before releasing it.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

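/* Legacy page-based wrapper around __folio_clear_lru_flags(). */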
static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}

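/*
 * Account the folio's pages for its LRU list and link the folio at the
 * head of that list in @lruvec.
 */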
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	list_add(&folio->lru, &lruvec->lists[lru]);
}

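/* Legacy page-based wrapper around lruvec_add_folio(). */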
static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

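/* As lruvec_add_folio(), but link the folio at the tail of its LRU list. */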
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

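/* Legacy page-based wrapper around lruvec_add_folio_tail(). */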
static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}

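/*
 * Unlink the folio from whichever LRU list it is on and subtract its
 * pages from the LRU size accounting.
 */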
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	list_del(&folio->lru);
	update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio),
			-folio_nr_pages(folio));
}

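/* Legacy page-based wrapper around lruvec_del_folio(). */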
static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}

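/*
 * Illustrative sketch only, not part of this header: a caller wanting to
 * rotate a folio to the tail of its current LRU list could pair the helpers
 * above roughly like this, assuming it already holds the lruvec lock and has
 * looked up the right lruvec for the folio:
 *
 *	if (folio_test_lru(folio)) {
 *		lruvec_del_folio(lruvec, folio);
 *		lruvec_add_folio_tail(lruvec, folio);
 *	}
 *
 * The locking and lruvec lookup are the caller's responsibility and are not
 * shown here.
 */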
#endif