// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>

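/*
 * Try to mark the pageblock containing @page as MIGRATE_ISOLATE: take
 * zone->lock, bail out with -EBUSY if the block is already isolated or
 * still holds unmovable pages, otherwise move its free pages to the
 * MIGRATE_ISOLATE freelist and fix up the freepage counters.
 */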
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone = page_zone(page);
	struct page *unmovable;
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us.
	 */
	if (is_migrate_isolate_page(page)) {
		spin_unlock_irqrestore(&zone->lock, flags);
		return -EBUSY;
	}

	/*
	 * FIXME: Memory hotplug does not call shrink_slab() by itself yet,
	 * so we only check for movable pages here.
	 */
	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
	if (!unmovable) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
		spin_unlock_irqrestore(&zone->lock, flags);
		return 0;
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (isol_flags & REPORT_FAILURE) {
		/*
		 * printk() with zone->lock held will likely trigger a
		 * lockdep splat, so defer it here.
		 */
		dump_page(unmovable, "unmovable page");
	}

	return -EBUSY;
}

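/*
 * Revert an isolated pageblock to @migratetype. If a free page covering
 * the whole block (order >= pageblock_order) exists, it is isolated and
 * freed again so it can merge with its buddy; otherwise the block's free
 * pages are simply moved back to the @migratetype freelist.
 */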
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because free pages on an isolated pageblock are restricted from
	 * merging beyond pageblock_order (to avoid freepage counting
	 * problems), an unmerged free buddy page may exist.
	 * move_freepages_block() does not handle merging, so we need a
	 * different approach: isolating the page and freeing it again
	 * makes the buddies merge.
	 */
	if (PageBuddy(page)) {
		order = buddy_order(page);
		if (order >= pageblock_order && order < MAX_ORDER - 1) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (!is_migrate_isolate_page(buddy)) {
				isolated_page = !!__isolate_free_page(page, order);
				/*
				 * Isolating a free page in an isolated pageblock
				 * is expected to always work as watermarks don't
				 * apply here.
				 */
				VM_WARN_ON(!isolated_page);
			}
		}
	}

	/*
	 * If we isolated a free page with order >= pageblock_order, there
	 * should be no other free pages in the range, so we can skip the
	 * costly pageblock scan needed to move the free pages.
	 *
	 * We didn't actually touch any of the isolated pages, so place them
	 * at the tail of the freelist. This is an optimization for memory
	 * onlining - just onlined memory won't immediately be considered for
	 * allocation.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	if (isolated_page)
		__putback_isolated_page(page, order, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
}

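/*
 * Return the first online page in the @nr_pages pfns starting at @pfn,
 * or NULL if every pfn in the range is offline or invalid.
 */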
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined in
 *			a bit mask)
 *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
 *					 e.g., skip over PageHWPoison() pages
 *					 and PageOffline() pages.
 *			REPORT_FAILURE - report details about the failure to
 *					 isolate the range
 *
 * Setting the page-allocation-type to MIGRATE_ISOLATE means that free pages
 * in the range will never be allocated. Any free pages and pages freed in the
 * future will not be allocated again. If the specified range includes migrate
 * types other than MOVABLE or CMA, this will fail with -EBUSY. To finish
 * isolating all pages in the range, the caller has to free all pages in it;
 * test_pages_isolated() can be used to check whether that has happened.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate(), which then returns an error.
 * We then clean up by restoring the migration type on pageblocks we may have
 * modified and return -EBUSY to the caller. This prevents two threads from
 * simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * A call to drain_all_pages() after isolation can flush most of them. However
 * in some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, zone_pcp_disable/enable()
 * might be used to flush and disable the pcplists before isolation and to
 * re-enable them after unisolation.
 *
 * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && set_migratetype_isolate(page, migratetype, flags)) {
			undo_isolate_page_range(start_pfn, pfn, migratetype);
			return -EBUSY;
		}
	}
	return 0;
}

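/*
 * Example (illustrative only): a minimal sketch of how a caller such as
 * alloc_contig_range() or memory offlining is expected to drive this API:
 * isolate, migrate away any allocated pages, verify, then undo. The helper
 * below is hypothetical and not part of this file's interface; the
 * migration step is elided because it is caller specific.
 */
static int __maybe_unused example_isolate_range(unsigned long start_pfn,
						unsigned long end_pfn)
{
	int ret;

	/* Both PFNs must be pageblock-aligned and within a single zone. */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, REPORT_FAILURE);
	if (ret)
		return ret;

	/*
	 * ... migrate or reclaim any allocated pages in the range here,
	 * then flush the pcplists so freed pages reach the buddy lists ...
	 */
	drain_all_pages(page_zone(pfn_to_page(start_pfn)));

	/* Succeeds only once every page in the range sits on a free list. */
	ret = test_pages_isolated(start_pfn, end_pfn, 0);

	/* The caller must eventually restore the original migratetype. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}
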
/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}
/*
 * Test whether all pages in the range are free (i.e. isolated) or not.
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  int flags)
{
	struct page *page;

	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that with VM_BUG_ON(), though.
			 */
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
			 !page_count(page))
			/*
			 * The responsible driver agreed to skip PageOffline()
			 * pages when offlining memory by dropping its
			 * reference in MEM_GOING_OFFLINE.
			 */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			int isol_flags)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;
	int ret;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so a chunk of free pages is
	 * not necessarily aligned to pageblock_nr_pages. Therefore, we check
	 * the migratetype of each pageblock first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page) {
		ret = -EBUSY;
		goto out;
	}

	/* Check all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn, isol_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	ret = pfn < end_pfn ? -EBUSY : 0;

out:
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return ret;
}
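
/*
 * Example (illustrative only): a hypothetical sketch of the stronger
 * variant described in start_isolate_page_range()'s documentation, where
 * the pcplists are disabled around the isolate/test sequence so freed
 * pages cannot linger on per-cpu lists. The helper name and the exact
 * flow are assumptions, not the actual memory-offlining implementation.
 */
static int __maybe_unused example_isolate_range_strict(unsigned long start_pfn,
						       unsigned long end_pfn)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	int ret;

	/* Flush pcplists and keep them disabled while we isolate and test. */
	zone_pcp_disable(zone);

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, MEMORY_OFFLINE);
	if (!ret) {
		ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);
		/* On failure, restore the previous migratetype ourselves. */
		if (ret)
			undo_isolate_page_range(start_pfn, end_pfn,
						MIGRATE_MOVABLE);
	}

	zone_pcp_enable(zone);
	return ret;
}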