/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd)	(0)
#define __hugepd(x)		((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct pages to store the metadata. In order to avoid conflicts caused by
 * subsequent use of more tail struct pages, we gather these discrete indexes
 * of tail struct pages here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};
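
/*
 * For illustration: each index is an offset, in units of struct page, from
 * the head page, and the metadata lives in that tail page's ->private
 * field. The subpool pointer, for example, is read back as (see
 * hugetlb_page_subpool() below):
 *
 *	subpool = (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 */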

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
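
/*
 * Illustrative sketch, not a contract: hugetlbfs creates one subpool per
 * mount to back its "size=" and "min_size=" options, roughly:
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	...
 *	hugepage_put_subpool(spool);
 *
 * Pages charged to the subpool count against max_hpages; min_hpages pages
 * are reserved from the global pool up front to guarantee the minimum.
 */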

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
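
/*
 * Worked example of the [from, to) convention above: after reserving the
 * first four huge pages of a mapping, the resv_map holds one region
 * { .from = 0, .to = 4 }, i.e. 4 - 0 = 4 huge pages. Also reserving pages
 * 6 and 7 would add a second, discontiguous region { .from = 6, .to = 8 }.
 */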

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
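
/*
 * Typical use of for_each_hstate(), a sketch of what size_to_hstate()
 * does in mm/hugetlb.c:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		if (huge_page_size(h) == size)
 *			return h;
 *	return NULL;
 */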

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
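
/*
 * Faults on the same (mapping, index) pair are serialized through the
 * fault mutex table; the usual caller pattern (sketch) is:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */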

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
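
/*
 * These values are passed as the creat_flags argument of
 * hugetlb_file_setup(). As an illustrative sketch (the actual call site
 * lives in the mmap() path), an anonymous MAP_HUGETLB mapping is backed
 * by a file on the internal mount:
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				HUGETLB_ANONHUGE_INODE,
 *				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 */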

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to the page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
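
/*
 * For example, HPAGEFLAG(Freed, freed) above expands (with
 * CONFIG_HUGETLB_PAGE set) to:
 *
 *	static inline int HPageFreed(struct page *page)
 *	{ return test_bit(HPG_freed, &(page->private)); }
 *	static inline void SetHPageFreed(struct page *page)
 *	{ set_bit(HPG_freed, &(page->private)); }
 *	static inline void ClearHPageFreed(struct page *page)
 *	{ clear_bit(HPG_freed, &(page->private)); }
 */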

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
	unsigned int nr_free_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
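
/*
 * Worked example, assuming a 2MB huge page on a 4KB PAGE_SIZE system:
 * order = 9, so huge_page_size() = 4KB << 9 = 2MB, huge_page_shift() =
 * 9 + 12 = 21, pages_per_huge_page() = 512 and blocks_per_huge_page() =
 * 2MB / 512 = 4096 sectors.
 */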

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check: it determines
 * whether a huge page should be placed in a movable zone. That is only
 * relevant for huge page sizes that support migration in the first place;
 * there is no reason for a huge page to be movable if it is not migratable
 * to start with. The huge page size must also be large enough to be placed
 * in a movable zone yet still feasible to migrate; mere presence in a
 * movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;
#else
#define hugetlb_free_vmemmap_enabled	false
#endif

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
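
/*
 * Typical usage (sketch): take the lock covering one huge PTE, operate on
 * the entry, then drop the lock:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */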

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */