/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif
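
/*
 * Illustrative sketch (not kernel documentation) of the reuse rules
 * described above, for a page obtained from alloc_pages().  "my_obj" is
 * a hypothetical, word-aligned object, so bit 0 of the pointer is clear;
 * the mapping field must be restored to NULL before the page is freed:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	page->mapping = (struct address_space *)my_obj;
 *	...
 *	page->mapping = NULL;	(mandatory before freeing)
 *	__free_pages(page, 0);
 */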

struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			struct list_head lru;
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling
			 * non-page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support, not supported on
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* Partial pages */
					struct page *next;
#ifdef CONFIG_64BIT
					int pages;	/* Nr of pages left */
#else
					short int pages;
#endif
				};
			};
			struct kmem_cache *slab_cache; /* not slob */
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				void *s_mem;	/* slab: first object */
				unsigned long counters;		/* SLUB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
			unsigned int compound_nr; /* 1 << compound_order */
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			atomic_t hpage_pinned_refcount;
			/* For both global and memcg */
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;

		unsigned int active;		/* SLAB */
		int units;			/* SLOB */
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

/**
 * struct folio - Represents a contiguous set of bytes.
 * @flags: Identical to the page flags.
 * @lru: Least Recently Used list; tracks how recently this folio was used.
 * @mapping: The file this page belongs to, or refers to the anon_vma for
 *    anonymous memory.
 * @index: Offset within the file, in units of pages.  For anonymous memory,
 *    this is the index from the beginning of the mmap.
 * @private: Filesystem per-folio data (see folio_attach_private()).
 *    Used for swp_entry_t if folio_test_swapcache().
 * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
 *    find out how many times this folio is mapped by userspace.
 * @_refcount: Do not access this member directly.  Use folio_ref_count()
 *    to find how many references there are to this folio.
 * @memcg_data: Memory Control Group data.
 *
 * A folio is a physically, virtually and logically contiguous set
 * of bytes.  It is a power-of-two in size, and it is aligned to that
 * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
 * in the page cache, it is at a file offset which is a multiple of that
 * power-of-two.  It may be mapped into userspace at an address which is
 * at an arbitrary page offset, but its kernel virtual address is aligned
 * to its size.
 */
struct folio {
	/* private: don't document the anon union */
	union {
		struct {
	/* public: */
			unsigned long flags;
			struct list_head lru;
			struct address_space *mapping;
			pgoff_t index;
			void *private;
			atomic_t _mapcount;
			atomic_t _refcount;
#ifdef CONFIG_MEMCG
			unsigned long memcg_data;
#endif
	/* private: the union with struct page is transitional */
		};
		struct page page;
	};
};

static_assert(sizeof(struct page) == sizeof(struct folio));
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(index, index);
FOLIO_MATCH(private, private);
FOLIO_MATCH(_mapcount, _mapcount);
FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
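
/*
 * Because of the layout asserts above, the two views of a head page
 * convert with a simple cast.  A conceptual sketch (the real helpers,
 * page_folio() and folio_page(), live in page-flags.h and mm.h):
 *
 *	struct folio *folio = (struct folio *)compound_head(page);
 *	struct page *head = &folio->page;
 */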

static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
{
	struct page *tail = &folio->page + 1;
	return &tail->compound_mapcount;
}

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline atomic_t *compound_pincount_ptr(struct page *page)
{
	return &page[2].hpage_pinned_refcount;
}
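
/*
 * Usage sketch: both counters live in the tail pages of a compound
 * page, and the mapcount is stored biased by -1 (like page->_mapcount),
 * so a reader holding a head page would do something like:
 *
 *	int mapcount = atomic_read(compound_mapcount_ptr(head)) + 1;
 *	int pincount = atomic_read(compound_pincount_ptr(head));
 */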

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

/*
 * page_private can be used on tail pages.  However, PagePrivate is only
 * checked by the VM on the head page.  So page_private on the tail pages
 * should be used for data that's ancillary to the head page (eg attaching
 * buffer heads to tail pages after attaching buffer heads to the head page)
 */
#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}

static inline void *folio_get_private(struct folio *folio)
{
	return folio->private;
}
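
/*
 * Example (sketch): filesystems normally go through
 * folio_attach_private()/folio_detach_private() (see pagemap.h), which
 * also take a folio reference and set the private flag; the raw
 * accessors above are the primitive underneath:
 *
 *	set_page_private(page, (unsigned long)bh);
 *	...
 *	bh = (struct buffer_head *)page_private(page);
 */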

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a
	 * fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};
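
/*
 * Sketch of how the bias is used by page_frag_alloc() (mm/page_alloc.c):
 * on refill, the page's _refcount is raised once by a large value and
 * pagecnt_bias is set to match; each fragment handed out only decrements
 * the local bias, and the shared _refcount is touched again only when
 * the bias is exhausted or the page is recycled.
 *
 *	void *buf = page_frag_alloc(&cache, 256, GFP_ATOMIC);
 *	...
 *	page_frag_free(buf);
 */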

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */

	/*
	 * Access permissions of this VMA.
	 * See vmf_insert_mixed_prot() for discussion.
	 */
	pgprot_t vm_page_prot;
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;

struct kioctx_table;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.  (See the lifetime sketch after
		 * this structure.)
		 */
		atomic_t mm_count;

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel configs, mmap_lock currently sits at
		 * offset 0x120 inside 'mm_struct', which is a good layout:
		 * its two hot fields, 'count' and 'owner', land in two
		 * different cachelines, so when mmap_lock is highly
		 * contended and both fields are accessed frequently, the
		 * layout reduces cache-line bouncing.
		 *
		 * So please be careful when adding new fields before
		 * mmap_lock, which could easily push the two fields into
		 * one cacheline.
		 */
		struct rw_semaphore mmap_lock;

		struct list_head mmlist; /* List of maybe swapped mm's.	These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */


		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		bool tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
		struct rcu_head delayed_drop;
#endif
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SUPPORT
		u32 pasid;
#endif
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};

extern struct mm_struct init_mm;
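
/*
 * Lifetime sketch for @mm_users/@mm_count as documented in mm_struct
 * above (the helpers live in linux/sched/mm.h):
 *
 *	mmgrab(mm);			(pins the mm_struct: mm_count)
 *	if (mmget_not_zero(mm)) {	(pins the address space: mm_users)
 *		... access user mappings ...
 *		mmput(mm);
 *	}
 *	mmdrop(mm);
 */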

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
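
/*
 * Example (sketch): architectures use this mask to track which CPUs may
 * hold TLB entries for the mm, e.g. from their switch_mm():
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(next));
 *	...
 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
 *		(flush or shoot down this CPU's TLB)
 */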

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
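
/*
 * Putting the four helpers together, the canonical usage pattern
 * (mirroring the ordering diagram in inc_tlb_flush_pending()) is:
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	... modify or clear PTEs ...
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 */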

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_WRITE:		Special case for get_user_pages
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte; does not return a page
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 *
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
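
/*
 * Example (illustrative sketch): callers test the returned bitmask
 * rather than comparing for equality, and the hstate index of a
 * poisoned huge page travels in the upper bits, e.g.:
 *
 *	vm_fault_t ret = handle_mm_fault(vma, address, flags, regs);
 *
 *	if (ret & VM_FAULT_ERROR)
 *		(deliver a signal or invoke the OOM killer)
 *	else if (ret & VM_FAULT_RETRY)
 *		(retry with the mmap lock dropped)
 *
 *	if (ret & VM_FAULT_HWPOISON_LARGE)
 *		hindex = VM_FAULT_GET_HINDEX(ret);
 */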

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,                 "OOM" },	\
	{ VM_FAULT_SIGBUS,              "SIGBUS" },	\
	{ VM_FAULT_MAJOR,               "MAJOR" },	\
	{ VM_FAULT_WRITE,               "WRITE" },	\
	{ VM_FAULT_HWPOISON,            "HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,             "SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,              "NOPAGE" },	\
	{ VM_FAULT_LOCKED,              "LOCKED" },	\
	{ VM_FAULT_RETRY,               "RETRY" },	\
	{ VM_FAULT_FALLBACK,            "FALLBACK" },	\
	{ VM_FAULT_DONE_COW,            "DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
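
/*
 * Example (illustrative sketch): a vdso-style special mapping backed by
 * a static, NULL-terminated page array, installed with
 * _install_special_mapping() (declared in linux/mm.h).  The names below
 * are hypothetical:
 *
 *	static struct page *my_pages[2];	(one page + NULL terminator)
 *	static const struct vm_special_mapping my_mapping = {
 *		.name  = "[my_special]",
 *		.pages = my_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &my_mapping);
 */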

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

 /*
  * A swap entry has to fit into an "unsigned long", as the entry is hidden
  * in the "index" field of the swapper address space.
  */
typedef struct {
	unsigned long val;
} swp_entry_t;

#endif /* _LINUX_MM_TYPES_H */