/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be associated either with a process or with a VMA.
 * For VMA-related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/preferred */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
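
/*
 * Illustrative sketch (not code from this header): a caller that needs a
 * policy to stay alive beyond the current context takes its own reference
 * and releases it when done, per the freeing rules above:
 *
 *	mpol_get(pol);		refcnt is incremented
 *	... use pol ...
 *	mpol_put(pol);		refcnt is decremented; freed when it hits zero
 */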

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
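
/*
 * Illustrative sketch: the usual pattern around a policy that may have
 * come from a shared policy tree.  get_vma_policy() stands in for any
 * lookup helper that can hand back a shared policy:
 *
 *	pol = get_vma_policy(vma, addr);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);	drops the ref only if MPOL_F_SHARED is set
 */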

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
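
/*
 * Note: per the duplication rules above, a mempolicy returned by
 * mpol_dup() is owned by the caller (refcnt == 1) and must eventually
 * be released with mpol_put().
 */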

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
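
/*
 * mpol_equal() is used, for example, by the VMA merging code to decide
 * whether two adjacent mappings carry compatible policies.
 */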

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};
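
/*
 * Illustrative sketch (simplified tmpfs-style usage): shared memory code
 * looks policies up by page index rather than by address; "info" stands
 * in for whatever per-object data embeds the shared_policy:
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, index);
 *	... allocate the page according to pol ...
 *	mpol_cond_put(pol);	the lookup returned its own reference
 */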

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
				const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

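/*
 * Convenience wrapper: the nodemask (if any) that restricts an allocation
 * made under the calling task's policy.  May return NULL when the policy
 * does not constrain the allocation to specific nodes.
 */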
static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	struct mempolicy *mpol = get_task_policy(current);

	return policy_nodemask(gfp, mpol);
}

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

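/*
 * policy_zone tracks the highest zone that memory policies apply to;
 * allocations for zones below it are not policied.  ZONE_MOVABLE is
 * skipped here, presumably because it borrows its memory from the
 * other zones rather than covering a zone of its own.
 */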
static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

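/*
 * Policy <-> string conversion, used e.g. for the tmpfs "mpol=" mount
 * option and for displaying policies in /proc/<pid>/numa_maps.
 */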
#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return (pol->mode == MPOL_PREFERRED_MANY);
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	return NULL;
}

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}

#endif /* CONFIG_NUMA */
#endif