/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

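/*
 * Illustrative sketch (not part of the original header): the lockless
 * fastpaths in mm/slub.c update freelist and tid as a single unit with
 * this_cpu_cmpxchg_double(), which is why the comment above asks that the
 * two fields stay adjacent and suitably aligned.  A build-time check along
 * the following lines would catch accidental reordering; it assumes
 * <linux/build_bug.h> and offsetof() are available at this point.
 */
static_assert(offsetof(struct kmem_cache_cpu, tid) ==
	      offsetof(struct kmem_cache_cpu, freelist) + sizeof(void *),
	      "freelist and tid must stay adjacent for cmpxchg_double");
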
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

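/*
 * Illustrative sketch (not part of the original header): how the macros
 * above are meant to be used.  The per-cpu partial list is a singly linked
 * chain of frozen slab pages threaded through page->next, and
 * slub_set_percpu_partial(c, p) advances the list head past @p.  The helper
 * name below is hypothetical.
 */
static inline struct page *slub_example_pop_cpu_partial(struct kmem_cache_cpu *c)
{
	struct page *page = slub_percpu_partial(c);

	if (page)
		slub_set_percpu_partial(c, page);	/* c->partial = page->next */
	return page;
}
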
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

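/*
 * Illustrative sketch (not part of the original header): mm/slub.c packs
 * the page order into the high bits of 'x' and the object count into the
 * low bits (see oo_make(), oo_order() and oo_objects() there, where the
 * split is OO_SHIFT == 16).  The hypothetical helpers below decode the
 * value under that assumption.
 */
static inline unsigned int slub_example_oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> 16;
}

static inline unsigned int slub_example_oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & ((1U << 16) - 1);
}
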
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial pages to keep around */
	unsigned int cpu_partial_pages;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

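/*
 * Illustrative sketch (not part of the original header): the stat[] array
 * in struct kmem_cache_cpu is indexed by enum stat_item.  This loosely
 * mirrors the stat() helper in mm/slub.c, which bumps the counter with a
 * cheap, possibly racy per-cpu increment when CONFIG_SLUB_STATS is set.
 */
static inline void slub_example_stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
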
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

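/*
 * Map an arbitrary address within a slab page to the start of the object
 * containing it, clamping to the last object in the page and adjusting for
 * any left redzone padding via fixup_red_left().
 */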
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

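/*
 * As above, but takes the slab page itself and treats KFENCE-managed
 * objects specially: they do not come from the cache's own slab layout,
 * so they are simply reported as index 0.
 */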
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, page_address(page), obj);
}

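/* Number of objects in a slab page, as recorded when the slab was created */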
static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */