/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
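
/*
 * Illustrative values (a sketch, assuming BITS_PER_LONG == 64): for n == 2,
 * little-endian gives aligned_byte_mask(2) == 0xffff, covering the two
 * lowest-addressed bytes of the loaded word, while big-endian gives
 * 0xffff000000000000, since there the first bytes in memory land in the
 * most significant bit positions.
 */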

#define BITS_PER_TYPE(type)     (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)       __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)         __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)         __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)       __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
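
/*
 * Arithmetic sketch (assuming a 64-bit long): BITS_TO_LONGS(64) == 1 and
 * BITS_TO_LONGS(65) == 2, i.e. the number of longs needed to store @nr
 * bits, rounded up; likewise BITS_TO_BYTES(12) == 2.
 */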

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#define for_each_set_bit(bit, addr, size) \
        for ((bit) = find_first_bit((addr), (size));            \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))
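
/*
 * Usage sketch ('map' and 'bit' are hypothetical locals, not part of this
 * header; DECLARE_BITMAP() and bitmap_zero() come from linux/bitmap.h):
 *
 *	DECLARE_BITMAP(map, 64);
 *	unsigned int bit;
 *
 *	bitmap_zero(map, 64);
 *	__set_bit(3, map);
 *	__set_bit(42, map);
 *	for_each_set_bit(bit, map, 64)
 *		pr_info("bit %u set\n", bit);	(prints 3, then 42)
 */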

/* same as for_each_set_bit() but uses @bit as the value to start from */
#define for_each_set_bit_from(bit, addr, size) \
        for ((bit) = find_next_bit((addr), (size), (bit));      \
             (bit) < (size);                                    \
             (bit) = find_next_bit((addr), (size), (bit) + 1))

#define for_each_clear_bit(bit, addr, size) \
        for ((bit) = find_first_zero_bit((addr), (size));       \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/* same as for_each_clear_bit() but uses @bit as the value to start from */
#define for_each_clear_bit_from(bit, addr, size) \
        for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit((addr), (size), (bit) + 1))

/**
 * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
 * @start: bit offset to start search and to store the current iteration offset
 * @clump: location to store copy of current 8-bit clump
 * @bits: bitmap address to base the search on
 * @size: bitmap size in number of bits
 */
#define for_each_set_clump8(start, clump, bits, size) \
        for ((start) = find_first_clump8(&(clump), (bits), (size)); \
             (start) < (size); \
             (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
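
/*
 * Usage sketch ('map', 'start' and 'clump' are hypothetical locals):
 *
 *	DECLARE_BITMAP(map, 32);
 *	unsigned long clump;
 *	unsigned int start;
 *
 *	for_each_set_clump8(start, clump, map, 32)
 *		pr_info("clump of %02lx at bit %u\n", clump, start);
 *
 * @start advances in multiples of 8, @clump receives a copy of the 8 bits
 * at that offset, and all-zero clumps are skipped.
 */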

static inline int get_bitmask_order(unsigned int count)
{
        int order;

        order = fls(count);
        return order;   /* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
        return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
        return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
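
/*
 * Rotation sketch (plain arithmetic, applies to the whole rol/ror family
 * below): rol64(0x8000000000000001ULL, 1) == 0x3 and
 * ror32(0x80000001U, 1) == 0xc0000000. Masking the shift count with the
 * type width minus one keeps rotates by 0 or by the full width defined.
 */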

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
        return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
        return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
        return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
        return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
        return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
        return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0 <= index < 32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
        __u8 shift = 31 - index;
        return (__s32)(value << shift) >> shift;
}
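
/*
 * Example (plain arithmetic): with bit 7 as the sign bit,
 * sign_extend32(0x80, 7) == -128 and sign_extend32(0x7f, 7) == 127,
 * i.e. an 8-bit two's complement field is widened to a signed 32-bit
 * value.
 */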

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0 <= index < 64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
        __u8 shift = 63 - index;
        return (__s64)(value << shift) >> shift;
}

static inline unsigned int fls_long(unsigned long l)
{
        if (sizeof(l) == 4)
                return fls(l);
        return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
        if (count == 0)
                return -1;

        return fls(--count);
}
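
/*
 * Example (plain arithmetic): get_count_order(15) == 4,
 * get_count_order(16) == 4 and get_count_order(17) == 5, i.e. the order
 * of the power of two that @count is rounded up to.
 */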

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * Same as get_count_order(), but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
        if (l == 0UL)
                return -1;
        return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64 bit arches this is a synonym for __ffs().
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
        if (((u32)word) == 0UL)
                return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
        return __ffs((unsigned long)word);
}
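
/*
 * Example (plain arithmetic): __ffs64(8) == 3 and
 * __ffs64(0x100000000ULL) == 32; on a 32-bit arch the latter takes the
 * high-word path above. __ffs64(0) is undefined.
 */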

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to assign
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
                                       bool value)
{
        if (value)
                set_bit(nr, addr);
        else
                clear_bit(nr, addr);
}

/* Non-atomic version of assign_bit() */
static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
                                         bool value)
{
        if (value)
                __set_bit(nr, addr);
        else
                __clear_bit(nr, addr);
}

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)                         \
        ({                                              \
                typecheck_pointer(*(addr));             \
                __set_bit(nr, (unsigned long *)(addr)); \
        })

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)                         \
        ({                                                \
                typecheck_pointer(*(addr));               \
                __clear_bit(nr, (unsigned long *)(addr)); \
        })

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)                       \
        ({                                             \
                typecheck_pointer(*(addr));            \
                test_bit(nr, (unsigned long *)(addr)); \
        })

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)                                  \
({                                                                      \
        const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);          \
        typeof(*(ptr)) old__, new__;                                    \
                                                                        \
        do {                                                            \
                old__ = READ_ONCE(*(ptr));                              \
                new__ = (old__ & ~mask__) | bits__;                     \
        } while (cmpxchg(ptr, old__, new__) != old__);                  \
                                                                        \
        old__;                                                          \
})
#endif
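
/*
 * Usage sketch for set_mask_bits() ('flags' is a hypothetical variable):
 * set_mask_bits(&flags, 0xf, 0x5) atomically replaces the low nibble of
 * 'flags' with 0x5 and evaluates to the old value; the cmpxchg() loop
 * retries if another CPU modified *ptr between the read and the update.
 */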

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)                              \
({                                                                      \
        const typeof(*(ptr)) clear__ = (clear), test__ = (test);        \
        typeof(*(ptr)) old__, new__;                                    \
                                                                        \
        do {                                                            \
                old__ = READ_ONCE(*(ptr));                              \
                new__ = old__ & ~clear__;                               \
        } while (!(old__ & test__) &&                                   \
                 cmpxchg(ptr, old__, new__) != old__);                  \
                                                                        \
        !(old__ & test__);                                              \
})
#endif
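
/*
 * Usage sketch for bit_clear_unless() ('state' is a hypothetical
 * variable): bit_clear_unless(&state, BIT(0), BIT(1)) atomically clears
 * bit 0 unless bit 1 is set, and evaluates to true when bit 1 was clear,
 * i.e. when the clear actually went ahead.
 */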

#endif /* __KERNEL__ */
#endif /* _LINUX_BITOPS_H */