#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

/*
 * Create a contiguous bitmask starting at bit position @l and ending at
 * position @h. For example
 * GENMASK(30, 21) gives us the 32-bit vector 0x7fe00000.
 */
#define GENMASK(h, l) \
    (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define GENMASK_ULL(h, l) \
    (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LLONG - 1 - (h))))
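
/*
 * Illustrative expansions (with @h below BITS_PER_LONG / BITS_PER_LLONG
 * respectively):
 *
 *     GENMASK(7, 4)       == 0xf0             -- bits 4..7 set
 *     GENMASK_ULL(39, 21) == 0xffffe00000ULL  -- bits 21..39 set
 */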

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, and therefore
 * differs in spirit from ffz (man ffs).
 */

static inline int generic_ffs(int x)
{
    int r = 1;

    if (!x)
        return 0;
    if (!(x & 0xffff)) {
        x >>= 16;
        r += 16;
    }
    if (!(x & 0xff)) {
        x >>= 8;
        r += 8;
    }
    if (!(x & 0xf)) {
        x >>= 4;
        r += 4;
    }
    if (!(x & 3)) {
        x >>= 2;
        r += 2;
    }
    if (!(x & 1)) {
        x >>= 1;
        r += 1;
    }
    return r;
}
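
/*
 * Worked example: the function narrows in on the lowest set bit by halving
 * the search window (16, 8, 4, 2, 1 bits) at each step, so
 *
 *     generic_ffs(0x10) == 5   -- bit 4 is the lowest set bit, 1-based
 *     generic_ffs(0)    == 0   -- no bit set
 */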

/*
 * fls: find last bit set.
 */

static __inline__ int generic_fls(int x)
{
    int r = 32;

    if (!x)
        return 0;
    if (!(x & 0xffff0000u)) {
        x <<= 16;
        r -= 16;
    }
    if (!(x & 0xff000000u)) {
        x <<= 8;
        r -= 8;
    }
    if (!(x & 0xf0000000u)) {
        x <<= 4;
        r -= 4;
    }
    if (!(x & 0xc0000000u)) {
        x <<= 2;
        r -= 2;
    }
    if (!(x & 0x80000000u)) {
        x <<= 1;
        r -= 1;
    }
    return r;
}
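
/*
 * Worked example: the mirror image of generic_ffs(), returning the 1-based
 * index of the highest set bit:
 *
 *     generic_fls(0x10)       == 5
 *     generic_fls(0x80000000) == 32
 *     generic_fls(0)          == 0
 */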

#if BITS_PER_LONG == 64

static inline int generic_ffsl(unsigned long x)
{
    return !x || (u32)x ? generic_ffs(x) : generic_ffs(x >> 32) + 32;
}

static inline int generic_flsl(unsigned long x)
{
    u32 h = x >> 32;

    return h ? generic_fls(h) + 32 : generic_fls(x);
}

#else
# define generic_ffsl generic_ffs
# define generic_flsl generic_fls
#endif

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

#if BITS_PER_LONG == 64
# define fls64 flsl
# define ffs64 ffsl
#else
# ifndef ffs64
static inline int generic_ffs64(__u64 x)
{
    return !x || (__u32)x ? ffs(x) : ffs(x >> 32) + 32;
}
#  define ffs64 generic_ffs64
# endif
# ifndef fls64
static inline int generic_fls64(__u64 x)
{
    __u32 h = x >> 32;

    return h ? fls(h) + 32 : fls(x);
}
#  define fls64 generic_fls64
# endif
#endif
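
/*
 * Whichever path is taken, ffs64()/fls64() behave like ffs()/fls() widened
 * to 64 bits. Illustrative values:
 *
 *     ffs64(0x0000000100000000ULL) == 33
 *     fls64(0x0000000100000000ULL) == 33
 *     ffs64(0)                     == 0
 */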

static __inline__ int get_bitmask_order(unsigned int count)
{
    int order;

    order = fls(count);
    return order;   /* We could be slightly more clever with -1 here... */
}

static __inline__ int get_count_order(unsigned int count)
{
    int order;

    order = fls(count) - 1;
    if (count & (count - 1))
        order++;
    return order;
}
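
/*
 * Illustrative values: get_bitmask_order() is simply fls(), while
 * get_count_order() rounds up to the next power-of-two order:
 *
 *     get_bitmask_order(5) == 3
 *     get_count_order(4)   == 2
 *     get_count_order(5)   == 3
 */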

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

static inline unsigned int generic_hweight32(unsigned int w)
{
    w -= (w >> 1) & 0x55555555;
    w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
    w =  (w + (w >> 4)) & 0x0f0f0f0f;

    if ( IS_ENABLED(CONFIG_HAS_FAST_MULTIPLY) )
        return (w * 0x01010101) >> 24;

    w += w >> 8;

    return (w + (w >> 16)) & 0xff;
}
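
/*
 * Worked example: the parallel (SWAR) reduction above counts bits in 2-,
 * then 4-, then 8-bit groups before the final sum, so for instance
 *
 *     generic_hweight32(0xf0f0f0f0) == 16
 *     generic_hweight32(0x00000001) == 1
 */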

static inline unsigned int generic_hweight16(unsigned int w)
{
    w -= ((w >> 1) & 0x5555);
    w =  (w & 0x3333) + ((w >> 2) & 0x3333);
    w =  (w + (w >> 4)) & 0x0f0f;

    return (w + (w >> 8)) & 0xff;
}

static inline unsigned int generic_hweight8(unsigned int w)
{
    w -= ((w >> 1) & 0x55);
    w =  (w & 0x33) + ((w >> 2) & 0x33);

    return (w + (w >> 4)) & 0x0f;
}

static inline unsigned int generic_hweight64(uint64_t w)
{
    if ( BITS_PER_LONG < 64 )
        return generic_hweight32(w >> 32) + generic_hweight32(w);

    w -= (w >> 1) & 0x5555555555555555ul;
    w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
    w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;

    if ( IS_ENABLED(CONFIG_HAS_FAST_MULTIPLY) )
        return (w * 0x0101010101010101ul) >> 56;

    w += w >> 8;
    w += w >> 16;

    return (w + (w >> 32)) & 0xFF;
}
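
/*
 * Illustrative values for the 64-bit variant (on 32-bit longs the function
 * simply sums the weights of the two halves):
 *
 *     generic_hweight64(0xffffffffffffffffULL) == 64
 *     generic_hweight64(0x8000000000000001ULL) == 2
 */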

static inline unsigned long hweight_long(unsigned long w)
{
    return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
}
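
/*
 * The sizeof() check above is a compile-time constant, so only the helper
 * matching the width of unsigned long is used at run time; e.g.
 * hweight_long(0xffUL) == 8 on both 32- and 64-bit builds.
 */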

/*
 * rol32 - rotate a 32-bit value left
 *
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
    return (word << shift) | (word >> (32 - shift));
}

/*
 * ror32 - rotate a 32-bit value right
 *
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
    return (word >> shift) | (word << (32 - shift));
}
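
/*
 * Illustrative values:
 *
 *     rol32(0x80000001, 1) == 0x00000003
 *     ror32(0x80000001, 1) == 0xc0000000
 *
 * Note that a @shift of 0 would result in a shift by 32, which is undefined
 * for a 32-bit type, so callers are expected to pass 1..31.
 */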

/* base-2 logarithm */
#define __L2(_x)  (((_x) & 0x00000002) ?   1 : 0)
#define __L4(_x)  (((_x) & 0x0000000c) ? ( 2 + __L2( (_x)>> 2)) : __L2( _x))
#define __L8(_x)  (((_x) & 0x000000f0) ? ( 4 + __L4( (_x)>> 4)) : __L4( _x))
#define __L16(_x) (((_x) & 0x0000ff00) ? ( 8 + __L8( (_x)>> 8)) : __L8( _x))
#define ilog2(_x) (((_x) & 0xffff0000) ? (16 + __L16((_x)>>16)) : __L16(_x))
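
/*
 * ilog2() yields floor(log2(_x)) for a non-zero argument; worked examples:
 * ilog2(1) == 0, ilog2(8) == 3, ilog2(1000) == 9.  Because the macro
 * expands its argument many times, it is only really suitable for
 * compile-time constants.
 */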

/**
 * for_each_set_bit - iterate over every set bit in a memory region
 * @bit: The integer iterator
 * @addr: The address to base the search on
 * @size: The maximum number of bits to search
 */
#define for_each_set_bit(bit, addr, size)               \
    for ( (bit) = find_first_bit(addr, size);           \
          (bit) < (size);                               \
          (bit) = find_next_bit(addr, size, (bit) + 1) )
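
/*
 * Minimal usage sketch (assumes find_first_bit()/find_next_bit() are
 * provided by <asm/bitops.h>; the printing call is illustrative only):
 *
 *     unsigned long mask = 0x19;  -- bits 0, 3 and 4 set
 *     unsigned int bit;
 *
 *     for_each_set_bit(bit, &mask, BITS_PER_LONG)
 *         printk("bit %u is set\n", bit);  -- prints 0, 3, 4
 */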

#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)

#endif /* _LINUX_BITOPS_H */