/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/local_lock.h>

/* Keep unconverted code working */
#define radix_tree_root		xarray
#define radix_tree_node		xa_node

struct radix_tree_preload {
	local_lock_t lock;
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	2UL
static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAP_SHIFT	XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_MAX_TAGS	XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS	XA_MARK_LONGS

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR	((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)

#define RADIX_TREE_INIT(name, mask)	XARRAY_INIT(name, mask)

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->xa_head == NULL;
}

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
};

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
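
/*
 * Example (illustrative sketch, not part of this API): a lockless RCU
 * lookup paired with a lock-protected update, following the rules above.
 * The names my_tree, my_lock, my_item and index are hypothetical.
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		...use item, subject to the lifetime rules above...
 *	rcu_read_unlock();
 *
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, my_item);
 *	spin_unlock(&my_lock);
 */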

/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
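
/*
 * Example (illustrative sketch): the slot/deref/retry pattern under the
 * RCU read lock.  my_tree and index are hypothetical.
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	entry = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(entry))
 *		goto repeat;
 *	rcu_read_unlock();
 */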

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int radix_tree_insert(struct radix_tree_root *, unsigned long index,
			void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
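
/*
 * Example (illustrative sketch): tagging an existing entry and testing the
 * tag.  MY_TAG (which must be below RADIX_TREE_MAX_TAGS), my_tree and
 * my_lock are hypothetical; the index must already be present in the tree,
 * and modifications must be serialized as described above.
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *	spin_unlock(&my_lock);
 *
 *	if (radix_tree_tagged(&my_tree, MY_TAG))
 *		...at least one entry in the tree carries MY_TAG...
 */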

static inline void radix_tree_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}
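
/*
 * Example (illustrative sketch): preallocating nodes so that an insertion
 * under a spinlock need not allocate memory while atomic.  my_lock, my_tree,
 * index and item are hypothetical.
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */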

void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iteration (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}
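
/*
 * Example (illustrative sketch): fetching both the slot and the entry for
 * a known index; the caller must hold the tree lock or the RCU read lock,
 * per the synchronization rules above.  my_tree and index are hypothetical.
 *
 *	slot = radix_tree_iter_lookup(&my_tree, &iter, index);
 *	entry = slot ? radix_tree_deref_slot(slot) : NULL;
 */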

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + slots;
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter);
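
/*
 * Example (illustrative sketch): dropping and retaking the tree lock in
 * the middle of an iteration.  my_tree and my_lock are hypothetical.
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		...process the slot...
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */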

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
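
/*
 * Example (illustrative sketch): walking all entries under rcu_read_lock(),
 * restarting the chunk when a moved slot is observed.  my_tree and start
 * are hypothetical.
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, start) {
 *		void *entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		...use entry...
 *	}
 *	rcu_read_unlock();
 */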

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))
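
/*
 * Example (illustrative sketch): walking only the slots carrying a given
 * tag, with the tree write-locked so slots cannot move.  MY_TAG, my_tree
 * and my_lock are hypothetical.
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG) {
 *		void *entry = radix_tree_deref_slot_protected(slot, &my_lock);
 *		...process entry; the tag may be cleared with
 *		   radix_tree_iter_tag_clear(&my_tree, &iter, MY_TAG)...
 *	}
 *	spin_unlock(&my_lock);
 */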

#endif /* _LINUX_RADIX_TREE_H */