/linux/tools/perf/tests/hists_output.c
    109  he = rb_entry(node, struct hist_entry, rb_node);  in del_hist_entries()
    167  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    173  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    179  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    185  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    191  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    197  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    203  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    209  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    215  he = rb_entry(node, struct hist_entry, rb_node);  in test1()
    [all …]
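These perf test hits are all the same idiom: walk the tree in order with rb_first()/rb_next(), then use rb_entry() (a container_of() wrapper) to get from the embedded rb_node back to the containing structure. A minimal sketch of that walk, using an invented struct item rather than perf's hist_entry:

    #include <linux/printk.h>
    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct item {
            struct rb_node rb_node;        /* embedded rbtree linkage */
            u64 key;
    };

    /* In-order traversal: rb_first()/rb_next() hand back the raw rb_node,
     * rb_entry() recovers the containing struct item. */
    static void walk_items(struct rb_root *root)
    {
            struct rb_node *nd;

            for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                    struct item *it = rb_entry(nd, struct item, rb_node);

                    pr_debug("key=%llu\n", (unsigned long long)it->key);
            }
    }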
/linux/fs/f2fs/extent_cache.c
    18   static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,  in __lookup_rb_tree_fast()
    34   struct rb_entry *re;  in __lookup_rb_tree_slow()
    37   re = rb_entry(node, struct rb_entry, rb_node);  in __lookup_rb_tree_slow()
    52   struct rb_entry *re;  in f2fs_lookup_rb_tree()
    67   struct rb_entry *re;  in f2fs_lookup_rb_tree_ext()
    71   re = rb_entry(*parent, struct rb_entry, rb_node);  in f2fs_lookup_rb_tree_ext()
    94   re = rb_entry(*parent, struct rb_entry, rb_node);  in f2fs_lookup_rb_tree_for_insert()
    149  re = rb_entry(*pnode, struct rb_entry, rb_node);  in f2fs_lookup_rb_tree_ret()
    165  re = rb_entry(parent, struct rb_entry, rb_node);  in f2fs_lookup_rb_tree_ret()
    206  cur_re = rb_entry(cur, struct rb_entry, rb_node);  in f2fs_check_rb_tree_consistence()
    [all …]
/linux/include/linux/interval_tree_generic.h
    48   parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
    89   ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
    108  node = rb_entry(node->ITRB.rb_right, \
    140  node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
    144  leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
    165  ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
    177  node = rb_entry(rb, ITSTRUCT, ITRB); \
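interval_tree_generic.h is a template: INTERVAL_TREE_DEFINE() stamps out insert/remove/iterate functions for a caller-supplied node type, and the rb_entry() lines above are the generated code converting between rb_node and that type. A hedged sketch of instantiating it, with an invented struct my_range (only the closed-interval [start, last] convention comes from the header itself):

    #include <linux/interval_tree_generic.h>
    #include <linux/types.h>

    struct my_range {
            struct rb_node rb;        /* ITRB: the embedded linkage */
            u64 start;                /* interval start */
            u64 last;                 /* interval end, inclusive */
            u64 __subtree_last;       /* ITSUBTREE: maintained by the template */
    };

    #define MY_RANGE_START(r)   ((r)->start)
    #define MY_RANGE_LAST(r)    ((r)->last)

    /* Generates my_range_insert(), my_range_remove(),
     * my_range_iter_first() and my_range_iter_next(), all operating on a
     * struct rb_root_cached so the earliest-starting interval stays cached. */
    INTERVAL_TREE_DEFINE(struct my_range, rb, u64, __subtree_last,
                         MY_RANGE_START, MY_RANGE_LAST, static, my_range)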
/linux/include/linux/rbtree_augmented.h
    80   RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
    89   RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
    90   RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
    96   RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
    97   RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
    127  child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
    132  child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
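rbtree_augmented.h is the other template here: RB_DECLARE_CALLBACKS()/RB_DECLARE_CALLBACKS_MAX() generate the propagate/copy/rotate callbacks that keep a per-node summary of its subtree up to date, and the rb_entry() lines above are those callbacks mapping nodes back to the user's type. A hedged sketch of the common max-of-a-scalar case (struct and names invented for illustration):

    #include <linux/rbtree_augmented.h>
    #include <linux/types.h>

    struct aug_node {
            struct rb_node rb;
            u64 value;
            u64 subtree_max;        /* max of ->value over this node's subtree */
    };

    #define AUG_NODE_VALUE(n)   ((n)->value)

    /* Generates a static struct rb_augment_callbacks named aug_callbacks
     * (plus helpers) that keep subtree_max correct across rotations; pass
     * &aug_callbacks to rb_insert_augmented()/rb_erase_augmented(). */
    RB_DECLARE_CALLBACKS_MAX(static, aug_callbacks,
                             struct aug_node, rb, u64, subtree_max, AUG_NODE_VALUE)

The tools/include copy further down in this listing is the userspace mirror of the same header that perf builds against.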
/linux/fs/jffs2/nodelist.h
    334  return rb_entry(node, struct jffs2_node_frag, rb);  in frag_first()
    344  return rb_entry(node, struct jffs2_node_frag, rb);  in frag_last()
    347  #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
    348  #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
    350  #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
    354  #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    355  #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
    357  #define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb)
    358  #define tn_right(tn) rb_entry((tn)->rb.rb_right, struct jffs2_tmp_dnode_info, rb)
    360  #define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb)
    [all …]
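jffs2 wraps the raw iterators in typed helpers so callers only ever see jffs2_node_frag / jffs2_tmp_dnode_info pointers. A sketch of the same wrapper pattern with an invented struct; note that a NULL result from rb_first()/rb_next()/rb_prev() only maps back to NULL because the rb_node sits at offset zero, so keep it the first member if you copy this style:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* rb must stay the first member for the NULL mapping noted above. */
    struct my_frag {
            struct rb_node rb;
            u32 ofs;
            u32 size;
    };

    #define my_frag_first(root)  rb_entry(rb_first(root), struct my_frag, rb)
    #define my_frag_last(root)   rb_entry(rb_last(root), struct my_frag, rb)
    #define my_frag_next(frag)   rb_entry(rb_next(&(frag)->rb), struct my_frag, rb)
    #define my_frag_prev(frag)   rb_entry(rb_prev(&(frag)->rb), struct my_frag, rb)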
/linux/drivers/block/drbd/drbd_interval.c
    12   struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);  in interval_end()
    34   rb_entry(*new, struct drbd_interval, rb);  in drbd_insert_interval()
    76   rb_entry(node, struct drbd_interval, rb);  in drbd_contains_interval()
    124  rb_entry(node, struct drbd_interval, rb);  in drbd_find_overlap()
    153  i = rb_entry(node, struct drbd_interval, rb);  in drbd_next_overlap()
/linux/net/ceph/debugfs.c
    71   rb_entry(n, struct ceph_pg_pool_info, node);  in osdmap_show()
    94   rb_entry(n, struct ceph_pg_mapping, node);  in osdmap_show()
    105  rb_entry(n, struct ceph_pg_mapping, node);  in osdmap_show()
    112  rb_entry(n, struct ceph_pg_mapping, node);  in osdmap_show()
    123  rb_entry(n, struct ceph_pg_mapping, node);  in osdmap_show()
    162  req = rb_entry(rp, struct ceph_mon_generic_request, node);  in monc_show()
    241  rb_entry(n, struct ceph_osd_request, r_node);  in dump_requests()
    267  rb_entry(n, struct ceph_osd_linger_request, node);  in dump_linger_requests()
    328  rb_entry(n, struct ceph_osd_backoff, id_node);  in dump_backoffs()
    353  struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);  in osdc_show()
    [all …]
/linux/tools/include/linux/rbtree_augmented.h
    82   RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
    91   RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
    92   RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
    98   RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
    99   RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
    129  child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
    134  child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
/linux/security/keys/proc.c
    70   struct key *key = rb_entry(n, struct key, serial_node);  in key_serial_next()
    85   struct key *key = rb_entry(n, struct key, serial_node);  in find_ge_key()
    108  minkey = rb_entry(n, struct key, serial_node);  in find_ge_key()
    131  struct key *key = rb_entry(n, struct key, serial_node);  in key_node_serial()
    156  struct key *key = rb_entry(_p, struct key, serial_node);  in proc_keys_show()
    255  struct key_user *user = rb_entry(n, struct key_user, node);  in __key_user_next()
    306  struct key_user *user = rb_entry(_p, struct key_user, node);  in proc_key_users_show()
/linux/fs/btrfs/extent_map.c
    105  entry = rb_entry(parent, struct extent_map, rb_node);  in tree_insert()
    120  entry = rb_entry(parent, struct extent_map, rb_node);  in tree_insert()
    127  entry = rb_entry(parent, struct extent_map, rb_node);  in tree_insert()
    130  entry = rb_entry(parent, struct extent_map, rb_node);  in tree_insert()
    156  entry = rb_entry(n, struct extent_map, rb_node);  in __tree_search()
    172  prev_entry = rb_entry(prev, struct extent_map, rb_node);  in __tree_search()
    179  prev_entry = rb_entry(prev, struct extent_map, rb_node);  in __tree_search()
    182  prev_entry = rb_entry(prev, struct extent_map, rb_node);  in __tree_search()
    254  merge = rb_entry(rb, struct extent_map, rb_node);  in try_merge_map()
    273  merge = rb_entry(rb, struct extent_map, rb_node);  in try_merge_map()
    [all …]
/linux/fs/btrfs/ordered-data.c
    42   entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);  in tree_insert()
    71   entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);  in __tree_search()
    89   prev_entry = rb_entry(test, struct btrfs_ordered_extent,  in __tree_search()
    97   prev_entry = rb_entry(prev, struct btrfs_ordered_extent,  in __tree_search()
    103  prev_entry = rb_entry(test, struct btrfs_ordered_extent,  in __tree_search()
    133  entry = rb_entry(tree->last, struct btrfs_ordered_extent,  in tree_search()
    349  entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);  in btrfs_mark_ordered_io_finished()
    361  entry = rb_entry(node, struct btrfs_ordered_extent,  in btrfs_mark_ordered_io_finished()
    477  entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);  in btrfs_dec_test_ordered_pending()
    827  entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);  in btrfs_lookup_ordered_extent()
    [all …]
/linux/fs/btrfs/ref-verify.c
    105  entry = rb_entry(n, struct block_entry, node);  in lookup_block_entry()
    125  entry = rb_entry(parent_node, struct root_entry, node);  in insert_root_entry()
    171  entry = rb_entry(parent_node, struct ref_entry, node);  in insert_ref_entry()
    194  entry = rb_entry(n, struct root_entry, node);  in lookup_root_entry()
    240  re = rb_entry(n, struct root_entry, node);  in free_block_entry()
    246  ref = rb_entry(n, struct ref_entry, node);  in free_block_entry()
    634  ref = rb_entry(n, struct ref_entry, node);  in dump_block_entry()
    642  re = rb_entry(n, struct root_entry, node);  in dump_block_entry()
    900  be = rb_entry(n, struct block_entry, node);  in btrfs_free_ref_cache()
    920  entry = rb_entry(n, struct block_entry, node);  in btrfs_free_ref_tree_range()
    [all …]
/linux/arch/powerpc/kernel/eeh_cache.c
    60   piar = rb_entry(n, struct pci_io_addr_range, rb_node);  in __eeh_addr_cache_get_device()
    106  piar = rb_entry(n, struct pci_io_addr_range, rb_node);  in eeh_addr_cache_print()
    128  piar = rb_entry(parent, struct pci_io_addr_range, rb_node);  in eeh_addr_cache_insert()
    221  piar = rb_entry(n, struct pci_io_addr_range, rb_node);  in __eeh_addr_cache_rmv_dev()
    271  piar = rb_entry(n, struct pci_io_addr_range, rb_node);  in eeh_addr_cache_show()
/linux/lib/rbtree_test.c
    39   if (key < rb_entry(parent, struct test_node, rb)->key)  in insert()
    57   if (key < rb_entry(parent, struct test_node, rb)->key)  in insert_cached()
    95   parent = rb_entry(rb_parent, struct test_node, rb);  in RB_DECLARE_CALLBACKS_MAX()
    120  parent = rb_entry(rb_parent, struct test_node, rb);  in insert_augmented_cached()
    198  struct test_node *node = rb_entry(rb, struct test_node, rb);  in check()
    224  struct test_node *node = rb_entry(rb, struct test_node, rb);  in check_augmented()
    227  subtree = rb_entry(node->rb.rb_left, struct test_node,  in check_augmented()
    233  subtree = rb_entry(node->rb.rb_right, struct test_node,  in check_augmented()
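insert() and insert_cached() in the test module are the textbook open-coded insertion: descend from the root remembering the parent and which link was taken, then rb_link_node() and rb_insert_color() to splice in and rebalance. A sketch along those lines (essentially what the test module does; names are illustrative):

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct test_node {
            u32 key;
            struct rb_node rb;
    };

    static void my_insert(struct test_node *node, struct rb_root *root)
    {
            struct rb_node **new = &root->rb_node, *parent = NULL;
            u32 key = node->key;

            /* Find the leaf position, keeping track of the parent. */
            while (*new) {
                    parent = *new;
                    if (key < rb_entry(parent, struct test_node, rb)->key)
                            new = &parent->rb_left;
                    else
                            new = &parent->rb_right;
            }

            /* Link the new node in, then recolor/rebalance. */
            rb_link_node(&node->rb, parent, new);
            rb_insert_color(&node->rb, root);
    }

The _cached variant differs only in tracking whether the descent ever went right and finishing with rb_insert_color_cached(&node->rb, root, leftmost) on a struct rb_root_cached.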
/linux/tools/perf/util/hist.c
    256   n = rb_entry(next, struct hist_entry, rb_node);  in hists__output_recalc_col_len()
    342   child = rb_entry(node, struct hist_entry, rb_node);  in hists__decay_entry()
    385   n = rb_entry(next, struct hist_entry, rb_node);  in hists__decay_entries()
    401   n = rb_entry(next, struct hist_entry, rb_node);  in hists__delete_entries()
    415   n = rb_entry(next, struct hist_entry, rb_node);  in hists__get_entry()
    1659  n = rb_entry(next, struct hist_entry, rb_node_in);  in hists__collapse_resort()
    1744  he = rb_entry(node, struct hist_entry, rb_node);  in hierarchy_recalc_total_periods()
    1922  n = rb_entry(next, struct hist_entry, rb_node_in);  in output_resort()
    1987  he = rb_entry(node, struct hist_entry, rb_node);  in rb_hierarchy_last()
    2036  child = rb_entry(node, struct hist_entry, rb_node);  in hist_entry__has_hierarchy_children()
    [all …]
/linux/tools/perf/util/rb_resort.h
    66   a = rb_entry(nda, struct __name##_sorted_entry, rb_node); \
    67   b = rb_entry(ndb, struct __name##_sorted_entry, rb_node); \
    129  __name##_entry = rb_entry(__nd, struct __name##_sorted_entry, \
/linux/tools/perf/util/symbol.c
    197  curr = rb_entry(nd, struct symbol, rb_node);  in symbols__fixup_duplicate()
    200  next = rb_entry(nd, struct symbol, rb_node);  in symbols__fixup_duplicate()
    228  curr = rb_entry(prevnd, struct symbol, rb_node);  in symbols__fixup_end()
    232  curr = rb_entry(nd, struct symbol, rb_node);  in symbols__fixup_end()
    313  pos = rb_entry(next, struct symbol, rb_node);  in symbols__delete()
    342  s = rb_entry(parent, struct symbol, rb_node);  in __symbols__insert()
    387  return rb_entry(n, struct symbol, rb_node);  in symbols__first()
    397  return rb_entry(n, struct symbol, rb_node);  in symbols__last()
    407  return rb_entry(n, struct symbol, rb_node);  in symbols__next()
    760  pos = rb_entry(next, struct symbol, rb_node);  in maps__split_kallsyms_for_kcore()
    [all …]
/linux/fs/ext4/extents_status.c
    188   es = rb_entry(node, struct extent_status, rb_node);  in ext4_es_print_tree()
    217   es = rb_entry(node, struct extent_status, rb_node);  in __es_tree_search()
    287   es1 = rb_entry(node, struct extent_status, rb_node);  in __es_find_extent_range()
    548   es1 = rb_entry(node, struct extent_status, rb_node);  in ext4_es_try_to_merge_left()
    572   es1 = rb_entry(node, struct extent_status, rb_node);  in ext4_es_try_to_merge_right()
    973   es1 = rb_entry(node, struct extent_status,  in ext4_es_lookup_extent()
    1030  rc->left_es = node ? rb_entry(node,  in init_rsvd()
    1223  es = node ? rb_entry(node, struct extent_status,  in get_rsvd()
    1236  es = rb_entry(node, struct extent_status,  in get_rsvd()
    1281  pr = rb_entry(node, struct pending_reservation,  in get_rsvd()
    [all …]
/linux/fs/ext4/block_validity.c
    79   entry = rb_entry(parent, struct ext4_system_zone, node);  in add_system_zone()
    103  entry = rb_entry(node, struct ext4_system_zone, node);  in add_system_zone()
    115  entry = rb_entry(node, struct ext4_system_zone, node);  in add_system_zone()
    137  entry = rb_entry(node, struct ext4_system_zone, node);  in debug_print_tree()
    326  entry = rb_entry(n, struct ext4_system_zone, node);  in ext4_inode_block_valid()
/linux/drivers/gpu/drm/drm_vma_manager.c
    152  node = rb_entry(iter, struct drm_mm_node, rb);  in drm_vma_offset_lookup_locked()
    282  entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);  in drm_vma_node_allow()
    335  entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);  in drm_vma_node_revoke()
    376  entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);  in drm_vma_node_is_allowed()
/linux/fs/ocfs2/reservations.c
    85   resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);  in ocfs2_dump_resv()
    140  resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);  in ocfs2_check_resmap()
    275  resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);  in ocfs2_resmap_clear_all_resv()
    316  tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node);  in ocfs2_resv_insert()
    367  resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);  in ocfs2_find_resv_lhs()
    525  next_resv = rb_entry(next, struct ocfs2_alloc_reservation,  in __ocfs2_resv_find_window()
    562  next_resv = rb_entry(next,  in __ocfs2_resv_find_window()
    605  prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation,  in __ocfs2_resv_find_window()
/linux/Documentation/core-api/rbtree.rst
    188  rb_entry(node, type, member).
    314  rb_entry(node->rb.rb_left,
    333  node = rb_entry(node->rb.rb_right,
    350  subtree_last = rb_entry(node->rb.rb_left,
    356  subtree_last = rb_entry(node->rb.rb_right,
    368  rb_entry(rb, struct interval_tree_node, rb);
    380  rb_entry(rb_old, struct interval_tree_node, rb);
    382  rb_entry(rb_new, struct interval_tree_node, rb);
    390  rb_entry(rb_old, struct interval_tree_node, rb);
    392  rb_entry(rb_new, struct interval_tree_node, rb);
    [all …]
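rbtree.rst is where this API is actually documented; its lookup example reduces to the loop below (struct mytype and my_search follow the documentation's illustrative names, paraphrased here rather than quoted verbatim):

    #include <linux/rbtree.h>
    #include <linux/string.h>

    struct mytype {
            struct rb_node node;
            char *keystring;
    };

    /* Ordinary binary search over a tree ordered by strcmp(). */
    static struct mytype *my_search(struct rb_root *root, const char *string)
    {
            struct rb_node *node = root->rb_node;

            while (node) {
                    struct mytype *data = rb_entry(node, struct mytype, node);
                    int result = strcmp(string, data->keystring);

                    if (result < 0)
                            node = node->rb_left;
                    else if (result > 0)
                            node = node->rb_right;
                    else
                            return data;
            }
            return NULL;
    }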
/linux/fs/ntfs3/bitmap.c
    141  rb_entry(node, struct e_node, start.node));  in wnd_close()
    154  k = rb_entry(*p, struct rb_node_key, node);  in rb_lookup()
    256  e = rb_entry(n, struct e_node, start.node);  in wnd_add_free_ext()
    272  e = rb_entry(n, struct e_node, start.node);  in wnd_add_free_ext()
    324  e = rb_entry(n, struct e_node, count.node);  in wnd_add_free_ext()
    332  e2 = rb_entry(n, struct e_node, count.node);  in wnd_add_free_ext()
    379  e = rb_entry(n, struct e_node, start.node);  in wnd_remove_free_ext()
    399  e3 = rb_entry(n3, struct e_node, start.node);  in wnd_remove_free_ext()
    878  e = rb_entry(n, struct e_node, start.node);  in wnd_is_free()
    915  e = rb_entry(n, struct e_node, start.node);  in wnd_is_used()
    [all …]
/linux/drivers/base/regmap/regcache-rbtree.c
    80   rbnode = rb_entry(node, struct regcache_rbtree_node, node);  in regcache_rbtree_lookup()
    107  rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);  in regcache_rbtree_insert()
    150  n = rb_entry(node, struct regcache_rbtree_node, node);  in rbtree_show()
    227  rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);  in regcache_rbtree_exit()
    404  rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,  in regcache_rbtree_write()
    476  rbnode = rb_entry(node, struct regcache_rbtree_node, node);  in regcache_rbtree_sync()
    516  rbnode = rb_entry(node, struct regcache_rbtree_node, node);  in regcache_rbtree_drop()
/linux/kernel/locking/rtmutex_common.h
    108  return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter;  in rt_mutex_waiter_is_top_waiter()
    117  w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);  in rt_mutex_top_waiter()
    130  return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,  in task_top_pi_waiter()
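The rtmutex helpers above work on a leftmost-cached tree: struct rb_root_cached keeps a pointer to the smallest node, so reading the highest-priority ("top") waiter is O(1) rather than a walk down the left spine. A hedged sketch of the same idea with an invented waiter type:

    #include <linux/rbtree.h>

    struct waiter {
            struct rb_node node;
            int prio;        /* lower value == higher priority */
    };

    /* O(1): rb_first_cached() just reads the cached leftmost pointer,
     * which in a prio-ordered tree is the top waiter. */
    static struct waiter *top_waiter(struct rb_root_cached *tree)
    {
            struct rb_node *leftmost = rb_first_cached(tree);

            return leftmost ? rb_entry(leftmost, struct waiter, node) : NULL;
    }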