| /linux/include/linux/ |
| A D | hashtable.h |
    126  #define hash_for_each(name, bkt, obj, member) \   (argument)
    127  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
    128  (bkt)++)\
    129  hlist_for_each_entry(obj, &name[bkt], member)
    138  #define hash_for_each_rcu(name, bkt, obj, member) \   (argument)
    139  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
    140  (bkt)++)\
    141  hlist_for_each_entry_rcu(obj, &name[bkt], member)
    153  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
    154  (bkt)++)\
    [all …]
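hash_for_each() and its RCU variant walk every bucket of a fixed-size, hlist-based table declared with DEFINE_HASHTABLE(); the `obj == NULL` test in the outer loop lets a `break` out of the inner hlist walk terminate the whole iteration. A minimal kernel-context sketch of the pattern the call sites below follow (the `struct item` type, its key, and the table name are made up for illustration):

#include <linux/hashtable.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical entry type; hnode is the hlist_node the table links together. */
struct item {
    u32 key;
    struct hlist_node hnode;
};

/* 2^4 = 16 buckets, each an hlist head. */
static DEFINE_HASHTABLE(item_tbl, 4);

static void item_add(struct item *it)
{
    /* Hashes it->key and links the node into the matching bucket. */
    hash_add(item_tbl, &it->hnode, it->key);
}

static void item_dump(void)
{
    struct item *it;
    int bkt;

    /* Visits every bucket (bkt) and every entry chained in it. */
    hash_for_each(item_tbl, bkt, it, hnode)
        pr_info("bkt %d: key %u\n", bkt, it->key);
}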
|
| A D | rhashtable.h |
    369  return __rht_ptr(rcu_dereference(*bkt), bkt);   in rht_ptr_rcu()
    377  return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);   in rht_ptr()
    383  return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);   in rht_ptr_exclusive()
    720  if (!bkt)   in __rhashtable_insert_fast()
    723  rht_lock(tbl, bkt);   in __rhashtable_insert_fast()
    800  rht_unlock(tbl, bkt);   in __rhashtable_insert_fast()
    999  if (!bkt)   in __rhashtable_remove_fast_one()
    1002  rht_lock(tbl, bkt);   in __rhashtable_remove_fast_one()
    1053  rht_unlock(tbl, bkt);   in __rhashtable_remove_fast_one()
    1157  if (!bkt)   in __rhashtable_replace_fast()
    [all …]
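rhashtable is the kernel's resizable, RCU-protected hash table; the `bkt` pointer these hits dereference doubles as a per-bucket lock, which is what rht_lock()/rht_unlock() take and release. A hedged sketch of ordinary caller-side usage (the `struct entry` layout, key type, and parameter values are illustrative, not taken from the listing):

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct entry {
    u32 key;
    struct rhash_head node;    /* linkage inside the table */
};

static const struct rhashtable_params entry_params = {
    .key_len     = sizeof(u32),
    .key_offset  = offsetof(struct entry, key),
    .head_offset = offsetof(struct entry, node),
    .automatic_shrinking = true,
};

static struct rhashtable entry_ht;

static int entry_ht_demo(void)
{
    struct entry *e, *found;
    int err;

    err = rhashtable_init(&entry_ht, &entry_params);
    if (err)
        return err;

    e = kzalloc(sizeof(*e), GFP_KERNEL);
    if (!e)
        return -ENOMEM;
    e->key = 42;

    /* Insert; the table resizes itself as it fills up. */
    err = rhashtable_insert_fast(&entry_ht, &e->node, entry_params);
    if (err)
        return err;

    /* Lookups run under rcu_read_lock(). */
    rcu_read_lock();
    found = rhashtable_lookup_fast(&entry_ht, &e->key, entry_params);
    rcu_read_unlock();

    rhashtable_remove_fast(&entry_ht, &e->node, entry_params);
    rhashtable_destroy(&entry_ht);
    kfree(e);
    return found ? 0 : -ENOENT;
}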
|
| /linux/tools/include/linux/ |
| A D | hashtable.h |
    105  #define hash_for_each(name, bkt, obj, member) \   (argument)
    106  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
    107  (bkt)++)\
    108  hlist_for_each_entry(obj, &name[bkt], member)
    119  #define hash_for_each_safe(name, bkt, tmp, obj, member) \   (argument)
    120  for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
    121  (bkt)++)\
    122  hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
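tools/include carries a userspace copy of the same macro family. The `_safe` variant stashes the next node in `tmp`, so the current entry can be unlinked and freed while the walk continues; a small kernel-style sketch of draining a table this way (names are hypothetical), which is the shape of several tear-down call sites further down this listing:

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/types.h>

struct item {
    u32 key;
    struct hlist_node hnode;
};

static DEFINE_HASHTABLE(item_tbl, 4);

static void item_tbl_free(void)
{
    struct hlist_node *tmp;
    struct item *it;
    int bkt;

    /* 'tmp' holds the next node, so hash_del() + kfree() is safe here. */
    hash_for_each_safe(item_tbl, bkt, tmp, it, hnode) {
        hash_del(&it->hnode);
        kfree(it);
    }
}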
|
| /linux/tools/perf/util/ |
| A D | hashmap.h |
    155  #define hashmap__for_each_entry(map, cur, bkt) \   (argument)
    156  for (bkt = 0; bkt < map->cap; bkt++) \
    157  for (cur = map->buckets[bkt]; cur; cur = cur->next)
    167  #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \   (argument)
    168  for (bkt = 0; bkt < map->cap; bkt++) \
    169  for (cur = map->buckets[bkt]; \
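This hashmap is the open-hashing table shared by perf and libbpf: an array of `cap` singly linked buckets, which the two macros above traverse with `bkt` as the bucket index and `cur` as the chain cursor. A hedged, self-contained sketch of the usual calling sequence when built against the tools/ headers; it assumes the older void*-keyed flavour of the API that matches these line numbers (newer trees switched keys and values to `long`), and the callbacks are illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/err.h>
#include "hashmap.h"

/* Illustrative callbacks: keys are plain integers cast to pointers. */
static size_t int_hash(const void *key, void *ctx)
{
    return (size_t)key;
}

static bool int_equal(const void *a, const void *b, void *ctx)
{
    return a == b;
}

int main(void)
{
    struct hashmap *map = hashmap__new(int_hash, int_equal, NULL);
    struct hashmap_entry *cur;
    size_t bkt;
    long i;

    if (IS_ERR(map))
        return 1;

    for (i = 0; i < 16; i++)
        hashmap__add(map, (void *)i, (void *)(i * i));

    /* 'bkt' walks the bucket array, 'cur' the chain inside each bucket. */
    hashmap__for_each_entry(map, cur, bkt)
        printf("bkt %zu: key %ld -> value %ld\n",
               bkt, (long)cur->key, (long)cur->value);

    hashmap__free(map);    /* frees the map and buckets, not keys/values */
    return 0;
}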
|
| A D | expr.c |
    80  size_t bkt;   in ids__free()  (local)
    85  hashmap__for_each_entry(ids, cur, bkt) {   in ids__free()
    110  size_t bkt;   in ids__union()  (local)
    128  hashmap__for_each_entry(ids2, cur, bkt) {   in ids__union()
    239  size_t bkt;   in expr__subset_of_ids()  (local)
    242  hashmap__for_each_entry(needles->ids, cur, bkt) {   in expr__subset_of_ids()
    320  size_t bkt;   in expr__ctx_clear()  (local)
    322  hashmap__for_each_entry(ctx->ids, cur, bkt) {   in expr__ctx_clear()
    332  size_t bkt;   in expr__ctx_free()  (local)
    334  hashmap__for_each_entry(ctx->ids, cur, bkt) {   in expr__ctx_free()
|
| A D | hashmap.c |
    66  size_t bkt;   in hashmap__clear()  (local)
    68  hashmap__for_each_entry_safe(map, cur, tmp, bkt) {   in hashmap__clear()
    106  size_t h, bkt;   in hashmap_grow()  (local)
    117  hashmap__for_each_entry_safe(map, cur, tmp, bkt) {   in hashmap_grow()
|
| A D | metricgroup.c |
    730  size_t bkt;   in metricgroup__build_event_string()  (local)
    736  hashmap__for_each_entry(ctx->ids, cur, bkt) {   in metricgroup__build_event_string()
    878  size_t bkt;   in resolve_metric()  (local)
    894  hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {   in resolve_metric()
    1269  size_t bkt;   in build_combined_expr_ctx()  (local)
    1280  hashmap__for_each_entry(m->pctx->ids, cur, bkt) {   in build_combined_expr_ctx()
|
| /linux/tools/lib/bpf/ |
| A D | hashmap.h |
    155  #define hashmap__for_each_entry(map, cur, bkt) \   (argument)
    156  for (bkt = 0; bkt < map->cap; bkt++) \
    157  for (cur = map->buckets[bkt]; cur; cur = cur->next)
    167  #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \   (argument)
    168  for (bkt = 0; bkt < map->cap; bkt++) \
    169  for (cur = map->buckets[bkt]; \
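libbpf keeps an identical copy of this header. With the `_safe` form, `tmp` latches `cur->next` before the loop body runs, so deleting the current entry mid-walk is safe. A short sketch of pruning entries under that iterator; the predicate is made up, and hashmap__delete's two out-parameters are assumed to follow the void*-keyed API, where NULL means the old key/value are not wanted back:

#include <stdbool.h>
#include <stddef.h>
#include "hashmap.h"

/* Drop every entry whose value is NULL; 'tmp' keeps the walk alive. */
static void prune_null_values(struct hashmap *map)
{
    struct hashmap_entry *cur, *tmp;
    size_t bkt;

    hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
        if (!cur->value)
            hashmap__delete(map, cur->key, NULL, NULL);
    }
}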
|
| A D | hashmap.c |
    66  size_t bkt;   in hashmap__clear()  (local)
    68  hashmap__for_each_entry_safe(map, cur, tmp, bkt) {   in hashmap__clear()
    106  size_t h, bkt;   in hashmap_grow()  (local)
    117  hashmap__for_each_entry_safe(map, cur, tmp, bkt) {   in hashmap_grow()
|
| /linux/drivers/infiniband/ulp/opa_vnic/ |
| A D | opa_vnic_internal.h |
    280  #define vnic_hash_for_each_safe(name, bkt, tmp, obj, member) \   (argument)
    281  for ((bkt) = 0, obj = NULL; \
    282  !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
    283  hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
    289  #define vnic_hash_for_each(name, bkt, obj, member) \   (argument)
    290  for ((bkt) = 0, obj = NULL; \
    291  !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
    292  hlist_for_each_entry(obj, &name[bkt], member)
|
| A D | opa_vnic_encap.c |
    107  int bkt;   in opa_vnic_free_mac_tbl()  (local)
    112  vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) {   in opa_vnic_free_mac_tbl()
    157  int bkt;   in opa_vnic_query_mac_tbl()  (local)
    168  vnic_hash_for_each(mactbl, bkt, node, hlist) {   in opa_vnic_query_mac_tbl()
    209  int i, bkt, rc = 0;   in opa_vnic_update_mac_tbl()  (local)
    263  vnic_hash_for_each(old_mactbl, bkt, node, hlist) {   in opa_vnic_update_mac_tbl()
|
| /linux/tools/testing/selftests/bpf/prog_tests/ |
| A D | hashmap.c |
    46  int err, bkt, found_cnt, i;   in test_hashmap_generic()  (local)
    93  hashmap__for_each_entry(map, entry, bkt) {   in test_hashmap_generic()
    141  hashmap__for_each_entry_safe(map, entry, tmp, bkt) {   in test_hashmap_generic()
    200  hashmap__for_each_entry_safe(map, entry, tmp, bkt) {   in test_hashmap_generic()
    235  hashmap__for_each_entry(map, entry, bkt) {   in test_hashmap_generic()
    243  hashmap__for_each_entry(map, entry, bkt) {   in test_hashmap_generic()
    265  int err, bkt;   in test_hashmap_multimap()  (local)
    302  hashmap__for_each_entry(map, entry, bkt) {   in test_hashmap_multimap()
    334  int bkt;   in test_hashmap_empty()  (local)
    356  hashmap__for_each_entry(map, entry, bkt) {   in test_hashmap_empty()
|
| /linux/lib/ |
| A D | rhashtable.c |
    268  rht_assign_locked(bkt, next);   in rhashtable_rehash_one()
    281  if (!bkt)   in rhashtable_rehash_chain()
    283  rht_lock(old_tbl, bkt);   in rhashtable_rehash_chain()
    290  rht_unlock(old_tbl, bkt);   in rhashtable_rehash_chain()
    527  rht_assign_locked(bkt, obj);   in rhashtable_lookup_one()
    565  head = rht_ptr(bkt, tbl, hash);   in rhashtable_insert_one()
    578  rht_assign_locked(bkt, obj);   in rhashtable_insert_one()
    603  bkt = rht_bucket_var(tbl, hash);   in rhashtable_try_insert()
    606  if (bkt == NULL) {   in rhashtable_try_insert()
    610  rht_lock(tbl, bkt);   in rhashtable_try_insert()
    [all …]
|
| /linux/drivers/s390/crypto/ |
| A D | ap_card.c |
    80  int bkt;   in request_count_store()  (local)
    85  hash_for_each(ap_queues, bkt, aq, hnode)   in request_count_store()
    99  int bkt;   in requestq_count_show()  (local)
    106  hash_for_each(ap_queues, bkt, aq, hnode)   in requestq_count_show()
    118  int bkt;   in pendingq_count_show()  (local)
    125  hash_for_each(ap_queues, bkt, aq, hnode)   in pendingq_count_show()
|
| A D | ap_bus.c |
    461  int bkt;   in ap_tasklet_fn()  (local)
    473  hash_for_each(ap_queues, bkt, aq, hnode) {   in ap_tasklet_fn()
    485  int bkt;   in ap_pending_requests()  (local)
    489  hash_for_each(ap_queues, bkt, aq, hnode) {   in ap_pending_requests()
    928  int bkt;   in ap_get_qdev()  (local)
    932  hash_for_each(ap_queues, bkt, aq, hnode) {   in ap_get_qdev()
|
| /linux/net/ipv6/ |
| A D | calipso.c |
    203  u32 bkt;   in calipso_cache_check()  (local)
    212  bkt = hash & (CALIPSO_CACHE_BUCKETS - 1);   in calipso_cache_check()
    213  spin_lock_bh(&calipso_cache[bkt].lock);   in calipso_cache_check()
    224  spin_unlock_bh(&calipso_cache[bkt].lock);   in calipso_cache_check()
    239  spin_unlock_bh(&calipso_cache[bkt].lock);   in calipso_cache_check()
    244  spin_unlock_bh(&calipso_cache[bkt].lock);   in calipso_cache_check()
    268  u32 bkt;   in calipso_cache_add()  (local)
    291  bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1);   in calipso_cache_add()
    292  spin_lock_bh(&calipso_cache[bkt].lock);   in calipso_cache_add()
    295  calipso_cache[bkt].size += 1;   in calipso_cache_add()
    [all …]
|
| /linux/net/ipv4/ |
| A D | cipso_ipv4.c |
    237  u32 bkt;   in cipso_v4_cache_check()  (local)
    246  bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);   in cipso_v4_cache_check()
    247  spin_lock_bh(&cipso_v4_cache[bkt].lock);   in cipso_v4_cache_check()
    258  spin_unlock_bh(&cipso_v4_cache[bkt].lock);   in cipso_v4_cache_check()
    273  spin_unlock_bh(&cipso_v4_cache[bkt].lock);   in cipso_v4_cache_check()
    278  spin_unlock_bh(&cipso_v4_cache[bkt].lock);   in cipso_v4_cache_check()
    300  u32 bkt;   in cipso_v4_cache_add()  (local)
    323  bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);   in cipso_v4_cache_add()
    324  spin_lock_bh(&cipso_v4_cache[bkt].lock);   in cipso_v4_cache_add()
    327  cipso_v4_cache[bkt].size += 1;   in cipso_v4_cache_add()
    [all …]
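Both the CALIPSO (IPv6) and CIPSO (IPv4) label caches above pick a bucket by masking the hash with (BUCKETS - 1) rather than taking a modulo, and guard each bucket with its own spinlock. The mask is equivalent to `hash % BUCKETS` only when the bucket count is a power of two; a tiny standalone illustration with a made-up constant:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_CACHE_BUCKETS 32u    /* must be a power of two for the mask trick */

static uint32_t bucket_of(uint32_t hash)
{
    /* hash & (N - 1) == hash % N when N is a power of two. */
    return hash & (DEMO_CACHE_BUCKETS - 1);
}

int main(void)
{
    uint32_t h;

    for (h = 0; h < 1000; h++)
        assert(bucket_of(h) == h % DEMO_CACHE_BUCKETS);

    printf("hash 0x%x -> bucket %u\n", 0xdeadbeefu, bucket_of(0xdeadbeefu));
    return 0;
}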
|
| /linux/tools/perf/tests/ |
| A D | pmu-events.c |
    844  size_t bkt;   in resolve_metric_simple()  (local)
    850  hashmap__for_each_entry_safe(pctx->ids, cur, cur_tmp, bkt) {   in resolve_metric_simple()
    923  size_t bkt;   in test__parsing()  (local)
    950  hashmap__for_each_entry(ctx->ids, cur, bkt)   in test__parsing()
    953  hashmap__for_each_entry(ctx->ids, cur, bkt) {   in test__parsing()
    994  size_t bkt;   in metric_parse_fake()  (local)
    1015  hashmap__for_each_entry(ctx->ids, cur, bkt)   in metric_parse_fake()
    1018  hashmap__for_each_entry(ctx->ids, cur, bkt) {   in metric_parse_fake()
|
| /linux/net/sched/ |
| A D | cls_route.c |
    54  struct route4_bucket *bkt;   (member)
    326  b = f->bkt;   in route4_delete()
    454  f->bkt = b;   in route4_set_parms()
    508  f->bkt = fold->bkt;   in route4_change()
    518  fp = &f->bkt->ht[h];   in route4_change()
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| A D | vxlan.c |
    184  int bkt;   in mlx5_vxlan_reset_to_default()  (local)
    189  hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {   in mlx5_vxlan_reset_to_default()
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| A D | qos.c |
    400  int bkt, err;   in mlx5e_qos_open_queues()  (local)
    409  hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {   in mlx5e_qos_open_queues()
    425  int bkt;   in mlx5e_qos_activate_queues()  (local)
    427  hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {   in mlx5e_qos_activate_queues()
    723  int bkt;   in mlx5e_sw_node_find_by_qid()  (local)
    725  hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)   in mlx5e_sw_node_find_by_qid()
    932  int bkt;   in mlx5e_qos_update_children()  (local)
    934  hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {   in mlx5e_qos_update_children()
|
| /linux/net/netlabel/ |
| A D | netlabel_domainhash.c |
    140  u32 bkt;   in netlbl_domhsh_search()  (local)
    145  bkt = netlbl_domhsh_hash(domain);   in netlbl_domhsh_search()
    146  bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];   in netlbl_domhsh_search()
    436  u32 bkt = netlbl_domhsh_hash(entry->domain);   in netlbl_domhsh_add()  (local)
    438  &rcu_dereference(netlbl_domhsh)->tbl[bkt]);   in netlbl_domhsh_add()
|
| A D | netlabel_unlabeled.c |
    204  u32 bkt;   in netlbl_unlhsh_search_iface()  (local)
    208  bkt = netlbl_unlhsh_hash(ifindex);   in netlbl_unlhsh_search_iface()
    209  bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];   in netlbl_unlhsh_search_iface()
    314  u32 bkt;   in netlbl_unlhsh_add_iface()  (local)
    328  bkt = netlbl_unlhsh_hash(ifindex);   in netlbl_unlhsh_add_iface()
    332  &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]);   in netlbl_unlhsh_add_iface()
|
| /linux/tools/bpf/bpftool/ |
| A D | pids.c |
    177  size_t bkt;   in delete_obj_refs_table()  (local)
    182  hashmap__for_each_entry(map, entry, bkt) {   in delete_obj_refs_table()
|
| /linux/drivers/net/ethernet/rocker/ |
| A D | rocker_ofdpa.c |
    1952  int bkt;   in ofdpa_port_fdb_flush()  (local)
    1963  hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) {   in ofdpa_port_fdb_flush()
    1993  int bkt;   in ofdpa_fdb_cleanup()  (local)
    1997  hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) {   in ofdpa_fdb_cleanup()
    2386  int bkt;   in ofdpa_fini()  (local)
    2392  hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry)   in ofdpa_fini()
    2397  hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry)   in ofdpa_fini()
    2402  hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry)   in ofdpa_fini()
    2407  hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt,   in ofdpa_fini()
    2413  hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry)   in ofdpa_fini()
    [all …]
|