/linux/lib/
  test_xarray.c
     63  XA_BUG_ON(xa, xa_load(xa, index) != NULL);  in xa_erase_index()
    153  void *entry = xa_load(xa, j);  in check_xa_load()
    164  void *entry = xa_load(xa, j);  in check_xa_load()
    344  XA_BUG_ON(xa, xa_load(xa, 1) != NULL);  in check_xa_shrink()
    397  XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);  in check_insert()
    436  XA_BUG_ON(xa, xa_load(xa, 12345678));  in check_reserve()
    541  XA_BUG_ON(xa, xa_load(xa, max) != NULL);  in check_multi_store_1()
    607  XA_BUG_ON(xa, xa_load(xa, 2) != NULL);  in check_multi_store()
    617  XA_BUG_ON(xa, xa_load(xa, 2) != NULL);  in check_multi_store()
    629  XA_BUG_ON(xa, xa_load(xa, 4) != NULL);  in check_multi_store()
    [all …]
  test_hmm.c
    328  entry = xa_load(&dmirror->pt, pfn);  in dmirror_do_read()
    397  entry = xa_load(&dmirror->pt, pfn);  in dmirror_do_write()
    631  entry = xa_load(&dmirror->pt, pfn);  in dmirror_check_atomic()
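A minimal sketch, not taken from the tree, of the round trip the lib/test_xarray.c checks above exercise: a stored entry must be returned by xa_load(), and after xa_erase() the same index must read back as NULL. The names sketch_xa and sketch_round_trip are illustrative only.

#include <linux/xarray.h>
#include <linux/bug.h>

static DEFINE_XARRAY(sketch_xa);        /* hypothetical XArray used only for this sketch */

static int sketch_round_trip(unsigned long index, void *item)
{
        void *old;

        old = xa_store(&sketch_xa, index, item, GFP_KERNEL);
        if (xa_is_err(old))
                return xa_err(old);     /* e.g. allocation failure */

        WARN_ON(xa_load(&sketch_xa, index) != item);    /* entry is now visible */

        xa_erase(&sketch_xa, index);
        WARN_ON(xa_load(&sketch_xa, index) != NULL);    /* slot reads as empty again */

        return 0;
}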
/linux/drivers/iommu/
  ioasid.c
    275  ioasid_data = xa_load(&active_allocator->xa, ioasid);  in ioasid_set_data()
    358  ioasid_data = xa_load(&active_allocator->xa, ioasid);  in ioasid_get()
    382  ioasid_data = xa_load(&active_allocator->xa, ioasid);  in ioasid_put()
    428  ioasid_data = xa_load(&idata->xa, ioasid);  in ioasid_find()
/linux/Documentation/translations/zh_CN/core-api/
  xarray.rst
     62  You can then set entries using xa_store() and get entries using xa_load(). xa_store() will overwrite any …
    152  * xa_load()
    194  If you want to protect the data structures you store in the XArray by using the lock, you can call xa_lock() before calling xa_load(), then …
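A brief sketch of the locking pattern this documentation excerpt describes: take the XArray's lock around xa_load() and pin the object before dropping the lock. It assumes a hypothetical refcounted object type; struct my_obj and my_obj_lookup() are illustrative names, not taken from the files listed here.

#include <linux/xarray.h>
#include <linux/kref.h>

struct my_obj {                         /* hypothetical object stored in the XArray */
        struct kref ref;
        /* ... payload ... */
};

static struct my_obj *my_obj_lookup(struct xarray *xa, unsigned long id)
{
        struct my_obj *obj;

        xa_lock(xa);                    /* the XArray's internal spinlock */
        obj = xa_load(xa, id);
        if (obj)
                kref_get(&obj->ref);    /* pin the object before unlocking */
        xa_unlock(xa);

        return obj;                     /* caller drops the reference when done */
}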
/linux/drivers/gpu/drm/tegra/
  uapi.c
    141  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_close()
    169  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_map()
    260  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_unmap()
  submit.c
    123  mapping = xa_load(&context->mappings, id);  in tegra_drm_mapping_get()
    314  sp = xa_load(syncpoints, args->syncpt.id);  in submit_get_syncpt()
    497  context = xa_load(&fpriv->contexts, args->context);  in tegra_drm_ioctl_channel_submit()
/linux/drivers/net/ethernet/mellanox/mlx5/core/
  pagealloc.c
     83  root = xa_load(&dev->priv.page_root_xa, function);  in page_root_per_function()
    154  root = xa_load(&dev->priv.page_root_xa, function);  in find_fw_page()
    235  root = xa_load(&dev->priv.page_root_xa, fwp->function);  in free_fwp()
    405  root = xa_load(&dev->priv.page_root_xa, function);  in release_all_pages()
    465  root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));  in reclaim_pages_cmd()
/linux/drivers/infiniband/hw/cxgb4/
  ev.c
    127  qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));  in c4iw_ev_dispatch()
    228  chp = xa_load(&dev->cqs, qid);  in c4iw_ev_handler()
/linux/mm/
  readahead.c
    199  struct page *page = xa_load(&mapping->i_pages, index + i);  in page_cache_ra_unbounded()
    674  struct page *page = xa_load(&mapping->i_pages, index);  in readahead_expand()
    697  struct page *page = xa_load(&mapping->i_pages, index);  in readahead_expand()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
  mapping.c
    116  mi = xa_load(&ctx->xarray, index);  in mapping_remove()
    139  mi = xa_load(&ctx->xarray, index);  in mapping_find()
/linux/arch/arm64/mm/
  mteswap.c
     51  void *tags = xa_load(&mte_pages, entry.val);  in mte_restore_tags()
/linux/drivers/infiniband/hw/mlx5/
  devx.c
    1356  event = xa_load(&dev->devx_event_table.event_xa,  in devx_cleanup_subscription()
    1360  xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);  in devx_cleanup_subscription()
    1421  event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);  in devx_cq_comp()
    1425  obj_event = xa_load(&event->object_ids, obj_id);  in devx_cq_comp()
    1834  event = xa_load(&devx_event_table->event_xa, key_level1);  in subscribe_event_xa_dealloc()
    1837  xa_val_level2 = xa_load(&event->object_ids,  in subscribe_event_xa_dealloc()
    1856  event = xa_load(&devx_event_table->event_xa, key_level1);  in subscribe_event_xa_alloc()
    1878  obj_event = xa_load(&event->object_ids, key_level2);  in subscribe_event_xa_alloc()
    2108  event = xa_load(&devx_event_table->event_xa,  in UVERBS_HANDLER()
    2118  obj_event = xa_load(&event->object_ids, obj_id);  in UVERBS_HANDLER()
    [all …]
/linux/drivers/tty/serial/
  liteuart.c
    339  uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);  in liteuart_console_write()
    356  uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);  in liteuart_console_setup()
/linux/drivers/infiniband/core/
  device.c
     312  device = xa_load(&devices, index);  in ib_device_get_by_index()
     431  struct ib_client *client = xa_load(&clients, index);  in ib_device_rename()
     665  if (xa_load(&devices, device->index) == device)  in ib_dealloc_device()
     760  client_data = xa_load(&device->client_data, client_id);  in remove_client_context()
     762  client = xa_load(&clients, client_id);  in remove_client_context()
     943  cdev = xa_load(&device->compat_devs, rnet->id);  in add_one_compat_dev()
    1756  if (xa_load(&clients, highest_client_id - 1))  in remove_client_id()
    1881  struct ib_client *client = xa_load(&clients, index);  in __ib_get_client_nl_info()
    2549  struct ib_client *client = xa_load(&clients, index);  in ib_get_net_dev_by_params()
  ib_core_uverbs.c
     127  entry = xa_load(&ucontext->mmap_xa, pgoff);  in rdma_user_mmap_entry_get_pgoff()
/linux/block/partitions/
  core.c
    350  if (xa_load(&disk->part_tbl, partno))  in add_partition()
    486  part = xa_load(&disk->part_tbl, partno);  in bdev_del_partition()
    508  part = xa_load(&disk->part_tbl, partno);  in bdev_resize_partition()
/linux/drivers/infiniband/hw/mlx4/
  cm.c
    275  ent = xa_load(&sriov->pv_id_table, *pv_cm_id);  in id_map_get()
    366  item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);  in alloc_rej_tmout()
    411  item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);  in lookup_rej_tmout_slave()
/linux/fs/ksmbd/mgmt/
  tree_connect.c
     97  return xa_load(&sess->tree_conns, id);  in ksmbd_tree_conn_lookup()
/linux/drivers/gpu/drm/lima/
  lima_ctx.c
     72  ctx = xa_load(&mgr->handles, id);  in lima_ctx_get()
/linux/drivers/base/
  memory.c
     580  mem = xa_load(&memory_blocks, block_id);  in find_memory_block_by_id()
    1029  group = xa_load(&memory_groups, mgid);  in memory_group_unregister()
    1048  return xa_load(&memory_groups, mgid);  in memory_group_find_by_id()
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/
  dr_domain.c
     41  recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);  in mlx5dr_domain_get_recalc_cs_ft_addr()
    225  vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);  in mlx5dr_domain_get_vport_cap()
/linux/drivers/infiniband/hw/hns/
  hns_roce_cq.c
    460  hr_cq = xa_load(&hr_dev->cq_table.array,  in hns_roce_cq_completion()
    481  hr_cq = xa_load(&hr_dev->cq_table.array,  in hns_roce_cq_event()
/linux/fs/erofs/
  utils.c
     62  grp = xa_load(&sbi->managed_pslots, index);  in erofs_find_workgroup()
/linux/drivers/dma-buf/
  dma-heap.c
     82  heap = xa_load(&dma_heap_minors, iminor(inode));  in dma_heap_open()
/linux/arch/x86/kernel/cpu/sgx/
  virt.c
     45  epc_page = xa_load(&vepc->page_array, index);  in __sgx_vepc_fault()