/linux/lib/
xarray.c
    149  xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);  in xas_set_offset()
   1161  xas->xa_node = xa_parent(xas->xa, xas->xa_node);  in __xas_prev()
   1167  entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);  in __xas_prev()
   1200  xas->xa_node = xa_parent(xas->xa, xas->xa_node);  in __xas_next()
   1206  entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);  in __xas_next()
   1258  xas->xa_node = xa_parent(xas->xa, xas->xa_node);  in xas_find()
   1262  entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);  in xas_find()
   1328  xas->xa_offset = xas->xa_index >> xas->xa_node->shift;  in xas_find_marked()
   1334  xas->xa_node = xa_parent(xas->xa, xas->xa_node);  in xas_find_marked()
   1361  entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);  in xas_find_marked()
   [all …]

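These hits are the internals of the tree walk (parent links and slot loads). The advanced-API pattern they serve is small; this is essentially how xa_load() itself is built. A sketch, assuming <linux/xarray.h> and a caller-supplied array and index:

    void *lookup(struct xarray *xa, unsigned long index)
    {
    	XA_STATE(xas, xa, index);
    	void *entry;

    	rcu_read_lock();
    	do {
    		entry = xas_load(&xas);		/* walk down to the index */
    	} while (xas_retry(&xas, entry));	/* raced with a reshape: restart */
    	rcu_read_unlock();

    	return entry;
    }
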
test_xarray.c
     78  xas_lock(&xas);  in xa_store_order()
     80  xas_unlock(&xas);  in xa_store_order()
    116  xas_reset(&xas);  in check_xas_retry()
    131  xas_lock(&xas);  in check_xas_retry()
    132  xas_set(&xas, 0);  in check_xas_retry()
    139  xas_store(&xas, xa_mk_index(xas.xa_index));  in check_xas_retry()
    263  xas_lock(&xas);  in check_xa_mark_2()
    264  xas_load(&xas);  in check_xa_mark_2()
    281  xas_lock(&xas);  in check_xa_mark_2()
    339  xas_lock(&xas);  in check_xa_shrink()
    [all …]

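The tests above exercise the basic locked-store shape; note that xa_mk_index() in the hits is a helper local to the test suite, while the core API's value constructor is xa_mk_value(). A minimal sketch of the shape, ignoring allocation failure (which the idr.c pattern below handles); "xa" and "index" are placeholders:

    XA_STATE(xas, &xa, index);	/* assumes a 'struct xarray xa' */

    xas_lock(&xas);
    xas_store(&xas, xa_mk_value(index));	/* a tagged integer, not a pointer */
    xas_unlock(&xas);
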
idr.c
    395  xas_lock_irqsave(&xas, flags);  in ida_alloc_range()
    422  xas_store(&xas, bitmap);  in ida_alloc_range()
    423  if (xas_error(&xas)) {  in ida_alloc_range()
    450  xas_store(&xas, bitmap);  in ida_alloc_range()
    454  if (xas_nomem(&xas, gfp)) {  in ida_alloc_range()
    461  if (xas_error(&xas))  in ida_alloc_range()
    462  return xas_error(&xas);  in ida_alloc_range()
    497  bitmap = xas_load(&xas);  in ida_free()
    517  xas_store(&xas, NULL);  in ida_free()
    542  XA_STATE(xas, &ida->xa, 0);  in ida_destroy()
    [all …]

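ida_alloc_range() shows the canonical allocation-retry idiom: the store runs under the spinlock, and if it failed for lack of memory, xas_nomem() allocates a node outside the lock and asks for another pass. A sketch with placeholder names:

    int store_item(struct xarray *xa, unsigned long index, void *item)
    {
    	XA_STATE(xas, xa, index);
    	unsigned long flags;

    	do {
    		xas_lock_irqsave(&xas, flags);
    		xas_store(&xas, item);
    		xas_unlock_irqrestore(&xas, flags);
    	} while (xas_nomem(&xas, GFP_KERNEL));	/* preallocate, then retry */

    	return xas_error(&xas);	/* 0, or the sticky error from the walk */
    }
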
iov_iter.c
     78  XA_STATE(xas, i->xarray, index);                              \
     81  xas_for_each(&xas, head, ULONG_MAX) {                         \
     83  if (xas_retry(&xas, head))                                    \
   1407  XA_STATE(xas, xa, index);  in iter_xarray_populate_pages()
   1412  for (page = xas_load(&xas); page; page = xas_next(&xas)) {  in iter_xarray_populate_pages()
   1413  if (xas_retry(&xas, page))  in iter_xarray_populate_pages()
   1417  if (unlikely(page != xas_reload(&xas))) {  in iter_xarray_populate_pages()
   1418  xas_reset(&xas);  in iter_xarray_populate_pages()
   1422  pages[ret] = find_subpage(page, xas.xa_index);  in iter_xarray_populate_pages()

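iter_xarray_populate_pages() is the standard lockless-lookup loop: skip retry entries, pin the object, then confirm with xas_reload() that the slot still points at the same page. A hedged sketch; "xa" and "start" are placeholders, and get_page_unless_zero() stands in for whatever reference the real caller takes:

    XA_STATE(xas, &xa, start);
    struct page *page;

    rcu_read_lock();
    xas_for_each(&xas, page, ULONG_MAX) {
    	if (xas_retry(&xas, page))
    		continue;		/* tree changed shape; walk again */
    	if (!get_page_unless_zero(page))
    		continue;		/* page is being freed; skip it */
    	if (unlikely(page != xas_reload(&xas))) {
    		put_page(page);		/* slot was reused under us */
    		xas_reset(&xas);	/* forget the cached node, restart here */
    		continue;
    	}
    	/* ... the page reference is now stable ... */
    	put_page(page);
    }
    rcu_read_unlock();
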
/linux/include/linux/ |
xarray.h
   1371  #define xas_trylock(xas)	xa_trylock((xas)->xa)  argument
   1372  #define xas_lock(xas)		xa_lock((xas)->xa)  argument
   1373  #define xas_unlock(xas)	xa_unlock((xas)->xa)  argument
   1374  #define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)  argument
   1375  #define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)  argument
   1376  #define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)  argument
   1377  #define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)  argument
   1438  return xas_valid(xas) && xas->xa_node;  in xas_is_node()
   1633  xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))  in xas_next_entry()
   1793  return xa_entry(xas->xa, node, xas->xa_offset);  in xas_prev()
   [all …]

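As the definitions show, each xas_lock variant is just the corresponding xa_lock on the underlying array, so the usual rule carries over: pick the variant matching the most restrictive context that ever touches the array. A tiny sketch for a hypothetical array that is also modified from interrupt context:

    XA_STATE(xas, &xa, index);	/* 'xa', 'index', 'ptr' are placeholders */

    xas_lock_irq(&xas);		/* same as xa_lock_irq(xas.xa) */
    xas_store(&xas, ptr);
    xas_unlock_irq(&xas);
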
pagemap.h
   1111  XA_STATE(xas, &rac->mapping->i_pages, 0);  in __readahead_batch()
   1119  xas_set(&xas, rac->_index);  in __readahead_batch()
   1121  xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {  in __readahead_batch()
   1122  if (xas_retry(&xas, page))  in __readahead_batch()
   1136  xas_set(&xas, rac->_index + rac->_batch_count);  in __readahead_batch()

/linux/fs/ |
dax.c
    170  key->xa = xas->xa;  in dax_entry_waitqueue()
    245  xas_reset(xas);  in get_unlocked_entry()
    248  xas_lock_irq(xas);  in get_unlocked_entry()
    295  xas_reset(xas);  in dax_unlock_entry()
    296  xas_lock_irq(xas);  in dax_unlock_entry()
    496  xas_lock_irq(xas);  in grab_mapping_entry()
    533  xas_reset(xas);  in grab_mapping_entry()
    643  xas_pause(&xas);  in dax_layout_busy_page_range()
    763  xas_reset(xas);  in dax_insert_entry()
    948  xas_reset(xas);  in dax_writeback_one()
    [all …]

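The repeated xas_reset()/xas_lock_irq() pairs in dax are the sleep-and-revalidate idiom: once the lock is dropped to wait, the node cached in the xa_state may be stale, so the walk is reset before relocking and looking again. A rough sketch only; entry_is_locked() and wait_on_entry() are hypothetical helpers standing in for dax's wait machinery:

    /* Called with xas_lock_irq() held; returns with it held. */
    static void *get_unlocked(struct xa_state *xas)
    {
    	void *entry;

    	for (;;) {
    		entry = xas_load(xas);
    		if (!entry || !entry_is_locked(entry))	/* hypothetical test */
    			return entry;

    		xas_unlock_irq(xas);
    		wait_on_entry(entry);	/* hypothetical sleep */
    		xas_reset(xas);		/* cached node may be gone by now */
    		xas_lock_irq(xas);
    	}
    }
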
/linux/tools/testing/radix-tree/ |
multiorder.c
     22  xas_lock(&xas);  in item_insert_order()
     23  xas_store(&xas, item);  in item_insert_order()
     24  xas_unlock(&xas);  in item_insert_order()
     27  if (!xas_error(&xas))  in item_insert_order()
     31  return xas_error(&xas);  in item_insert_order()
     36  XA_STATE(xas, xa, 0);  in multiorder_iteration()
     56  xas_set(&xas, j);  in multiorder_iteration()
     76  XA_STATE(xas, xa, 0);  in multiorder_tagged_iteration()
    107  xas_set(&xas, j);  in multiorder_tagged_iteration()
    135  xas_set(&xas, j);  in multiorder_tagged_iteration()
    [all …]

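item_insert_order() is the multi-order store pattern: XA_STATE_ORDER sets up a state whose single store covers 2^order indices. A close sketch (multi-order entries need CONFIG_XARRAY_MULTI):

    int insert_order(struct xarray *xa, unsigned long index,
    		 unsigned int order, void *item)
    {
    	XA_STATE_ORDER(xas, xa, index, order);	/* index rounds down to order */

    	do {
    		xas_lock(&xas);
    		xas_store(&xas, item);	/* one entry spans 2^order slots */
    		xas_unlock(&xas);
    	} while (xas_nomem(&xas, GFP_KERNEL));

    	return xas_error(&xas);
    }
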
iteration_check.c
     23  XA_STATE(xas, xa, index);  in my_item_insert()
     28  xas_lock(&xas);  in my_item_insert()
     34  xas_store(&xas, item);  in my_item_insert()
     35  xas_set_mark(&xas, TAG);  in my_item_insert()
     38  xas_unlock(&xas);  in my_item_insert()
     69  XA_STATE(xas, &array, 0);  in tagged_iteration_fn()
     75  xas_set(&xas, 0);  in tagged_iteration_fn()
     82  xas_pause(&xas);  in tagged_iteration_fn()
    102  XA_STATE(xas, &array, 0);  in untagged_iteration_fn()
    108  xas_set(&xas, 0);  in untagged_iteration_fn()
    [all …]

test.c
    176  XA_STATE(xas, xa, start);  in tag_tagged_items()
    183  xas_lock_irq(&xas);  in tag_tagged_items()
    185  xas_set_mark(&xas, thentag);  in tag_tagged_items()
    189  xas_pause(&xas);  in tag_tagged_items()
    190  xas_unlock_irq(&xas);  in tag_tagged_items()
    192  xas_lock_irq(&xas);  in tag_tagged_items()
    194  xas_unlock_irq(&xas);  in tag_tagged_items()
    257  XA_STATE(xas, xa, 0);  in item_kill_tree()
    260  xas_for_each(&xas, entry, ULONG_MAX) {  in item_kill_tree()
    262  item_free(entry, xas.xa_index);  in item_kill_tree()
    [all …]

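tag_tagged_items() demonstrates bounding lock hold time: every so many entries, xas_pause() parks the walk at a safe point so the lock can be dropped and retaken without losing the position. A sketch of the shape; BATCH, "start" and "end" are placeholders, and cond_resched() stands in for whatever yield the context allows (the userspace radix-tree harness has its own):

    XA_STATE(xas, &xa, start);
    void *entry;
    unsigned int i = 0;

    xas_lock_irq(&xas);
    xas_for_each_marked(&xas, entry, end, XA_MARK_0) {
    	xas_set_mark(&xas, XA_MARK_1);	/* copy mark 0 onto mark 1 */
    	if (++i % BATCH)
    		continue;
    	xas_pause(&xas);	/* make it safe to drop the lock mid-walk */
    	xas_unlock_irq(&xas);
    	cond_resched();
    	xas_lock_irq(&xas);
    }
    xas_unlock_irq(&xas);
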
iteration_check_2.c
     15  XA_STATE(xas, arg, 0);  in iterator()
     21  xas_set(&xas, 0);  in iterator()
     23  xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)  in iterator()
     26  assert(xas.xa_index >= 100);  in iterator()

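This regression test pairs with the mark-setting in iteration_check.c above: one side tags entries as it stores them, the other visits only tagged ones. Both halves in one hedged sketch; "array", "index" and "item" are placeholders:

    XA_STATE(xas, &array, index);
    void *entry;

    /* producer: store and tag in one locked section */
    xas_lock(&xas);
    xas_store(&xas, item);
    xas_set_mark(&xas, XA_MARK_0);
    xas_unlock(&xas);

    /* consumer: visit only entries with XA_MARK_0 set */
    rcu_read_lock();
    xas_set(&xas, 0);
    xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
    	/* ... */
    }
    rcu_read_unlock();
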
regression1.c
     82  XA_STATE(xas, &mt_tree, start);  in find_get_pages()
     87  xas_for_each(&xas, page, ULONG_MAX) {  in find_get_pages()
     88  if (xas_retry(&xas, page))  in find_get_pages()
     99  if (unlikely(page != xas_reload(&xas)))  in find_get_pages()
    108  xas_reset(&xas);  in find_get_pages()

/linux/mm/ |
memfd.c
     38  xas_lock_irq(xas);  in memfd_tag_pins()
     49  xas_pause(xas);  in memfd_tag_pins()
     50  xas_unlock_irq(xas);  in memfd_tag_pins()
     52  xas_lock_irq(xas);  in memfd_tag_pins()
     54  xas_unlock_irq(xas);  in memfd_tag_pins()
     72  memfd_tag_pins(&xas);  in memfd_wait_for_pins()
     86  xas_set(&xas, 0);  in memfd_wait_for_pins()
     87  xas_lock_irq(&xas);  in memfd_wait_for_pins()
    109  xas_pause(&xas);  in memfd_wait_for_pins()
    112  xas_lock_irq(&xas);  in memfd_wait_for_pins()
    [all …]

filemap.c
    916  unsigned int order = xa_get_order(xas.xa, xas.xa_index);  in __filemap_add_folio()
    920  xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),  in __filemap_add_folio()
    935  order = xa_get_order(xas.xa, xas.xa_index);  in __filemap_add_folio()
   1842  xas_reset(&xas);  in mapping_get_entry()
   2014  xas_reset(xas);  in find_get_entry()
   2223  for (page = xas_load(&xas); page; page = xas_next(&xas)) {  in find_get_pages_contig()
   2247  xas_reset(&xas);  in find_get_pages_contig()
   2350  for (head = xas_load(&xas); head; head = xas_next(&xas)) {  in filemap_get_read_batch()
   2369  xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;  in filemap_get_read_batch()
   2808  xas_pause(xas);  in page_seek_hole_data()
   [all …]

khugepaged.c
   1672  xas_lock_irq(&xas);  in collapse_file()
   1674  if (!xas_error(&xas))  in collapse_file()
   1676  xas_unlock_irq(&xas);  in collapse_file()
   1695  xas_set(&xas, start);  in collapse_file()
   1834  xas_lock_irq(&xas);  in collapse_file()
   1835  xas_set(&xas, index);  in collapse_file()
   1896  xas_unlock_irq(&xas);  in collapse_file()
   1945  xas_lock_irq(&xas);  in collapse_file()
   1970  xas_pause(&xas);  in collapse_file()
   1974  xas_lock_irq(&xas);  in collapse_file()
   [all …]

swap_state.c
    117  xas_lock_irq(&xas);  in add_to_swap_cache()
    118  xas_create_range(&xas);  in add_to_swap_cache()
    119  if (xas_error(&xas))  in add_to_swap_cache()
    123  old = xas_load(&xas);  in add_to_swap_cache()
    129  xas_store(&xas, page);  in add_to_swap_cache()
    130  xas_next(&xas);  in add_to_swap_cache()
    137  xas_unlock_irq(&xas);  in add_to_swap_cache()
    140  if (!xas_error(&xas))  in add_to_swap_cache()
    145  return xas_error(&xas);  in add_to_swap_cache()
    168  xas_next(&xas);  in __delete_from_swap_cache()
    [all …]

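add_to_swap_cache() stores one compound page into every slot it spans: xas_create_range() builds all the needed nodes under the lock, then the head page is stored slot by slot with xas_next(). A condensed sketch of the hits above; "xa", "idx", "order" and "page" are placeholders for the swap-cache state:

    XA_STATE_ORDER(xas, &xa, idx, order);
    unsigned long i, nr = 1UL << order;

    do {
    	xas_lock_irq(&xas);
    	xas_create_range(&xas);		/* build nodes; restores xa_index */
    	if (!xas_error(&xas)) {
    		for (i = 0; i < nr; i++) {
    			xas_store(&xas, page);	/* same page in each slot */
    			xas_next(&xas);
    		}
    	}
    	xas_unlock_irq(&xas);
    } while (xas_nomem(&xas, GFP_KERNEL));
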
page-writeback.c
   2127  XA_STATE(xas, &mapping->i_pages, start);  in tag_pages_for_writeback()
   2131  xas_lock_irq(&xas);  in tag_pages_for_writeback()
   2133  xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);  in tag_pages_for_writeback()
   2137  xas_pause(&xas);  in tag_pages_for_writeback()
   2138  xas_unlock_irq(&xas);  in tag_pages_for_writeback()
   2140  xas_lock_irq(&xas);  in tag_pages_for_writeback()
   2142  xas_unlock_irq(&xas);  in tag_pages_for_writeback()
   2859  xas_lock_irqsave(&xas, flags);  in __folio_start_writeback()
   2860  xas_load(&xas);  in __folio_start_writeback()
   2886  xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);  in __folio_start_writeback()
   [all …]

shmem.c
    418  item = xas_load(&xas);  in shmem_replace_entry()
    725  xas_lock_irq(&xas);  in shmem_add_to_page_cache()
    729  xas_create_range(&xas);  in shmem_add_to_page_cache()
    730  if (xas_error(&xas))  in shmem_add_to_page_cache()
    733  xas_store(&xas, page);  in shmem_add_to_page_cache()
    735  xas_next(&xas);  in shmem_add_to_page_cache()
    746  xas_unlock_irq(&xas);  in shmem_add_to_page_cache()
    749  if (xas_error(&xas)) {  in shmem_add_to_page_cache()
    750  error = xas_error(&xas);  in shmem_add_to_page_cache()
    819  xas_pause(&xas);  in shmem_partial_swap_usage()
    [all …]

migrate.c
    406  xas_lock_irq(&xas);  in folio_migrate_mapping()
    408  xas_unlock_irq(&xas);  in folio_migrate_mapping()
    436  xas_store(&xas, newfolio);  in folio_migrate_mapping()
    441  xas_next(&xas);  in folio_migrate_mapping()
    442  xas_store(&xas, newfolio);  in folio_migrate_mapping()
    453  xas_unlock(&xas);  in folio_migrate_mapping()
    509  xas_lock_irq(&xas);  in migrate_huge_page_move_mapping()
    512  xas_unlock_irq(&xas);  in migrate_huge_page_move_mapping()
    517  xas_unlock_irq(&xas);  in migrate_huge_page_move_mapping()
    526  xas_store(&xas, newpage);  in migrate_huge_page_move_mapping()
    [all …]

truncate.c
     37  XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
     39  xas_set_update(&xas, workingset_update_node);  in __clear_shadow_entry()
     40  if (xas_load(&xas) != entry)  in __clear_shadow_entry()
     42  xas_store(&xas, NULL);  in __clear_shadow_entry()

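__clear_shadow_entry() is a compare-and-erase: clear the slot only if it still holds the expected shadow entry, with xas_set_update() letting the workingset code account for node changes. Nearly the whole function is visible in the hits; reassembled as a sketch with the elided control flow filled in (the caller holds the i_pages lock):

    static void __clear_shadow_entry(struct address_space *mapping,
    				 pgoff_t index, void *entry)
    {
    	XA_STATE(xas, &mapping->i_pages, index);

    	xas_set_update(&xas, workingset_update_node);
    	if (xas_load(&xas) != entry)
    		return;			/* slot changed; nothing to do */
    	xas_store(&xas, NULL);		/* erase; empty nodes get freed */
    }
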
/linux/drivers/infiniband/core/ |
ib_core_uverbs.c
    268  XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);  in rdma_user_mmap_entry_insert_range()
    294  xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);  in rdma_user_mmap_entry_insert_range()
    295  if (xas.xa_node == XAS_RESTART)  in rdma_user_mmap_entry_insert_range()
    298  xa_first = xas.xa_index;  in rdma_user_mmap_entry_insert_range()
    308  xas_next_entry(&xas, xa_last - 1);  in rdma_user_mmap_entry_insert_range()
    309  if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)  in rdma_user_mmap_entry_insert_range()

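The manual XA_FREE_MARK walk above exists because rdma needs a run of consecutive free indices. When a single free slot is enough, the normal-API alternative is xa_alloc() on an allocating array; a sketch with hypothetical names:

    DEFINE_XARRAY_ALLOC(my_xa);	/* XA_FLAGS_ALLOC: the array tracks free slots */

    int track(void *entry)
    {
    	u32 id;
    	int err;

    	err = xa_alloc(&my_xa, &id, entry, xa_limit_32b, GFP_KERNEL);
    	if (err)
    		return err;		/* -ENOMEM or -EBUSY (range full) */
    	pr_debug("stored at index %u\n", id);
    	return 0;
    }
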
restrack.c
    116  XA_STATE(xas, &rt->xa, 0);  in rdma_restrack_count()
    120  xas_for_each(&xas, e, U32_MAX)  in rdma_restrack_count()

/linux/arch/x86/kernel/cpu/sgx/ |
encl.c
    226  XA_STATE(xas, &encl->page_array, PFN_DOWN(start));  in sgx_encl_may_map()
    236  xas_lock(&xas);  in sgx_encl_may_map()
    237  xas_for_each(&xas, page, PFN_DOWN(end - 1)) {  in sgx_encl_may_map()
    245  xas_pause(&xas);  in sgx_encl_may_map()
    246  xas_unlock(&xas);  in sgx_encl_may_map()
    252  xas_lock(&xas);  in sgx_encl_may_map()
    255  xas_unlock(&xas);  in sgx_encl_may_map()

/linux/fs/afs/ |
write.c
    250  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);  in afs_pages_written_back()
    258  xas_for_each(&xas, folio, end) {  in afs_pages_written_back()
    433  XA_STATE(xas, &mapping->i_pages, index);  in afs_extend_writeback()
    443  xas_for_each(&xas, folio, ULONG_MAX) {  in afs_extend_writeback()
    445  if (xas_retry(&xas, folio))  in afs_extend_writeback()
    453  xas_reset(&xas);  in afs_extend_writeback()
    458  if (unlikely(folio != xas_reload(&xas))) {  in afs_extend_writeback()
    498  xas_pause(&xas);  in afs_extend_writeback()

/linux/drivers/target/ |
target_core_user.c
    508  XA_STATE(xas, &udev->data_pages, 0);  in tcmu_get_empty_block()
    519  xas_set(&xas, dpi);  in tcmu_get_empty_block()
   1666  xas_lock(&xas);  in tcmu_blocks_release()
   1668  xas_store(&xas, NULL);  in tcmu_blocks_release()
   1672  xas_unlock(&xas);  in tcmu_blocks_release()
   3028  xas_lock(&xas);  in tcmu_free_kept_buf_store()
   3029  cmd = xas_load(&xas);  in tcmu_free_kept_buf_store()
   3033  xas_unlock(&xas);  in tcmu_free_kept_buf_store()
   3040  xas_unlock(&xas);  in tcmu_free_kept_buf_store()
   3043  xas_store(&xas, NULL);  in tcmu_free_kept_buf_store()
   [all …]
