Lines matching refs: xas in mm/filemap.c
(Each entry gives the source line number, the matching statement, and the enclosing function; "argument" marks lines where xas is a function parameter.)
126 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
129 mapping_set_update(&xas, mapping); in page_cache_delete()
133 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
141 xas_store(&xas, shadow); in page_cache_delete()
142 xas_init_marks(&xas); in page_cache_delete()
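The page_cache_delete() lines show the store side of the advanced XArray API: an xa_state is declared on the stack at the page's index, mapping_set_update() hooks workingset node accounting, xas_set_order() widens the operation so a single store covers every index of a compound page, and the slot is overwritten with a shadow entry (or NULL) before xas_init_marks() clears the dirty/writeback marks. A minimal sketch of the same shape, assuming the caller already holds the i_pages lock (the function name is illustrative, not the kernel's):

	/* Caller holds xa_lock_irq(&mapping->i_pages). */
	static void cache_delete_sketch(struct address_space *mapping,
					struct page *page, void *shadow)
	{
		XA_STATE(xas, &mapping->i_pages, page->index);

		/* One multi-index store covers all 2^order slots. */
		xas_set_order(&xas, page->index, compound_order(page));
		xas_store(&xas, shadow);	/* shadow entry, or NULL */
		xas_init_marks(&xas);		/* drop DIRTY/TOWRITE marks */
	}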
293 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
298 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
299 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
321 if (page->index == xas.xa_index) in page_cache_delete_batch()
330 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
332 xas_store(&xas, NULL); in page_cache_delete_batch()
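page_cache_delete_batch() turns the same store into a range walk: xas_for_each() visits every populated slot up to ULONG_MAX, value (shadow) entries are skipped, NULL is stored over each index, and the pagevec cursor advances only once the walk reaches the last subpage of a compound page. A condensed sketch of the loop (locking and statistics elided):

	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
	struct page *page;
	int i = 0;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, page, ULONG_MAX) {
		if (i >= pagevec_count(pvec))
			break;
		if (xa_is_value(page))	/* shadow/swap entry: leave it */
			continue;
		if (page->index == xas.xa_index)
			page->mapping = NULL;	/* detach the head page */
		/* Move to the next pagevec slot only after the last
		 * subpage of this compound page has been visited. */
		if (page->index + compound_nr(page) - 1 == xas.xa_index)
			i++;
		xas_store(&xas, NULL);
	}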
493 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
501 page = xas_find(&xas, max); in filemap_range_has_page()
502 if (xas_retry(&xas, page)) in filemap_range_has_page()
652 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
660 xas_for_each(&xas, page, max) { in filemap_range_has_writeback()
661 if (xas_retry(&xas, page)) in filemap_range_has_writeback()
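filemap_range_has_page() and filemap_range_has_writeback() are read-only probes: under rcu_read_lock() they walk with xas_find() or xas_for_each(), restart on the retry entries a racing split leaves behind (xas_retry()), and treat value entries as misses. No page reference is taken because the result is only a hint. A sketch of the probe shape (name illustrative):

	static bool range_has_page_sketch(struct address_space *mapping,
					  loff_t start_byte, loff_t end_byte)
	{
		XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
		pgoff_t max = end_byte >> PAGE_SHIFT;
		struct page *page;

		if (end_byte < start_byte)
			return false;

		rcu_read_lock();
		for (;;) {
			page = xas_find(&xas, max);
			if (xas_retry(&xas, page))	/* raced: rewalk */
				continue;
			if (xa_is_value(page))	/* shadows don't count */
				continue;
			break;		/* a page, or NULL at end of range */
		}
		rcu_read_unlock();

		return page != NULL;
	}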
857 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
869 xas_lock_irq(&xas); in replace_page_cache_page()
870 xas_store(&xas, new); in replace_page_cache_page()
882 xas_unlock_irq(&xas); in replace_page_cache_page()
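replace_page_cache_page() is the simplest locked store: take the array lock irq-safely (pagecache marks are also updated from writeback completion in interrupt context), overwrite the slot, unlock. xas_store() returns the entry it displaced. The shape, reduced to its XArray operations:

	XA_STATE(xas, &mapping->i_pages, offset);
	void *old;

	xas_lock_irq(&xas);
	old = xas_store(&xas, new);	/* returns the displaced entry */
	xas_unlock_irq(&xas);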
892 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
899 mapping_set_update(&xas, mapping); in __filemap_add_folio()
916 unsigned int order = xa_get_order(xas.xa, xas.xa_index); in __filemap_add_folio()
920 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), in __filemap_add_folio()
922 xas_lock_irq(&xas); in __filemap_add_folio()
923 xas_for_each_conflict(&xas, entry) { in __filemap_add_folio()
926 xas_set_err(&xas, -EEXIST); in __filemap_add_folio()
935 order = xa_get_order(xas.xa, xas.xa_index); in __filemap_add_folio()
937 xas_split(&xas, old, order); in __filemap_add_folio()
938 xas_reset(&xas); in __filemap_add_folio()
942 xas_store(&xas, folio); in __filemap_add_folio()
943 if (xas_error(&xas)) in __filemap_add_folio()
952 xas_unlock_irq(&xas); in __filemap_add_folio()
953 } while (xas_nomem(&xas, gfp)); in __filemap_add_folio()
955 if (xas_error(&xas)) { in __filemap_add_folio()
956 error = xas_error(&xas); in __filemap_add_folio()
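__filemap_add_folio() is the canonical allocating-store loop from the XArray documentation: the store runs under the lock, and when the array needed a node it could not allocate atomically, xas_nomem() allocates one with the caller's gfp flags and the loop retries. xas_for_each_conflict() visits entries that would collide with a multi-index store, xas_set_err() records -EEXIST, and xas_error() reports the outcome. A sketch omitting the THP split path (the xas_split_alloc()/xas_split() calls at lines 920-937):

	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;
	int error;

	do {
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			/* Anything but a shadow entry means -EEXIST. */
			if (!xa_is_value(entry))
				xas_set_err(&xas, -EEXIST);
		}
		if (!xas_error(&xas))
			xas_store(&xas, folio);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));	/* allocate a node and retry */

	error = xas_error(&xas);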
1753 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1756 void *entry = xas_next(&xas); in page_cache_next_miss()
1759 if (xas.xa_index == 0) in page_cache_next_miss()
1763 return xas.xa_index; in page_cache_next_miss()
1789 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1792 void *entry = xas_prev(&xas); in page_cache_prev_miss()
1795 if (xas.xa_index == ULONG_MAX) in page_cache_prev_miss()
1799 return xas.xa_index; in page_cache_prev_miss()
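page_cache_next_miss() and page_cache_prev_miss() step one index at a time with xas_next()/xas_prev() and stop at the first hole or non-page entry; the xa_index == 0 and xa_index == ULONG_MAX tests catch the iterator wrapping off either end of the index space. The forward scan, condensed:

	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);

		if (!entry || xa_is_value(entry))
			break;		/* hole or shadow: a "miss" */
		if (xas.xa_index == 0)
			break;		/* wrapped past ULONG_MAX */
	}
	return xas.xa_index;

The backward variant is symmetric, checking xas.xa_index == ULONG_MAX after xas_prev().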
1837 XA_STATE(xas, &mapping->i_pages, index); in mapping_get_entry()
1842 xas_reset(&xas); in mapping_get_entry()
1843 folio = xas_load(&xas); in mapping_get_entry()
1844 if (xas_retry(&xas, folio)) in mapping_get_entry()
1856 if (unlikely(folio != xas_reload(&xas))) { in mapping_get_entry()
1982 static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max, in find_get_entry() argument
1989 page = xas_find(xas, max); in find_get_entry()
1991 page = xas_find_marked(xas, max, mark); in find_get_entry()
1993 if (xas_retry(xas, page)) in find_get_entry()
2007 if (unlikely(page != xas_reload(xas))) { in find_get_entry()
2014 xas_reset(xas); in find_get_entry()
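mapping_get_entry() and the find_get_entry() helper implement the full RCU lookup protocol: find the entry, restart on xas_retry(), return value entries untouched, take a speculative reference, then confirm with xas_reload() that the slot still holds the same page; if not, drop the reference, xas_reset(), and rewalk. A sketch closely following the helper (using the page-based page_cache_get_speculative(); mapping_get_entry() above does the same with the folio equivalent):

	static struct page *get_entry_sketch(struct xa_state *xas,
					     pgoff_t max, xa_mark_t mark)
	{
		struct page *page;

	retry:
		if (mark == XA_PRESENT)
			page = xas_find(xas, max);
		else
			page = xas_find_marked(xas, max, mark);

		if (xas_retry(xas, page))
			goto retry;
		/* Shadow, swap and DAX value entries pass through. */
		if (!page || xa_is_value(page))
			return page;

		if (!page_cache_get_speculative(page))
			goto reset;

		/* Has the page moved or been split since xas_find()? */
		if (unlikely(page != xas_reload(xas))) {
			put_page(page);
			goto reset;
		}
		return page;

	reset:
		xas_reset(xas);
		goto retry;
	}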
2047 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
2053 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_get_entries()
2060 page = find_subpage(page, xas.xa_index); in find_get_entries()
2064 indices[ret] = xas.xa_index; in find_get_entries()
2099 XA_STATE(xas, &mapping->i_pages, start); in find_lock_entries()
2103 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_lock_entries()
2113 VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index), in find_lock_entries()
2116 indices[pvec->nr] = xas.xa_index; in find_lock_entries()
2129 xas_set(&xas, page->index + nr_pages); in find_lock_entries()
2130 if (xas.xa_index < nr_pages) in find_lock_entries()
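The tail of find_lock_entries() shows how to skip the remainder of a compound page once its head has been handled: xas_set() repositions the walk at the first index past the THP, and the xa_index < nr_pages test catches arithmetic wraparound when a THP ends at the top of the index space. Condensed:

	if (!xa_is_value(page) && PageTransHuge(page)) {
		unsigned int nr_pages = thp_nr_pages(page);

		/* Resume the walk just past this compound page. */
		xas_set(&xas, page->index + nr_pages);
		if (xas.xa_index < nr_pages)	/* wrapped: stop */
			break;
	}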
2164 XA_STATE(xas, &mapping->i_pages, *start); in find_get_pages_range()
2172 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { in find_get_pages_range()
2177 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
2179 *start = xas.xa_index + 1; in find_get_pages_range()
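find_get_entries() and find_get_pages_range() are gather loops over find_get_entry(): each hit is narrowed to the right subpage with find_subpage(page, xas.xa_index), recorded alongside xas.xa_index, and the caller's cursor is left one past the last index returned so the next call resumes there. Condensed:

	rcu_read_lock();
	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
		if (xa_is_value(page))	/* skip shadow/swap/DAX */
			continue;

		pages[ret] = find_subpage(page, xas.xa_index);
		if (++ret == nr_pages) {
			*start = xas.xa_index + 1;	/* resume point */
			break;
		}
	}
	rcu_read_unlock();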
2215 XA_STATE(xas, &mapping->i_pages, index); in find_get_pages_contig()
2223 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2224 if (xas_retry(&xas, page)) in find_get_pages_contig()
2237 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2240 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2247 xas_reset(&xas); in find_get_pages_contig()
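find_get_pages_contig() wants an unbroken run, so instead of searching with xas_find() it loads the starting slot with xas_load() and steps with xas_next(); any hole or value entry terminates the run. The speculative-get/reload dance is the same as above, with xas_reset() before retrying an index:

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			break;		/* non-page entry breaks the run */

		if (!page_cache_get_speculative(page))
			goto retry;
		if (unlikely(page != xas_reload(&xas)))
			goto put_page;

		pages[ret] = find_subpage(page, xas.xa_index);
		if (++ret == nr_pages)
			break;
		continue;
	put_page:
		put_page(page);
	retry:
		xas_reset(&xas);
	}
	rcu_read_unlock();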
2273 XA_STATE(xas, &mapping->i_pages, *index); in find_get_pages_range_tag()
2281 while ((page = find_get_entry(&xas, end, tag))) { in find_get_pages_range_tag()
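find_get_pages_range_tag() reuses find_get_entry(), passing a mark such as PAGECACHE_TAG_DIRTY instead of XA_PRESENT, so the search runs on xas_find_marked(). Written directly against the iteration macro, a dirty-page gather might look like this (a hedged sketch, not the kernel's code):

	XA_STATE(xas, &mapping->i_pages, *index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			continue;
		if (!page_cache_get_speculative(page)) {
			xas_reset(&xas);
			continue;
		}
		if (unlikely(page != xas_reload(&xas))) {
			put_page(page);
			xas_reset(&xas);
			continue;
		}
		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();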
2346 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2350 for (head = xas_load(&xas); head; head = xas_next(&xas)) { in filemap_get_read_batch()
2351 if (xas_retry(&xas, head)) in filemap_get_read_batch()
2353 if (xas.xa_index > max || xa_is_value(head)) in filemap_get_read_batch()
2359 if (unlikely(head != xas_reload(&xas))) in filemap_get_read_batch()
2368 xas.xa_index = head->index + thp_nr_pages(head) - 1; in filemap_get_read_batch()
2369 xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK; in filemap_get_read_batch()
2374 xas_reset(&xas); in filemap_get_read_batch()
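filemap_get_read_batch() crosses THP boundaries differently: calling xas_set() would forget the walk state and re-descend from the root on the next step, so lines 2368-2369 instead advance xa_index to the last subpage of the head page and recompute xa_offset so the following xas_next() moves sideways to the sibling slot. This pokes xa_state internals; the annotated shape below describes those two lines rather than recommending the pattern:

	/* Jump to the last index covered by this head page... */
	xas.xa_index = head->index + thp_nr_pages(head) - 1;
	/* ...and point xa_offset at the matching slot in the node,
	 * so xas_next() continues in place instead of rewalking. */
	xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK;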
2796 static inline loff_t page_seek_hole_data(struct xa_state *xas, in page_seek_hole_data() argument
2808 xas_pause(xas); in page_seek_hole_data()
2829 unsigned int seek_page_size(struct xa_state *xas, struct page *page) in seek_page_size() argument
2832 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); in seek_page_size()
2857 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
2866 while ((page = find_get_entry(&xas, max, XA_PRESENT))) { in mapping_seek_hole_data()
2867 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; in mapping_seek_hole_data()
2876 seek_size = seek_page_size(&xas, page); in mapping_seek_hole_data()
2878 start = page_seek_hole_data(&xas, mapping, page, start, pos, in mapping_seek_hole_data()
2885 xas_set(&xas, pos >> PAGE_SHIFT); in mapping_seek_hole_data()
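page_seek_hole_data() may have to lock a page, a sleeping operation, in mid-walk, so it calls xas_pause() first: a paused xa_state can safely be abandoned, and the next operation resumes from one past the current index. seek_page_size() sizes each step with xa_get_order() on the current slot. The generic pause shape, as in the XArray documentation:

	XA_STATE(xas, &mapping->i_pages, first);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, last) {
		if (need_resched()) {
			/* Park the walk before leaving the RCU section. */
			xas_pause(&xas);
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
		/* ... process entry ... */
	}
	rcu_read_unlock();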
3236 struct xa_state *xas, pgoff_t end_pgoff) in next_uptodate_page() argument
3243 if (xas_retry(xas, page)) in next_uptodate_page()
3252 if (unlikely(page != xas_reload(xas))) in next_uptodate_page()
3263 if (xas->xa_index >= max_idx) in next_uptodate_page()
3270 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
3276 struct xa_state *xas, in first_map_page() argument
3279 return next_uptodate_page(xas_find(xas, end_pgoff), in first_map_page()
3280 mapping, xas, end_pgoff); in first_map_page()
3284 struct xa_state *xas, in next_map_page() argument
3287 return next_uptodate_page(xas_next_entry(xas, end_pgoff), in next_map_page()
3288 mapping, xas, end_pgoff); in next_map_page()
3299 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3305 head = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3317 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
3324 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
3325 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3326 last_pgoff = xas.xa_index; in filemap_map_pages()
3343 } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
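filemap_map_pages() drives fault-around with the first_map_page()/next_map_page() pair: xas_find() locates the first usable page, xas_next_entry() each subsequent one, and next_uptodate_page() applies the usual retry/reload filtering plus an i_size check against xas->xa_index. The faulting address and PTE pointer advance by the index delta, so holes in the file simply skip PTE slots. Condensed shape of the loop (setup and PTE installation elided):

	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct page *head, *page;

	rcu_read_lock();
	head = first_map_page(mapping, &xas, end_pgoff);  /* xas_find() */
	if (!head)
		goto out;
	do {
		page = find_subpage(head, xas.xa_index);
		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;
		/* ... install the PTE, unlock and release the page ... */
	} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
out:
	rcu_read_unlock();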