
Searched refs:i_pages (Results 1 – 25 of 41) sorted by relevance


/linux/fs/nilfs2/
btnode.c 181 xa_lock_irq(&btnc->i_pages); in nilfs_btnode_prepare_change_key()
182 err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS); in nilfs_btnode_prepare_change_key()
183 xa_unlock_irq(&btnc->i_pages); in nilfs_btnode_prepare_change_key()
238 xa_lock_irq(&btnc->i_pages); in nilfs_btnode_commit_change_key()
239 __xa_erase(&btnc->i_pages, oldkey); in nilfs_btnode_commit_change_key()
240 __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY); in nilfs_btnode_commit_change_key()
241 xa_unlock_irq(&btnc->i_pages); in nilfs_btnode_commit_change_key()
269 xa_erase_irq(&btnc->i_pages, newkey); in nilfs_btnode_abort_change_key()
page.c 324 xa_lock_irq(&smap->i_pages); in nilfs_copy_back_pages()
325 p = __xa_erase(&smap->i_pages, offset); in nilfs_copy_back_pages()
328 xa_unlock_irq(&smap->i_pages); in nilfs_copy_back_pages()
330 xa_lock_irq(&dmap->i_pages); in nilfs_copy_back_pages()
331 p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS); in nilfs_copy_back_pages()
340 __xa_set_mark(&dmap->i_pages, offset, in nilfs_copy_back_pages()
343 xa_unlock_irq(&dmap->i_pages); in nilfs_copy_back_pages()
464 xa_lock_irq(&mapping->i_pages); in __nilfs_clear_page_dirty()
466 __xa_clear_mark(&mapping->i_pages, page_index(page), in __nilfs_clear_page_dirty()
468 xa_unlock_irq(&mapping->i_pages); in __nilfs_clear_page_dirty()
[all …]
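
The nilfs2 hits above all follow the same pattern for modifying an address_space's i_pages XArray directly: take xa_lock_irq() and use the __xa_* variants inside the critical section. Below is a minimal sketch of that pattern, illustrative only; move_cache_entry() is a hypothetical helper, not code from the tree.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

static int move_cache_entry(struct address_space *smap,
			    struct address_space *dmap,
			    struct page *page, pgoff_t offset)
{
	void *old;

	/* Drop the entry from the source mapping under its i_pages lock. */
	xa_lock_irq(&smap->i_pages);
	__xa_erase(&smap->i_pages, offset);
	xa_unlock_irq(&smap->i_pages);

	/* Insert into the destination mapping and tag it dirty atomically. */
	xa_lock_irq(&dmap->i_pages);
	old = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
	if (!xa_is_err(old))
		__xa_set_mark(&dmap->i_pages, offset, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&dmap->i_pages);

	return xa_err(old);
}
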
/linux/mm/
truncate.c 37 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
49 xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
51 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
82 xa_lock_irq(&mapping->i_pages); in truncate_exceptional_pvec_entries()
103 xa_unlock_irq(&mapping->i_pages); in truncate_exceptional_pvec_entries()
466 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
467 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
581 xa_lock_irq(&mapping->i_pages); in invalidate_complete_page2()
587 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_page2()
598 xa_unlock_irq(&mapping->i_pages); in invalidate_complete_page2()
swap_state.c 90 page = xa_load(&address_space->i_pages, idx); in get_shadow_from_swap_cache()
105 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
158 XA_STATE(xas, &address_space->i_pages, idx); in __delete_from_swap_cache()
245 xa_lock_irq(&address_space->i_pages); in delete_from_swap_cache()
247 xa_unlock_irq(&address_space->i_pages); in delete_from_swap_cache()
262 XA_STATE(xas, &address_space->i_pages, curr); in clear_shadow_from_swap_cache()
264 xa_lock_irq(&address_space->i_pages); in clear_shadow_from_swap_cache()
270 xa_unlock_irq(&address_space->i_pages); in clear_shadow_from_swap_cache()
675 xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ); in init_swap_address_space()
workingset.c 537 mapping = container_of(node->array, struct address_space, i_pages); in shadow_lru_isolate()
540 if (!xa_trylock(&mapping->i_pages)) { in shadow_lru_isolate()
547 xa_unlock(&mapping->i_pages); in shadow_lru_isolate()
571 xa_unlock_irq(&mapping->i_pages); in shadow_lru_isolate()
filemap.c 265 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache()
267 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache()
347 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
354 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
857 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
892 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
1753 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1789 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1837 XA_STATE(xas, &mapping->i_pages, index); in mapping_get_entry()
2047 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
[all …]
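
The filemap.c hits above show the XA_STATE()/xas_for_each() idiom for walking i_pages under RCU, retrying on concurrent node changes and skipping shadow entries. The sketch below illustrates that idiom only; count_cached_pages() is a made-up name, not a kernel function.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

static unsigned long count_cached_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long nr = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end) {
		if (xas_retry(&xas, page))	/* raced with an XArray node change */
			continue;
		if (!xa_is_value(page))		/* skip shadow/swap value entries */
			nr++;
	}
	rcu_read_unlock();

	return nr;
}
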
readahead.c 199 struct page *page = xa_load(&mapping->i_pages, index + i); in page_cache_ra_unbounded()
674 struct page *page = xa_load(&mapping->i_pages, index); in readahead_expand()
697 struct page *page = xa_load(&mapping->i_pages, index); in readahead_expand()
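
The readahead.c hits rely on xa_load(), which handles RCU internally, so a plain presence check against i_pages needs no explicit locking. A small illustrative sketch follows; page_cache_present() is a hypothetical helper, not a kernel function.

#include <linux/fs.h>
#include <linux/xarray.h>

static bool page_cache_present(struct address_space *mapping, pgoff_t index)
{
	void *entry = xa_load(&mapping->i_pages, index);

	/* Shadow and swap entries are "value" entries, not struct page pointers. */
	return entry && !xa_is_value(entry);
}
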
page-writeback.c 2127 XA_STATE(xas, &mapping->i_pages, start); in tag_pages_for_writeback()
2506 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_mark_dirty()
2510 __xa_set_mark(&mapping->i_pages, folio_index(folio), in __folio_mark_dirty()
2513 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_mark_dirty()
2812 xa_lock_irqsave(&mapping->i_pages, flags); in __folio_end_writeback()
2815 __xa_clear_mark(&mapping->i_pages, folio_index(folio), in __folio_end_writeback()
2832 xa_unlock_irqrestore(&mapping->i_pages, flags); in __folio_end_writeback()
2854 XA_STATE(xas, &mapping->i_pages, folio_index(folio)); in __folio_start_writeback()
shmem.c 413 XA_STATE(xas, &mapping->i_pages, index); in shmem_replace_entry()
435 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); in shmem_confirm_swap()
696 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); in shmem_add_to_page_cache()
771 xa_lock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
777 xa_unlock_irq(&mapping->i_pages); in shmem_delete_from_page_cache()
790 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); in shmem_free_swap()
807 XA_STATE(xas, &mapping->i_pages, start); in shmem_partial_swap_usage()
1170 XA_STATE(xas, &mapping->i_pages, start); in shmem_find_swap_entries()
1557 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, in shmem_alloc_hugepage()
1672 xa_lock_irq(&swap_mapping->i_pages); in shmem_replace_page()
[all …]
huge_memory.c 2424 xa_lock(&swap_cache->i_pages); in __split_huge_page()
2442 __xa_store(&head->mapping->i_pages, head[i].index, in __split_huge_page()
2445 __xa_store(&swap_cache->i_pages, offset + i, in __split_huge_page()
2461 xa_unlock(&swap_cache->i_pages); in __split_huge_page()
2468 xa_unlock(&head->mapping->i_pages); in __split_huge_page()
2684 XA_STATE(xas, &mapping->i_pages, page_index(head)); in split_huge_page_to_list()
2690 xa_lock(&mapping->i_pages); in split_huge_page_to_list()
2722 xa_unlock(&mapping->i_pages); in split_huge_page_to_list()
memfd.c 68 XA_STATE(xas, &mapping->i_pages, 0); in memfd_wait_for_pins()
/linux/arch/nios2/include/asm/
cacheflush.h 49 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
50 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/linux/arch/nds32/include/asm/
cacheflush.h 42 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
43 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
/linux/arch/csky/abiv1/inc/abi/
cacheflush.h 17 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
18 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/linux/arch/parisc/include/asm/
cacheflush.h 54 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
55 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/linux/include/linux/
backing-dev.h 271 !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && in inode_to_wb()
315 xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); in unlocked_inode_to_wb_begin()
333 xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); in unlocked_inode_to_wb_end()
pagemap.h 23 return xa_empty(&mapping->i_pages); in mapping_empty()
60 head = rcu_access_pointer(mapping->i_pages.xa_head); in mapping_shrinkable()
1067 folio = xa_load(&ractl->mapping->i_pages, ractl->_index); in __readahead_folio()
1111 XA_STATE(xas, &rac->mapping->i_pages, 0); in __readahead_batch()
/linux/arch/arm/include/asm/
cacheflush.h 316 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
317 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
/linux/fs/
dax.c 425 xas.xa = &mapping->i_pages; in dax_lock_page()
450 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
599 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
662 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
970 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
1428 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1538 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1662 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
/linux/fs/9p/
vfs_addr.c 42 iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len); in v9fs_req_issue_op()
179 iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len); in v9fs_vfs_write_folio_locked()
/linux/fs/netfs/
read_helper.c 153 iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages, in netfs_clear_unread()
179 iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages, in netfs_read_from_cache()
248 XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE); in netfs_rreq_unmark_after_write()
333 iov_iter_xarray(&iter, WRITE, &rreq->mapping->i_pages, in netfs_rreq_do_write_to_cache()
377 XA_STATE(xas, &rreq->mapping->i_pages, start_page); in netfs_rreq_unlock()
/linux/fs/orangefs/
inode.c 253 struct xarray *i_pages; in orangefs_readahead() local
271 i_pages = &rac->mapping->i_pages; in orangefs_readahead()
273 iov_iter_xarray(&iter, READ, i_pages, offset, readahead_length(rac)); in orangefs_readahead()
/linux/fs/afs/
write.c 250 XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); in afs_pages_written_back()
433 XA_STATE(xas, &mapping->i_pages, index); in afs_extend_writeback()
588 iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len); in afs_write_back_from_locked_folio()
file.c 316 &fsreq->vnode->vfs_inode.i_mapping->i_pages, in afs_req_issue_op()
338 iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages, in afs_symlink_readpage()
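
The netfs, 9p and afs hits above build an iov_iter directly over i_pages with iov_iter_xarray(), so I/O helpers can move data to or from pages already in the page cache. The sketch below is only an illustration of that call under stated assumptions; fill_cached_range() and its use of the READ direction are not from the kernel tree.

#include <linux/fs.h>
#include <linux/uio.h>

static size_t fill_cached_range(struct address_space *mapping, loff_t pos,
				const void *buf, size_t len)
{
	struct iov_iter iter;

	/* The iterator covers the cached pages for [pos, pos + len). */
	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);

	/* Copy the buffer into those pages through the iterator. */
	return copy_to_iter(buf, len, &iter);
}
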
/linux/fs/gfs2/
glops.c 539 xa_lock_irq(&inode->i_data.i_pages); in inode_go_dump()
541 xa_unlock_irq(&inode->i_data.i_pages); in inode_go_dump()

Completed in 56 milliseconds