Lines matching refs: xas (references to the struct xa_state cursor in fs/dax.c)

157 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,  in dax_entry_waitqueue()  argument
161 unsigned long index = xas->xa_index; in dax_entry_waitqueue()
170 key->xa = xas->xa; in dax_entry_waitqueue()
173 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS); in dax_entry_waitqueue()
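
dax_entry_waitqueue() (lines 157-173 above) derives a wait queue for a DAX entry by hashing the XArray pointer together with the entry's index into a small table of wait queues, so waiters on unrelated entries usually land on different queues; for PMD entries the real function first rounds the index down to the start of the huge entry. Below is a minimal sketch of that hashing idea. The table size, the ex_-prefixed names, and the omitted waitqueue initialisation are illustrative assumptions, not the fs/dax.c definitions; later sketches assume these same headers and compile only in-kernel.

    #include <linux/fs.h>
    #include <linux/hash.h>
    #include <linux/pagemap.h>
    #include <linux/sched.h>
    #include <linux/wait.h>
    #include <linux/writeback.h>
    #include <linux/xarray.h>

    #define EX_WAIT_TABLE_BITS  6   /* hypothetical table size */

    /* One small table of wait queues; assumed initialised elsewhere with
     * init_waitqueue_head(), as fs/dax.c does at init time. */
    static wait_queue_head_t ex_wait_table[1 << EX_WAIT_TABLE_BITS];

    /* Hash (xarray, index) to one wait queue, as dax_entry_waitqueue() does. */
    static wait_queue_head_t *ex_entry_waitqueue(struct xa_state *xas)
    {
        unsigned long hash = hash_long((unsigned long)xas->xa ^ xas->xa_index,
                                       EX_WAIT_TABLE_BITS);

        return &ex_wait_table[hash];
    }
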
195 static void dax_wake_entry(struct xa_state *xas, void *entry, in dax_wake_entry() argument
201 wq = dax_entry_waitqueue(xas, entry, &key); in dax_wake_entry()
223 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order) in get_unlocked_entry() argument
233 entry = xas_find_conflict(xas); in get_unlocked_entry()
241 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in get_unlocked_entry()
244 xas_unlock_irq(xas); in get_unlocked_entry()
245 xas_reset(xas); in get_unlocked_entry()
248 xas_lock_irq(xas); in get_unlocked_entry()
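
get_unlocked_entry() waits for a locked entry to be released: it registers on the hashed wait queue, drops the xa_lock with xas_unlock_irq(), invalidates its cached walk with xas_reset(), sleeps, then retakes the lock and retries. A rough sketch of that drop-lock-and-retry shape follows, reusing the ex_entry_waitqueue() helper above; the real function walks with xas_find_conflict() and uses a custom wake key so dax_wake_entry() wakes only waiters for this particular entry, and EX_LOCKED is a stand-in for the DAX_LOCKED flag.

    #define EX_LOCKED   1UL     /* hypothetical lock bit; fs/dax.c uses DAX_LOCKED */

    /* Sketch of get_unlocked_entry(); caller holds xas_lock_irq(). */
    static void *ex_get_unlocked_entry(struct xa_state *xas)
    {
        void *entry;

        for (;;) {
            DEFINE_WAIT(wait);

            entry = xas_load(xas);
            if (!entry || !xa_is_value(entry) ||
                !(xa_to_value(entry) & EX_LOCKED))
                return entry;           /* absent or already unlocked */

            prepare_to_wait_exclusive(ex_entry_waitqueue(xas), &wait,
                                      TASK_UNINTERRUPTIBLE);
            xas_unlock_irq(xas);        /* drop the xa_lock before sleeping */
            xas_reset(xas);             /* cached walk state is now stale */
            schedule();
            finish_wait(ex_entry_waitqueue(xas), &wait);
            xas_lock_irq(xas);          /* retake the lock and retry the walk */
        }
    }
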
257 static void wait_entry_unlocked(struct xa_state *xas, void *entry) in wait_entry_unlocked() argument
265 wq = dax_entry_waitqueue(xas, entry, &ewait.key); in wait_entry_unlocked()
273 xas_unlock_irq(xas); in wait_entry_unlocked()
278 static void put_unlocked_entry(struct xa_state *xas, void *entry, in put_unlocked_entry() argument
282 dax_wake_entry(xas, entry, mode); in put_unlocked_entry()
290 static void dax_unlock_entry(struct xa_state *xas, void *entry) in dax_unlock_entry() argument
295 xas_reset(xas); in dax_unlock_entry()
296 xas_lock_irq(xas); in dax_unlock_entry()
297 old = xas_store(xas, entry); in dax_unlock_entry()
298 xas_unlock_irq(xas); in dax_unlock_entry()
300 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_unlock_entry()
306 static void *dax_lock_entry(struct xa_state *xas, void *entry) in dax_lock_entry() argument
309 return xas_store(xas, xa_mk_value(v | DAX_LOCKED)); in dax_lock_entry()
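
dax_lock_entry() and dax_unlock_entry() show that the per-entry lock is just a bit inside the value entry: locking rewrites the entry with DAX_LOCKED ORed in via xas_store(), and unlocking stores the clean value back and wakes the next waiter. A compact sketch of that value-entry bit trick, continuing with the hypothetical EX_LOCKED bit:

    /* dax_lock_entry(): rewrite the value entry with the lock bit set.
     * Caller holds xas_lock_irq() with the cursor positioned on the entry. */
    static void *ex_lock_entry(struct xa_state *xas, void *entry)
    {
        unsigned long v = xa_to_value(entry);

        return xas_store(xas, xa_mk_value(v | EX_LOCKED));
    }

    /* dax_unlock_entry(): store the clean value back, then wake a waiter.
     * fs/dax.c wakes through dax_wake_entry() with a key matching only this
     * entry's waiters; waking the whole hashed queue is the blunt version. */
    static void ex_unlock_entry(struct xa_state *xas, void *entry)
    {
        xas_reset(xas);
        xas_lock_irq(xas);
        xas_store(xas, entry);          /* @entry has EX_LOCKED clear */
        xas_unlock_irq(xas);
        wake_up(ex_entry_waitqueue(xas));
    }
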
402 XA_STATE(xas, NULL, 0); in dax_lock_page()
425 xas.xa = &mapping->i_pages; in dax_lock_page()
426 xas_lock_irq(&xas); in dax_lock_page()
428 xas_unlock_irq(&xas); in dax_lock_page()
431 xas_set(&xas, page->index); in dax_lock_page()
432 entry = xas_load(&xas); in dax_lock_page()
435 wait_entry_unlocked(&xas, entry); in dax_lock_page()
439 dax_lock_entry(&xas, entry); in dax_lock_page()
440 xas_unlock_irq(&xas); in dax_lock_page()
450 XA_STATE(xas, &mapping->i_pages, page->index); in dax_unlock_page()
455 dax_unlock_entry(&xas, (void *)cookie); in dax_unlock_page()
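
dax_lock_page() declares its cursor with XA_STATE(xas, NULL, 0) because the page's mapping is not yet known to be stable; only after validating the mapping does it aim xas.xa at mapping->i_pages, position the cursor with xas_set(), then load and lock the entry, which dax_unlock_page() later releases through dax_unlock_entry(). A sketch of binding an xa_state to an array chosen at runtime, with the mapping checks and entry locking left out:

    /* Bind an xa_state to an array chosen at runtime, then load one slot. */
    static void *ex_load_page_entry(struct address_space *mapping, pgoff_t index)
    {
        XA_STATE(xas, NULL, 0);         /* array not known at declaration time */
        void *entry;

        xas.xa = &mapping->i_pages;     /* aim at this mapping's XArray */
        xas_lock_irq(&xas);
        xas_set(&xas, index);           /* position the cursor */
        entry = xas_load(&xas);
        xas_unlock_irq(&xas);

        return entry;
    }
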
487 static void *grab_mapping_entry(struct xa_state *xas, in grab_mapping_entry() argument
490 unsigned long index = xas->xa_index; in grab_mapping_entry()
496 xas_lock_irq(xas); in grab_mapping_entry()
497 entry = get_unlocked_entry(xas, order); in grab_mapping_entry()
503 xas_set_err(xas, -EIO); in grab_mapping_entry()
521 dax_lock_entry(xas, entry); in grab_mapping_entry()
529 xas_unlock_irq(xas); in grab_mapping_entry()
531 xas->xa_index & ~PG_PMD_COLOUR, in grab_mapping_entry()
533 xas_reset(xas); in grab_mapping_entry()
534 xas_lock_irq(xas); in grab_mapping_entry()
538 xas_store(xas, NULL); /* undo the PMD join */ in grab_mapping_entry()
539 dax_wake_entry(xas, entry, WAKE_ALL); in grab_mapping_entry()
542 xas_set(xas, index); in grab_mapping_entry()
546 dax_lock_entry(xas, entry); in grab_mapping_entry()
553 dax_lock_entry(xas, entry); in grab_mapping_entry()
554 if (xas_error(xas)) in grab_mapping_entry()
560 xas_unlock_irq(xas); in grab_mapping_entry()
561 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) in grab_mapping_entry()
563 if (xas->xa_node == XA_ERROR(-ENOMEM)) in grab_mapping_entry()
565 if (xas_error(xas)) in grab_mapping_entry()
569 xas_unlock_irq(xas); in grab_mapping_entry()
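
grab_mapping_entry() follows the standard XArray allocation pattern: the store is attempted under xas_lock_irq(), and when it fails for lack of memory the lock is dropped and xas_nomem() preallocates a node before the whole operation is retried; xas_error() reports any remaining failure. A minimal sketch of that retry loop, inserting a plain value entry rather than a DAX entry:

    /* Insert a value entry at @index, allocating nodes outside the lock. */
    static int ex_store_entry(struct address_space *mapping, pgoff_t index,
                              unsigned long value)
    {
        XA_STATE(xas, &mapping->i_pages, index);

        do {
            xas_lock_irq(&xas);
            xas_store(&xas, xa_mk_value(value));
            xas_unlock_irq(&xas);
            /* On -ENOMEM, xas_nomem() allocates a node and asks us to retry. */
        } while (xas_nomem(&xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM));

        return xas_error(&xas);
    }
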
599 XA_STATE(xas, &mapping->i_pages, start_idx); in dax_layout_busy_page_range()
629 xas_lock_irq(&xas); in dax_layout_busy_page_range()
630 xas_for_each(&xas, entry, end_idx) { in dax_layout_busy_page_range()
634 entry = get_unlocked_entry(&xas, 0); in dax_layout_busy_page_range()
637 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_layout_busy_page_range()
643 xas_pause(&xas); in dax_layout_busy_page_range()
644 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
646 xas_lock_irq(&xas); in dax_layout_busy_page_range()
648 xas_unlock_irq(&xas); in dax_layout_busy_page_range()
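
dax_layout_busy_page_range() walks every entry in the range with xas_for_each() and periodically calls xas_pause() so it can drop xas_unlock_irq(), reschedule, and relock without losing its place. A sketch of that pause-and-resume iteration; EX_CHECK_SCHED is an arbitrary period, not a constant from fs/dax.c:

    #define EX_CHECK_SCHED  4096    /* arbitrary rescheduling period */

    /* Visit every present entry in [first, last] without hogging the lock. */
    static void ex_walk_range(struct address_space *mapping,
                              pgoff_t first, pgoff_t last)
    {
        XA_STATE(xas, &mapping->i_pages, first);
        unsigned int scanned = 0;
        void *entry;

        xas_lock_irq(&xas);
        xas_for_each(&xas, entry, last) {
            /* ... examine @entry here ... */
            if (++scanned % EX_CHECK_SCHED)
                continue;
            xas_pause(&xas);        /* make the cursor safe to drop */
            xas_unlock_irq(&xas);
            cond_resched();
            xas_lock_irq(&xas);     /* resume from the paused position */
        }
        xas_unlock_irq(&xas);
    }
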
662 XA_STATE(xas, &mapping->i_pages, index); in __dax_invalidate_entry()
666 xas_lock_irq(&xas); in __dax_invalidate_entry()
667 entry = get_unlocked_entry(&xas, 0); in __dax_invalidate_entry()
671 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) || in __dax_invalidate_entry()
672 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE))) in __dax_invalidate_entry()
675 xas_store(&xas, NULL); in __dax_invalidate_entry()
679 put_unlocked_entry(&xas, entry, WAKE_ALL); in __dax_invalidate_entry()
680 xas_unlock_irq(&xas); in __dax_invalidate_entry()
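
__dax_invalidate_entry() removes an entry only when it is not tagged for writeback: it tests the marks with xas_get_mark() and, if both are clear, erases the slot with xas_store(..., NULL) before waking waiters. A sketch of that mark-conditional removal using the real page-cache tags:

    /* Remove the entry at @index unless it is still tagged for writeback. */
    static bool ex_invalidate_clean(struct address_space *mapping, pgoff_t index)
    {
        XA_STATE(xas, &mapping->i_pages, index);
        bool removed = false;

        xas_lock_irq(&xas);
        if (xas_load(&xas) &&
            !xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) &&
            !xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)) {
            xas_store(&xas, NULL);  /* erase the slot */
            removed = true;
        }
        xas_unlock_irq(&xas);

        return removed;
    }
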
744 static void *dax_insert_entry(struct xa_state *xas, in dax_insert_entry() argument
754 unsigned long index = xas->xa_index; in dax_insert_entry()
763 xas_reset(xas); in dax_insert_entry()
764 xas_lock_irq(xas); in dax_insert_entry()
778 old = dax_lock_entry(xas, new_entry); in dax_insert_entry()
783 xas_load(xas); /* Walk the xa_state */ in dax_insert_entry()
787 xas_set_mark(xas, PAGECACHE_TAG_DIRTY); in dax_insert_entry()
789 xas_unlock_irq(xas); in dax_insert_entry()
877 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev, in dax_writeback_one() argument
893 entry = get_unlocked_entry(xas, 0); in dax_writeback_one()
912 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE)) in dax_writeback_one()
917 dax_lock_entry(xas, entry); in dax_writeback_one()
926 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE); in dax_writeback_one()
927 xas_unlock_irq(xas); in dax_writeback_one()
938 index = xas->xa_index & ~(count - 1); in dax_writeback_one()
948 xas_reset(xas); in dax_writeback_one()
949 xas_lock_irq(xas); in dax_writeback_one()
950 xas_store(xas, entry); in dax_writeback_one()
951 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY); in dax_writeback_one()
952 dax_wake_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
958 put_unlocked_entry(xas, entry, WAKE_NEXT); in dax_writeback_one()
970 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); in dax_writeback_mapping_range()
983 trace_dax_writeback_range(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
985 tag_pages_for_writeback(mapping, xas.xa_index, end_index); in dax_writeback_mapping_range()
987 xas_lock_irq(&xas); in dax_writeback_mapping_range()
988 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) { in dax_writeback_mapping_range()
989 ret = dax_writeback_one(&xas, dax_dev, mapping, entry); in dax_writeback_mapping_range()
997 xas_pause(&xas); in dax_writeback_mapping_range()
998 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
1000 xas_lock_irq(&xas); in dax_writeback_mapping_range()
1002 xas_unlock_irq(&xas); in dax_writeback_mapping_range()
1003 trace_dax_writeback_range_done(inode, xas.xa_index, end_index); in dax_writeback_mapping_range()
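
dax_writeback_mapping_range() first promotes dirty entries to PAGECACHE_TAG_TOWRITE with tag_pages_for_writeback() and then visits only tagged entries with xas_for_each_marked(), pausing and relocking between entries as above; dax_writeback_one() clears TOWRITE under the lock before flushing and clears DIRTY once the flush completes. A sketch of the marked-iteration half, with the per-entry flush reduced to a comment:

    /* Visit every TOWRITE-tagged entry in [start, end]. */
    static void ex_writeback_range(struct address_space *mapping,
                                   pgoff_t start, pgoff_t end)
    {
        XA_STATE(xas, &mapping->i_pages, start);
        void *entry;

        tag_pages_for_writeback(mapping, start, end);   /* DIRTY -> TOWRITE */

        xas_lock_irq(&xas);
        xas_for_each_marked(&xas, entry, end, PAGECACHE_TAG_TOWRITE) {
            xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
            /* ... flush the pfns backing @entry here ... */
            xas_pause(&xas);
            xas_unlock_irq(&xas);
            cond_resched();
            xas_lock_irq(&xas);
        }
        xas_unlock_irq(&xas);
    }
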
1052 static vm_fault_t dax_load_hole(struct xa_state *xas, in dax_load_hole() argument
1061 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_load_hole()
1070 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1089 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, in dax_pmd_load_hole()
1122 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf, in dax_pmd_load_hole() argument
1376 struct xa_state *xas, void **entry, bool pmd) in dax_fault_iter() argument
1381 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; in dax_fault_iter()
1395 return dax_load_hole(xas, mapping, entry, vmf); in dax_fault_iter()
1396 return dax_pmd_load_hole(xas, vmf, iomap, entry); in dax_fault_iter()
1408 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags, in dax_fault_iter()
1428 XA_STATE(xas, &mapping->i_pages, vmf->pgoff); in dax_iomap_pte_fault()
1453 entry = grab_mapping_entry(&xas, mapping, 0); in dax_iomap_pte_fault()
1476 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); in dax_iomap_pte_fault()
1494 dax_unlock_entry(&xas, entry); in dax_iomap_pte_fault()
1501 static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, in dax_fault_check_fallback() argument
1528 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) in dax_fault_check_fallback()
1538 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); in dax_iomap_pmd_fault()
1561 if (xas.xa_index >= max_pgoff) { in dax_iomap_pmd_fault()
1566 if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) in dax_iomap_pmd_fault()
1575 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); in dax_iomap_pmd_fault()
1593 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; in dax_iomap_pmd_fault()
1598 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); in dax_iomap_pmd_fault()
1604 dax_unlock_entry(&xas, entry); in dax_iomap_pmd_fault()
1662 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); in dax_insert_pfn_mkwrite()
1666 xas_lock_irq(&xas); in dax_insert_pfn_mkwrite()
1667 entry = get_unlocked_entry(&xas, order); in dax_insert_pfn_mkwrite()
1671 put_unlocked_entry(&xas, entry, WAKE_NEXT); in dax_insert_pfn_mkwrite()
1672 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1677 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); in dax_insert_pfn_mkwrite()
1678 dax_lock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
1679 xas_unlock_irq(&xas); in dax_insert_pfn_mkwrite()
1688 dax_unlock_entry(&xas, entry); in dax_insert_pfn_mkwrite()
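
The PMD fault path and dax_insert_pfn_mkwrite() declare their cursors with XA_STATE_ORDER() so one multi-order entry can cover a PMD's worth of indices, and PG_PMD_COLOUR keeps indices aligned to that entry. A closing sketch of storing a multi-order value entry; EX_PMD_ORDER is an illustrative stand-in for the kernel's PMD_ORDER, and multi-order stores require CONFIG_XARRAY_MULTI:

    #define EX_PMD_ORDER    9   /* PMD_ORDER on x86-64 with 4K pages */

    /* Store one multi-order value entry spanning a PMD-aligned index range. */
    static int ex_store_pmd_entry(struct address_space *mapping, pgoff_t index,
                                  unsigned long value)
    {
        /* XA_STATE_ORDER() rounds @index down to the order-aligned start. */
        XA_STATE_ORDER(xas, &mapping->i_pages, index, EX_PMD_ORDER);

        do {
            xas_lock_irq(&xas);
            xas_store(&xas, xa_mk_value(value));
            xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, GFP_KERNEL));

        return xas_error(&xas);
    }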