/linux/fs/xfs/libxfs/

  xfs_dir2_leaf.c
      47  to->stale = be16_to_cpu(from3->hdr.stale);  in xfs_dir2_leaf_hdr_from_disk()
      57  to->stale = be16_to_cpu(from->hdr.stale);  in xfs_dir2_leaf_hdr_from_disk()
      81  to3->hdr.stale = cpu_to_be16(from->stale);  in xfs_dir2_leaf_hdr_to_disk()
      90  to->hdr.stale = cpu_to_be16(from->stale);  in xfs_dir2_leaf_hdr_to_disk()
     147  int stale;  in xfs_dir3_leaf_check_int()  local
     177  stale++;  in xfs_dir3_leaf_check_int()
     179  if (hdr->stale != stale)  in xfs_dir3_leaf_check_int()
     432  leafhdr.stale = be32_to_cpu(btp->stale);  in xfs_dir2_block_to_leaf()
     581  leafhdr->stale--;  in xfs_dir3_leaf_find_entry()
     601  leafhdr->stale--;  in xfs_dir3_leaf_find_entry()
    [all …]

  xfs_dir2_block.c
     209  if (btp->stale) {  in xfs_dir2_block_need_space()
     330  *lfloglow = toidx + 1 - (be32_to_cpu(btp->stale) - 1);  in xfs_dir2_block_compact()
     331  *lfloghigh -= be32_to_cpu(btp->stale) - 1;  in xfs_dir2_block_compact()
     332  be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));  in xfs_dir2_block_compact()
     337  btp->stale = cpu_to_be32(1);  in xfs_dir2_block_compact()
     440  } else if (btp->stale) {  in xfs_dir2_block_addname()
     467  if (!btp->stale) {  in xfs_dir2_block_addname()
     546  be32_add_cpu(&btp->stale, -1);  in xfs_dir2_block_addname()
     826  be32_add_cpu(&btp->stale, 1);  in xfs_dir2_block_removename()
    1026  btp->stale = 0;  in xfs_dir2_leaf_to_block()
    [all …]

  xfs_dir2_node.c
     531  if (!leafhdr.stale)  in xfs_dir2_leafn_add()
     986  if (shdr->stale) {  in xfs_dir3_leafn_moveents()
     992  stale++;  in xfs_dir3_leafn_moveents()
     995  stale = 0;  in xfs_dir3_leafn_moveents()
    1018  shdr->stale -= stale;  in xfs_dir3_leafn_moveents()
    1020  dhdr->stale += stale;  in xfs_dir3_leafn_moveents()
    1100  oldstale = hdr1.stale + hdr2.stale;  in xfs_dir2_leafn_rebalance()
    1139  ASSERT(hdr1.stale + hdr2.stale == oldstale);  in xfs_dir2_leafn_rebalance()
    1308  leafhdr.stale++;  in xfs_dir2_leafn_remove()
    1643  if (drophdr.stale)  in xfs_dir2_leafn_unbalance()
    [all …]

  xfs_da_format.h
     414  __be16 stale;  /* count of stale entries */  member
     420  __be16 stale;  /* count of stale entries */  member
     530  __be32 stale;  /* count of stale lf entries */  member

  xfs_dir2_data.c
     114  int stale;  /* count of stale leaves */  in __xfs_dir3_data_check()  local
     256  for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {  in __xfs_dir3_data_check()
     259  stale++;  in __xfs_dir3_data_check()
     264  if (count != be32_to_cpu(btp->count) - be32_to_cpu(btp->stale))  in __xfs_dir3_data_check()
     266  if (stale != be32_to_cpu(btp->stale))  in __xfs_dir3_data_check()
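The common thread in these libxfs hits is a per-block count of stale directory entries (deleted but not yet compacted away), stored in the leaf/block tail and re-derived by the check routines, for example line 179's "if (hdr->stale != stale)". The following is a simplified user-space sketch of that cross-check; the structures and the null-pointer sentinel are stand-ins, not the real XFS on-disk format:

    #include <stdbool.h>
    #include <stdint.h>

    #define NULL_DATAPTR 0xffffffffu        /* stand-in for the "entry is stale" sentinel */

    struct leaf_entry {                     /* stand-in for a directory leaf entry */
        uint32_t hashval;
        uint32_t address;                   /* set to NULL_DATAPTR when the entry is deleted */
    };

    struct leaf_hdr {                       /* stand-in for the leaf/block tail */
        uint16_t count;                     /* total entries, live + stale */
        uint16_t stale;                     /* recorded count of stale entries */
    };

    /* Re-count the stale entries and cross-check the header, in the spirit of
     * the xfs_dir3_leaf_check_int() / __xfs_dir3_data_check() hits above. */
    static bool leaf_stale_count_ok(const struct leaf_hdr *hdr,
                                    const struct leaf_entry *ents)
    {
        unsigned int i, stale = 0;

        for (i = 0; i < hdr->count; i++)
            if (ents[i].address == NULL_DATAPTR)
                stale++;

        return stale == hdr->stale;         /* mismatch means the block is corrupt */
    }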
/linux/drivers/gpu/drm/i915/gt/

  intel_gt_buffer_pool.c
      43  struct intel_gt_buffer_pool_node *node, *stale = NULL;  in pool_free_older_than()  local
      71  node->free = stale;  in pool_free_older_than()
      72  stale = node;  in pool_free_older_than()
      83  while ((node = stale)) {  in pool_free_older_than()
      84  stale = stale->free;  in pool_free_older_than()
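These hits show pool_free_older_than() pushing expired nodes onto a local singly linked "stale" chain (node->free = stale; stale = node) and only walking that chain to free the nodes afterwards, once the pool lock can be dropped. A minimal user-space sketch of that collect-then-free pattern follows; the node and pool layout and the last_used field are hypothetical, not the driver's real structures:

    #include <stdlib.h>

    struct node {                   /* hypothetical pool node */
        struct node *next;          /* active-list linkage */
        struct node *free;          /* reused as the stale-chain linkage */
        long last_used;
    };

    struct pool {
        struct node *active;        /* the real driver holds a spinlock around this list */
    };

    static void pool_free_older_than(struct pool *pool, long cutoff)
    {
        struct node **link = &pool->active;
        struct node *node, *stale = NULL;

        /* Pass 1 (done under the lock in the real driver): unlink expired
         * nodes and push them onto the local stale chain. */
        while ((node = *link) != NULL) {
            if (node->last_used < cutoff) {
                *link = node->next;
                node->free = stale;
                stale = node;
            } else {
                link = &node->next;
            }
        }

        /* Pass 2 (after dropping the lock): release the stale chain. */
        while ((node = stale) != NULL) {
            stale = node->free;
            free(node);
        }
    }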
/linux/fs/cachefiles/

  xattr.c
     219  goto stale;  /* no attribute - power went off  in cachefiles_check_object_xattr()
     236  goto stale;  in cachefiles_check_object_xattr()
     265  goto stale;  in cachefiles_check_object_xattr()
     298  stale:  in cachefiles_check_object_xattr()
/linux/drivers/md/bcache/

  extents.c
     540  unsigned int i, stale;  in bch_extent_bad()  local
     552  stale = ptr_stale(b->c, k, i);  in bch_extent_bad()
     554  if (stale && KEY_DIRTY(k)) {  in bch_extent_bad()
     557  stale, buf);  in bch_extent_bad()
     560  btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,  in bch_extent_bad()
     562  stale, b->c->need_gc);  in bch_extent_bad()
     564  if (stale)  in bch_extent_bad()

  btree.c
    1196  uint8_t stale = 0;  in __bch_btree_mark_key()  local
    1206  return stale;  in __bch_btree_mark_key()
    1218  stale = max(stale, ptr_stale(c, k, i));  in __bch_btree_mark_key()
    1242  return stale;  in __bch_btree_mark_key()
    1274  uint8_t stale = 0;  in btree_gc_mark_node()  local
    1283  stale = max(stale, btree_mark_key(b, k));  in btree_gc_mark_node()
    1305  if (stale > 10)  in btree_gc_mark_node()
/linux/Documentation/RCU/

  listRCU.rst
      68  equipment outside of the computer, it will at times contain stale data.
     204  So, when readers can tolerate stale data and when entries are either added or
     283  The auditing example above tolerates stale data, as do most algorithms
     288  However, there are many examples where stale data cannot be tolerated.
     303  If the system-call audit module were to ever need to reject stale data, one way
     414  the ``cancel_list``, the ``might_cancel`` flag is consulted to skip stale
     446  Read-mostly list-based data structures that can tolerate stale data are
     451  If stale data cannot be tolerated, then a *deleted* flag may be used
     462  then the caller will be processing stale data in any case. If it
     463  is really OK to be processing stale data, then you don't need a
    [all …]
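Line 451 of listRCU.rst names the technique for readers that cannot tolerate stale data: pair the RCU-protected list with a per-entry lock and a deleted flag so a reader that races with removal can detect it. A kernel-style sketch of that technique; the entry type and the error codes are illustrative, not the actual audit code the document discusses:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct entry {
        struct list_head list;
        spinlock_t lock;
        bool deleted;               /* set before the entry leaves the list */
        struct rcu_head rcu;
        int key;
    };

    /* Reader: reject the entry if it is concurrently being deleted. */
    static int lookup_and_use(struct list_head *head, int key)
    {
        struct entry *e;

        rcu_read_lock();
        list_for_each_entry_rcu(e, head, list) {
            if (e->key != key)
                continue;
            spin_lock(&e->lock);
            if (e->deleted) {       /* removal won the race: data would be stale */
                spin_unlock(&e->lock);
                rcu_read_unlock();
                return -ESTALE;
            }
            /* ... use the entry while holding e->lock ... */
            spin_unlock(&e->lock);
            rcu_read_unlock();
            return 0;
        }
        rcu_read_unlock();
        return -ENOENT;
    }

    /* Updater: mark deleted under the entry lock, then unlink and defer the free. */
    static void remove_entry(spinlock_t *list_lock, struct entry *e)
    {
        spin_lock(list_lock);
        spin_lock(&e->lock);
        e->deleted = true;
        spin_unlock(&e->lock);
        list_del_rcu(&e->list);
        spin_unlock(list_lock);
        kfree_rcu(e, rcu);          /* readers already in flight may still hold a reference */
    }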
/linux/fs/xfs/

  xfs_buf_item.c
     487  int stale = bip->bli_flags & XFS_BLI_STALE;  in xfs_buf_item_unpin()  local
     505  if (freed && !stale && remove)  in xfs_buf_item_unpin()
     514  if (stale) {  in xfs_buf_item_unpin()
     663  bool stale = bip->bli_flags & XFS_BLI_STALE;  in xfs_buf_item_release()  local
     679  ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));  in xfs_buf_item_release()
     695  if (hold || (stale && !released))  in xfs_buf_item_release()
     697  ASSERT(!stale || aborted);  in xfs_buf_item_release()
/linux/fs/xfs/scrub/

  dir.c
     513  unsigned int stale = 0;  in xchk_directory_leaf1_bestfree()  local
     565  stale++;  in xchk_directory_leaf1_bestfree()
     567  if (leafhdr.stale != stale)  in xchk_directory_leaf1_bestfree()
     616  unsigned int stale = 0;  in xchk_directory_free_bestfree()  local
     638  stale++;  in xchk_directory_free_bestfree()
     651  if (freehdr.nused + stale != freehdr.nvalid)  in xchk_directory_free_bestfree()
/linux/Documentation/networking/

  mptcp-sysctl.rst
      51  pending outstanding data on a given subflow required to declare it stale.
      52  The packet scheduler ignores stale subflows.
/linux/drivers/gpu/drm/i915/gem/selftests/

  mock_context.c
      30  spin_lock_init(&ctx->stale.lock);  in mock_context()
      31  INIT_LIST_HEAD(&ctx->stale.engines);  in mock_context()
/linux/drivers/gpu/drm/i915/gem/

  i915_gem_context.c
    1016  spin_lock_irqsave(&ctx->stale.lock, flags);  in engines_notify()
    1018  spin_unlock_irqrestore(&ctx->stale.lock, flags);  in engines_notify()
    1360  spin_lock_irq(&ctx->stale.lock);  in kill_context()
    1368  spin_unlock_irq(&ctx->stale.lock);  in kill_context()
    1372  spin_lock_irq(&ctx->stale.lock);  in kill_context()
    1379  spin_unlock_irq(&ctx->stale.lock);  in kill_context()
    1409  spin_lock_irq(&ctx->stale.lock);  in engines_idle_release()
    1411  list_add_tail(&engines->link, &ctx->stale.engines);  in engines_idle_release()
    1412  spin_unlock_irq(&ctx->stale.lock);  in engines_idle_release()
    1554  spin_lock_init(&ctx->stale.lock);  in i915_gem_create_context()
    [all …]

  i915_gem_context_types.h
     410  } stale;  member
/linux/drivers/gpu/drm/msm/disp/mdp4/

  mdp4_crtc.c
      29  bool stale;  member
     364  if (mdp4_crtc->cursor.stale) {  in update_cursor()
     393  mdp4_crtc->cursor.stale = false;  in update_cursor()
     443  mdp4_crtc->cursor.stale = true;  in mdp4_crtc_cursor_set()
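Here "stale" is a deferred-update flag: mdp4_crtc_cursor_set() only records the new cursor and marks it stale, and update_cursor() consumes and clears the flag when the hardware is next programmed. A tiny sketch of that flag protocol with hypothetical types (the real driver also manages the underlying buffer objects and locking):

    #include <stdbool.h>
    #include <stddef.h>

    struct cursor_state {               /* hypothetical stand-in for the crtc cursor state */
        bool stale;                     /* a new image is queued but not yet programmed */
        const void *next_bo;            /* queued by the cursor_set path */
        const void *scanout_bo;         /* what the hardware currently scans out */
    };

    /* Ioctl path: only queue the update and mark it pending. */
    static void cursor_set(struct cursor_state *cur, const void *bo)
    {
        cur->next_bo = bo;
        cur->stale = true;
    }

    /* Flush path: touch the hardware only if something is pending. */
    static void update_cursor(struct cursor_state *cur)
    {
        if (!cur->stale)
            return;

        cur->scanout_bo = cur->next_bo; /* the hardware write would go here */
        cur->stale = false;
    }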
/linux/Documentation/filesystems/

  ceph.rst
     175  After reconnect, file locks become stale because the MDS loses track
     176  of them. If an inode contains any stale file locks, read/write on the
     177  inode is not allowed until applications release all stale file locks.
/linux/drivers/md/

  dm-ps-historical-service-time.c
     347  u64 *out, u64 *stale)  in hst_fill_compare()  argument
     354  *stale = pi->stale_after;  in hst_fill_compare()
/linux/mm/

  z3fold.c
     163  struct list_head stale;  member
     521  list_add(&zhdr->buddy, &pool->stale);  in __release_z3fold_page()
     560  while (!list_empty(&pool->stale)) {  in free_pages_work()
     561  struct z3fold_header *zhdr = list_first_entry(&pool->stale,  in free_pages_work()
    1000  INIT_LIST_HEAD(&pool->stale);  in z3fold_create_pool()
    1115  zhdr = list_first_entry_or_null(&pool->stale,  in z3fold_alloc()
/linux/Documentation/scsi/

  hpsa.rst
      80  and not be disrupted in any way by stale commands or other stale state
/linux/Documentation/x86/

  mds.rst
      29  deallocated it can retain the stale data of the preceding operations which
      37  contain stale data from a previous operation which can be forwarded to
     177  that stale data from the idling CPU from spilling to the Hyper-Thread
/linux/drivers/net/wireless/ath/ath9k/

  xmit.c
     404  tbf->bf_state.stale = false;  in ath_clone_txbuf()
     476  if (!bf->bf_state.stale || bf_next != NULL)  in ath_tx_complete_aggr()
     568  if (bf_next != NULL || !bf_last->bf_state.stale)  in ath_tx_complete_aggr()
     596  if (bf->bf_next == NULL && bf_last->bf_state.stale) {  in ath_tx_complete_aggr()
     914  bf->bf_state.stale = false;  in ath_tx_get_tid_subframe()
    1826  if (bf->bf_state.stale) {  in ath_drain_txq_list()
    2597  if (bf->bf_state.stale) {  in ath_tx_processq()
    2621  lastbf->bf_state.stale = true;  in ath_tx_processq()
    2702  if (bf->bf_state.stale) {  in ath_tx_edma_tasklet()
    2724  lastbf->bf_state.stale = true;  in ath_tx_edma_tasklet()
/linux/drivers/net/ethernet/intel/iavf/

  iavf_txrx.c
    2153  const skb_frag_t *frag, *stale;  in __iavf_chk_linearize()  local
    2185  for (stale = &skb_shinfo(skb)->frags[0];; stale++) {  in __iavf_chk_linearize()
    2186  int stale_size = skb_frag_size(stale);  in __iavf_chk_linearize()
    2197  int align_pad = -(skb_frag_off(stale)) &  in __iavf_chk_linearize()
/linux/drivers/net/ethernet/intel/ice/

  ice_txrx.c
    2110  const skb_frag_t *frag, *stale;  in __ice_chk_linearize()  local
    2142  for (stale = &skb_shinfo(skb)->frags[0];; stale++) {  in __ice_chk_linearize()
    2143  int stale_size = skb_frag_size(stale);  in __ice_chk_linearize()
    2154  int align_pad = -(skb_frag_off(stale)) &  in __ice_chk_linearize()