Lines Matching refs:rs

82 			 struct gfs2_blkreserv *rs, bool nowrap);
204 static inline int rs_cmp(u64 start, u32 len, struct gfs2_blkreserv *rs) in rs_cmp() argument
206 if (start >= rs->rs_start + rs->rs_requested) in rs_cmp()
208 if (rs->rs_start >= start + len) in rs_cmp()
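
The two matches at 206 and 208 are essentially all of rs_cmp(): a three-way comparison of the block range [start, start + len) against a reservation's range [rs_start, rs_start + rs_requested), used to steer walks of a resource group's reservation tree. A minimal user-space sketch follows; the structs are stand-ins carrying only the fields that appear in this listing, not the kernel's struct gfs2_blkreserv / struct gfs2_rgrpd, and the later sketches below reuse them.

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the kernel structs, limited to the fields that show up
 * in this listing. */
struct rgrpd {
	uint32_t rd_requested;	/* blocks requested by all reservations */
	uint32_t rd_reserved;	/* blocks reserved for in-flight allocations */
	uint32_t rd_extfail_pt;	/* extent-search failure point */
};

struct blkreserv {
	struct rgrpd *rs_rgd;	/* resource group this reservation is in */
	uint64_t rs_start;	/* first block of the reservation */
	uint32_t rs_requested;	/* blocks requested but not yet claimed */
	uint32_t rs_reserved;	/* blocks reserved by the inplace-reserve step */
	bool rs_linked;		/* stand-in for "node is in the rbtree" */
};

/* Three-way compare of [start, start + len) against a reservation:
 * > 0 if the window lies entirely after it, < 0 if entirely before it,
 * 0 if the two ranges overlap (the tests on lines 206 and 208). */
static inline int rs_cmp(uint64_t start, uint32_t len,
			 const struct blkreserv *rs)
{
	if (start >= rs->rs_start + rs->rs_requested)
		return 1;
	if (rs->rs_start >= start + len)
		return -1;
	return 0;
}
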
619 static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs, in dump_rs() argument
622 struct gfs2_inode *ip = container_of(rs, struct gfs2_inode, i_res); in dump_rs()
627 (unsigned long long)rs->rs_start, in dump_rs()
628 rs->rs_requested); in dump_rs()
636 static void __rs_deltree(struct gfs2_blkreserv *rs) in __rs_deltree() argument
640 if (!gfs2_rs_active(rs)) in __rs_deltree()
643 rgd = rs->rs_rgd; in __rs_deltree()
644 trace_gfs2_rs(rs, TRACE_RS_TREEDEL); in __rs_deltree()
645 rb_erase(&rs->rs_node, &rgd->rd_rstree); in __rs_deltree()
646 RB_CLEAR_NODE(&rs->rs_node); in __rs_deltree()
648 if (rs->rs_requested) { in __rs_deltree()
650 BUG_ON(rs->rs_rgd->rd_requested < rs->rs_requested); in __rs_deltree()
651 rs->rs_rgd->rd_requested -= rs->rs_requested; in __rs_deltree()
657 rgd->rd_extfail_pt += rs->rs_requested; in __rs_deltree()
658 rs->rs_requested = 0; in __rs_deltree()
667 void gfs2_rs_deltree(struct gfs2_blkreserv *rs) in gfs2_rs_deltree() argument
671 rgd = rs->rs_rgd; in gfs2_rs_deltree()
674 __rs_deltree(rs); in gfs2_rs_deltree()
675 BUG_ON(rs->rs_requested); in gfs2_rs_deltree()
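
Lines 636-675 are the teardown path: __rs_deltree() unlinks an active reservation from the group's tree and returns its still-unclaimed blocks to the group-wide counters, and gfs2_rs_deltree() is the locked wrapper that then insists nothing is left requested (line 675). A hedged sketch of that bookkeeping on the stand-in types above, with the rbtree and the spinlock deliberately left out:

#include <assert.h>

/* Drop a reservation and hand its unclaimed blocks back to the group.
 * rs_linked stands in for gfs2_rs_active(); clearing it replaces the
 * rb_erase() + RB_CLEAR_NODE() on rd_rstree at lines 645-646. */
static void rs_deltree(struct blkreserv *rs)
{
	struct rgrpd *rgd;

	if (!rs->rs_linked)		/* not active: nothing to undo */
		return;

	rgd = rs->rs_rgd;
	rs->rs_linked = false;		/* unlink from the tree */

	if (rs->rs_requested) {
		/* The group-wide request count must cover our share. */
		assert(rgd->rd_requested >= rs->rs_requested);
		rgd->rd_requested -= rs->rs_requested;
		/* Returned blocks may make larger extents findable again,
		 * so the failure point moves up (line 657). */
		rgd->rd_extfail_pt += rs->rs_requested;
		rs->rs_requested = 0;
	}
}
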
705 struct gfs2_blkreserv *rs; in return_all_reservations() local
709 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in return_all_reservations()
710 __rs_deltree(rs); in return_all_reservations()
1491 struct gfs2_blkreserv *rs = &ip->i_res; in rs_insert() local
1492 struct gfs2_rgrpd *rgd = rs->rs_rgd; in rs_insert()
1494 BUG_ON(gfs2_rs_active(rs)); in rs_insert()
1503 rc = rs_cmp(rs->rs_start, rs->rs_requested, cur); in rs_insert()
1515 rb_link_node(&rs->rs_node, parent, newn); in rs_insert()
1516 rb_insert_color(&rs->rs_node, &rgd->rd_rstree); in rs_insert()
1519 rgd->rd_requested += rs->rs_requested; /* blocks requested */ in rs_insert()
1521 trace_gfs2_rs(rs, TRACE_RS_INSERT); in rs_insert()
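
rs_insert() (lines 1491-1521) walks the group's reservation tree with rs_cmp(), refuses a reservation that overlaps an existing one, links the new node, and adds its size to rd_requested. To keep the sketch small, the rbtree is replaced by a singly linked list kept sorted by rs_start; that substitution and the int return are mine, only the ordering test and the accounting come from the listing.

/* One node per reservation; a sorted singly linked list replaces the
 * kernel's rbtree purely to keep this example small. */
struct rs_node {
	struct blkreserv rs;
	struct rs_node *next;
};

/* Insert 'new' into the list for its resource group, kept sorted by
 * rs_start.  Returns 0 on success, -1 if it overlaps an existing
 * reservation, which the kernel treats as a bug and does not insert. */
static int rs_insert(struct rs_node **head, struct rs_node *new)
{
	struct rgrpd *rgd = new->rs.rs_rgd;
	struct rs_node **link = head;

	assert(!new->rs.rs_linked);	/* BUG_ON(gfs2_rs_active(rs)), line 1494 */

	while (*link) {
		int rc = rs_cmp(new->rs.rs_start, new->rs.rs_requested,
				&(*link)->rs);

		if (rc == 0)
			return -1;	/* ranges overlap */
		if (rc < 0)
			break;		/* 'new' sorts before this node */
		link = &(*link)->next;
	}

	new->next = *link;		/* rb_link_node/rb_insert_color, 1515-1516 */
	*link = new;
	new->rs.rs_linked = true;

	rgd->rd_requested += new->rs.rs_requested;	/* blocks requested, 1519 */
	return 0;
}
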
1537 static inline u32 rgd_free(struct gfs2_rgrpd *rgd, struct gfs2_blkreserv *rs) in rgd_free() argument
1541 if (WARN_ON_ONCE(rgd->rd_requested < rs->rs_requested)) in rgd_free()
1543 tot_reserved = rgd->rd_requested - rs->rs_requested; in rgd_free()
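
Only the first half of rgd_free() references rs (lines 1541 and 1543): the group-wide rd_requested minus this inode's own outstanding request, with a warning path when the counters disagree. How the kernel turns that remainder into a free-block estimate is not visible in this listing, so the sketch stops at the subtraction:

/* tot_reserved from line 1543: blocks requested by everyone else in
 * the group.  On a counter mismatch the kernel warns once and reports
 * zero free blocks (line 1541); the assert here is only a stand-in. */
static uint32_t other_requested(const struct rgrpd *rgd,
				const struct blkreserv *rs)
{
	assert(rgd->rd_requested >= rs->rs_requested);
	return rgd->rd_requested - rs->rs_requested;
}
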
1566 struct gfs2_blkreserv *rs = &ip->i_res; in rg_mblk_search() local
1573 free_blocks = rgd_free(rgd, rs); in rg_mblk_search()
1577 if (rgd == rs->rs_rgd) in rg_mblk_search()
1578 blocks_available += rs->rs_reserved; in rg_mblk_search()
1601 rs->rs_start = gfs2_rbm_to_block(&rbm); in rg_mblk_search()
1602 rs->rs_requested = extlen; in rg_mblk_search()
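
rg_mblk_search() (lines 1566-1602) is where a new reservation takes shape: the free-space estimate comes from rgd_free(), blocks this inode has already reserved in the same group are added back to what counts as available, and once a run of free blocks is found its start and length are recorded in rs_start / rs_requested. The bitmap search itself never mentions rs, so in the sketch it is reduced to a pre-found (start, extlen) pair; the overlap handling is an assumption of the sketch, the kernel only warns and leaves the reservation uninserted:

/* Record a found extent as this inode's reservation and publish it.
 * 'start' and 'extlen' stand in for the result of the bitmap search
 * (gfs2_rbm_find()/gfs2_rbm_to_block() in the kernel). */
static void record_reservation(struct rs_node **tree, struct rs_node *node,
			       uint64_t start, uint32_t extlen)
{
	node->rs.rs_start = start;		/* line 1601 */
	node->rs.rs_requested = extlen;		/* line 1602 */
	if (rs_insert(tree, node) != 0)
		node->rs.rs_requested = 0;	/* sketch-only: drop on overlap */
}
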
1627 struct gfs2_blkreserv *rs; in gfs2_next_unreserved_block() local
1634 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_next_unreserved_block()
1635 rc = rs_cmp(block, length, rs); in gfs2_next_unreserved_block()
1645 while (rs_cmp(block, length, rs) == 0 && rs != ignore_rs) { in gfs2_next_unreserved_block()
1646 block = rs->rs_start + rs->rs_requested; in gfs2_next_unreserved_block()
1650 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_next_unreserved_block()
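
gfs2_next_unreserved_block() (lines 1627-1650) answers: starting at block, where is the next position from which length blocks collide with nobody else's reservation? It first locates a reservation overlapping that window, then hops to the end of each overlapping reservation in turn, stopping early if the obstacle is the caller's own (ignore_rs). With the sorted list again standing in for the rbtree:

/* First block >= 'block' from which a run of 'length' blocks does not
 * overlap any reservation in the (sorted) list except 'ignore', the
 * caller's own reservation. */
static uint64_t next_unreserved_block(struct rs_node *head, uint64_t block,
				      uint32_t length,
				      const struct blkreserv *ignore)
{
	struct rs_node *n = head;

	/* Skip reservations that end before the window starts; in the
	 * kernel this is the tree descent around lines 1634-1635. */
	while (n && rs_cmp(block, length, &n->rs) > 0)
		n = n->next;

	/* Hop past each reservation the window still collides with
	 * (lines 1645-1650), stopping if the obstacle is our own. */
	while (n && rs_cmp(block, length, &n->rs) == 0 && &n->rs != ignore) {
		block = n->rs.rs_start + n->rs.rs_requested;
		n = n->next;
	}
	return block;
}
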
1675 struct gfs2_blkreserv *rs, in gfs2_reservation_check_and_update() argument
1697 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, rs); in gfs2_reservation_check_and_update()
1738 struct gfs2_blkreserv *rs, bool nowrap) in gfs2_rbm_find() argument
1775 if (!rs || !minext) in gfs2_rbm_find()
1778 ret = gfs2_reservation_check_and_update(rbm, rs, *minext, in gfs2_rbm_find()
1975 static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs, in gfs2_rgrp_used_recently() argument
1981 rs->rs_rgd->rd_gl->gl_dstamp)); in gfs2_rgrp_used_recently()
2048 struct gfs2_blkreserv *rs = &ip->i_res; in gfs2_inplace_reserve() local
2056 BUG_ON(rs->rs_reserved); in gfs2_inplace_reserve()
2062 if (gfs2_rs_active(rs)) { in gfs2_inplace_reserve()
2063 begin = rs->rs_rgd; in gfs2_inplace_reserve()
2064 } else if (rs->rs_rgd && in gfs2_inplace_reserve()
2065 rgrp_contains_block(rs->rs_rgd, ip->i_goal)) { in gfs2_inplace_reserve()
2066 begin = rs->rs_rgd; in gfs2_inplace_reserve()
2069 rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); in gfs2_inplace_reserve()
2073 if (rs->rs_rgd == NULL) in gfs2_inplace_reserve()
2079 rg_locked = gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl); in gfs2_inplace_reserve()
2081 rgrp_lock_local(rs->rs_rgd); in gfs2_inplace_reserve()
2085 if (!gfs2_rs_active(rs)) { in gfs2_inplace_reserve()
2087 !fast_to_acquire(rs->rs_rgd)) in gfs2_inplace_reserve()
2090 gfs2_rgrp_used_recently(rs, 1000) && in gfs2_inplace_reserve()
2091 gfs2_rgrp_congested(rs->rs_rgd, loops)) in gfs2_inplace_reserve()
2094 error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl, in gfs2_inplace_reserve()
2099 rgrp_lock_local(rs->rs_rgd); in gfs2_inplace_reserve()
2100 if (!gfs2_rs_active(rs) && (loops < 2) && in gfs2_inplace_reserve()
2101 gfs2_rgrp_congested(rs->rs_rgd, loops)) in gfs2_inplace_reserve()
2104 error = update_rgrp_lvb(rs->rs_rgd, in gfs2_inplace_reserve()
2107 rgrp_unlock_local(rs->rs_rgd); in gfs2_inplace_reserve()
2115 if ((rs->rs_rgd->rd_flags & (GFS2_RGF_NOALLOC | in gfs2_inplace_reserve()
2117 (loops == 0 && target > rs->rs_rgd->rd_extfail_pt)) in gfs2_inplace_reserve()
2127 if (!gfs2_rs_active(rs)) in gfs2_inplace_reserve()
2128 rg_mblk_search(rs->rs_rgd, ip, ap); in gfs2_inplace_reserve()
2131 if (!gfs2_rs_active(rs) && (loops < 1)) in gfs2_inplace_reserve()
2135 rgd = rs->rs_rgd; in gfs2_inplace_reserve()
2137 free_blocks = rgd_free(rgd, rs); in gfs2_inplace_reserve()
2143 rs->rs_reserved = ap->target; in gfs2_inplace_reserve()
2144 if (rs->rs_reserved > blocks_available) in gfs2_inplace_reserve()
2145 rs->rs_reserved = blocks_available; in gfs2_inplace_reserve()
2146 rgd->rd_reserved += rs->rs_reserved; in gfs2_inplace_reserve()
2148 rgrp_unlock_local(rs->rs_rgd); in gfs2_inplace_reserve()
2152 if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK) in gfs2_inplace_reserve()
2153 try_rgrp_unlink(rs->rs_rgd, &last_unlinked, in gfs2_inplace_reserve()
2156 rgrp_unlock_local(rs->rs_rgd); in gfs2_inplace_reserve()
2159 if (gfs2_rs_active(rs)) in gfs2_inplace_reserve()
2160 gfs2_rs_deltree(rs); in gfs2_inplace_reserve()
2167 if (gfs2_select_rgrp(&rs->rs_rgd, begin)) in gfs2_inplace_reserve()
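
gfs2_inplace_reserve() (lines 2048-2167) is the main allocation loop: start from the group of an active reservation, or the group holding the inode's goal block, otherwise look one up; take the group glock unless it is already held, skip groups that look congested or are flagged GFS2_RGF_NOALLOC, let rg_mblk_search() carve out a reservation if none is active, and either claim blocks from the group or drop the reservation and move on to the next group. The one step with a clean shape in the listing is the final claim (lines 2143-2146); a sketch of just that step, with blocks_available passed in because its derivation from free_blocks never references rs and so is not shown above:

/* Claim up to 'target' blocks from the group for this allocation,
 * clamped to what the group can actually provide right now.  The
 * kernel enters gfs2_inplace_reserve() with BUG_ON(rs->rs_reserved)
 * (line 2056), mirrored by the assert. */
static void inplace_reserve_claim(struct blkreserv *rs, uint32_t target,
				  uint32_t blocks_available)
{
	struct rgrpd *rgd = rs->rs_rgd;

	assert(rs->rs_reserved == 0);

	rs->rs_reserved = target;			/* line 2143 */
	if (rs->rs_reserved > blocks_available)		/* line 2144 */
		rs->rs_reserved = blocks_available;
	rgd->rd_reserved += rs->rs_reserved;		/* line 2146 */
}
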
2204 struct gfs2_blkreserv *rs = &ip->i_res; in gfs2_inplace_release() local
2206 if (rs->rs_reserved) { in gfs2_inplace_release()
2207 struct gfs2_rgrpd *rgd = rs->rs_rgd; in gfs2_inplace_release()
2210 GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved < rs->rs_reserved); in gfs2_inplace_release()
2211 rgd->rd_reserved -= rs->rs_reserved; in gfs2_inplace_release()
2213 rs->rs_reserved = 0; in gfs2_inplace_release()
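
gfs2_inplace_release() (lines 2204-2213) is the mirror image: whatever part of rs_reserved the allocation did not consume goes back to the group's rd_reserved under the group lock. As a sketch, with the locking and the GLOCK_BUG_ON reduced to an assert:

/* Give back whatever this allocation reserved but did not consume. */
static void inplace_release(struct blkreserv *rs)
{
	if (rs->rs_reserved) {
		struct rgrpd *rgd = rs->rs_rgd;

		assert(rgd->rd_reserved >= rs->rs_reserved);	/* line 2210 */
		rgd->rd_reserved -= rs->rs_reserved;		/* line 2211 */
		rs->rs_reserved = 0;				/* line 2213 */
	}
}
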
2350 struct gfs2_blkreserv *rs = &ip->i_res; in gfs2_adjust_reservation() local
2353 BUG_ON(rs->rs_reserved < len); in gfs2_adjust_reservation()
2354 rs->rs_reserved -= len; in gfs2_adjust_reservation()
2355 if (gfs2_rs_active(rs)) { in gfs2_adjust_reservation()
2358 if (rs->rs_start == start) { in gfs2_adjust_reservation()
2361 rs->rs_start += len; in gfs2_adjust_reservation()
2362 rlen = min(rs->rs_requested, len); in gfs2_adjust_reservation()
2363 rs->rs_requested -= rlen; in gfs2_adjust_reservation()
2365 trace_gfs2_rs(rs, TRACE_RS_CLAIM); in gfs2_adjust_reservation()
2366 if (rs->rs_start < rgd->rd_data0 + rgd->rd_data && in gfs2_adjust_reservation()
2367 rs->rs_requested) in gfs2_adjust_reservation()
2373 __rs_deltree(rs); in gfs2_adjust_reservation()
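
gfs2_adjust_reservation() (lines 2350-2373) runs once blocks have actually been allocated: the allocation is charged against rs_reserved, and if it came off the front of an active reservation the reservation is trimmed from the left; when nothing requested remains, or the new start would fall outside the group, the reservation is deleted. A sketch of that trimming; rgd_end stands in for rd_data0 + rd_data (those fields are not part of the stand-in struct), and the matching decrement of the group counter is inferred for consistency with rs_deltree() since that line does not reference rs:

/* Account a just-made allocation of 'len' blocks starting at 'start'
 * against the inode's reservation.  'rgd_end' stands in for
 * rd_data0 + rd_data, the first block past this resource group. */
static void adjust_reservation(struct blkreserv *rs, uint64_t start,
			       uint32_t len, uint64_t rgd_end)
{
	assert(rs->rs_reserved >= len);		/* BUG_ON, line 2353 */
	rs->rs_reserved -= len;

	if (!rs->rs_linked)			/* no active reservation */
		return;

	if (rs->rs_start == start) {
		/* Allocation came off the front: trim from the left. */
		uint32_t rlen;

		rs->rs_start += len;				/* line 2361 */
		rlen = rs->rs_requested < len ? rs->rs_requested : len;
		rs->rs_requested -= rlen;			/* line 2363 */
		/* Keep the group counter in step with rs_deltree();
		 * inferred, the matching line does not reference rs. */
		rs->rs_rgd->rd_requested -= rlen;
		if (rs->rs_start < rgd_end && rs->rs_requested)
			return;		/* something usable remains */
	}
	rs_deltree(rs);				/* line 2373 */
}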