/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};
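
/*
 * These are bit numbers, not masks, and bdi_writeback.state must only be
 * accessed with atomic bitops.  A minimal illustrative sketch, not part of
 * this header (do_writeback() is a hypothetical stand-in for the actual
 * flusher work):
 *
 *	if (!test_and_set_bit(WB_writeback_running, &wb->state)) {
 *		do_writeback(wb);
 *		clear_bit(WB_writeback_running, &wb->state);
 *	}
 */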

enum wb_congested_state {
	WB_async_congested,	/* The async (write) queue is getting full */
	WB_sync_congested,	/* The sync queue is getting full */
};

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
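
/*
 * WB_STAT_BATCH bounds the per-CPU batching error of the wb stat counters
 * and grows logarithmically with the number of possible CPUs.  A hedged
 * sketch of how such a batch is typically consumed (the real stat helpers
 * live in backing-dev.h):
 *
 *	percpu_counter_add_batch(&wb->stat[WB_RECLAIMABLE], 1, WB_STAT_BATCH);
 */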

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and works are done
	 * by the emergency worker.  However, this reason is visible to
	 * userland via tracepoints, and we keep exposing exactly the
	 * same information, so the name no longer matches the mechanism.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,

	WB_REASON_MAX,
};

struct wb_completion {
	atomic_t		cnt;
	wait_queue_head_t	*waitq;
};

#define __WB_COMPLETION_INIT(_waitq)	\
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro.  Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion().  Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi)		__WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
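
/*
 * A hedged usage sketch of the pattern described above (wb_queue_work() and
 * wb_wait_for_completion() live elsewhere in the writeback code; "work" is
 * an illustrative wb_writeback_work):
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *
 *	work->done = &done;
 *	wb_queue_work(wb, work);
 *	wb_wait_for_completion(&done);
 */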

/*
 * Each wb (bdi_writeback) can perform writeback operations, is measured
 * and throttled, independently.  Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg.  This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg - blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg.  As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from index on mismatch so
 * that a new wb for the combination can be created.
 *
 * Each bdi_writeback that is not embedded into the backing_dev_info must hold
 * a reference to the parent backing_dev_info.  See cgwb_create() for details.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	atomic_t writeback_inodes;	/* number of inodes under writeback */
	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	unsigned long congested;	/* WB_[a]sync_congested flags */

	unsigned long bw_time_stamp;	/* last time write bw is updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */
	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * The dirty rate of all tasks dirtying this bdi is curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */
	struct delayed_work bw_dwork;	/* work item used for bandwidth estimate */

	unsigned long dirty_sleep;	/* last wait */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css; /* the associated memcg */
	struct cgroup_subsys_state *blkcg_css; /* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */
	struct list_head b_attached;	/* attached inodes, protected by list_lock */
	struct list_head offline_node;	/* anchored at offline_cgwbs */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
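
/*
 * A condensed, illustrative sketch of the lookup-and-mismatch rule described
 * above struct bdi_writeback, loosely modeled on wb_get_lookup() in
 * mm/backing-dev.c (current_blkcg_css is a stand-in for the blkcg css
 * currently effective for the memcg):
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && (wb->blkcg_css != current_blkcg_css || !wb_tryget(wb)))
 *		wb = NULL;
 *
 * A NULL result tells the caller to create a fresh wb for the memcg-blkcg
 * pair.
 */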

struct backing_dev_info {
	u64 id;
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;
	unsigned long ra_pages;		/* max readahead in PAGE_SIZE units */
	unsigned long io_pages;		/* max allowed IO size */

	struct kref refcnt;		/* Reference counter for the structure */
	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;
	/*
	 * Sum of avg_write_bw of wbs with dirty inodes.  > 0 if there are
	 * any dirty wbs; bdi_has_dirty_io() depends on this.
	 */
	atomic_long_t tot_write_bandwidth;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	char dev_name[64];
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
#endif
};

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
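
/*
 * The sync argument selects which wb_congested_state bit is affected; an
 * illustrative call site:
 *
 *	set_bdi_congested(bdi, BLK_RW_ASYNC);
 *	...
 *	clear_bdi_congested(bdi, BLK_RW_ASYNC);
 */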

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
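
/*
 * A hedged sketch of the intended pattern (the begin/end helpers are
 * declared in backing-dev.h; the body shown is illustrative):
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... access fields requiring a stable inode-to-wb association ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */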

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put_many - decrement a wb's refcount
 * @wb: bdi_writeback to put
 * @nr: number of references to put
 */
static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi was
		 * initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put_many(&wb->refcnt, nr);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	wb_put_many(wb, 1);
}

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */