Lines Matching refs:wq

56 struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)  in btrfs_workqueue_owner()  argument
58 return wq->fs_info; in btrfs_workqueue_owner()
63 return work->wq->fs_info; in btrfs_work_owner()
66 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) in btrfs_workqueue_normal_congested() argument
74 if (wq->normal->thresh == NO_THRESHOLD) in btrfs_workqueue_normal_congested()
77 return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; in btrfs_workqueue_normal_congested()
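
btrfs_workqueue_normal_congested() only reports congestion for a workqueue that was created with a threshold: with NO_THRESHOLD it always returns false, otherwise a backlog of more than twice the threshold counts as congested. A minimal userspace sketch of that heuristic, with illustrative names standing in for the fields of struct __btrfs_workqueue:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NO_THRESHOLD (-1)

/* Illustrative stand-in for the threshold fields of struct __btrfs_workqueue. */
struct wq_model {
        atomic_int pending;     /* work queued but not yet executed */
        int thresh;             /* configured threshold, or NO_THRESHOLD */
};

/* Mirrors btrfs_workqueue_normal_congested(): congested once the backlog
 * exceeds twice the threshold; never congested without a threshold. */
static bool wq_model_congested(struct wq_model *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return false;
        return atomic_load(&wq->pending) > wq->thresh * 2;
}

int main(void)
{
        struct wq_model wq = { .thresh = 4 };

        atomic_store(&wq.pending, 9);   /* 9 > 4 * 2 -> congested */
        printf("congested: %d\n", wq_model_congested(&wq));
        return 0;
}
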
127 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
165 static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) in thresh_queue_hook() argument
167 if (wq->thresh == NO_THRESHOLD) in thresh_queue_hook()
169 atomic_inc(&wq->pending); in thresh_queue_hook()
177 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) in thresh_exec_hook() argument
183 if (wq->thresh == NO_THRESHOLD) in thresh_exec_hook()
186 atomic_dec(&wq->pending); in thresh_exec_hook()
187 spin_lock(&wq->thres_lock); in thresh_exec_hook()
192 wq->count++; in thresh_exec_hook()
193 wq->count %= (wq->thresh / 4); in thresh_exec_hook()
194 if (!wq->count) in thresh_exec_hook()
196 new_current_active = wq->current_active; in thresh_exec_hook()
202 pending = atomic_read(&wq->pending); in thresh_exec_hook()
203 if (pending > wq->thresh) in thresh_exec_hook()
205 if (pending < wq->thresh / 2) in thresh_exec_hook()
207 new_current_active = clamp_val(new_current_active, 1, wq->limit_active); in thresh_exec_hook()
208 if (new_current_active != wq->current_active) { in thresh_exec_hook()
210 wq->current_active = new_current_active; in thresh_exec_hook()
213 spin_unlock(&wq->thres_lock); in thresh_exec_hook()
216 workqueue_set_max_active(wq->normal_wq, wq->current_active); in thresh_exec_hook()
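
thresh_queue_hook() and thresh_exec_hook() together form the adaptive part of the btrfs workqueue: every queued item bumps wq->pending, every execution decrements it, and the exec hook periodically re-evaluates the worker count for the backing kernel workqueue (the wq->count modulo wq->thresh / 4 counter gates how often that happens): one more worker when the backlog exceeds the threshold, one fewer when it drops below half of it, clamped to [1, limit_active], with workqueue_set_max_active() invoked only when the value actually changed. A standalone sketch of just that decision step; the function and variable names below are illustrative, not the kernel's:

#include <stdio.h>

/* Clamp helper mirroring the kernel's clamp_val(). */
static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/*
 * Pick the next max_active the way thresh_exec_hook() does: grow by one on a
 * large backlog, shrink by one on a small one, and stay within [1, limit].
 */
static int next_active(int current_active, int pending, int thresh, int limit_active)
{
        int new_active = current_active;

        if (pending > thresh)
                new_active++;
        if (pending < thresh / 2)
                new_active--;
        return clamp_int(new_active, 1, limit_active);
}

int main(void)
{
        /* thresh = 8, limit = 4: a backlog of 20 grows the pool, 2 shrinks it. */
        printf("%d\n", next_active(2, 20, 8, 4));       /* prints 3 */
        printf("%d\n", next_active(2, 2, 8, 4));        /* prints 1 */
        return 0;
}

Adjusting by a single step at a time and calling workqueue_set_max_active() only when the computed value differs from the current one keeps the adjustment cheap and stable.
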
220 static void run_ordered_work(struct __btrfs_workqueue *wq, in run_ordered_work() argument
223 struct list_head *list = &wq->ordered_list; in run_ordered_work()
225 spinlock_t *lock = &wq->list_lock; in run_ordered_work()
292 trace_btrfs_all_work_done(wq->fs_info, work); in run_ordered_work()
300 trace_btrfs_all_work_done(wq->fs_info, self); in run_ordered_work()
308 struct __btrfs_workqueue *wq; in btrfs_work_helper() local
321 wq = work->wq; in btrfs_work_helper()
324 thresh_exec_hook(wq); in btrfs_work_helper()
335 run_ordered_work(wq, work); in btrfs_work_helper()
338 trace_btrfs_all_work_done(wq->fs_info, work); in btrfs_work_helper()
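
btrfs_work_helper() is what every normal_work runs through: it accounts the completion via thresh_exec_hook(), executes the work item's own function (a line that does not match 'wq', so it is not shown above), and then run_ordered_work() walks wq->ordered_list under wq->list_lock so ordered callbacks fire strictly in queueing order, no matter which worker finished first. A simplified model of that ordering rule follows; the real code tracks WORK_DONE_BIT and related flags on struct btrfs_work and removes completed entries from the list, while this sketch just marks them:

#include <stdbool.h>
#include <stdio.h>

#define N_WORKS 4

/* Simplified model of run_ordered_work(): ordered callbacks fire strictly in
 * queueing order, and only once every earlier item has finished executing. */
struct work_model {
        bool done;      /* main function finished (WORK_DONE_BIT analogue) */
        bool ordered;   /* ordered callback already ran */
        int id;
};

static void run_ordered(struct work_model *list, int n)
{
        for (int i = 0; i < n; i++) {
                if (!list[i].done)
                        break;          /* head of the queue not done: stop */
                if (list[i].ordered)
                        continue;       /* already handled on an earlier pass */
                list[i].ordered = true;
                printf("ordered callback for work %d\n", list[i].id);
        }
}

int main(void)
{
        struct work_model list[N_WORKS] = {
                { false, false, 0 }, { false, false, 1 },
                { false, false, 2 }, { false, false, 3 },
        };

        list[2].done = true;            /* work 2 finishes first... */
        run_ordered(list, N_WORKS);     /* ...but nothing runs: work 0 is pending */

        list[0].done = list[1].done = true;
        run_ordered(list, N_WORKS);     /* now 0, 1 and 2 run, in that order */
        return 0;
}
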
353 static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, in __btrfs_queue_work() argument
358 work->wq = wq; in __btrfs_queue_work()
359 thresh_queue_hook(wq); in __btrfs_queue_work()
361 spin_lock_irqsave(&wq->list_lock, flags); in __btrfs_queue_work()
362 list_add_tail(&work->ordered_list, &wq->ordered_list); in __btrfs_queue_work()
363 spin_unlock_irqrestore(&wq->list_lock, flags); in __btrfs_queue_work()
366 queue_work(wq->normal_wq, &work->normal_work); in __btrfs_queue_work()
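
__btrfs_queue_work() is the submit-side mirror image: it records the owning queue in work->wq, runs thresh_queue_hook() to account for the new item, appends it to the ordered list under list_lock (spin_lock_irqsave(), so queueing stays safe from atomic context), and only then hands normal_work to the kernel workqueue. A compact userspace model of those steps, with a plain mutex and counters standing in for the kernel primitives (build with -pthread); all names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_ITEMS 16

/* Model of the steps in __btrfs_queue_work(): threshold accounting, FIFO
 * insertion under a lock, then hand-off for execution. The plain counter
 * 'submitted' stands in for queue_work(). */
struct queue_model {
        atomic_int pending;             /* thresh_queue_hook() analogue */
        pthread_mutex_t list_lock;      /* protects the ordered FIFO */
        int ordered[MAX_ITEMS];         /* ordered_list analogue */
        int count;
        int submitted;                  /* items handed to the "workqueue" */
};

static void queue_model_add(struct queue_model *q, int work_id)
{
        atomic_fetch_add(&q->pending, 1);       /* threshold accounting */

        pthread_mutex_lock(&q->list_lock);      /* append in queueing order */
        q->ordered[q->count++] = work_id;
        pthread_mutex_unlock(&q->list_lock);

        q->submitted++;                         /* queue_work() analogue */
}

int main(void)
{
        struct queue_model q = { .list_lock = PTHREAD_MUTEX_INITIALIZER };

        for (int i = 0; i < 3; i++)
                queue_model_add(&q, i);
        printf("pending=%d queued=%d submitted=%d\n",
               atomic_load(&q.pending), q.count, q.submitted);
        return 0;
}
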
369 void btrfs_queue_work(struct btrfs_workqueue *wq, in btrfs_queue_work() argument
374 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) in btrfs_queue_work()
375 dest_wq = wq->high; in btrfs_queue_work()
377 dest_wq = wq->normal; in btrfs_queue_work()
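
btrfs_queue_work() is the public entry point and only chooses between the two sub-queues: the high-priority one when the work carries WORK_HIGH_PRIO_BIT and a high-priority sub-queue exists, the normal one otherwise. An illustrative two-level model of that selection (types and names below are made up for the example):

#include <stdbool.h>
#include <stdio.h>

/* Two-level queue mirroring the dispatch in btrfs_queue_work(): use the
 * high-priority sub-queue only when the work is flagged high priority and
 * such a sub-queue was allocated, otherwise fall back to the normal one. */
struct subqueue { const char *name; };

struct twolevel_wq {
        struct subqueue normal;
        struct subqueue *high;  /* may be NULL (btrfs only allocates it for
                                 * WQ_HIGHPRI-flagged workqueues) */
};

struct work_item { bool high_prio; };   /* WORK_HIGH_PRIO_BIT analogue */

static struct subqueue *pick_queue(struct twolevel_wq *wq, struct work_item *w)
{
        if (w->high_prio && wq->high)
                return wq->high;
        return &wq->normal;
}

int main(void)
{
        struct subqueue hi = { "high" };
        struct twolevel_wq wq = { .normal = { "normal" }, .high = &hi };
        struct work_item urgent = { .high_prio = true };
        struct work_item plain = { .high_prio = false };

        printf("%s\n", pick_queue(&wq, &urgent)->name); /* high */
        printf("%s\n", pick_queue(&wq, &plain)->name);  /* normal */
        return 0;
}

Both sub-queues are plain struct __btrfs_workqueue instances, which is why the threshold and ordering code above operates on __btrfs_workqueue rather than on the btrfs_workqueue wrapper.
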
382 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq) in __btrfs_destroy_workqueue() argument
384 destroy_workqueue(wq->normal_wq); in __btrfs_destroy_workqueue()
385 trace_btrfs_workqueue_destroy(wq); in __btrfs_destroy_workqueue()
386 kfree(wq); in __btrfs_destroy_workqueue()
389 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq) in btrfs_destroy_workqueue() argument
391 if (!wq) in btrfs_destroy_workqueue()
393 if (wq->high) in btrfs_destroy_workqueue()
394 __btrfs_destroy_workqueue(wq->high); in btrfs_destroy_workqueue()
395 __btrfs_destroy_workqueue(wq->normal); in btrfs_destroy_workqueue()
396 kfree(wq); in btrfs_destroy_workqueue()
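
Teardown follows the same two-level layout: btrfs_destroy_workqueue() treats NULL as a no-op, tears down the optional high-priority sub-queue first, then the normal one (each via __btrfs_destroy_workqueue(), which destroys the backing kernel workqueue and frees the sub-queue), and finally frees the wrapper itself. A toy model of that ordering and the NULL tolerance:

#include <stdio.h>
#include <stdlib.h>

/* Toy teardown mirroring btrfs_destroy_workqueue(); names are illustrative. */
struct sub_wq { const char *name; };

struct wq_pair {
        struct sub_wq *normal;
        struct sub_wq *high;    /* may be NULL */
};

static void destroy_sub(struct sub_wq *sub)
{
        printf("destroying %s sub-queue\n", sub->name); /* destroy_workqueue() */
        free(sub);                                      /* kfree(wq) */
}

static void destroy_pair(struct wq_pair *wq)
{
        if (!wq)
                return;
        if (wq->high)
                destroy_sub(wq->high);
        destroy_sub(wq->normal);
        free(wq);
}

int main(void)
{
        struct wq_pair *wq = calloc(1, sizeof(*wq));

        wq->normal = calloc(1, sizeof(*wq->normal));
        wq->normal->name = "normal";    /* no high-priority queue in this run */

        destroy_pair(wq);               /* sub-queue(s) first, wrapper last */
        destroy_pair(NULL);             /* safe: NULL is ignored */
        return 0;
}
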
399 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) in btrfs_workqueue_set_max() argument
401 if (!wq) in btrfs_workqueue_set_max()
403 wq->normal->limit_active = limit_active; in btrfs_workqueue_set_max()
404 if (wq->high) in btrfs_workqueue_set_max()
405 wq->high->limit_active = limit_active; in btrfs_workqueue_set_max()
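
btrfs_workqueue_set_max() only rewrites limit_active on both sub-queues; nothing here touches the backing kernel workqueue. For a thresholded queue the new cap therefore takes effect lazily, the next time thresh_exec_hook() clamps current_active against it and calls workqueue_set_max_active(). A small sketch of that cap-versus-current split (illustrative names):

#include <stdio.h>

/* limit_active is the cap written by set_max; current_active is the value the
 * backing workqueue actually runs with, updated only by the exec hook. */
struct limits_model {
        int limit_active;
        int current_active;
};

static void set_max(struct limits_model *wq, int limit_active)
{
        wq->limit_active = limit_active;        /* cap only, applied lazily */
}

/* The clamp step of thresh_exec_hook(), isolated. */
static void exec_hook_clamp(struct limits_model *wq)
{
        if (wq->current_active > wq->limit_active)
                wq->current_active = wq->limit_active;
        if (wq->current_active < 1)
                wq->current_active = 1;
}

int main(void)
{
        struct limits_model wq = { .limit_active = 8, .current_active = 6 };

        set_max(&wq, 2);
        printf("after set_max:   current_active=%d\n", wq.current_active); /* 6 */
        exec_hook_clamp(&wq);
        printf("after exec hook: current_active=%d\n", wq.current_active); /* 2 */
        return 0;
}
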
413 void btrfs_flush_workqueue(struct btrfs_workqueue *wq) in btrfs_flush_workqueue() argument
415 if (wq->high) in btrfs_flush_workqueue()
416 flush_workqueue(wq->high->normal_wq); in btrfs_flush_workqueue()
418 flush_workqueue(wq->normal->normal_wq); in btrfs_flush_workqueue()
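
btrfs_flush_workqueue() drains the high-priority sub-queue first, when it exists, and then the normal one, by flushing their backing kernel workqueues. A toy model of that order:

#include <stdio.h>

/* Per-sub-queue pending count standing in for a kernel workqueue. */
struct flushable {
        const char *name;
        int pending;
};

static void flush_one(struct flushable *q)
{
        /* flush_workqueue() returns once every queued item has run. */
        printf("flushing %s: %d item(s) drained\n", q->name, q->pending);
        q->pending = 0;
}

static void flush_model(struct flushable *normal, struct flushable *high)
{
        if (high)               /* the high-priority queue is optional */
                flush_one(high);
        flush_one(normal);
}

int main(void)
{
        struct flushable normal = { "normal", 3 };
        struct flushable high = { "high", 1 };

        flush_model(&normal, &high);
        return 0;
}
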