Lines matching refs:td — references to the struct throtl_data pointer (td) in the Linux block layer's block/blk-throttle.c. Each entry gives the source line number, the matching line, and the enclosing function, with "local"/"argument" marking how td is bound at that site.
129 return tg->td; in sq_to_td()
142 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) in throtl_adjusted_limit() argument
145 if (td->scale < 4096 && time_after_eq(jiffies, in throtl_adjusted_limit()
146 td->low_upgrade_time + td->scale * td->throtl_slice)) in throtl_adjusted_limit()
147 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; in throtl_adjusted_limit()
149 return low + (low >> 1) * td->scale; in throtl_adjusted_limit()
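
The throtl_adjusted_limit() lines above encode the LIMIT_LOW boost: the longer the queue has been running since the last upgrade, the larger the effective limit. A minimal userspace model of the same arithmetic, with the struct throtl_data fields passed in explicitly (the function and parameter names are illustrative, not the kernel's, and a plain >= stands in for time_after_eq(), which the kernel uses to survive jiffies wraparound):

    #include <stdint.h>

    uint64_t adjusted_limit(uint64_t low, unsigned long now,
                            unsigned long low_upgrade_time,
                            unsigned long throtl_slice, unsigned int *scale)
    {
            /* scale grows by one per elapsed throtl_slice, capped at 4096 */
            if (*scale < 4096 && now >= low_upgrade_time + *scale * throtl_slice)
                    *scale = (now - low_upgrade_time) / throtl_slice;
            /* each step adds half of the configured low limit */
            return low + (low >> 1) * *scale;
    }

For example, with low = 10 MB/s and scale = 4 the effective limit is 10 + 5 * 4 = 30 MB/s.
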
155 struct throtl_data *td; in tg_bps_limit() local
161 td = tg->td; in tg_bps_limit()
162 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
163 if (ret == 0 && td->limit_index == LIMIT_LOW) { in tg_bps_limit()
166 tg->iops[rw][td->limit_index]) in tg_bps_limit()
172 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
176 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
185 struct throtl_data *td; in tg_iops_limit() local
191 td = tg->td; in tg_iops_limit()
192 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
193 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
196 tg->bps[rw][td->limit_index]) in tg_iops_limit()
202 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
206 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
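
tg_bps_limit() and tg_iops_limit() share one pattern: read the value for the current limit_index, treat a missing LIMIT_LOW value as effectively unlimited, and, while running at LIMIT_MAX with a low limit configured, cap the result by the boosted low limit from throtl_adjusted_limit(). A reduced sketch of the bps side; the continuation of the condition on line 172, the exact fallback values, and the final min() are not in the listing and are assumptions here:

    #include <stdint.h>

    enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

    /* "low_adjusted" stands in for throtl_adjusted_limit() applied to the
     * group's LIMIT_LOW value. */
    uint64_t bps_limit(const uint64_t bps[LIMIT_CNT], uint64_t low_adjusted,
                       int limit_index)
    {
            uint64_t ret = bps[limit_index];

            /* No low limit for this direction: behave as unlimited (the
             * kernel picks U64_MAX or a small floor depending on children
             * and the iops setting; simplified here). */
            if (ret == 0 && limit_index == LIMIT_LOW)
                    return UINT64_MAX;

            /* At LIMIT_MAX with a distinct low limit: cap by the boosted low. */
            if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
                bps[LIMIT_LOW] != bps[LIMIT_MAX])
                    ret = ret < low_adjusted ? ret : low_adjusted;

            return ret;
    }
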
396 struct throtl_data *td = blkg->q->td; in throtl_pd_init() local
412 sq->parent_sq = &td->service_queue; in throtl_pd_init()
415 tg->td = td; in throtl_pd_init()
426 struct throtl_data *td = tg->td; in tg_update_has_rules() local
431 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
447 static void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
454 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
465 td->limit_valid[LIMIT_LOW] = low_valid; in blk_throtl_update_limit_valid()
468 static inline void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
473 static void throtl_upgrade_state(struct throtl_data *td);
483 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
485 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
486 throtl_upgrade_state(tg->td); in throtl_pd_offline()
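
blk_throtl_update_limit_valid() walks every throttle group and records in limit_valid[LIMIT_LOW] whether any of them still has a low bps or iops limit configured; throtl_pd_offline() re-runs that scan when a group goes away and, if the level the queue is currently running at is no longer valid, calls throtl_upgrade_state() immediately (hence the forward declaration at line 473). A toy model of the scan, with an illustrative flat group array in place of the blkg descendant walk:

    #include <stdbool.h>
    #include <stddef.h>

    struct tg_model {
            unsigned long bps_low[2];   /* READ, WRITE */
            unsigned long iops_low[2];
    };

    /* limit_valid[LIMIT_LOW] stays true while any group keeps a low limit. */
    bool any_low_limit(const struct tg_model *tgs, size_t nr)
    {
            for (size_t i = 0; i < nr; i++)
                    if (tgs[i].bps_low[0] || tgs[i].bps_low[1] ||
                        tgs[i].iops_low[0] || tgs[i].iops_low[1])
                            return true;
            return false;
    }
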
647 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
659 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
672 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
718 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
722 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
726 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
730 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
746 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
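
throtl_trim_slice() forgets budget that belongs to already-completed slices so an idle group cannot hoard credit: it counts the whole throtl_slice periods that have elapsed, converts them into a byte and an IO allowance, subtracts that from the dispatched counters, and advances slice_start. The "/ HZ" that completes line 730 and the do_div(tmp, HZ) following line 726 are in the source but not in the listing, so they appear below as assumptions; all names are illustrative:

    #include <stdint.h>

    void trim_slice(unsigned long *slice_start, unsigned long now,
                    unsigned long throtl_slice, unsigned long hz,
                    uint64_t bps_limit, unsigned int iops_limit,
                    uint64_t *bytes_disp, unsigned int *io_disp)
    {
            unsigned long nr_slices = (now - *slice_start) / throtl_slice;
            uint64_t bytes_trim, io_trim;

            if (!nr_slices)
                    return;

            /* Budget the elapsed whole slices would have allowed. */
            bytes_trim = bps_limit * throtl_slice * nr_slices / hz;
            io_trim = (uint64_t)iops_limit * throtl_slice * nr_slices / hz;

            *bytes_disp = *bytes_disp > bytes_trim ? *bytes_disp - bytes_trim : 0;
            *io_disp = *io_disp > io_trim ? *io_disp - (unsigned int)io_trim : 0;
            *slice_start += nr_slices * throtl_slice;
    }
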
771 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_with_in_iops_limit()
820 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
822 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
890 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
892 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1038 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1039 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1114 static bool throtl_can_upgrade(struct throtl_data *td,
1135 struct throtl_data *td = sq_to_td(sq); in throtl_pending_timer_fn() local
1136 struct request_queue *q = td->queue; in throtl_pending_timer_fn()
1142 if (throtl_can_upgrade(td, NULL)) in throtl_pending_timer_fn()
1143 throtl_upgrade_state(td); in throtl_pending_timer_fn()
1185 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1201 struct throtl_data *td = container_of(work, struct throtl_data, in blk_throtl_dispatch_work_fn() local
1203 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1204 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1281 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1609 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1610 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1612 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1614 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1616 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
1645 struct throtl_data *td = q->td; in throtl_shutdown_wq() local
1647 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1722 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1747 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1765 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
1771 if (td->limit_index != LIMIT_LOW) in throtl_can_upgrade()
1774 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) in throtl_can_upgrade()
1778 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1798 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1801 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1807 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1810 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1811 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1814 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
1819 throtl_log(&td->service_queue, "upgrade to max"); in throtl_upgrade_state()
1820 td->limit_index = LIMIT_MAX; in throtl_upgrade_state()
1821 td->low_upgrade_time = jiffies; in throtl_upgrade_state()
1822 td->scale = 0; in throtl_upgrade_state()
1824 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
1833 throtl_select_dispatch(&td->service_queue); in throtl_upgrade_state()
1834 throtl_schedule_next_dispatch(&td->service_queue, true); in throtl_upgrade_state()
1835 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_upgrade_state()
1838 static void throtl_downgrade_state(struct throtl_data *td) in throtl_downgrade_state() argument
1840 td->scale /= 2; in throtl_downgrade_state()
1842 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); in throtl_downgrade_state()
1843 if (td->scale) { in throtl_downgrade_state()
1844 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; in throtl_downgrade_state()
1848 td->limit_index = LIMIT_LOW; in throtl_downgrade_state()
1849 td->low_downgrade_time = jiffies; in throtl_downgrade_state()
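
Lines 1765-1849 form the LIMIT_LOW <-> LIMIT_MAX state machine: an upgrade is only considered while at LIMIT_LOW and at least one throtl_slice after the last downgrade; upgrading switches limit_index to LIMIT_MAX, stamps low_upgrade_time, resets scale and kicks a dispatch; downgrading first halves scale and, while scale is still non-zero, merely backdates low_upgrade_time so the boost shrinks, only dropping back to LIMIT_LOW once scale reaches zero. A minimal model of the two transitions (the struct and the "now" parameter are illustrative stand-ins for throtl_data and jiffies):

    enum { LIMIT_LOW, LIMIT_MAX };

    struct td_state {
            int limit_index;
            unsigned int scale;
            unsigned long low_upgrade_time;
            unsigned long low_downgrade_time;
            unsigned long throtl_slice;
    };

    void upgrade_state(struct td_state *td, unsigned long now)
    {
            td->limit_index = LIMIT_MAX;
            td->low_upgrade_time = now;
            td->scale = 0;
    }

    void downgrade_state(struct td_state *td, unsigned long now)
    {
            td->scale /= 2;
            if (td->scale) {
                    /* Still boosted: pretend the upgrade happened scale
                     * slices ago so throtl_adjusted_limit() shrinks. */
                    td->low_upgrade_time = now - td->scale * td->throtl_slice;
                    return;
            }
            td->limit_index = LIMIT_LOW;
            td->low_downgrade_time = now;
    }
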
1854 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade() local
1861 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) && in throtl_tg_can_downgrade()
1863 td->throtl_slice) && in throtl_tg_can_downgrade()
1889 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
1890 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
1894 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
1901 tg->td->throtl_slice)) in throtl_downgrade_check()
1937 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
1963 static void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
1970 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW]) in throtl_update_latency_buckets()
1972 if (time_before(jiffies, td->last_calculate_time + HZ)) in throtl_update_latency_buckets()
1974 td->last_calculate_time = jiffies; in throtl_update_latency_buckets()
1979 struct latency_bucket *tmp = &td->tmp_buckets[rw][i]; in throtl_update_latency_buckets()
1985 bucket = per_cpu_ptr(td->latency_buckets[rw], in throtl_update_latency_buckets()
2011 if (td->avg_buckets[rw][i].latency < last_latency[rw]) in throtl_update_latency_buckets()
2012 td->avg_buckets[rw][i].latency = in throtl_update_latency_buckets()
2017 if (!td->avg_buckets[rw][i].valid) in throtl_update_latency_buckets()
2020 latency[rw] = (td->avg_buckets[rw][i].latency * 7 + in throtl_update_latency_buckets()
2023 td->avg_buckets[rw][i].latency = max(latency[rw], in throtl_update_latency_buckets()
2025 td->avg_buckets[rw][i].valid = true; in throtl_update_latency_buckets()
2026 last_latency[rw] = td->avg_buckets[rw][i].latency; in throtl_update_latency_buckets()
2031 throtl_log(&td->service_queue, in throtl_update_latency_buckets()
2034 td->avg_buckets[READ][i].latency, in throtl_update_latency_buckets()
2035 td->avg_buckets[READ][i].valid, in throtl_update_latency_buckets()
2036 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2037 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
2040 static inline void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
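
throtl_update_latency_buckets() is gated to non-rotational queues with a valid low limit (line 1970) and runs at most once per second (line 1972). For each bucket it folds the newly sampled latency into a running average with a 7/8 weighting (line 2020) and never lets a bucket's average fall below the value carried over from the previous bucket (lines 2011-2012 and 2023-2026). The divide by 8 completes the expression started on line 2020 and is an assumption of this sketch, as are the names:

    #include <stdint.h>

    uint64_t update_avg_latency(uint64_t old_avg, int old_valid,
                                uint64_t sample, uint64_t last_bucket_avg)
    {
            /* EWMA: new = (old * 7 + sample) / 8 once the bucket is valid. */
            uint64_t avg = old_valid ? (old_avg * 7 + sample) / 8 : sample;

            /* Keep bucket averages monotonically non-decreasing. */
            return avg > last_bucket_avg ? avg : last_bucket_avg;
    }
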
2073 struct throtl_data *td = tg->td; in __blk_throtl_bio() local
2085 throtl_update_latency_buckets(td); in __blk_throtl_bio()
2104 if (throtl_can_upgrade(td, tg)) { in __blk_throtl_bio()
2105 throtl_upgrade_state(td); in __blk_throtl_bio()
2149 td->nr_queued[rw]++; in __blk_throtl_bio()
2169 if (throttled || !td->track_bio_latency) in __blk_throtl_bio()
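
__blk_throtl_bio() only touches td in a few places: it refreshes the latency buckets, may trigger an immediate upgrade, bumps the per-direction queue counter when it throttles a bio (line 2149), and skips latency tagging when the bio was throttled or tracking is off (line 2169). That counter is paired with the decrement in tg_dispatch_one_bio() (lines 1038-1039); a toy version of the bookkeeping, with assert() standing in for the BUG_ON:

    #include <assert.h>

    static unsigned int nr_queued[2];   /* READ, WRITE */

    void queue_bio(int rw)
    {
            nr_queued[rw]++;            /* __blk_throtl_bio() side */
    }

    void dispatch_bio(int rw)
    {
            assert(nr_queued[rw] > 0);  /* mirrors the BUG_ON */
            nr_queued[rw]--;            /* tg_dispatch_one_bio() side */
    }
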
2177 static void throtl_track_latency(struct throtl_data *td, sector_t size, in throtl_track_latency() argument
2183 if (!td || td->limit_index != LIMIT_LOW || in throtl_track_latency()
2185 !blk_queue_nonrot(td->queue)) in throtl_track_latency()
2190 latency = get_cpu_ptr(td->latency_buckets[op]); in throtl_track_latency()
2193 put_cpu_ptr(td->latency_buckets[op]); in throtl_track_latency()
2199 struct throtl_data *td = q->td; in blk_throtl_stat_add() local
2201 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq), in blk_throtl_stat_add()
2219 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2233 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2236 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2241 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2253 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
2262 struct throtl_data *td; in blk_throtl_init() local
2265 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
2266 if (!td) in blk_throtl_init()
2268 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2270 if (!td->latency_buckets[READ]) { in blk_throtl_init()
2271 kfree(td); in blk_throtl_init()
2274 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2276 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2277 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2278 kfree(td); in blk_throtl_init()
2282 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
2283 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
2285 q->td = td; in blk_throtl_init()
2286 td->queue = q; in blk_throtl_init()
2288 td->limit_valid[LIMIT_MAX] = true; in blk_throtl_init()
2289 td->limit_index = LIMIT_MAX; in blk_throtl_init()
2290 td->low_upgrade_time = jiffies; in blk_throtl_init()
2291 td->low_downgrade_time = jiffies; in blk_throtl_init()
2296 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2297 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2298 kfree(td); in blk_throtl_init()
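
blk_throtl_init() allocates in three steps and unwinds them in reverse on failure: td itself, then the READ per-cpu latency buckets, then the WRITE buckets. Once everything is in place it wires up the dispatch work and root service queue, links q->td and td->queue, and starts out at LIMIT_MAX with both low_upgrade_time and low_downgrade_time set to the current jiffies. The shape of that error handling, with calloc() standing in for kzalloc_node()/__alloc_percpu() purely for illustration:

    #include <stdlib.h>

    struct td_model { void *latency_buckets[2]; };

    struct td_model *init_model(size_t bucket_bytes)
    {
            struct td_model *td = calloc(1, sizeof(*td));

            if (!td)
                    return NULL;
            td->latency_buckets[0] = calloc(1, bucket_bytes);   /* READ */
            if (!td->latency_buckets[0])
                    goto free_td;
            td->latency_buckets[1] = calloc(1, bucket_bytes);   /* WRITE */
            if (!td->latency_buckets[1])
                    goto free_read;
            return td;

    free_read:
            free(td->latency_buckets[0]);
    free_td:
            free(td);
            return NULL;
    }
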
2305 BUG_ON(!q->td); in blk_throtl_exit()
2306 del_timer_sync(&q->td->service_queue.pending_timer); in blk_throtl_exit()
2309 free_percpu(q->td->latency_buckets[READ]); in blk_throtl_exit()
2310 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2311 kfree(q->td); in blk_throtl_exit()
2316 struct throtl_data *td; in blk_throtl_register_queue() local
2319 td = q->td; in blk_throtl_register_queue()
2320 BUG_ON(!td); in blk_throtl_register_queue()
2323 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_register_queue()
2324 td->filtered_latency = LATENCY_FILTERED_SSD; in blk_throtl_register_queue()
2326 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2327 td->filtered_latency = LATENCY_FILTERED_HD; in blk_throtl_register_queue()
2329 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2330 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2335 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2338 td->track_bio_latency = !queue_is_mq(q); in blk_throtl_register_queue()
2339 if (!td->track_bio_latency) in blk_throtl_register_queue()
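
blk_throtl_register_queue() picks defaults per device type: non-rotational queues get the SSD slice length and SSD latency filter, rotational ones get the HD values plus a baseline latency seeded into every avg bucket, and a fallback HD slice is used when the device type cannot be told (line 2335). Bio-level latency tracking is only enabled for bio-based queues (track_bio_latency = !queue_is_mq(q), line 2338); for blk-mq queues the source instead relies on request statistics feeding blk_throtl_stat_add() (lines 2199-2201), which is not visible in this listing. A small sketch of the selection, with the macro values deliberately left as parameters since they are not part of the listing:

    #include <stdbool.h>

    struct queue_defaults { unsigned long throtl_slice, filtered_latency; };

    struct queue_defaults pick_defaults(bool nonrot,
                                        unsigned long slice_ssd,
                                        unsigned long slice_hd,
                                        unsigned long lat_ssd,
                                        unsigned long lat_hd)
    {
            struct queue_defaults d;

            if (nonrot) {
                    d.throtl_slice = slice_ssd;     /* DFL_THROTL_SLICE_SSD */
                    d.filtered_latency = lat_ssd;   /* LATENCY_FILTERED_SSD */
            } else {
                    d.throtl_slice = slice_hd;      /* DFL_THROTL_SLICE_HD */
                    d.filtered_latency = lat_hd;    /* LATENCY_FILTERED_HD */
            }
            return d;
    }
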
2346 if (!q->td) in blk_throtl_sample_time_show()
2348 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice)); in blk_throtl_sample_time_show()
2357 if (!q->td) in blk_throtl_sample_time_store()
2364 q->td->throtl_slice = t; in blk_throtl_sample_time_store()