Lines Matching refs:rps
147 static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms) in wait_for_freq() argument
162 act = read_cagf(rps); in wait_for_freq()
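From the two fragments above, wait_for_freq() evidently polls read_cagf() until the measured frequency settles at the requested value or timeout_ms expires. A simplified sketch of such a poll loop; the ktime/usleep details and the return-last-reading convention are assumptions, not the exact source:

    static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
    {
        const ktime_t end = ktime_add_ms(ktime_get(), timeout_ms);
        u8 act;

        do {
            act = read_cagf(rps);    /* currently reported actual frequency */
            if (act == freq)
                break;

            usleep_range(1000, 2000);    /* assumed ~1ms polling interval */
        } while (ktime_before(ktime_get(), end));

        return act;    /* callers compare this against the requested freq */
    }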
184 static u8 rps_set_check(struct intel_rps *rps, u8 freq) in rps_set_check() argument
186 mutex_lock(&rps->lock); in rps_set_check()
187 GEM_BUG_ON(!intel_rps_is_active(rps)); in rps_set_check()
188 if (wait_for(!intel_rps_set(rps, freq), 50)) { in rps_set_check()
189 mutex_unlock(&rps->lock); in rps_set_check()
192 GEM_BUG_ON(rps->last_freq != freq); in rps_set_check()
193 mutex_unlock(&rps->lock); in rps_set_check()
195 return wait_for_freq(rps, freq, 50); in rps_set_check()
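The rps_set_check() fragments show the complete locking discipline: take rps->lock, assert RPS is active, retry intel_rps_set() for up to 50ms, verify rps->last_freq, then let wait_for_freq() confirm the hardware followed. Reassembled below; the 0 return on timeout is an assumption:

    static u8 rps_set_check(struct intel_rps *rps, u8 freq)
    {
        mutex_lock(&rps->lock);
        GEM_BUG_ON(!intel_rps_is_active(rps));

        if (wait_for(!intel_rps_set(rps, freq), 50)) {
            mutex_unlock(&rps->lock);
            return 0;    /* assumed failure value; callers treat != freq as error */
        }
        GEM_BUG_ON(rps->last_freq != freq);
        mutex_unlock(&rps->lock);

        /* Then give the PCU up to 50ms to actually reach the new p-state. */
        return wait_for_freq(rps, freq, 50);
    }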
198 static void show_pstate_limits(struct intel_rps *rps) in show_pstate_limits() argument
200 struct drm_i915_private *i915 = rps_to_i915(rps); in show_pstate_limits()
205 intel_uncore_read(rps_to_uncore(rps), in show_pstate_limits()
210 intel_uncore_read(rps_to_uncore(rps), in show_pstate_limits()
218 struct intel_rps *rps = &gt->rps; in live_rps_clock_interval() local
225 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_clock_interval()
232 saved_work = rps->work.func; in live_rps_clock_interval()
233 rps->work.func = dummy_rps_work; in live_rps_clock_interval()
236 intel_rps_disable(&gt->rps); in live_rps_clock_interval()
353 intel_rps_enable(&gt->rps); in live_rps_clock_interval()
359 rps->work.func = saved_work; in live_rps_clock_interval()
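live_rps_clock_interval() and the other live_rps_* tests below share one setup/teardown pattern: stash the RPS worker callback, substitute a do-nothing handler so autotuning cannot interfere, optionally disable RPS around the measurement, and restore everything on exit. Roughly, with the empty dummy_rps_work body being an assumption:

    /* Assumed shape of the stand-in worker referenced above. */
    static void dummy_rps_work(struct work_struct *wrk)
    {
        /* Swallow worker invocations while the test drives the frequency itself. */
    }

    /* ... inside a live_rps_*() selftest ... */
    saved_work = rps->work.func;
    rps->work.func = dummy_rps_work;

    intel_rps_disable(&gt->rps);    /* take manual control for the measurement */
    /* ... exercise the hardware ... */
    intel_rps_enable(&gt->rps);

    rps->work.func = saved_work;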
370 struct intel_rps *rps = &gt->rps; in live_rps_control() local
384 if (!intel_rps_is_enabled(rps)) in live_rps_control()
394 saved_work = rps->work.func; in live_rps_control()
395 rps->work.func = dummy_rps_work; in live_rps_control()
429 if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { in live_rps_control()
431 engine->name, rps->min_freq, read_cagf(rps)); in live_rps_control()
434 show_pstate_limits(rps); in live_rps_control()
439 for (f = rps->min_freq + 1; f < rps->max_freq; f++) { in live_rps_control()
440 if (rps_set_check(rps, f) < f) in live_rps_control()
444 limit = rps_set_check(rps, f); in live_rps_control()
446 if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { in live_rps_control()
448 engine->name, rps->min_freq, read_cagf(rps)); in live_rps_control()
451 show_pstate_limits(rps); in live_rps_control()
457 max = rps_set_check(rps, limit); in live_rps_control()
461 min = rps_set_check(rps, rps->min_freq); in live_rps_control()
469 rps->min_freq, intel_gpu_freq(rps, rps->min_freq), in live_rps_control()
470 rps->max_freq, intel_gpu_freq(rps, rps->max_freq), in live_rps_control()
471 limit, intel_gpu_freq(rps, limit), in live_rps_control()
474 if (limit == rps->min_freq) { in live_rps_control()
477 show_pstate_limits(rps); in live_rps_control()
492 rps->work.func = saved_work; in live_rps_control()
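live_rps_control() first pins the GPU at rps->min_freq, then requests every p-state up to rps->max_freq and records the highest one the PCU actually grants ("limit") before dropping back to the floor. A condensed sketch of that sweep; the err label stands in for the pr_err/show_pstate_limits diagnostics shown above:

    if (rps_set_check(rps, rps->min_freq) != rps->min_freq)
        goto err;    /* could not even reach the floor */

    for (f = rps->min_freq + 1; f < rps->max_freq; f++) {
        if (rps_set_check(rps, f) < f)
            break;    /* the PCU refused this step */
    }
    limit = rps_set_check(rps, f);    /* highest p-state actually granted */

    if (rps_set_check(rps, rps->min_freq) != rps->min_freq)
        goto err;    /* and verify we can come back down again */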
497 static void show_pcu_config(struct intel_rps *rps) in show_pcu_config() argument
499 struct drm_i915_private *i915 = rps_to_i915(rps); in show_pcu_config()
507 min_gpu_freq = rps->min_freq; in show_pcu_config()
508 max_gpu_freq = rps->max_freq; in show_pcu_config()
515 wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm); in show_pcu_config()
531 intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref); in show_pcu_config()
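show_pcu_config() walks the PCU frequency table between rps->min_freq and rps->max_freq while holding a runtime-PM wakeref for the register and pcode traffic. The shape of it, with the actual pcode query and printing elided as assumptions:

    min_gpu_freq = rps->min_freq;
    max_gpu_freq = rps->max_freq;

    wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm);

    for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
        /* query the PCU's mapping for this step and pr_info() it */
    }

    intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref);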
547 static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq) in measure_frequency_at() argument
552 *freq = rps_set_check(rps, *freq); in measure_frequency_at()
555 *freq = (*freq + read_cagf(rps)) / 2; in measure_frequency_at()
576 static u64 measure_cs_frequency_at(struct intel_rps *rps, in measure_cs_frequency_at() argument
583 *freq = rps_set_check(rps, *freq); in measure_cs_frequency_at()
586 *freq = (*freq + read_cagf(rps)) / 2; in measure_cs_frequency_at()
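measure_frequency_at() and measure_cs_frequency_at() follow the same recipe: pin the requested p-state with rps_set_check(), sample a GPU-clocked counter over a fixed window, and hand back the midpoint of the requested and CAGF-reported frequency so the caller knows what was really measured. Schematically, with the sample window and the exact rate arithmetic as assumptions:

    static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq)
    {
        u64 dc, dt;

        *freq = rps_set_check(rps, *freq);    /* pin the requested p-state */

        dt = ktime_get();
        dc = READ_ONCE(*cntr);                /* counter ticking at the GPU clock */
        usleep_range(10000, 20000);           /* assumed ~10ms sample window */
        dc = READ_ONCE(*cntr) - dc;
        dt = ktime_get() - dt;

        *freq = (*freq + read_cagf(rps)) / 2; /* report requested/actual midpoint */

        return div64_u64(1000 * 1000 * dc, dt);    /* counter rate over the window */
    }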
602 struct intel_rps *rps = &gt->rps; in live_rps_frequency_cs() local
614 if (!intel_rps_is_enabled(rps)) in live_rps_frequency_cs()
624 saved_work = rps->work.func; in live_rps_frequency_cs()
625 rps->work.func = dummy_rps_work; in live_rps_frequency_cs()
671 min.freq = rps->min_freq; in live_rps_frequency_cs()
672 min.count = measure_cs_frequency_at(rps, engine, &min.freq); in live_rps_frequency_cs()
674 max.freq = rps->max_freq; in live_rps_frequency_cs()
675 max.count = measure_cs_frequency_at(rps, engine, &max.freq); in live_rps_frequency_cs()
679 min.count, intel_gpu_freq(rps, min.freq), in live_rps_frequency_cs()
680 max.count, intel_gpu_freq(rps, max.freq), in live_rps_frequency_cs()
693 show_pcu_config(rps); in live_rps_frequency_cs()
695 for (f = min.freq + 1; f <= rps->max_freq; f++) { in live_rps_frequency_cs()
699 count = measure_cs_frequency_at(rps, engine, &act); in live_rps_frequency_cs()
705 act, intel_gpu_freq(rps, act), count, in live_rps_frequency_cs()
731 rps->work.func = saved_work; in live_rps_frequency_cs()
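The body of live_rps_frequency_cs() measures a command-streamer counter at min_freq and max_freq and then steps through every intermediate p-state, requiring the measured rate to track the frequency the CAGF reports. A hedged sketch of the per-step check; the 10% tolerance and the exact comparison are assumptions:

    for (f = min.freq + 1; f <= rps->max_freq; f++) {
        int act = f;
        u64 count;

        count = measure_cs_frequency_at(rps, engine, &act);
        if (act < f)
            break;    /* the PCU stopped granting higher p-states */

        pr_info("%s: freq %x (%d MHz): %llu counts\n",
                engine->name, act, intel_gpu_freq(rps, act), count);

        /* Expect roughly linear scaling against the min_freq baseline. */
        if (10 * count < 9 * mul_u64_u32_div(min.count, act, min.freq)) {
            err = -EINVAL;    /* assumed tolerance: within ~10% of linear */
            break;
        }

        f = act;    /* the PCU may have skipped intermediate steps */
    }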
743 struct intel_rps *rps = &gt->rps; in live_rps_frequency_srm() local
755 if (!intel_rps_is_enabled(rps)) in live_rps_frequency_srm()
765 saved_work = rps->work.func; in live_rps_frequency_srm()
766 rps->work.func = dummy_rps_work; in live_rps_frequency_srm()
811 min.freq = rps->min_freq; in live_rps_frequency_srm()
812 min.count = measure_frequency_at(rps, cntr, &min.freq); in live_rps_frequency_srm()
814 max.freq = rps->max_freq; in live_rps_frequency_srm()
815 max.count = measure_frequency_at(rps, cntr, &max.freq); in live_rps_frequency_srm()
819 min.count, intel_gpu_freq(rps, min.freq), in live_rps_frequency_srm()
820 max.count, intel_gpu_freq(rps, max.freq), in live_rps_frequency_srm()
833 show_pcu_config(rps); in live_rps_frequency_srm()
835 for (f = min.freq + 1; f <= rps->max_freq; f++) { in live_rps_frequency_srm()
839 count = measure_frequency_at(rps, cntr, &act); in live_rps_frequency_srm()
845 act, intel_gpu_freq(rps, act), count, in live_rps_frequency_srm()
871 rps->work.func = saved_work; in live_rps_frequency_srm()
879 static void sleep_for_ei(struct intel_rps *rps, int timeout_us) in sleep_for_ei() argument
885 rps_disable_interrupts(rps); in sleep_for_ei()
886 GEM_BUG_ON(rps->pm_iir); in sleep_for_ei()
887 rps_enable_interrupts(rps); in sleep_for_ei()
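sleep_for_ei() waits out one RPS evaluation interval (EI) with a clean interrupt slate: let any EI already in flight finish, clear the pending pm_iir by cycling the interrupt enable, then sleep through a full interval for real. Approximately:

    static void sleep_for_ei(struct intel_rps *rps, int timeout_us)
    {
        /* Flush any evaluation interval that is already in progress. */
        usleep_range(timeout_us, 2 * timeout_us);

        /* Reset the interrupt status so only the next EI is reported. */
        rps_disable_interrupts(rps);
        GEM_BUG_ON(rps->pm_iir);
        rps_enable_interrupts(rps);

        /* And then wait out a whole evaluation interval, for real this time. */
        usleep_range(2 * timeout_us, 3 * timeout_us);
    }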
893 static int __rps_up_interrupt(struct intel_rps *rps, in __rps_up_interrupt() argument
904 rps_set_check(rps, rps->min_freq); in __rps_up_interrupt()
921 if (!intel_rps_is_active(rps)) { in __rps_up_interrupt()
929 if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) { in __rps_up_interrupt()
936 if (rps->last_freq != rps->min_freq) { in __rps_up_interrupt()
947 sleep_for_ei(rps, timeout); in __rps_up_interrupt()
953 if (rps->cur_freq != rps->min_freq) { in __rps_up_interrupt()
955 engine->name, intel_rps_read_actual_frequency(rps)); in __rps_up_interrupt()
959 if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) { in __rps_up_interrupt()
961 engine->name, rps->pm_iir, in __rps_up_interrupt()
971 static int __rps_down_interrupt(struct intel_rps *rps, in __rps_down_interrupt() argument
977 rps_set_check(rps, rps->max_freq); in __rps_down_interrupt()
979 if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) { in __rps_down_interrupt()
985 if (rps->last_freq != rps->max_freq) { in __rps_down_interrupt()
995 sleep_for_ei(rps, timeout); in __rps_down_interrupt()
997 if (rps->cur_freq != rps->max_freq) { in __rps_down_interrupt()
1000 intel_rps_read_actual_frequency(rps)); in __rps_down_interrupt()
1004 if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) { in __rps_down_interrupt()
1006 engine->name, rps->pm_iir, in __rps_down_interrupt()
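__rps_up_interrupt() and __rps_down_interrupt() mirror each other: pin the frequency at one extreme, confirm the matching threshold event is armed in rps->pm_events and that rps->last_freq took the request, sleep for an evaluation interval, then require that cur_freq did not move on its own and that the expected bit arrived in rps->pm_iir. The up-direction checks, condensed; the bare -EINVAL returns stand in for the pr_err diagnostics above, and the spinner setup is elided:

    rps_set_check(rps, rps->min_freq);

    if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD))
        return -EINVAL;    /* up-threshold event not armed */
    if (rps->last_freq != rps->min_freq)
        return -EINVAL;    /* the request did not stick */

    sleep_for_ei(rps, timeout);    /* one full evaluation interval */

    if (rps->cur_freq != rps->min_freq)
        return -EINVAL;    /* frequency moved without our (dummy) worker */
    if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD))
        return -EINVAL;    /* no up-threshold interrupt was raised */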
1022 struct intel_rps *rps = &gt->rps; in live_rps_interrupt() local
1034 if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_interrupt()
1038 pm_events = rps->pm_events; in live_rps_interrupt()
1049 saved_work = rps->work.func; in live_rps_interrupt()
1050 rps->work.func = dummy_rps_work; in live_rps_interrupt()
1056 GEM_BUG_ON(intel_rps_is_active(rps)); in live_rps_interrupt()
1060 err = __rps_up_interrupt(rps, engine, &spin); in live_rps_interrupt()
1074 err = __rps_down_interrupt(rps, engine); in live_rps_interrupt()
1090 rps->work.func = saved_work; in live_rps_interrupt()
1108 static u64 measure_power_at(struct intel_rps *rps, int *freq) in measure_power_at() argument
1113 *freq = rps_set_check(rps, *freq); in measure_power_at()
1116 *freq = (*freq + read_cagf(rps)) / 2; in measure_power_at()
1126 struct intel_rps *rps = &gt->rps; in live_rps_power() local
1139 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_power()
1149 saved_work = rps->work.func; in live_rps_power()
1150 rps->work.func = dummy_rps_work; in live_rps_power()
1185 max.freq = rps->max_freq; in live_rps_power()
1186 max.power = measure_power_at(rps, &max.freq); in live_rps_power()
1188 min.freq = rps->min_freq; in live_rps_power()
1189 min.power = measure_power_at(rps, &min.freq); in live_rps_power()
1196 min.power, intel_gpu_freq(rps, min.freq), in live_rps_power()
1197 max.power, intel_gpu_freq(rps, max.freq)); in live_rps_power()
1201 min.freq, intel_gpu_freq(rps, min.freq), in live_rps_power()
1202 max.freq, intel_gpu_freq(rps, max.freq)); in live_rps_power()
1222 rps->work.func = saved_work; in live_rps_power()
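live_rps_power() spins the engine while measuring power at max_freq and again at min_freq, then requires that the higher frequency clearly costs more; the print fragments above report both datapoints when that ordering does not hold. A sketch of the comparison, with the ~10% margin and the message wording as assumptions:

    max.freq = rps->max_freq;
    max.power = measure_power_at(rps, &max.freq);

    min.freq = rps->min_freq;
    min.power = measure_power_at(rps, &min.freq);

    /* Running faster should draw measurably more power. */
    if (11 * min.power > 10 * max.power) {    /* assumed ~10% margin */
        pr_err("power not reduced at lower frequency: %llu @ %d MHz vs %llu @ %d MHz\n",
               min.power, intel_gpu_freq(rps, min.freq),
               max.power, intel_gpu_freq(rps, max.freq));
        err = -EINVAL;
    }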
1230 struct intel_rps *rps = &gt->rps; in live_rps_dynamic() local
1243 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_dynamic()
1249 if (intel_rps_has_interrupts(rps)) in live_rps_dynamic()
1251 if (intel_rps_uses_timer(rps)) in live_rps_dynamic()
1265 GEM_BUG_ON(intel_rps_is_active(rps)); in live_rps_dynamic()
1266 rps->cur_freq = rps->min_freq; in live_rps_dynamic()
1270 GEM_BUG_ON(rps->last_freq != rps->min_freq); in live_rps_dynamic()
1283 max.freq = wait_for_freq(rps, rps->max_freq, 500); in live_rps_dynamic()
1289 min.freq = wait_for_freq(rps, rps->min_freq, 2000); in live_rps_dynamic()
1294 max.freq, intel_gpu_freq(rps, max.freq), in live_rps_dynamic()
1296 min.freq, intel_gpu_freq(rps, min.freq), in live_rps_dynamic()
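live_rps_dynamic() is the one test that leaves the autotuning path armed: with RPS parked at min_freq, it submits busy work and uses wait_for_freq() to watch the governor ramp toward max_freq within 500ms, then idles and expects the decay back to min_freq within 2s. The two waits and the final check, sketched; the failure condition is an assumption:

    /* After submitting a spinner on the engine ... */
    max.freq = wait_for_freq(rps, rps->max_freq, 500);    /* ramp up */

    /* ... and after idling the engine again ... */
    min.freq = wait_for_freq(rps, rps->min_freq, 2000);   /* decay back down */

    if (min.freq >= max.freq) {
        pr_err("dynamic reclocking failed: ramped to %d MHz, decayed to %d MHz\n",
               intel_gpu_freq(rps, max.freq), intel_gpu_freq(rps, min.freq));
        err = -EINVAL;    /* assumed failure condition */
    }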