// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"

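/*
 * The reference timestamp frequency can be overridden through the
 * TIMESTAMP_OVERRIDE register: the US counter divider field supplies the
 * whole-MHz part and the denominator field the fractional part of the
 * frequency, which is returned in Hz.
 */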
static u32 read_reference_ts_freq(struct intel_uncore *uncore)
{
	u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

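/*
 * Decode the crystal clock frequency field of RPM_CONFIG0 on gen9: only
 * 19.2 MHz and 24 MHz crystals are expected here.
 */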
static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
				       u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;
	u32 crystal_clock =
		(rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
		GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

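/*
 * Gen11+ variant of the above: 25 MHz and 38.4 MHz crystals are also
 * possible.
 */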
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;
	u32 f25_mhz = 25000000;
	u32 f38_4_mhz = 38400000;
	u32 crystal_clock =
		(rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
		GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

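/*
 * Determine the command streamer timestamp frequency for the current
 * platform: derived from the raw clock on gen4 and earlier, fixed at
 * 12.5 MHz up to gen8, and read from CTC_MODE / TIMESTAMP_OVERRIDE /
 * RPM_CONFIG0 on gen9 through gen12.
 */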
static u32 read_clock_frequency(struct intel_uncore *uncore)
{
	u32 f12_5_mhz = 12500000;
	u32 f19_2_mhz = 19200000;
	u32 f24_mhz = 24000000;

	if (GRAPHICS_VER(uncore->i915) <= 4) {
		/*
		 * PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
	} else if (GRAPHICS_VER(uncore->i915) <= 8) {
		/*
		 * PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours)."
		 */
		return f12_5_mhz;
	} else if (GRAPHICS_VER(uncore->i915) <= 9) {
		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(uncore);
		} else {
			freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;

			/*
			 * Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (GRAPHICS_VER(uncore->i915) <= 12) {
		u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
		u32 freq = 0;

		/*
		 * First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(uncore);
		} else {
			u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);

			if (GRAPHICS_VER(uncore->i915) >= 11)
				freq = gen11_get_crystal_clock_freq(uncore, c0);
			else
				freq = gen9_get_crystal_clock_freq(uncore, c0);

			/*
			 * Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycles).
			 */
			freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

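/*
 * Cache the command streamer timestamp frequency and the matching tick
 * period at GT init; e.g. a 19.2 MHz clock gives a period of 53 ns
 * (rounded up by intel_gt_clock_interval_to_ns()).
 */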
void intel_gt_init_clock_frequency(struct intel_gt *gt)
{
	/*
	 * Note that on gen11+, the clock frequency may be reconfigured.
	 * We do not, and we assume nobody else does.
	 */
	gt->clock_frequency = read_clock_frequency(gt->uncore);
	if (gt->clock_frequency)
		gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);

	GT_TRACE(gt,
		 "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
		 gt->clock_frequency / 1000,
		 gt->clock_period_ns,
		 div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
			 USEC_PER_SEC));
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
	if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
		dev_err(gt->i915->drm.dev,
			"GT clock frequency changed, was %uHz, now %uHz!\n",
			gt->clock_frequency,
			read_clock_frequency(gt->uncore));
	}
}
#endif

static u64 div_u64_roundup(u64 nom, u32 den)
{
	return div_u64(nom + den - 1, den);
}

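/*
 * Convert a number of GT clock ticks to nanoseconds, rounding up so that
 * a non-zero count never truncates to zero.
 */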
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
	return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
}

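/*
 * PM timestamps and intervals are expressed in units of 16 GT clock
 * cycles, so scale by 16 before converting to nanoseconds.
 */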
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
	return intel_gt_clock_interval_to_ns(gt, 16 * count);
}

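/*
 * Inverse of intel_gt_clock_interval_to_ns(): convert a duration in
 * nanoseconds to GT clock ticks, again rounding up.
 */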
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
	return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
}

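/*
 * Convert a duration in nanoseconds to PM interval units (16 GT clock
 * cycles each), with a gen6-specific fixup described below.
 */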
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
	u64 val;

	/*
	 * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
	 * 8300) freezing up around GPU hangs. Looks as if even
	 * scheduling/timer interrupts start misbehaving if the RPS
	 * EI/thresholds are "bad", leading to a very sluggish or even
	 * frozen machine.
	 */
	val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);
	if (GRAPHICS_VER(gt->i915) == 6)
		val = div_u64_roundup(val, 25) * 25;

	return val;
}