1 /*
2  * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <cdefs.h>
9 #include <stdbool.h>
10 
11 #include "../amu_private.h"
12 #include <arch.h>
13 #include <arch_helpers.h>
14 #include <common/debug.h>
15 #include <lib/el3_runtime/pubsub_events.h>
16 #include <lib/extensions/amu.h>
17 
18 #include <plat/common/platform.h>
19 
/*
 * Per-core AMU register state, saved/restored around PSCI power-down
 * suspend (see the SUBSCRIBE_TO_EVENT hooks at the bottom of this file).
 */
struct amu_ctx {
	/* Architected (group 0) counter values */
	uint64_t group0_cnts[AMU_GROUP0_MAX_COUNTERS];
#if ENABLE_AMU_AUXILIARY_COUNTERS
	/* Auxiliary (group 1) counter values */
	uint64_t group1_cnts[AMU_GROUP1_MAX_COUNTERS];
#endif

	/* Bitmask of group 0 counters enabled at context-save time */
	uint16_t group0_enable;
#if ENABLE_AMU_AUXILIARY_COUNTERS
	/* Bitmask of group 1 counters enabled at context-save time */
	uint16_t group1_enable;
#endif
};
31 
/* One saved context per core, indexed by plat_my_core_pos(). */
static struct amu_ctx amu_ctxs_[PLATFORM_CORE_COUNT];

/*
 * Compile-time checks that the width of the enable bitmask fields is
 * consistent with the per-group counter limits used by this driver.
 */
CASSERT((sizeof(amu_ctxs_[0].group0_enable) * CHAR_BIT) <= AMU_GROUP0_MAX_COUNTERS,
	amu_ctx_group0_enable_cannot_represent_all_group0_counters);

#if ENABLE_AMU_AUXILIARY_COUNTERS
CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
#endif
41 
read_id_pfr0_amu(void)42 static inline __unused uint32_t read_id_pfr0_amu(void)
43 {
44 	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
45 		ID_PFR0_AMU_MASK;
46 }
47 
write_hcptr_tam(uint32_t value)48 static inline __unused void write_hcptr_tam(uint32_t value)
49 {
50 	write_hcptr((read_hcptr() & ~TAM_BIT) |
51 		((value << TAM_SHIFT) & TAM_BIT));
52 }
53 
write_amcr_cg1rz(uint32_t value)54 static inline __unused void write_amcr_cg1rz(uint32_t value)
55 {
56 	write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
57 		((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
58 }
59 
read_amcfgr_ncg(void)60 static inline __unused uint32_t read_amcfgr_ncg(void)
61 {
62 	return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
63 		AMCFGR_NCG_MASK;
64 }
65 
read_amcgcr_cg0nc(void)66 static inline __unused uint32_t read_amcgcr_cg0nc(void)
67 {
68 	return (read_amcgcr() >> AMCGCR_CG0NC_SHIFT) &
69 		AMCGCR_CG0NC_MASK;
70 }
71 
read_amcgcr_cg1nc(void)72 static inline __unused uint32_t read_amcgcr_cg1nc(void)
73 {
74 	return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
75 		AMCGCR_CG1NC_MASK;
76 }
77 
read_amcntenset0_px(void)78 static inline __unused uint32_t read_amcntenset0_px(void)
79 {
80 	return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
81 		AMCNTENSET0_Pn_MASK;
82 }
83 
read_amcntenset1_px(void)84 static inline __unused uint32_t read_amcntenset1_px(void)
85 {
86 	return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
87 		AMCNTENSET1_Pn_MASK;
88 }
89 
write_amcntenset0_px(uint32_t px)90 static inline __unused void write_amcntenset0_px(uint32_t px)
91 {
92 	uint32_t value = read_amcntenset0();
93 
94 	value &= ~AMCNTENSET0_Pn_MASK;
95 	value |= (px << AMCNTENSET0_Pn_SHIFT) &
96 		AMCNTENSET0_Pn_MASK;
97 
98 	write_amcntenset0(value);
99 }
100 
write_amcntenset1_px(uint32_t px)101 static inline __unused void write_amcntenset1_px(uint32_t px)
102 {
103 	uint32_t value = read_amcntenset1();
104 
105 	value &= ~AMCNTENSET1_Pn_MASK;
106 	value |= (px << AMCNTENSET1_Pn_SHIFT) &
107 		AMCNTENSET1_Pn_MASK;
108 
109 	write_amcntenset1(value);
110 }
111 
write_amcntenclr0_px(uint32_t px)112 static inline __unused void write_amcntenclr0_px(uint32_t px)
113 {
114 	uint32_t value = read_amcntenclr0();
115 
116 	value &= ~AMCNTENCLR0_Pn_MASK;
117 	value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;
118 
119 	write_amcntenclr0(value);
120 }
121 
write_amcntenclr1_px(uint32_t px)122 static inline __unused void write_amcntenclr1_px(uint32_t px)
123 {
124 	uint32_t value = read_amcntenclr1();
125 
126 	value &= ~AMCNTENCLR1_Pn_MASK;
127 	value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;
128 
129 	write_amcntenclr1(value);
130 }
131 
amu_supported(void)132 static __unused bool amu_supported(void)
133 {
134 	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
135 }
136 
#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Group 1 exists when AMCFGR reports at least one auxiliary counter group. */
static __unused bool amu_group1_supported(void)
{
	return read_amcfgr_ncg() != 0U;
}
#endif
143 
144 /*
145  * Enable counters. This function is meant to be invoked by the context
146  * management library before exiting from EL3.
147  */
void amu_enable(bool el2_unused)
{
	uint32_t id_pfr0_amu;		/* AMU version */

	uint32_t amcfgr_ncg;		/* Number of counter groups */
	uint32_t amcgcr_cg0nc;		/* Number of group 0 counters */

	uint32_t amcntenset0_px = 0x0;	/* Group 0 enable mask */
	uint32_t amcntenset1_px = 0x0;	/* Group 1 enable mask */

	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		/*
		 * If the AMU is unsupported, nothing needs to be done.
		 */

		return;
	}

	if (el2_unused) {
		/*
		 * HCPTR.TAM: Set to zero so any accesses to the Activity
		 * Monitor registers do not trap to EL2.
		 */
		write_hcptr_tam(0U);
	}

	/*
	 * Retrieve the number of architected counters. All of these counters
	 * are enabled by default.
	 */

	amcgcr_cg0nc = read_amcgcr_cg0nc();
	amcntenset0_px = (UINT32_C(1) << (amcgcr_cg0nc)) - 1U;

	assert(amcgcr_cg0nc <= AMU_AMCGCR_CG0NC_MAX);

	/*
	 * The platform may opt to enable specific auxiliary counters. This can
	 * be done via the common FCONF getter, or via the platform-implemented
	 * function.
	 */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	const struct amu_topology *topology;

#if ENABLE_AMU_FCONF
	topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
	topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */

	if (topology != NULL) {
		unsigned int core_pos = plat_my_core_pos();

		/*
		 * Fix: assign to the local `amcntenset1_px` declared above.
		 * The previous code wrote to `amcntenset1_el0_px` (the
		 * AArch64-side name), an undeclared identifier here, which
		 * broke the build when ENABLE_AMU_AUXILIARY_COUNTERS is set.
		 */
		amcntenset1_px = topology->cores[core_pos].enable;
	} else {
		ERROR("AMU: failed to generate AMU topology\n");
	}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */

	/*
	 * Enable the requested counters.
	 */

	write_amcntenset0_px(amcntenset0_px);

	amcfgr_ncg = read_amcfgr_ncg();
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(amcntenset1_px);

#if !ENABLE_AMU_AUXILIARY_COUNTERS
		VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
	}

	/* Initialize FEAT_AMUv1p1 features if present. */
	if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
		return;
	}

#if AMU_RESTRICT_COUNTERS
	/*
	 * FEAT_AMUv1p1 adds a register field to restrict access to group 1
	 * counters at all but the highest implemented EL.  This is controlled
	 * with the AMU_RESTRICT_COUNTERS compile time flag, when set, system
	 * register reads at lower ELs return zero.  Reads from the memory
	 * mapped view are unaffected.
	 */
	VERBOSE("AMU group 1 counter access restricted.\n");
	write_amcr_cg1rz(1U);
#else
	write_amcr_cg1rz(0U);
#endif
}
243 
244 /* Read the group 0 counter identified by the given `idx`. */
amu_group0_cnt_read(unsigned int idx)245 static uint64_t amu_group0_cnt_read(unsigned int idx)
246 {
247 	assert(amu_supported());
248 	assert(idx < read_amcgcr_cg0nc());
249 
250 	return amu_group0_cnt_read_internal(idx);
251 }
252 
253 /* Write the group 0 counter identified by the given `idx` with `val` */
amu_group0_cnt_write(unsigned int idx,uint64_t val)254 static void amu_group0_cnt_write(unsigned  int idx, uint64_t val)
255 {
256 	assert(amu_supported());
257 	assert(idx < read_amcgcr_cg0nc());
258 
259 	amu_group0_cnt_write_internal(idx, val);
260 	isb();
261 }
262 
#if ENABLE_AMU_AUXILIARY_COUNTERS
/* Read the group 1 counter identified by the given `idx`. */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
	uint64_t value;

	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	value = amu_group1_cnt_read_internal(idx);

	return value;
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
	assert(amu_supported());
	assert(amu_group1_supported());
	assert(idx < read_amcgcr_cg1nc());

	amu_group1_cnt_write_internal(idx, val);

	isb(); /* Synchronize the write before continuing */
}
#endif
285 
/*
 * PSCI power-down-start hook: disable all AMU counters, then snapshot their
 * values into this core's amu_ctx entry.  The enable masks are saved so that
 * amu_context_restore() can re-enable exactly the same counters.
 *
 * Returns NULL (the pubsub framework ignores the value); `arg` is unused.
 */
static void *amu_context_save(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu;	/* AMU version */
	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcfgr_ncg;	/* Number of counter groups */
	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
#endif

	/* Nothing to save if the AMU is not implemented. */
	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	/* Group 1 counters only exist when at least one auxiliary group does. */
	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Disable all AMU counters.
	 */

	/* Remember which counters were enabled, then clear those enable bits. */
	ctx->group0_enable = read_amcntenset0_px();
	write_amcntenclr0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		ctx->group1_enable = read_amcntenset1_px();
		write_amcntenclr1_px(ctx->group1_enable);
	}
#endif

	/*
	 * Save the counters to the local context.
	 */

	isb(); /* Ensure counters have been stopped */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		ctx->group0_cnts[i] = amu_group0_cnt_read(i);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		ctx->group1_cnts[i] = amu_group1_cnt_read(i);
	}
#endif

	return (void *)0;
}
348 
/*
 * PSCI power-down-finish hook: write the counter values saved by
 * amu_context_save() back to the hardware, then re-enable the counters that
 * were enabled when the context was saved.
 *
 * Returns NULL (the pubsub framework ignores the value); `arg` is unused.
 */
static void *amu_context_restore(const void *arg)
{
	uint32_t i;

	unsigned int core_pos;
	struct amu_ctx *ctx;

	uint32_t id_pfr0_amu;	/* AMU version */

	uint32_t amcfgr_ncg;	/* Number of counter groups */
	uint32_t amcgcr_cg0nc;	/* Number of group 0 counters */

#if ENABLE_AMU_AUXILIARY_COUNTERS
	uint32_t amcgcr_cg1nc;	/* Number of group 1 counters */
#endif

	/* Nothing to restore if the AMU is not implemented. */
	id_pfr0_amu = read_id_pfr0_amu();
	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
		return (void *)0;
	}

	core_pos = plat_my_core_pos();
	ctx = &amu_ctxs_[core_pos];

	amcfgr_ncg = read_amcfgr_ncg();
	amcgcr_cg0nc = read_amcgcr_cg0nc();

#if ENABLE_AMU_AUXILIARY_COUNTERS
	/* Group 1 counters only exist when at least one auxiliary group does. */
	amcgcr_cg1nc = (amcfgr_ncg > 0U) ? read_amcgcr_cg1nc() : 0U;
#endif

	/*
	 * Sanity check that all counters were disabled when the context was
	 * previously saved.
	 */

	assert(read_amcntenset0_px() == 0U);

	if (amcfgr_ncg > 0U) {
		assert(read_amcntenset1_px() == 0U);
	}

	/*
	 * Restore the counter values from the local context.
	 */

	for (i = 0U; i < amcgcr_cg0nc; i++) {
		amu_group0_cnt_write(i, ctx->group0_cnts[i]);
	}

#if ENABLE_AMU_AUXILIARY_COUNTERS
	for (i = 0U; i < amcgcr_cg1nc; i++) {
		amu_group1_cnt_write(i, ctx->group1_cnts[i]);
	}
#endif

	/*
	 * Re-enable counters that were disabled during context save.
	 */

	write_amcntenset0_px(ctx->group0_enable);

#if ENABLE_AMU_AUXILIARY_COUNTERS
	if (amcfgr_ncg > 0U) {
		write_amcntenset1_px(ctx->group1_enable);
	}
#endif

	return (void *)0;
}
419 
/* Hook the AMU context save/restore into the PSCI suspend power-down path. */
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);
422