/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/arm/cci.h>
#include <drivers/console.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>

#include <mcucfg.h>
#include <plat_private.h>
#include <power_tracer.h>
#include <scu.h>

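/*
 * Per-core saved timer context. timer_data[] is laid out as used by
 * mt_save_generic_timer() below: slots [0..1] hold CNTKCTL_EL1 and
 * CNTP_CVAL_EL0, slots [2..3] hold CNTP_TVAL_EL0 and CNTP_CTL_EL0, and
 * slots [4..5] hold CNTV_TVAL_EL0 and CNTV_CTL_EL0; the remaining slots
 * and fields are unused in this file.
 */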
struct core_context {
	unsigned long timer_data[8];
	unsigned int count;
	unsigned int rst;
	unsigned int abt;
	unsigned int brk;
};

struct cluster_context {
	struct core_context core[PLATFORM_MAX_CPUS_PER_CLUSTER];
};

/*
 * Top level structure to hold the complete context of a multi cluster system
 */
struct system_context {
	struct cluster_context cluster[PLATFORM_CLUSTER_COUNT];
};

/*
 * Top level structure which encapsulates the context of the entire system
 */
static struct system_context dormant_data[1];

static inline struct cluster_context *system_cluster(
						struct system_context *system,
						uint32_t clusterid)
{
	return &system->cluster[clusterid];
}

static inline struct core_context *cluster_core(struct cluster_context *cluster,
						uint32_t cpuid)
{
	return &cluster->core[cpuid];
}

static struct cluster_context *get_cluster_data(unsigned long mpidr)
{
	uint32_t clusterid;

	clusterid = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	return system_cluster(dormant_data, clusterid);
}

static struct core_context *get_core_data(unsigned long mpidr)
{
	struct cluster_context *cluster;
	uint32_t cpuid;

	cluster = get_cluster_data(mpidr);
	cpuid = mpidr & MPIDR_CPU_MASK;

	return cluster_core(cluster, cpuid);
}

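/*
 * Save the per-cpu generic timer context. Registers are saved in pairs via
 * stp: CNTKCTL_EL1 and CNTP_CVAL_EL0 at offset 0, CNTP_TVAL_EL0 and
 * CNTP_CTL_EL0 at offset 16, CNTV_TVAL_EL0 and CNTV_CTL_EL0 at offset 32.
 */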
static void mt_save_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("mrs	%x0, cntkctl_el1\n\t"
			 "mrs	%x1, cntp_cval_el0\n\t"
			 "stp	%x0, %x1, [%2, #0]"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntp_tval_el0\n\t"
			 "mrs	%x1, cntp_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #16]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("mrs	%x0, cntv_tval_el0\n\t"
			 "mrs	%x1, cntv_ctl_el0\n\t"
			 "stp	%x0, %x1, [%2, #32]"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

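/*
 * Note: CNTP_TVAL_EL0 and CNTV_TVAL_EL0 are relative down-counters (a write
 * re-arms the timer against the current system count), so restoring a saved
 * TVAL effectively pushes the deadline out by the time spent powered down.
 */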
static void mt_restore_generic_timer(unsigned long *container)
{
	uint64_t ctl;
	uint64_t val;

	__asm__ volatile("ldp	%x0, %x1, [%2, #0]\n\t"
			 "msr	cntkctl_el1, %x0\n\t"
			 "msr	cntp_cval_el0, %x1"
			 : "=&r" (ctl), "=&r" (val)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #16]\n\t"
			 "msr	cntp_tval_el0, %x0\n\t"
			 "msr	cntp_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");

	__asm__ volatile("ldp	%x0, %x1, [%2, #32]\n\t"
			 "msr	cntv_tval_el0, %x0\n\t"
			 "msr	cntv_ctl_el0, %x1"
			 : "=&r" (val), "=&r" (ctl)
			 : "r" (container)
			 : "memory");
}

static void stop_generic_timer(void)
{
	/*
	 * Disable the timer to prevent spurious interrupts on this
	 * cpu interface. It will bite us when we come back if we don't.
	 * The interrupt will be replayed on the inbound cluster.
	 */
	uint64_t cntpctl = read_cntp_ctl_el0();

	write_cntp_ctl_el0(clr_cntp_ctl_enable(cntpctl));
}

static void mt_cpu_save(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_save_generic_timer(core->timer_data);

	/* Disable the timer IRQ; the upper layer is expected to re-enable it. */
	stop_generic_timer();
}

static void mt_cpu_restore(unsigned long mpidr)
{
	struct core_context *core;

	core = get_core_data(mpidr);
	mt_restore_generic_timer(core->timer_data);
}

static void mt_platform_save_context(unsigned long mpidr)
{
	/* mcusys_save_context: */
	mt_cpu_save(mpidr);
}

static void mt_platform_restore_context(unsigned long mpidr)
{
	/* mcusys_restore_context: */
	mt_cpu_restore(mpidr);
}

/*******************************************************************************
 * Private function which is used to determine if any platform actions
 * should be performed for the specified affinity instance given its
 * state. Nothing needs to be done if the 'state' is not off or if this is not
 * the highest affinity level which will enter the 'state'. Returns -EAGAIN
 * when no platform action is needed, 0 otherwise.
 ******************************************************************************/
static int32_t plat_do_plat_actions(unsigned int afflvl, unsigned int state)
{
	unsigned int max_phys_off_afflvl;

	assert(afflvl <= MPIDR_AFFLVL2);

	if (state != PSCI_STATE_OFF)
		return -EAGAIN;

	/*
	 * Find the highest affinity level which will be suspended and postpone
	 * all the platform specific actions until that level is hit.
	 */
	max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
	assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
	if (afflvl != max_phys_off_afflvl)
		return -EAGAIN;

	return 0;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to enter
 * standby.
 ******************************************************************************/
static void plat_affinst_standby(unsigned int power_state)
{
	unsigned int target_afflvl;

	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);

	/*
	 * It's possible to enter standby only on affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (target_afflvl == MPIDR_AFFLVL0) {
		/*
		 * Enter standby state. dsb is good practice before using wfi
		 * to enter low power states.
		 */
		dsb();
		wfi();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * on. The level and mpidr determine the affinity instance.
 ******************************************************************************/
static int plat_affinst_on(unsigned long mpidr,
		    unsigned long sec_entrypoint,
		    unsigned int afflvl,
		    unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long cpu_id;
	unsigned long cluster_id;
	uintptr_t rv;

	/*
	 * It's possible to turn on only affinity level 0 i.e. a cpu
	 * on the MTK_platform. Ignore any other affinity level.
	 */
	if (afflvl != MPIDR_AFFLVL0)
		return rc;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

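	/*
	 * Program the warm-boot entry point into the per-cpu reset vector
	 * register of the target cpu's cluster (MP1 when cluster_id is
	 * non-zero, MP0 otherwise).
	 */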
	if (cluster_id)
		rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);
	INFO("mt_on[%lu:%lu], entry %x\n",
		cluster_id, cpu_id, mmio_read_32(rv));

	return rc;
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be turned
 * off. The level and mpidr determine the affinity instance. The 'state' arg.
 * allows the platform to decide whether the cluster is being turned off and
 * take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_off(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Prevent interrupts from spuriously waking up this cpu */
	plat_mt_gic_cpuif_disable();

	trace_power_flow(mpidr, CPU_DOWN);

	if (afflvl != MPIDR_AFFLVL0) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();

		trace_power_flow(mpidr, CLUSTER_DOWN);
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that caches
 * will remain turned on across calls to this function as each affinity level is
 * dealt with. So do not write & read global variables across calls. It is wise
 * to flush a write to a global variable to prevent unpredictable results.
 ******************************************************************************/
static void plat_affinst_suspend(unsigned long sec_entrypoint,
			  unsigned int afflvl,
			  unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned long cluster_id;
	unsigned long cpu_id;
	uintptr_t rv;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	cpu_id = mpidr & MPIDR_CPU_MASK;
	cluster_id = mpidr & MPIDR_CLUSTER_MASK;

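	/*
	 * As in plat_affinst_on: program the warm-boot entry point into this
	 * cpu's reset vector register before powering down.
	 */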
	if (cluster_id)
		rv = (uintptr_t)&mt6795_mcucfg->mp1_rv_addr[cpu_id].rv_addr_lw;
	else
		rv = (uintptr_t)&mt6795_mcucfg->mp0_rv_addr[cpu_id].rv_addr_lw;

	mmio_write_32(rv, sec_entrypoint);

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_save_context(mpidr);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/* Disable coherency if this cluster is to be turned off */
		plat_cci_disable();
		disable_scu(mpidr);

		trace_power_flow(mpidr, CLUSTER_SUSPEND);
	}

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Prevent interrupts from spuriously waking up this cpu */
		plat_mt_gic_cpuif_disable();
	}
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
static void plat_affinst_on_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	/* Enable the gic cpu interface */
	plat_mt_gic_cpuif_enable();
	plat_mt_gic_pcpu_init();
	trace_power_flow(mpidr, CPU_UP);
}

/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered
 * on after having been suspended earlier. The level and mpidr determine the
 * affinity instance.
 ******************************************************************************/
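/*
 * The restore path mirrors plat_affinst_suspend in reverse: the GIC comes
 * back first at system level, then cluster coherency (SCU/CCI), and finally
 * the per-cpu generic timer context.
 */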
static void plat_affinst_suspend_finish(unsigned int afflvl, unsigned int state)
{
	unsigned long mpidr = read_mpidr_el1();

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return;

	if (afflvl >= MPIDR_AFFLVL2) {
		/* Initialize the gic and enable its cpu interface */
		plat_mt_gic_init();
		plat_mt_gic_cpuif_enable();
	}

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off */
		plat_cci_enable();
		trace_power_flow(mpidr, CLUSTER_UP);
	}

	if (afflvl >= MPIDR_AFFLVL0)
		mt_platform_restore_context(mpidr);

	plat_mt_gic_pcpu_init();
}

static unsigned int plat_get_sys_suspend_power_state(void)
{
	/* StateID: 0, StateType: 1(power down), PowerLevel: 2(system) */
	return psci_make_powerstate(0, 1, 2);
}

/*******************************************************************************
 * MTK handlers to shutdown/reboot the system
 ******************************************************************************/
static void __dead2 plat_system_off(void)
{
	INFO("MTK System Off\n");
	wfi();
	ERROR("MTK System Off: operation not handled.\n");
	panic();
}

static void __dead2 plat_system_reset(void)
{
	/* Trigger a cold reset through the MediaTek watchdog */
	INFO("MTK System Reset\n");

	mmio_clrbits_32(MTK_WDT_BASE,
		(MTK_WDT_MODE_DUAL_MODE | MTK_WDT_MODE_IRQ));
	mmio_setbits_32(MTK_WDT_BASE, (MTK_WDT_MODE_KEY | MTK_WDT_MODE_EXTEN));
	mmio_setbits_32(MTK_WDT_SWRST, MTK_WDT_SWRST_KEY);

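	/* The watchdog should reset the SoC shortly; park in wfi until then. */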
	wfi();
	ERROR("MTK System Reset: operation not handled.\n");
	panic();
}

/*******************************************************************************
 * Export the platform handlers to enable psci to invoke them
 ******************************************************************************/
static const plat_pm_ops_t plat_plat_pm_ops = {
	.affinst_standby		= plat_affinst_standby,
	.affinst_on			= plat_affinst_on,
	.affinst_off			= plat_affinst_off,
	.affinst_suspend		= plat_affinst_suspend,
	.affinst_on_finish		= plat_affinst_on_finish,
	.affinst_suspend_finish		= plat_affinst_suspend_finish,
	.system_off			= plat_system_off,
	.system_reset			= plat_system_reset,
	.get_sys_suspend_power_state	= plat_get_sys_suspend_power_state,
};

/*******************************************************************************
 * Export the platform specific power ops & initialize the mtk_platform power
 * controller
 ******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &plat_plat_pm_ops;
	return 0;
}