/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <plat_private.h>
#include <pmu.h>
#include <pmu_com.h>
#include <rk3288_def.h>
#include <secure.h>
#include <soc.h>

DEFINE_BAKERY_LOCK(rockchip_pd_lock);

static uint32_t cpu_warm_boot_addr;

static uint32_t store_pmu_pwrmode_con;
static uint32_t store_sgrf_soc_con0;
static uint32_t store_sgrf_cpu_con0;

/* These enums are variants of the low power mode */
enum {
	ROCKCHIP_ARM_OFF_LOGIC_NORMAL = 0,
	ROCKCHIP_ARM_OFF_LOGIC_DEEP = 1,
};

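/*
 * Request (idle = 1) or release (idle = 0) the bus-idle handshake for the
 * domain selected by 'req', then poll PMU_BUS_IDE_ST until both the idle
 * and idle-ack bits reach the requested state.
 */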
static inline int rk3288_pmu_bus_idle(uint32_t req, uint32_t idle)
{
	uint32_t mask = BIT(req);
	uint32_t idle_mask = 0;
	uint32_t idle_target = 0;
	uint32_t val;
	uint32_t wait_cnt = 0;

	switch (req) {
	case bus_ide_req_gpu:
		idle_mask = BIT(pmu_idle_ack_gpu) | BIT(pmu_idle_gpu);
		idle_target = (idle << pmu_idle_ack_gpu) |
			      (idle << pmu_idle_gpu);
		break;
	case bus_ide_req_core:
		idle_mask = BIT(pmu_idle_ack_core) | BIT(pmu_idle_core);
		idle_target = (idle << pmu_idle_ack_core) |
			      (idle << pmu_idle_core);
		break;
	case bus_ide_req_cpup:
		idle_mask = BIT(pmu_idle_ack_cpup) | BIT(pmu_idle_cpup);
		idle_target = (idle << pmu_idle_ack_cpup) |
			      (idle << pmu_idle_cpup);
		break;
	case bus_ide_req_bus:
		idle_mask = BIT(pmu_idle_ack_bus) | BIT(pmu_idle_bus);
		idle_target = (idle << pmu_idle_ack_bus) |
			      (idle << pmu_idle_bus);
		break;
	case bus_ide_req_dma:
		idle_mask = BIT(pmu_idle_ack_dma) | BIT(pmu_idle_dma);
		idle_target = (idle << pmu_idle_ack_dma) |
			      (idle << pmu_idle_dma);
		break;
	case bus_ide_req_peri:
		idle_mask = BIT(pmu_idle_ack_peri) | BIT(pmu_idle_peri);
		idle_target = (idle << pmu_idle_ack_peri) |
			      (idle << pmu_idle_peri);
		break;
	case bus_ide_req_video:
		idle_mask = BIT(pmu_idle_ack_video) | BIT(pmu_idle_video);
		idle_target = (idle << pmu_idle_ack_video) |
			      (idle << pmu_idle_video);
		break;
	case bus_ide_req_hevc:
		idle_mask = BIT(pmu_idle_ack_hevc) | BIT(pmu_idle_hevc);
		idle_target = (idle << pmu_idle_ack_hevc) |
			      (idle << pmu_idle_hevc);
		break;
	case bus_ide_req_vio:
		idle_mask = BIT(pmu_idle_ack_vio) | BIT(pmu_idle_vio);
		idle_target = (idle << pmu_idle_ack_vio) |
			      (idle << pmu_idle_vio);
		break;
	case bus_ide_req_alive:
		idle_mask = BIT(pmu_idle_ack_alive) | BIT(pmu_idle_alive);
		idle_target = (idle << pmu_idle_ack_alive) |
			      (idle << pmu_idle_alive);
		break;
	default:
		ERROR("%s: Unsupported idle request\n", __func__);
		break;
	}

	val = mmio_read_32(PMU_BASE + PMU_BUS_IDE_REQ);
	if (idle)
		val |= mask;
	else
		val &= ~mask;

	mmio_write_32(PMU_BASE + PMU_BUS_IDE_REQ, val);

	while ((mmio_read_32(PMU_BASE +
	       PMU_BUS_IDE_ST) & idle_mask) != idle_target) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:st=%x(%x)\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_BUS_IDE_ST),
			     idle_mask);
	}

	return 0;
}

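/*
 * Check whether the 24 MHz oscillator may be powered down during deep
 * sleep: it may only be disabled when every USB PHY is already in the
 * SIDDQ (powered-down) state, since USB wakeup needs the PHY clock.
 */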
static bool rk3288_sleep_disable_osc(void)
{
	static const uint32_t reg_offset[] = { GRF_UOC0_CON0, GRF_UOC1_CON0,
					       GRF_UOC2_CON0 };
	uint32_t reg, i;

	/*
	 * If any USB PHY is still on (GRF_SIDDQ == 0), USB wakeup is needed,
	 * so do not switch to the 32 kHz source, since the USB PHY clock is
	 * not connected to the 32 kHz oscillator.
	 */
	for (i = 0; i < ARRAY_SIZE(reg_offset); i++) {
		reg = mmio_read_32(GRF_BASE + reg_offset[i]);
		if (!(reg & GRF_SIDDQ))
			return false;
	}

	return true;
}

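/*
 * Program PMU_PWRMODE_CON/PMU_PWRMODE_CON1, the wakeup sources and the
 * stabilization counters for an ARM-off sleep state. 'level' selects
 * whether the logic stays in normal retention or enters deep sleep too.
 */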
static void pmu_set_sleep_mode(int level)
{
	uint32_t mode_set, mode_set1;
	bool osc_disable = rk3288_sleep_disable_osc();

	mode_set = BIT(pmu_mode_glb_int_dis) | BIT(pmu_mode_l2_flush_en) |
		   BIT(pmu_mode_sref0_enter) | BIT(pmu_mode_sref1_enter) |
		   BIT(pmu_mode_ddrc0_gt) | BIT(pmu_mode_ddrc1_gt) |
		   BIT(pmu_mode_en) | BIT(pmu_mode_chip_pd) |
		   BIT(pmu_mode_scu_pd);

	mode_set1 = BIT(pmu_mode_clr_core) | BIT(pmu_mode_clr_cpup);

	if (level == ROCKCHIP_ARM_OFF_LOGIC_DEEP) {
		/* arm off, logic deep sleep */
		mode_set |= BIT(pmu_mode_bus_pd) | BIT(pmu_mode_pmu_use_lf) |
			    BIT(pmu_mode_ddrio1_ret) |
			    BIT(pmu_mode_ddrio0_ret) |
			    BIT(pmu_mode_pmu_alive_use_lf) |
			    BIT(pmu_mode_pll_pd);

		if (osc_disable)
			mode_set |= BIT(pmu_mode_osc_dis);

		mode_set1 |= BIT(pmu_mode_clr_alive) | BIT(pmu_mode_clr_bus) |
			     BIT(pmu_mode_clr_peri) | BIT(pmu_mode_clr_dma);

		mmio_write_32(PMU_BASE + PMU_WAKEUP_CFG1,
			      BIT(pmu_armint_wakeup_en));

		/*
		 * In deep suspend we use PMU_PMU_USE_LF to let the rk3288
		 * switch its main clock supply to the alternative 32kHz
		 * source. Therefore set 30ms on a 32kHz clock for pmic
		 * stabilization. Similar 30ms on 24MHz for the other
		 * mode below.
		 */
		mmio_write_32(PMU_BASE + PMU_STABL_CNT, 32 * 30);

		/* only wait for stabilization, if we turned the osc off */
		mmio_write_32(PMU_BASE + PMU_OSC_CNT,
					 osc_disable ? 32 * 30 : 0);
	} else {
		/*
		 * arm off, logic normal
		 * If pmu_clk_core_src_gate_en is not set, wakeup
		 * will fail.
		 */
		mode_set |= BIT(pmu_mode_core_src_gt);

		mmio_write_32(PMU_BASE + PMU_WAKEUP_CFG1,
			      BIT(pmu_armint_wakeup_en) |
			      BIT(pmu_gpioint_wakeup_en));

		/* 30ms on a 24MHz clock for pmic stabilization */
		mmio_write_32(PMU_BASE + PMU_STABL_CNT, 24000 * 30);

		/* oscillator is still running, so no need to wait */
		mmio_write_32(PMU_BASE + PMU_OSC_CNT, 0);
	}

	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, mode_set);
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON1, mode_set1);
}

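/*
 * Power on the per-cpu power domain. If the domain is still on from a
 * previous run, the core is first held in reset and powered off, then
 * powered back on and released from reset.
 */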
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cpu_pd;

	cpu_pd = PD_CPU0 + cpu_id;

	/* if the core is already on, power it off first */
	if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
		/* put core in reset - some sort of A12/A17 bug */
		mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(0),
			      BIT(cpu_id) | (BIT(cpu_id) << 16));

		pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
	}

	pmu_power_domain_ctr(cpu_pd, pmu_pd_on);

	/* pull core out of reset */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(0), BIT(cpu_id) << 16);

	return 0;
}

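/*
 * Power off the per-cpu power domain: wait for the core to reach WFI/WFE
 * (returning -EINVAL if it never idles), hold it in reset and switch its
 * domain off.
 */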
static int cpus_power_domain_off(uint32_t cpu_id)
{
	uint32_t cpu_pd = PD_CPU0 + cpu_id;

	if (pmu_power_domain_st(cpu_pd) == pmu_pd_off)
		return 0;

	if (check_cpu_wfie(cpu_id, CKECK_WFEI_MSK))
		return -EINVAL;

	/* put core in reset - some sort of A12/A17 bug */
	mmio_write_32(CRU_BASE + CRU_SOFTRSTS_CON(0),
		      BIT(cpu_id) | (BIT(cpu_id) << 16));

	pmu_power_domain_ctr(cpu_pd, pmu_pd_off);

	return 0;
}

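/* Power off every core except the one currently executing this code. */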
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, cpu;

	boot_cpu = plat_my_core_pos();

	/* turn off nonboot cpus */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
		if (cpu == boot_cpu)
			continue;

		cpus_power_domain_off(cpu);
	}
}

void sram_save(void)
{
	/* TODO: support the sram save for rk3288 SoCs */
}

void sram_restore(void)
{
	/* TODO: support the sram restore for rk3288 SoCs */
}

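/*
 * PSCI CPU_ON handler: record the hotplug entry point for the target core,
 * power its domain on and kick the bootrom mailbox so the core jumps to
 * the warm-boot entry.
 */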
int rockchip_soc_cores_pwr_dm_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	/*
	 * We communicate with the bootrom to activate the cpus other
	 * than cpu0. After a blob of initialization code they stay in
	 * the wfe state; once woken, they check the mailbox:
	 * sram_base_addr + 4: 0xdeadbeaf
	 * sram_base_addr + 8: start address for pc
	 * cpu0 has to wait for the other cpus to enter the wfe state.
	 * The wait time is affected by many aspects (e.g. cpu frequency,
	 * bootrom frequency, sram frequency, ...).
	 */
	mdelay(1); /* ensure the cpus other than cpu0 have started up */

	/* tell the bootrom mailbox where to start from */
	mmio_write_32(SRAM_BASE + 8, cpu_warm_boot_addr);
	mmio_write_32(SRAM_BASE + 4, 0xDEADBEAF);
	dsb();
	sev();

	return 0;
}

int rockchip_soc_cores_pwr_dm_on_finish(void)
{
	return 0;
}

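/*
 * Undo the suspend-time setup: restore the PMU power mode, debug port and
 * fast-boot settings, bring the clock tree and PLLs back and reinitialize
 * the GIC.
 */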
int rockchip_soc_sys_pwr_dm_resume(void)
{
	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, store_pmu_pwrmode_con);
	mmio_write_32(SGRF_BASE + SGRF_CPU_CON(0),
		      store_sgrf_cpu_con0 | SGRF_DAPDEVICE_MSK);

	/* disable fastboot mode */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(0),
		      store_sgrf_soc_con0 | SGRF_FAST_BOOT_DIS);

	secure_watchdog_ungate();
	clk_gate_con_restore();
	clk_sel_con_restore();
	clk_plls_resume();

	secure_gic_init();
	plat_rockchip_gic_init();

	return 0;
}

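/*
 * Prepare the SoC for system suspend: park the secondary cpus, save the
 * registers that resume must restore, program the PMU sleep mode and point
 * the fast-boot address at the PMU warm-boot entry.
 */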
int rockchip_soc_sys_pwr_dm_suspend(void)
{
	nonboot_cpus_off();

	store_sgrf_cpu_con0 = mmio_read_32(SGRF_BASE + SGRF_CPU_CON(0));
	store_sgrf_soc_con0 = mmio_read_32(SGRF_BASE + SGRF_SOC_CON(0));
	store_pmu_pwrmode_con = mmio_read_32(PMU_BASE + PMU_PWRMODE_CON);

	/* save clk-gates and ungate all for suspend */
	clk_gate_con_save();
	clk_gate_con_disable();
	clk_sel_con_save();

	pmu_set_sleep_mode(ROCKCHIP_ARM_OFF_LOGIC_NORMAL);

	clk_plls_suspend();
	secure_watchdog_gate();

	/*
	 * The dapswjdp cannot auto-reset before resume, which may cause it
	 * to access illegal addresses during resume. Disable it before
	 * suspend; the MASKROM will enable it again.
	 */
	mmio_write_32(SGRF_BASE + SGRF_CPU_CON(0), SGRF_DAPDEVICE_MSK);

	/*
	 * SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR
	 */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(0), SGRF_FAST_BOOT_ENA);

	/* boot-address of resuming system is from this register value */
	mmio_write_32(SGRF_BASE + SGRF_FAST_BOOT_ADDR,
		      (uint32_t)&pmu_cpuson_entrypoint);

	/* flush all caches - otherwise we might lose the resume address */
	dcsw_op_all(DC_OP_CISW);

	return 0;
}

void rockchip_plat_mmu_svc_mon(void)
{
}

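/*
 * Cold-boot PMU setup: record the warm-boot entry address, mark every cpu
 * power domain as on and shut down the non-boot cpus.
 */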
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	cpu_warm_boot_addr = (uint32_t)platform_cpu_warmboot;

	/* on boot all power-domains are on */
	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = pmu_pd_on;

	nonboot_cpus_off();
}