// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 */

#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

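/*
 * Per-powerdomain bookkeeping: the low-power state to program next and,
 * during suspend, the previously programmed state so it can be restored
 * once the system resumes.
 */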
struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

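/* SRAM copy of omap3_do_wfi(), set up by omap_push_sram_idle() */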
void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;

static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force a write of the last pad into memory, as this can fail in
	 * some cases according to errata 1.157, 1.185.
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
			 OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the interrupt controller context */
	omap_intc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context; padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during the boot sequence, but this works as we are not using
 * secure services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = omap3_save_secure_ram(omap3_secure_ram_storage,
					    OMAP3_SAVE_SECURE_RAM_SZ);
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* The following is for error tracking; it should not happen */
		if (ret) {
			pr_err("save_secure_sram() returns %08x\n", ret);
			while (1)
				;
		}
	}
}

static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
	int c;

	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK);

	return c ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
	int c;

	/*
	 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
	 * these are handled in a separate handler to avoid acking IO
	 * events before they are parsed by the mux code.
	 */
	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
						   OMAP3430_ST_IO_CHAIN_MASK));
	c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
	c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
		c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
	}

	return c ? IRQ_HANDLED : IRQ_NONE;
}

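/*
 * Save the ARM and L2 auxiliary control registers into the caller's
 * buffer so they can be restored after an MPU off transition.
 */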
static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}

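/* Finisher passed to cpu_suspend(); calls the low-level suspend code */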
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}

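/*
 * omap_sram_idle - enter the requested low-power state via WFI
 *
 * Saves whatever context the programmed MPU/CORE/PER next-states would
 * lose, executes WFI, and restores the context of any powerdomain that
 * actually reached off-mode on the way back.
 */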
void omap_sram_idle(void)
{
	/*
	 * save_state selects what needs to be saved and restored when
	 * entering idle:
	 *	0: nothing is saved or restored
	 *	1: only L1 and logic are lost
	 *	2: only L2 is lost
	 *	3: L1, L2 and logic are lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	u32 sdrc_pwr = 0;
	int error;

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		pr_err("Invalid mpu state in sram_idle\n");
		return;
	}

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

	pwrdm_pre_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF) {
		error = cpu_cluster_pm_enter();
		if (error)
			return;
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	/* Configure PMIC signaling for I2C4 or sys_off_mode */
	omap3_vc_set_pmic_signaling(core_next_state);

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value from
	 * scratchpad which has automatic self refresh on timeout of
	 * AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some ARM context
	 * gets saved. The rest is placed on the stack, and restored
	 * from there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	/* Restore normal SDRC POWER settings */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON &&
	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
		omap3_core_restore_context();
		omap3_cm_restore_context();
		omap3_sram_restore_context();
		omap2_sms_restore_context();
	} else {
		/*
		 * In the off-mode resume path above, omap3_core_restore_context
		 * also handles the INTC autoidle restore done here, so limit
		 * this to non-off-mode resume paths so we don't do it twice.
		 */
		omap3_intc_resume_idle();
	}

	pwrdm_post_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF)
		cpu_cluster_pm_exit();
}

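/*
 * arm_pm_idle hook: skip the SRAM idle path if an interrupt is
 * already pending.
 */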
static void omap3_pm_idle(void)
{
	if (omap_irq_pending())
		return;

	omap_sram_idle();
}

#ifdef CONFIG_SUSPEND
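/*
 * System suspend entry point: program each tracked powerdomain to its
 * target state, idle via omap_sram_idle(), then restore the previously
 * programmed next-states and report any domain that missed its target.
 */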
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */

static void __init prcm_setup_regs(void)
{
	omap3_ctrl_init();

	omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
}

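/*
 * Switch the next-state of all tracked powerdomains between OFF and
 * retention.  CORE off-mode is demoted to retention where erratum i583
 * applies (3630 < ES1.2).
 */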
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
		    pwrst->pwrdm == core_pwrdm &&
		    state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			pr_warn("%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

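/* Return the suspend target state recorded for @pwrdm, or -EINVAL */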
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

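/* Override the suspend target state for @pwrdm; -EINVAL if not tracked */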
int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

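/*
 * Called once per registered powerdomain at init: record the desired
 * next state, enable hardware save-and-restore where available, and
 * program the initial state.
 */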
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;

	if (enable_off_mode)
		pwrst->next_state = PWRDM_POWER_OFF;
	else
		pwrst->next_state = PWRDM_POWER_RET;

	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for the erratum i581 workaround
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}

static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the L2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
					  PM_PER_MEMORIES_ERRATUM_i582);
	} else if (cpu_is_omap34xx()) {
		pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
	}
}

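/*
 * Off-mode is only usable if the PMIC is set up for it, so enable it
 * only when one of the TWL4030 "power-idle" compatible nodes is present
 * in the device tree.
 */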
static void __init omap3_pm_check_pmic(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,twl4030-power-idle");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "ti,twl4030-power-idle-osc-off");

	if (np) {
		of_node_put(np);
		enable_off_mode = 1;
	} else {
		enable_off_mode = 0;
	}
}

int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
	int ret;

	if (!omap3_has_io_chain_ctrl())
		pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");

	pm_errata_configure();

	/*
	 * XXX prcm_setup_regs needs to be called before enabling hw
	 * supervised mode for powerdomains.
	 */
	prcm_setup_regs();

	ret = request_irq(omap_prcm_event_to_irq("wkup"),
			  _prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);

	if (ret) {
		pr_err("pm: Failed to request pm_wkup irq\n");
		goto err1;
	}

	/* IO interrupt is shared with mux code */
	ret = request_irq(omap_prcm_event_to_irq("io"),
			  _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
			  omap3_pm_init);

	if (ret) {
		pr_err("pm: Failed to request pm_io irq\n");
		goto err2;
	}

	omap3_pm_check_pmic();

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err3;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		pr_err("Failed to get mpu_pwrdm\n");
		ret = -EINVAL;
		goto err3;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	wkup_clkdm = clkdm_lookup("wkup_clkdm");

	omap_common_suspend_init(omap3_pm_suspend);

	arm_pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608.
	 * It is safer to disable RTA by the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	/*
	 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
	 * not correctly reset when the PER powerdomain comes back
	 * from OFF or OSWR when the CORE powerdomain is kept active.
	 * See OMAP36xx Erratum i582 "PER Domain reset issue after
	 * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
	 * complete workaround. The kernel must also prevent the PER
	 * powerdomain from going to OSWR/OFF while the CORE
	 * powerdomain is not going to OSWR/OFF. And if PER last
	 * power state was off while CORE last power state was ON, the
	 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
	 * self-test using their loopback tests; if that fails, those
	 * devices are unusable until the PER/CORE can complete a transition
	 * from ON to OSWR/OFF and then back to ON.
	 *
	 * XXX Technically this workaround is only needed if off-mode
	 * or OSWR is enabled.
	 */
	if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
		clkdm_add_wkdep(per_clkdm, wkup_clkdm);

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

		local_irq_disable();

		omap3_save_secure_ram_context();

		local_irq_enable();
	}

	omap3_save_scratchpad_contents();
	return ret;

err3:
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
	return ret;
}