/*
 * Copyright (C) 2018-2020 Marvell International Ltd.
 *
 * SPDX-License-Identifier:	BSD-3-Clause
 * https://spdx.org/licenses
 */

#include <common/debug.h>
#ifdef USE_CCI
#include <drivers/arm/cci.h>
#endif
#include <lib/psci/psci.h>
#include <lib/mmio.h>
#include <plat/common/platform.h>

#include <a3700_pm.h>
#include <arch_helpers.h>
#include <armada_common.h>
#include <dram_win.h>
#include <io_addr_dec.h>
#include <mvebu.h>
#include <mvebu_def.h>
#include <marvell_plat_priv.h>
#include <plat_marvell.h>

/* Warm reset register */
#define MVEBU_WARM_RESET_REG		(MVEBU_NB_REGS_BASE + 0x840)
#define MVEBU_WARM_RESET_MAGIC		0x1D1E

/* North Bridge GPIO1 SEL register */
#define MVEBU_NB_GPIO1_SEL_REG		(MVEBU_NB_REGS_BASE + 0x830)
 #define MVEBU_NB_GPIO1_UART1_SEL	BIT(19)
 #define MVEBU_NB_GPIO1_GPIO_25_26_EN	BIT(17)
 #define MVEBU_NB_GPIO1_GPIO_19_EN	BIT(14)
 #define MVEBU_NB_GPIO1_GPIO_18_EN	BIT(13)

/* CPU 1 reset register */
#define MVEBU_CPU_1_RESET_VECTOR	(MVEBU_REGS_BASE + 0x14044)
#define MVEBU_CPU_1_RESET_REG		(MVEBU_REGS_BASE + 0xD00C)
#define MVEBU_CPU_1_RESET_BIT		31

/* IRQ register */
#define MVEBU_NB_IRQ_STATUS_1_REG		(MVEBU_NB_SB_IRQ_REG_BASE)
#define MVEBU_NB_IRQ_STATUS_2_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x10)
#define MVEBU_NB_IRQ_MASK_2_REG			(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x18)
#define MVEBU_SB_IRQ_STATUS_1_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x40)
#define MVEBU_SB_IRQ_STATUS_2_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0x50)
#define MVEBU_NB_GPIO_IRQ_MASK_1_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0xC8)
#define MVEBU_NB_GPIO_IRQ_MASK_2_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0xD8)
#define MVEBU_SB_GPIO_IRQ_MASK_REG		(MVEBU_NB_SB_IRQ_REG_BASE + \
						0xE8)
#define MVEBU_NB_GPIO_IRQ_EN_LOW_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE)
#define MVEBU_NB_GPIO_IRQ_EN_HIGH_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x04)
#define MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG	(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x10)
#define MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG	(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x14)
#define MVEBU_NB_GPIO_IRQ_WK_LOW_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x18)
#define MVEBU_NB_GPIO_IRQ_WK_HIGH_REG		(MVEBU_NB_GPIO_IRQ_REG_BASE + \
						0x1C)
#define MVEBU_SB_GPIO_IRQ_EN_REG		(MVEBU_SB_GPIO_IRQ_REG_BASE)
#define MVEBU_SB_GPIO_IRQ_STATUS_REG		(MVEBU_SB_GPIO_IRQ_REG_BASE + \
						0x10)
#define MVEBU_SB_GPIO_IRQ_WK_REG		(MVEBU_SB_GPIO_IRQ_REG_BASE + \
						0x18)

/* PMU registers */
#define MVEBU_PM_NB_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE)
 #define MVEBU_PM_PWR_DN_CNT_SEL	BIT(28)
 #define MVEBU_PM_SB_PWR_DWN		BIT(4)
 #define MVEBU_PM_INTERFACE_IDLE	BIT(0)
#define MVEBU_PM_NB_CPU_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE + 0x4)
 #define MVEBU_PM_L2_FLUSH_EN		BIT(22)
#define MVEBU_PM_NB_PWR_OPTION_REG	(MVEBU_PMSU_REG_BASE + 0x8)
 #define MVEBU_PM_DDR_SR_EN		BIT(29)
 #define MVEBU_PM_DDR_CLK_DIS_EN	BIT(28)
 #define MVEBU_PM_WARM_RESET_EN		BIT(27)
 #define MVEBU_PM_DDRPHY_PWRDWN_EN	BIT(23)
 #define MVEBU_PM_DDRPHY_PAD_PWRDWN_EN	BIT(22)
 #define MVEBU_PM_OSC_OFF_EN		BIT(21)
 #define MVEBU_PM_TBG_OFF_EN		BIT(20)
 #define MVEBU_PM_CPU_VDDV_OFF_EN	BIT(19)
 #define MVEBU_PM_AVS_DISABLE_MODE	BIT(14)
 #define MVEBU_PM_AVS_VDD2_MODE		BIT(13)
 #define MVEBU_PM_AVS_HOLD_MODE		BIT(12)
 #define MVEBU_PM_L2_SRAM_LKG_PD_EN	BIT(8)
 #define MVEBU_PM_EIP_SRAM_LKG_PD_EN	BIT(7)
 #define MVEBU_PM_DDRMC_SRAM_LKG_PD_EN	BIT(6)
 #define MVEBU_PM_MCI_SRAM_LKG_PD_EN	BIT(5)
 #define MVEBU_PM_MMC_SRAM_LKG_PD_EN	BIT(4)
 #define MVEBU_PM_SATA_SRAM_LKG_PD_EN	BIT(3)
 #define MVEBU_PM_DMA_SRAM_LKG_PD_EN	BIT(2)
 #define MVEBU_PM_SEC_SRAM_LKG_PD_EN	BIT(1)
 #define MVEBU_PM_CPU_SRAM_LKG_PD_EN	BIT(0)
 #define MVEBU_PM_NB_SRAM_LKG_PD_EN	(MVEBU_PM_L2_SRAM_LKG_PD_EN |\
	MVEBU_PM_EIP_SRAM_LKG_PD_EN | MVEBU_PM_DDRMC_SRAM_LKG_PD_EN |\
	MVEBU_PM_MCI_SRAM_LKG_PD_EN | MVEBU_PM_MMC_SRAM_LKG_PD_EN |\
	MVEBU_PM_SATA_SRAM_LKG_PD_EN | MVEBU_PM_DMA_SRAM_LKG_PD_EN |\
	MVEBU_PM_SEC_SRAM_LKG_PD_EN | MVEBU_PM_CPU_SRAM_LKG_PD_EN)
#define MVEBU_PM_NB_PWR_DEBUG_REG	(MVEBU_PMSU_REG_BASE + 0xC)
 #define MVEBU_PM_NB_FORCE_CLK_ON	BIT(30)
 #define MVEBU_PM_IGNORE_CM3_SLEEP	BIT(21)
 #define MVEBU_PM_IGNORE_CM3_DEEP	BIT(20)
#define MVEBU_PM_NB_WAKE_UP_EN_REG	(MVEBU_PMSU_REG_BASE + 0x2C)
 #define MVEBU_PM_SB_WKP_NB_EN		BIT(31)
 #define MVEBU_PM_NB_GPIO_WKP_EN	BIT(27)
 #define MVEBU_PM_SOC_TIMER_WKP_EN	BIT(26)
 #define MVEBU_PM_UART_WKP_EN		BIT(25)
 #define MVEBU_PM_UART2_WKP_EN		BIT(19)
 #define MVEBU_PM_CPU_TIMER_WKP_EN	BIT(17)
 #define MVEBU_PM_NB_WKP_EN		BIT(16)
 #define MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN	BIT(13)
 #define MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN	BIT(12)
#define MVEBU_PM_CPU_0_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE + 0x34)
#define MVEBU_PM_CPU_1_PWR_CTRL_REG	(MVEBU_PMSU_REG_BASE + 0x38)
 #define MVEBU_PM_CORE_SOC_PD		BIT(2)
 #define MVEBU_PM_CORE_PROC_PD		BIT(1)
 #define MVEBU_PM_CORE_PD		BIT(0)
#define MVEBU_PM_CORE_1_RETURN_ADDR_REG	(MVEBU_PMSU_REG_BASE + 0x44)
#define MVEBU_PM_CPU_VDD_OFF_INFO_1_REG	(MVEBU_PMSU_REG_BASE + 0x48)
#define MVEBU_PM_CPU_VDD_OFF_INFO_2_REG	(MVEBU_PMSU_REG_BASE + 0x4C)
 #define MVEBU_PM_LOW_POWER_STATE	BIT(0)
#define MVEBU_PM_CPU_WAKE_UP_CONF_REG	(MVEBU_PMSU_REG_BASE + 0x54)
 #define MVEBU_PM_CORE1_WAKEUP		BIT(13)
 #define MVEBU_PM_CORE0_WAKEUP		BIT(12)
#define MVEBU_PM_WAIT_DDR_RDY_VALUE	(0x15)
#define MVEBU_PM_SB_CPU_PWR_CTRL_REG	(MVEBU_SB_WAKEUP_REG_BASE)
  #define MVEBU_PM_SB_PM_START		BIT(0)
#define MVEBU_PM_SB_PWR_OPTION_REG	(MVEBU_SB_WAKEUP_REG_BASE + 0x4)
  #define MVEBU_PM_SDIO_PHY_PDWN_EN	BIT(17)
  #define MVEBU_PM_SB_VDDV_OFF_EN	BIT(16)
  #define MVEBU_PM_EBM_SRAM_LKG_PD_EN		BIT(11)
  #define MVEBU_PM_PCIE_SRAM_LKG_PD_EN		BIT(10)
  #define MVEBU_PM_GBE1_TX_SRAM_LKG_PD_EN	BIT(9)
  #define MVEBU_PM_GBE1_RX_SRAM_LKG_PD_EN	BIT(8)
  #define MVEBU_PM_GBE1_MIB_SRAM_LKG_PD_EN	BIT(7)
  #define MVEBU_PM_GBE0_TX_SRAM_LKG_PD_EN	BIT(6)
  #define MVEBU_PM_GBE0_RX_SRAM_LKG_PD_EN	BIT(5)
  #define MVEBU_PM_GBE0_MIB_SRAM_LKG_PD_EN	BIT(4)
  #define MVEBU_PM_SDIO_SRAM_LKG_PD_EN		BIT(3)
  #define MVEBU_PM_USB2_SRAM_LKG_PD_EN		BIT(2)
  #define MVEBU_PM_USB3_H_SRAM_LKG_PD_EN	BIT(1)
  #define MVEBU_PM_SB_SRAM_LKG_PD_EN	(MVEBU_PM_EBM_SRAM_LKG_PD_EN |\
	MVEBU_PM_PCIE_SRAM_LKG_PD_EN | MVEBU_PM_GBE1_TX_SRAM_LKG_PD_EN |\
	MVEBU_PM_GBE1_RX_SRAM_LKG_PD_EN | MVEBU_PM_GBE1_MIB_SRAM_LKG_PD_EN |\
	MVEBU_PM_GBE0_TX_SRAM_LKG_PD_EN | MVEBU_PM_GBE0_RX_SRAM_LKG_PD_EN |\
	MVEBU_PM_GBE0_MIB_SRAM_LKG_PD_EN | MVEBU_PM_SDIO_SRAM_LKG_PD_EN |\
	MVEBU_PM_USB2_SRAM_LKG_PD_EN | MVEBU_PM_USB3_H_SRAM_LKG_PD_EN)
#define MVEBU_PM_SB_WK_EN_REG		(MVEBU_SB_WAKEUP_REG_BASE + 0x10)
  #define MVEBU_PM_SB_GPIO_WKP_EN	BIT(24)
  #define MVEBU_PM_SB_WKP_EN		BIT(20)

/* DRAM registers */
#define MVEBU_DRAM_STATS_CH0_REG	(MVEBU_DRAM_REG_BASE + 0x4)
 #define MVEBU_DRAM_WCP_EMPTY		BIT(19)
#define MVEBU_DRAM_CMD_0_REG		(MVEBU_DRAM_REG_BASE + 0x20)
 #define MVEBU_DRAM_CH0_CMD0		BIT(28)
 #define MVEBU_DRAM_CS_CMD0		BIT(24)
 #define MVEBU_DRAM_WCB_DRAIN_REQ	BIT(1)
#define MVEBU_DRAM_PWR_CTRL_REG		(MVEBU_DRAM_REG_BASE + 0x54)
 #define MVEBU_DRAM_PHY_CLK_GATING_EN	BIT(1)
 #define MVEBU_DRAM_PHY_AUTO_AC_OFF_EN	BIT(0)

/* AVS registers */
#define MVEBU_AVS_CTRL_2_REG		(MVEBU_AVS_REG_BASE + 0x8)
 #define MVEBU_LOW_VDD_MODE_EN		BIT(6)

/* Clock registers */
#define MVEBU_NB_CLOCK_SEL_REG		(MVEBU_NB_REGS_BASE + 0x10)
 #define MVEBU_A53_CPU_CLK_SEL		BIT(15)

/* North Bridge Step-Down Registers */
#define MVEBU_NB_STEP_DOWN_INT_EN_REG	MVEBU_NB_STEP_DOWN_REG_BASE
 #define MVEBU_NB_GPIO_INT_WAKE_WCPU_CLK	BIT(8)

#define MVEBU_NB_GPIO_18	18
#define MVEBU_NB_GPIO_19	19
#define MVEBU_NB_GPIO_25	25
#define MVEBU_NB_GPIO_26	26

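/*
 * Wake-up source configuration is dispatched through a small table that maps
 * each pm_wake_up_src_type to its handler (see src_func_table below).
 */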
typedef int (*wake_up_src_func)(union pm_wake_up_src_data *);

struct wake_up_src_func_map {
	enum pm_wake_up_src_type type;
	wake_up_src_func func;
};

void marvell_psci_arch_init(int die_index)
{
}

static void a3700_pm_ack_irq(void)
{
	uint32_t reg;

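	/*
	 * The pending bits in the IRQ/GPIO status registers are cleared by
	 * writing back the value that was just read, so each read-then-write
	 * pair below acknowledges every pending interrupt in that register.
	 */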
	reg = mmio_read_32(MVEBU_NB_IRQ_STATUS_1_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_IRQ_STATUS_1_REG, reg);

	reg = mmio_read_32(MVEBU_NB_IRQ_STATUS_2_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_IRQ_STATUS_2_REG, reg);

	reg = mmio_read_32(MVEBU_SB_IRQ_STATUS_1_REG);
	if (reg)
		mmio_write_32(MVEBU_SB_IRQ_STATUS_1_REG, reg);

	reg = mmio_read_32(MVEBU_SB_IRQ_STATUS_2_REG);
	if (reg)
		mmio_write_32(MVEBU_SB_IRQ_STATUS_2_REG, reg);

	reg = mmio_read_32(MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_GPIO_IRQ_STATUS_LOW_REG, reg);

	reg = mmio_read_32(MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG);
	if (reg)
		mmio_write_32(MVEBU_NB_GPIO_IRQ_STATUS_HIGH_REG, reg);

	reg = mmio_read_32(MVEBU_SB_GPIO_IRQ_STATUS_REG);
	if (reg)
		mmio_write_32(MVEBU_SB_GPIO_IRQ_STATUS_REG, reg);
}

/*****************************************************************************
 * A3700 handler called to check the validity of the power state
 * parameter.
 *****************************************************************************
 */
int a3700_validate_power_state(unsigned int power_state,
			       psci_power_state_t *req_state)
{
	ERROR("%s needs to be implemented\n", __func__);
	panic();
}

/*****************************************************************************
 * A3700 handler called when a CPU is about to enter standby.
 *****************************************************************************
 */
void a3700_cpu_standby(plat_local_state_t cpu_state)
{
	ERROR("%s needs to be implemented\n", __func__);
	panic();
}

/*****************************************************************************
 * A3700 handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 *****************************************************************************
 */
int a3700_pwr_domain_on(u_register_t mpidr)
{
	/* Set barrier */
	dsbsy();

	/* Set the cpu start address to BL1 entry point */
	mmio_write_32(MVEBU_CPU_1_RESET_VECTOR,
		      PLAT_MARVELL_CPU_ENTRY_ADDR >> 2);

	/* Get the cpu out of reset */
	mmio_clrbits_32(MVEBU_CPU_1_RESET_REG, BIT(MVEBU_CPU_1_RESET_BIT));
	mmio_setbits_32(MVEBU_CPU_1_RESET_REG, BIT(MVEBU_CPU_1_RESET_BIT));

	return 0;
}

/*****************************************************************************
 * A3700 handler called to validate the entry point.
 *****************************************************************************
 */
int a3700_validate_ns_entrypoint(uintptr_t entrypoint)
{
	return PSCI_E_SUCCESS;
}

/*****************************************************************************
 * A3700 handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 *****************************************************************************
 */
void a3700_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_marvell_gic_cpuif_disable();

	/* The core cannot be powered down with a pending IRQ;
	 * acknowledge all pending IRQs first.
	 */
	a3700_pm_ack_irq();
}

static void a3700_set_gen_pwr_off_option(void)
{
	/* Enable L2 flush -> processor state-machine option */
	mmio_setbits_32(MVEBU_PM_NB_CPU_PWR_CTRL_REG, MVEBU_PM_L2_FLUSH_EN);

	/*
	 * The north bridge cannot be VDD off (it is always on);
	 * its low power modes are handled by the NB state machine.
	 * This bit MUST be set for north bridge power down, e.g.,
	 * OSC input cut-off (not tested), SRAM power down, PMIC, etc.
	 * It is not related to CPU VDD OFF!
	 */
	mmio_clrbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_CPU_VDDV_OFF_EN);

	/*
	 * MUST: Switch the CPU/AXI clock to OSC.
	 * The NB state machine clock is always connected to OSC (slow clock),
	 * but the core 0/1/processor state machine clocks are connected to
	 * the AXI clock, which currently takes the TBG as its source.
	 * When running from the AXI clock, the core 0/1/processor state
	 * machines may run much faster than the NB state machine, which
	 * causes problems if the cores are released before the north bridge
	 * is ready.
	 */
	mmio_clrbits_32(MVEBU_NB_CLOCK_SEL_REG, MVEBU_A53_CPU_CLK_SEL);

	/*
	 * These register bits trigger the north bridge power-down
	 * state machine regardless of the CM3 status.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_DEBUG_REG, MVEBU_PM_IGNORE_CM3_SLEEP);
	mmio_setbits_32(MVEBU_PM_NB_PWR_DEBUG_REG, MVEBU_PM_IGNORE_CM3_DEEP);

	/*
	 * SRAM => controlled by north bridge state machine.
	 * Core VDD OFF is not related to CPU SRAM power down.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_NB_SRAM_LKG_PD_EN);

	/*
	 * Idle the AXI interface in order to get L2_WFI.
	 * L2 WFI is only asserted after both CORE-0 and CORE-1 WFI are
	 * asserted (only when both cores are in WFI is L2 WFI issued by
	 * the cores). Once L2 WFI is asserted, this bit is used to signal
	 * the assertion to the AXI IO masters.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_INTERFACE_IDLE);

	/* Enable core0 and core1 VDD_OFF */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_PD);
	mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_PD);

	/* Enable North bridge power down -
	 * Both Cores MUST enable this bit to power down north bridge!
	 */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_SOC_PD);
	mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_SOC_PD);

	/* CA53 (processor domain) power down */
	mmio_setbits_32(MVEBU_PM_CPU_0_PWR_CTRL_REG, MVEBU_PM_CORE_PROC_PD);
	mmio_setbits_32(MVEBU_PM_CPU_1_PWR_CTRL_REG, MVEBU_PM_CORE_PROC_PD);
}

static void a3700_en_ddr_self_refresh(void)
{
	/*
	 * Both counts are 16 bits wide and configurable. By default, the
	 * osc stb cnt uses 0xFFF in its lower 12 bits, so the power-down
	 * count is smaller than the osc count.
	 * This count is used for exiting DDR SR mode on a wakeup event.
	 * The power-down count also affects the following state changes:
	 * idle -> count-down -> ... (power-down, vdd off, etc.)
	 * Here we make the stabilization wait shorter: use the power-down
	 * count value instead of osc_stb_cnt to speed up DDR self-refresh
	 * exit.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_PWR_DN_CNT_SEL);

	/*
	 * DDR SR mode is controlled by the north bridge state machine;
	 * therefore the north bridge must be powered down to trigger the
	 * DDR SR mode switch.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDR_SR_EN);
	/* Disable DDR clock, otherwise DDR will not enter into SR mode. */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDR_CLK_DIS_EN);
	/* Power down DDR PHY (PAD) */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_DDRPHY_PWRDWN_EN);
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG,
			MVEBU_PM_DDRPHY_PAD_PWRDWN_EN);

	/* Set wait time for DDR ready in ROM code */
	mmio_write_32(MVEBU_PM_CPU_VDD_OFF_INFO_1_REG,
		      MVEBU_PM_WAIT_DDR_RDY_VALUE);

	/* DDR flush write buffer - mandatory */
	mmio_write_32(MVEBU_DRAM_CMD_0_REG, MVEBU_DRAM_CH0_CMD0 |
		      MVEBU_DRAM_CS_CMD0 | MVEBU_DRAM_WCB_DRAIN_REQ);
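	/* Wait until the DRAM controller reports its write buffer empty */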
	while ((mmio_read_32(MVEBU_DRAM_STATS_CH0_REG) &
			     MVEBU_DRAM_WCP_EMPTY) != MVEBU_DRAM_WCP_EMPTY)
		;

	/* Trigger PHY reset after DDR out of self refresh =>
	 * supply reset pulse for DDR PHY after wake up
	 */
	mmio_setbits_32(MVEBU_DRAM_PWR_CTRL_REG, MVEBU_DRAM_PHY_CLK_GATING_EN |
						 MVEBU_DRAM_PHY_AUTO_AC_OFF_EN);
}

static void a3700_pwr_dn_avs(void)
{
	/*
	 * AVS power down is controlled by the north bridge state machine.
	 * Enable AVS power down by clearing the AVS disable bit.
	 */
	mmio_clrbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_DISABLE_MODE);
	/*
	 * Bits [13:12] must be set to power down AVS:
	 * 1. Enable AVS VDD2 mode.
	 * 2. After powering down AVS, the AVS output voltage must be held.
	 * 3. A lower VDD can be chosen for AVS power down.
	 */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_VDD2_MODE);
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_AVS_HOLD_MODE);

	/* Enable low VDD mode, AVS will set CPU to lowest core VDD 747mV */
	mmio_setbits_32(MVEBU_AVS_CTRL_2_REG, MVEBU_LOW_VDD_MODE_EN);
}

static void a3700_pwr_dn_tbg(void)
{
	/* Power down TBG */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_TBG_OFF_EN);
}

static void a3700_pwr_dn_sb(void)
{
	/* Enable south bridge power down option */
	mmio_setbits_32(MVEBU_PM_NB_PWR_CTRL_REG, MVEBU_PM_SB_PWR_DWN);

	/* Enable SDIO_PHY_PWRDWN */
	mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SDIO_PHY_PDWN_EN);

	/* Enable SRAM LRM on SB */
	mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SB_SRAM_LKG_PD_EN);

	/* Enable SB Power Off */
	mmio_setbits_32(MVEBU_PM_SB_PWR_OPTION_REG, MVEBU_PM_SB_VDDV_OFF_EN);

	/* Kick off South Bridge Power Off */
	mmio_setbits_32(MVEBU_PM_SB_CPU_PWR_CTRL_REG, MVEBU_PM_SB_PM_START);
}

static void a3700_set_pwr_off_option(void)
{
	/* Set general power off option */
	a3700_set_gen_pwr_off_option();

	/* Enable DDR self refresh in low power mode */
	a3700_en_ddr_self_refresh();

	/* Power down AVS */
	a3700_pwr_dn_avs();

	/* Power down TBG */
	a3700_pwr_dn_tbg();

	/* Power down the south bridge; note that the south bridge
	 * settings must have been configured beforehand.
	 */
	a3700_pwr_dn_sb();
}

static void a3700_set_wake_up_option(void)
{
	/*
	 * Enable the wakeup event for NB SOC => north-bridge
	 * state-machine enablement on wake-up event
	 */
	mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_NB_WKP_EN);

	/* Enable both core0 and core1 wakeup on demand */
	mmio_setbits_32(MVEBU_PM_CPU_WAKE_UP_CONF_REG,
			MVEBU_PM_CORE1_WAKEUP | MVEBU_PM_CORE0_WAKEUP);

	/* Enable warm reset in low power mode */
	mmio_setbits_32(MVEBU_PM_NB_PWR_OPTION_REG, MVEBU_PM_WARM_RESET_EN);
}

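/*
 * North bridge GPIOs are split across two register banks: pins 0-31 use the
 * low/_1 mask and enable registers, pins 32 and above use the high/_2 ones.
 */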
static void a3700_pm_en_nb_gpio(uint32_t gpio)
{
	/* For GPIO1 interrupt -- North bridge only */
	if (gpio >= 32) {
		/* GPIO int mask */
		mmio_clrbits_32(MVEBU_NB_GPIO_IRQ_MASK_2_REG, BIT(gpio - 32));

		/* NB_CPU_WAKE-up ENABLE GPIO int */
		mmio_setbits_32(MVEBU_NB_GPIO_IRQ_EN_HIGH_REG, BIT(gpio - 32));
	} else {
		/* GPIO int mask */
		mmio_clrbits_32(MVEBU_NB_GPIO_IRQ_MASK_1_REG, BIT(gpio));

		/* NB_CPU_WAKE-up ENABLE GPIO int */
		mmio_setbits_32(MVEBU_NB_GPIO_IRQ_EN_LOW_REG, BIT(gpio));
	}

	mmio_setbits_32(MVEBU_NB_STEP_DOWN_INT_EN_REG,
			MVEBU_NB_GPIO_INT_WAKE_WCPU_CLK);

	/* Enable using GPIO as wakeup event
	 * (actually not only for north bridge)
	 */
	mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_NB_GPIO_WKP_EN |
		MVEBU_PM_NB_WKP_EN | MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN |
		MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN);
}

static void a3700_pm_en_sb_gpio(uint32_t gpio)
{
	/* Enable using GPIO as wakeup event */
	mmio_setbits_32(MVEBU_PM_NB_WAKE_UP_EN_REG, MVEBU_PM_SB_WKP_NB_EN |
		MVEBU_PM_NB_WKP_EN | MVEBU_PM_CORE1_FIQ_IRQ_WKP_EN |
		MVEBU_PM_CORE0_FIQ_IRQ_WKP_EN);

	/* SB GPIO Wake UP | South Bridge Wake Up Enable */
	mmio_setbits_32(MVEBU_PM_SB_WK_EN_REG, MVEBU_PM_SB_GPIO_WKP_EN |
			MVEBU_PM_SB_WKP_EN);

	/* GPIO int mask */
	mmio_clrbits_32(MVEBU_SB_GPIO_IRQ_MASK_REG, BIT(gpio));

	/* NB_CPU_WAKE-up ENABLE GPIO int */
	mmio_setbits_32(MVEBU_SB_GPIO_IRQ_EN_REG, BIT(gpio));
}

int a3700_pm_src_gpio(union pm_wake_up_src_data *src_data)
{
	if (src_data->gpio_data.bank_num == 0)
		/* North Bridge GPIO */
		a3700_pm_en_nb_gpio(src_data->gpio_data.gpio_num);
	else
		a3700_pm_en_sb_gpio(src_data->gpio_data.gpio_num);
	return 0;
}

int a3700_pm_src_uart1(union pm_wake_up_src_data *src_data)
{
	/* Clear UART1 select */
	mmio_clrbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_UART1_SEL);
	/* Set pin 19 GPIO usage */
	mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_19_EN);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_19);
	/* Set pin 18 GPIO usage */
	mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_18_EN);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_18);

	return 0;
}

int a3700_pm_src_uart0(union pm_wake_up_src_data *src_data)
{
	/* Set pin 25/26 GPIO usage */
	mmio_setbits_32(MVEBU_NB_GPIO1_SEL_REG, MVEBU_NB_GPIO1_GPIO_25_26_EN);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_25);
	/* Enable GPIO wake-up */
	a3700_pm_en_nb_gpio(MVEBU_NB_GPIO_26);

	return 0;
}

struct wake_up_src_func_map src_func_table[WAKE_UP_SRC_MAX] = {
	{WAKE_UP_SRC_GPIO, a3700_pm_src_gpio},
	{WAKE_UP_SRC_UART1, a3700_pm_src_uart1},
	{WAKE_UP_SRC_UART0, a3700_pm_src_uart0},
	/* FOLLOWING SRC NOT SUPPORTED YET */
	{WAKE_UP_SRC_TIMER, NULL}
};

static wake_up_src_func a3700_get_wake_up_src_func(
						  enum pm_wake_up_src_type type)
{
	uint32_t loop;

	for (loop = 0; loop < WAKE_UP_SRC_MAX; loop++) {
		if (src_func_table[loop].type == type)
			return src_func_table[loop].func;
	}
	return NULL;
}

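/*
 * Weak default: returns an empty configuration (i.e. no wake-up sources).
 * Board code may override mv_wake_up_src_config_get() to describe the
 * wake-up sources it actually uses.
 */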
#pragma weak mv_wake_up_src_config_get
struct pm_wake_up_src_config *mv_wake_up_src_config_get(void)
{
	static struct pm_wake_up_src_config wake_up_src_cfg = {};
	return &wake_up_src_cfg;
}

static void a3700_set_wake_up_source(void)
{
	struct pm_wake_up_src_config *wake_up_src;
	uint32_t loop;
	wake_up_src_func src_func = NULL;

	wake_up_src = mv_wake_up_src_config_get();
	for (loop = 0; loop < wake_up_src->wake_up_src_num; loop++) {
		src_func = a3700_get_wake_up_src_func(
			   wake_up_src->wake_up_src[loop].wake_up_src_type);
		if (src_func)
			src_func(
				&(wake_up_src->wake_up_src[loop].wake_up_data));
	}
}

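/*
 * The low power flag lives in a PMU information register that is expected to
 * retain its value across the low power state, so the wake-up path can tell
 * a resume from suspend apart from a plain power-on (see
 * a3700_pwr_domain_on_finish).
 */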
static void a3700_pm_save_lp_flag(void)
{
	/* Save the flag for entering the low power mode */
	mmio_setbits_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG,
			MVEBU_PM_LOW_POWER_STATE);
}

static void a3700_pm_clear_lp_flag(void)
{
	/* Clear the flag for entering the low power mode */
	mmio_clrbits_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG,
			MVEBU_PM_LOW_POWER_STATE);
}

static uint32_t a3700_pm_get_lp_flag(void)
{
	/* Get the flag for entering the low power mode */
	return mmio_read_32(MVEBU_PM_CPU_VDD_OFF_INFO_2_REG) &
			    MVEBU_PM_LOW_POWER_STATE;
}

/*****************************************************************************
 * A3700 handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 *****************************************************************************
 */
void a3700_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_marvell_gic_cpuif_disable();

	/* Save IRQ states */
	plat_marvell_gic_irq_save();

	/* Set wake up options */
	a3700_set_wake_up_option();

	/* Set wake up sources */
	a3700_set_wake_up_source();

	/* The SoC cannot be powered down with a pending IRQ;
	 * acknowledge all pending IRQs first.
	 */
	a3700_pm_ack_irq();

	/* Set power off options */
	a3700_set_pwr_off_option();

	/* Save the flag for entering the low power mode */
	a3700_pm_save_lp_flag();

	isb();
}

/*****************************************************************************
 * A3700 handler called when a power domain has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from.
 *****************************************************************************
 */
void a3700_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* arch specific configuration */
	marvell_psci_arch_init(0);

	/* Per-CPU interrupt initialization */
	plat_marvell_gic_pcpu_init();
	plat_marvell_gic_cpuif_enable();

	/* Restore the per-cpu IRQ state */
	if (a3700_pm_get_lp_flag())
		plat_marvell_gic_irq_pcpu_restore();
}

/*****************************************************************************
 * A3700 handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 *****************************************************************************
 */
void a3700_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	struct dec_win_config *io_dec_map;
	uint32_t dec_win_num;
	struct dram_win_map dram_wins_map;

	/* arch specific configuration */
	marvell_psci_arch_init(0);

	/* Interrupt initialization */
	plat_marvell_gic_init();

	/* Restore IRQ states */
	plat_marvell_gic_irq_restore();

	/*
	 * Initialize CCI for this cluster after resume from suspend state.
	 * No need for locks as no other CPU is active.
	 */
	plat_marvell_interconnect_init();
	/*
	 * Enable CCI coherency for the primary CPU's cluster.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_marvell_interconnect_enter_coherency();

	/* CPU address decoder windows initialization. */
	cpu_wins_init();

	/* Fetch CPU-DRAM window mapping information by reading
	 * CPU-DRAM decode windows (only the enabled ones)
	 */
	dram_win_map_build(&dram_wins_map);

	/* Get IO address decoder windows */
	if (marvell_get_io_dec_win_conf(&io_dec_map, &dec_win_num)) {
		printf("No IO address decoder windows configurations found!\n");
		return;
	}

	/* IO address decoder init */
	if (init_io_addr_dec(&dram_wins_map, io_dec_map, dec_win_num)) {
		printf("IO address decoder windows initialization failed!\n");
		return;
	}

	/* Clear low power mode flag */
	a3700_pm_clear_lp_flag();
}

/*****************************************************************************
 * This handler is called by the PSCI implementation during the
 * `SYSTEM_SUSPEND` call to get the `power_state` parameter. This allows the
 * platform to encode the appropriate State-ID field within the `power_state`
 * parameter which can be utilized in `pwr_domain_suspend()` to suspend to
 * system affinity level.
 *****************************************************************************
 */
void a3700_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	/* lower affinities use PLAT_MAX_OFF_STATE */
	for (int i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE;
}

/*****************************************************************************
 * A3700 handlers to shutdown/reboot the system
 *****************************************************************************
 */
static void __dead2 a3700_system_off(void)
{
	ERROR("%s needs to be implemented\n", __func__);
	panic();
}

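/*
 * Weak empty stub: platforms that can request a reset from the Cortex-M3
 * secure coprocessor override cm3_system_reset(); otherwise the warm reset
 * register write in a3700_system_reset() serves as the fallback.
 */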
#pragma weak cm3_system_reset
void cm3_system_reset(void)
{
}

/*****************************************************************************
 * A3700 handlers to reset the system
 *****************************************************************************
 */
static void __dead2 a3700_system_reset(void)
{
	/* Clear the mailbox magic number so that the next boot acts like a cold boot */
	mmio_write_32(PLAT_MARVELL_MAILBOX_BASE, 0x0);

	dsbsy();

	/* Flush the data cache if the mailbox shared RAM is cached */
#if PLAT_MARVELL_SHARED_RAM_CACHED
	flush_dcache_range((uintptr_t)PLAT_MARVELL_MAILBOX_BASE,
			   2 * sizeof(uint64_t));
#endif

	/* Use Cortex-M3 secure coprocessor for system reset */
	cm3_system_reset();

	/* Trigger the warm reset */
	mmio_write_32(MVEBU_WARM_RESET_REG, MVEBU_WARM_RESET_MAGIC);

	/* Shouldn't get to this point */
	panic();
}

/*****************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform layer will take care of registering the handlers with PSCI.
 *****************************************************************************
 */
const plat_psci_ops_t plat_arm_psci_pm_ops = {
	.cpu_standby = a3700_cpu_standby,
	.pwr_domain_on = a3700_pwr_domain_on,
	.pwr_domain_off = a3700_pwr_domain_off,
	.pwr_domain_suspend = a3700_pwr_domain_suspend,
	.pwr_domain_on_finish = a3700_pwr_domain_on_finish,
	.pwr_domain_suspend_finish = a3700_pwr_domain_suspend_finish,
	.get_sys_suspend_power_state = a3700_get_sys_suspend_power_state,
	.system_off = a3700_system_off,
	.system_reset = a3700_system_reset,
	.validate_power_state = a3700_validate_power_state,
	.validate_ns_entrypoint = a3700_validate_ns_entrypoint
};