/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright 2018 NXP
 */

#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
#include <generated/imx_pm_asm_defines.h>
#include <imx-regs.h>
#include <platform_config.h>
#include <kernel/cache_helpers.h>
#include <kernel/tz_ssvce_def.h>
#include <kernel/tz_proc_def.h>

#define MX7_SRC_GPR1	0x74
#define MX7_SRC_GPR2	0x78
#define MX7_SRC_GPR3	0x7c
#define MX7_SRC_GPR4	0x80
#define MX7_GPC_IMR1	0x30
#define MX7_GPC_IMR2	0x34
#define MX7_GPC_IMR3	0x38
#define MX7_GPC_IMR4	0x3c
#define DDRC_STAT	0x4
#define DDRC_PWRCTL	0x30
#define DDRC_DBG1	0x304
#define DDRC_DBGCAM	0x308
#define DDRC_PSTAT	0x3fc
#define DDRC_PCTRL_0	0x490

	.section .text.psci.cpuidle
	.align 3
/*
 * imx_pen_lock
 *
 * Two-CPU mutual exclusion based on Peterson's algorithm:
 * http://en.wikipedia.org/wiki/Peterson's_algorithm
 *
 * r6 = address of val = !turn (inverted from Peterson's algorithm)
 * on cpu 0:
 * r7 = address of flag[0] (in flag0)
 * r8 = address of flag[1] (in flag1)
 * on cpu 1:
 * r7 = address of flag[1] (in flag1)
 * r8 = address of flag[0] (in flag0)
 *
 * A C-style sketch of the lock/unlock pair follows imx_pen_unlock below.
 */
	.macro	imx_pen_lock

	mov	r8, r0
	read_mpidr r5
	and	r5, r5, #3
	add	r6, r8, #PM_INFO_MX7_VAL_OFF
	cmp	r5, #0
	addeq	r7, r8, #PM_INFO_MX7_FLAG0_OFF
	addeq	r8, r8, #PM_INFO_MX7_FLAG1_OFF
	addne	r7, r8, #PM_INFO_MX7_FLAG1_OFF
	addne	r8, r8, #PM_INFO_MX7_FLAG0_OFF

	mov	r9, #1
	str	r9, [r7]
	dsb
	str	r5, [r6]
1:
	dsb
	ldr	r9, [r8]
	cmp	r9, #1
	ldreq	r9, [r6]
	cmpeq	r9, r5
	beq	1b

	.endm

	.macro	imx_pen_unlock

	dsb
	read_mpidr r6
	and	r6, r6, #3
	cmp	r6, #0
	addeq	r7, r0, #PM_INFO_MX7_FLAG0_OFF
	addne	r7, r0, #PM_INFO_MX7_FLAG1_OFF
	mov	r9, #0
	str	r9, [r7]

	.endm
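
/*
 * Illustrative only: a rough C equivalent of the two-CPU lock above,
 * assuming pm_info carries the shared val/flag0/flag1 words referenced
 * via the PM_INFO_MX7_* offsets. This is a sketch of the logic, not
 * code used by the build.
 *
 *	void pen_lock(int cpu)
 *	{
 *		flag[cpu] = 1;
 *		dsb();
 *		val = cpu;		// inverted "turn"
 *		do {
 *			dsb();
 *		} while (flag[!cpu] == 1 && val == cpu);
 *	}
 *
 *	void pen_unlock(int cpu)
 *	{
 *		dsb();
 *		flag[cpu] = 0;
 *	}
 */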

	.macro  disable_l1_dcache

	push	{r0 - r12, lr}
	mov	r0, #DCACHE_OP_CLEAN_INV
	ldr	r1, =dcache_op_all
	blx	r1
	pop	{r0 - r12, lr}

	/* disable d-cache */
	read_sctlr r7
	bic	r7, r7, #SCTLR_C
	write_sctlr r7
	dsb
	isb

	push	{r0 - r12, lr}
	mov	r0, #DCACHE_OP_CLEAN_INV
	ldr	r1, =dcache_op_all
	blx	r1
	pop	{r0 - r12, lr}

	/* TODO: handle non-SMP kernel */
	clrex

	/* Turn off SMP bit. */
	read_actlr r8
	bic	r8, r8, #ACTLR_SMP
	write_actlr r8
	isb
	dsb

	.endm
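
/*
 * Note on the sequence above (descriptive only): the D-cache is cleaned
 * and invalidated once while still enabled, then disabled via SCTLR.C,
 * then cleaned/invalidated again. The second pass is presumably there to
 * flush any lines allocated between the first pass and the moment the
 * cache was turned off. Clearing ACTLR.SMP afterwards takes this core
 * out of the coherency domain before it enters a low power state.
 */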

	.macro	tlb_set_to_ocram

	/* save ttbr */
	read_ttbr1 r7
	str	r7, [r0, #PM_INFO_MX7_TTBR1_OFF]

	read_ttbr0 r7
	str	r7, [r0, #PM_INFO_MX7_TTBR0_OFF]

	/*
	 * To ensure no page table walks occur in DDR, we
	 * have another page table stored in IRAM that only
	 * contains entries pointing to IRAM, AIPS1 and AIPS2.
	 * We need to point the TTBRs at the IRAM page table.
	 * Do the following steps:
	 * 1. Flush the Branch Target Address Cache (BTAC)
	 * 2. Set TTBR0/TTBR1 to point to the IRAM page table.
	 * 3. Disable page table walks in TTBR0 (PD0 = 1)
	 */

	/* Disable Branch Prediction, Z bit in SCTLR. */
	read_sctlr r6
	bic	r6, r6, #SCTLR_Z
	write_sctlr r6

	/* Flush the BTAC. */
	write_bpiallis

	ldr	r6, =iram_tbl_phys_addr
	ldr	r7, [r6]

	dsb
	isb

	/* Store the IRAM table in TTBR1/TTBR0 */
	write_ttbr1 r7
	write_ttbr0 r7

	/* Read TTBCR and set PD0=1 */
	read_ttbcr r6
	orr	r6, r6, #TTBCR_PD0
	write_ttbcr r6

	dsb
	isb

	/* flush the TLB */
	write_tlbiallis
	isb

	.endm
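
/*
 * Descriptive note: iram_tbl_phys_addr is assumed to hold the physical
 * address of a first-level translation table kept in on-chip RAM, with
 * entries only for OCRAM and the AIPS1/AIPS2 peripheral buses (see the
 * comment in tlb_set_to_ocram). With both TTBRs pointing at that table
 * and TTBCR.PD0 set, translation table walks can no longer touch DDR,
 * which is required before the DDR controller is put into self-refresh.
 */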

	.macro	tlb_back_to_ddr

	/* Read TTBCR and set PD0=0 */
	read_ttbcr r6
	bic	r6, r6, #TTBCR_PD0
	write_ttbcr r6

	dsb
	isb

	/* flush the TLB */
	write_tlbiallis

	/* Enable Branch Prediction, Z bit in SCTLR. */
	read_sctlr r6
	orr	r6, r6, #SCTLR_Z
	write_sctlr r6

	/* Flush the Branch Target Address Cache (BTAC) */
	write_bpiallis

	/* restore ttbr */
	ldr	r7, [r0, #PM_INFO_MX7_TTBR1_OFF]
	write_ttbr1 r7
	ldr	r7, [r0, #PM_INFO_MX7_TTBR0_OFF]
	write_ttbr0 r7

	.endm

	/* r10 must be DDRC base address */
	.macro ddrc_enter_self_refresh

	ldr	r10, [r0, #PM_INFO_MX7_DDRC_V_OFF]

	/* disable port */
	ldr	r7, =0x0
	str	r7, [r10, #DDRC_PCTRL_0]

	/* clear PWRCTL to make sure DDR is not already in self-refresh */
	ldr	r7, =0x0
	str	r7, [r10, #DDRC_PWRCTL]

	/* wait for the read/write port busy flags to clear */
	ldr	r6, =(0x1 << 16)
	orr	r6, r6, #0x1
2:
	ldr	r7, [r10, #DDRC_PSTAT]
	ands	r7, r7, r6
	bne	2b

	/* stop issuing new DDR transactions (DBG1) */
	ldr	r7, =0x1
	str	r7, [r10, #DDRC_DBG1]

	/* wait until the DDRC pipelines drain (DBGCAM) */
	ldr	r6, =0x36000000
11:
	ldr	r7, [r10, #DDRC_DBGCAM]
	and	r7, r7, r6
	cmp	r7, r6
	bne	11b

	/* enter self-refresh bit 5 */
	ldr	r7, =(0x1 << 5)
	str	r7, [r10, #DDRC_PWRCTL]

	/* wait until self-refresh mode entered */
3:
	ldr	r7, [r10, #DDRC_STAT]
	and	r7, r7, #0x3
	cmp	r7, #0x3
	bne	3b
4:
	ldr	r7, [r10, #DDRC_STAT]
	ands	r7, r7, #0x20
	beq	4b

	/* disable dram clk */
	ldr	r7, [r10, #DDRC_PWRCTL]
	orr	r7, r7, #(1 << 3)
	str	r7, [r10, #DDRC_PWRCTL]

	/*
	 * TO1.1 adds a DDR pads power-down feature. TO1.0 has no such
	 * function, but programming the IOMUXC GPR registers on TO1.0
	 * is harmless, and doing it unconditionally avoids a silicon
	 * revision check in the idle thread.
	 */
	ldr	r10, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]
	ldr	r7, =0xf0000
	str	r7, [r10]

	/* delay 20us, measured by gpio */
	ldr	r7, =20
12:
	subs	r7, r7, #0x1
	bne	12b

	.endm

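/*
 * Summary of the self-refresh handshake above (descriptive only): block
 * the AXI port (PCTRL_0), drain the controller, request software
 * self-refresh via PWRCTL[5], wait for STAT to report self-refresh,
 * gate the DRAM clock, and finally assert the DDR pad power-down bits
 * through IOMUXC GPR. The exit macro below undoes these steps in
 * roughly reverse order.
 *
 * In ddrc_exit_self_refresh, ccm_exit_idle and anatop_exit_idle, r5
 * selects which base addresses to use: r5 == 1 means the macro runs
 * from the wakeup path with the MMU off, so physical addresses
 * (PM_INFO_MX7_*_P_OFF) are used; r5 == 0 uses the virtual addresses.
 */
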
	/* r10 must be DDRC base address */
	.macro ddrc_exit_self_refresh

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7_IOMUXC_GPR_P_OFF]
	ldrne	r10, [r0, #PM_INFO_MX7_IOMUXC_GPR_V_OFF]

	ldr	r7, =0x0
	str	r7, [r10]

	ldr	r7, =20
13:
	subs	r7, r7, #0x1
	bne	13b

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7_DDRC_P_OFF]
	ldrne	r10, [r0, #PM_INFO_MX7_DDRC_V_OFF]

	ldr	r7, =0x0
	str	r7, [r10, #DDRC_DBG1]

	ldr	r6, =0x30000000
14:
	ldr	r7, [r10, #DDRC_DBGCAM]
	and	r7, r7, r6
	cmp	r7, r6
	bne	14b

	/* let DDR out of self-refresh */
	ldr	r7, =0x0
	str	r7, [r10, #DDRC_PWRCTL]

	/* wait until self-refresh mode exited */
5:
	ldr	r7, [r10, #DDRC_STAT]
	and	r7, r7, #0x3
	cmp	r7, #0x3
	beq	5b

	/* enable auto self-refresh */
	ldr	r7, [r10, #DDRC_PWRCTL]
	orr	r7, r7, #(1 << 0)
	str	r7, [r10, #DDRC_PWRCTL]

	ldr	r7, =0x1
	str	r7, [r10, #DDRC_PCTRL_0]

	.endm

	.macro	pll_do_wait_lock
6:
	ldr	r7, [r10, r8]
	ands	r7, #0x80000000
	beq	6b

	.endm
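
/*
 * Usage note (descriptive only): callers load the anatop base into r10
 * and the offset of the PLL control register into r8, then invoke
 * pll_do_wait_lock to spin until the register's bit 31 (the lock flag)
 * reads back as set. See ccm_exit_idle below.
 */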

	.macro ccm_enter_idle

	ldr	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]

	/* ungate pfd1 332m for lower axi */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc8]

	ldr	r10, [r0, #PM_INFO_MX7_CCM_V_OFF]

	/* switch ARM CLK to OSC */
	ldr	r8, =0x8000
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x7000000
	str	r7, [r10, r8]

	/* lower AXI clk from 24MHz to 3MHz */
	ldr	r8, =0x8800
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x7
	str	r7, [r10, r8]

	/* lower AHB clk from 24MHz to 3MHz */
	ldr	r8, =0x9000
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x7
	str	r7, [r10, r8]

	/* gate dram clk */
	ldr	r8, =0x9880
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x10000000
	str	r7, [r10, r8]

	ldr	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]

	/* gate pfd1 332m */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc4]

	/* gate system pll pfd div 1 */
	ldr	r7, =0x10
	str	r7, [r10, #0xb4]
	/* power down ARM, 480 and DRAM PLL */
	ldr	r7, =0x1000
	str	r7, [r10, #0x64]
	str	r7, [r10, #0xb4]
	ldr	r7, =0x100000
	str	r7, [r10, #0x74]

	.endm

	.macro ccm_exit_idle

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
	ldrne	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]

	/* power up ARM, 480 and DRAM PLL */
	ldr	r7, =0x1000
	str	r7, [r10, #0x68]
	ldr	r8, =0x60
	pll_do_wait_lock

	ldr	r7, =0x1000
	str	r7, [r10, #0xb8]
	ldr	r8, =0xb0
	pll_do_wait_lock

	ldr	r7, =0x100000
	str	r7, [r10, #0x78]
	ldr	r8, =0x70
	pll_do_wait_lock

	/* ungate pfd1 332m for lower axi */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc8]

	/* ungate system pll pfd div 1 */
	ldr	r7, =0x10
	str	r7, [r10, #0xb8]

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7_CCM_P_OFF]
	ldrne	r10, [r0, #PM_INFO_MX7_CCM_V_OFF]

	/* switch ARM CLK to PLL */
	ldr	r8, =0x8000
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x1000000
	str	r7, [r10, r8]

	/* restore AXI clk from 3MHz to 24MHz */
	ldr	r8, =0x8800
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x7
	str	r7, [r10, r8]

	/* restore AHB clk from 3MHz to 24MHz */
	ldr	r8, =0x9000
	ldr	r7, [r10, r8]
	bic	r7, r7, #0x7
	str	r7, [r10, r8]

	/* ungate dram clk */
	ldr	r8, =0x9880
	ldr	r7, [r10, r8]
	orr	r7, r7, #0x10000000
	str	r7, [r10, r8]

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
	ldrne	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]

	/* gate pfd1 332m for lower axi */
	ldr	r7, =0x8000
	str	r7, [r10, #0xc4]

	.endm

	.macro anatop_enter_idle

	ldr	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]

	/* XTAL to RC-OSC switch */
	ldr	r7, [r10]
	orr	r7, r7, #0x1000
	str	r7, [r10]
	/* power down XTAL */
	ldr	r7, [r10]
	orr	r7, r7, #0x1
	str	r7, [r10]

	/* enable weak 1P0A */
	ldr	r7, [r10, #0x200]
	orr	r7, r7, #0x40000
	str	r7, [r10, #0x200]

	/* disable LDO 1P0A */
	ldr	r7, [r10, #0x200]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x200]

	/* disable LDO 1P0D */
	ldr	r7, [r10, #0x210]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x210]

	/* disable LDO 1P2 */
	ldr	r7, [r10, #0x220]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x220]

	/* switch to low power bandgap */
	ldr	r7, [r10, #0x270]
	orr	r7, r7, #0x400
	str	r7, [r10, #0x270]
	/* power down normal bandgap */
	orr	r7, r7, #0x1
	str	r7, [r10, #0x270]

	.endm

	.macro anatop_exit_idle

	cmp	r5, #0x1
	ldreq	r10, [r0, #PM_INFO_MX7_ANATOP_P_OFF]
	ldrne	r10, [r0, #PM_INFO_MX7_ANATOP_V_OFF]

	/* power on normal bandgap */
	ldr	r7, [r10, #0x270]
	bic	r7, r7, #0x1
	str	r7, [r10, #0x270]
	/* switch to normal bandgap */
	bic	r7, r7, #0x400
	str	r7, [r10, #0x270]

	/* enable LDO 1P2 */
	ldr	r7, [r10, #0x220]
	orr	r7, r7, #0x1
	str	r7, [r10, #0x220]
7:
	ldr	r7, [r10, #0x220]
	ands	r7, #0x20000
	beq	7b

	/* enable LDO 1P0D */
	ldr	r7, [r10, #0x210]
	orr	r7, r7, #0x1
	str	r7, [r10, #0x210]
8:
	ldr	r7, [r10, #0x210]
	ands	r7, #0x20000
	beq	8b

	/* enable LDO 1P0A */
	ldr	r7, [r10, #0x200]
	orr	r7, r7, #0x1
	str	r7, [r10, #0x200]
9:
	ldr	r7, [r10, #0x200]
	ands	r7, #0x20000
	beq	9b
	/* disable weak 1P0A */
	ldr	r7, [r10, #0x200]
	bic	r7, r7, #0x40000
	str	r7, [r10, #0x200]

	/* power up XTAL and wait */
	ldr	r7, [r10]
	bic	r7, r7, #0x1
	str	r7, [r10]
10:
	ldr	r7, [r10]
	ands	r7, r7, #0x4
	beq	10b
	/* RC-OSC to XTAL switch */
	ldr	r7, [r10]
	bic	r7, r7, #0x1000
	str	r7, [r10]

	.endm

.extern iram_tbl_phys_addr

FUNC imx7d_low_power_idle, :
	push	{r0 - r12}

	/* get necessary info from pm_info */
	ldr	r1, [r0, #PM_INFO_MX7_PBASE_OFF]
	ldr	r2, [r0, #PM_INFO_MX7_SIZE_OFF]

	/*
	 * Compute the physical address of the "wakeup" entry point
	 * within the IRAM copy of this code so it can be programmed
	 * into the SRC GPR registers below.
	 */
	ldr	r5, =imx7d_low_power_idle
	ldr	r6, =wakeup
	sub	r6, r6, r5
	add	r8, r1, r2
	add	r3, r8, r6

	/* r11 is cpu id */
	read_mpidr r11
	and	r11, r11, #3
	cmp	r11, #0x0
	ldreq	r6, =MX7_SRC_GPR1
	ldreq	r7, =MX7_SRC_GPR2
	ldrne	r6, =MX7_SRC_GPR3
	ldrne	r7, =MX7_SRC_GPR4
	/* store physical resume addr and pm_info address. */
	ldr	r10, [r0, #PM_INFO_MX7_SRC_V_OFF]
	str	r3, [r10, r6]
	str	r1, [r10, r7]

	disable_l1_dcache

	tlb_set_to_ocram

	/* check last to sleep */
	ldr	r6, [r0, #PM_INFO_MX7_NUM_ONLINE_CPUS_OFF]
	ldr	r7, [r0, #PM_INFO_MX7_NUM_LPI_CPUS_OFF]
	cmp	r6, r7
	bne	lpi_enter_done

	ddrc_enter_self_refresh
	ccm_enter_idle
	anatop_enter_idle

	/*
	 * Disable the GIC distributor: write 0 to GICD_CTLR, assumed to
	 * live at offset 0x1000 from the mapped GIC base address.
	 */
	ldr	r10, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
	ldr	r7, =0x0
	ldr	r8, =0x1000
	str	r7, [r10, r8]

	ldr	r10, [r0, #PM_INFO_MX7_GPC_V_OFF]
	ldr	r4, [r10, #MX7_GPC_IMR1]
	ldr	r5, [r10, #MX7_GPC_IMR2]
	ldr	r6, [r10, #MX7_GPC_IMR3]
	ldr	r7, [r10, #MX7_GPC_IMR4]

	ldr	r8, =0xffffffff
	str	r8, [r10, #MX7_GPC_IMR1]
	str	r8, [r10, #MX7_GPC_IMR2]
	str	r8, [r10, #MX7_GPC_IMR3]
	str	r8, [r10, #MX7_GPC_IMR4]

	/*
	 * Enable the RBC bypass counter here to hold off the
	 * interrupts. RBC counter = 8 (240us). With this setting, the
	 * latency from wakeup interrupt to ARM power up is ~250us.
	 */
	ldr	r8, [r10, #0x14]
	bic	r8, r8, #(0x3f << 24)
	orr	r8, r8, #(0x8 << 24)
	str	r8, [r10, #0x14]

	/* enable the counter. */
	ldr	r8, [r10, #0x14]
	orr	r8, r8, #(0x1 << 30)
	str	r8, [r10, #0x14]

	/* unmask all the GPC interrupts. */
	str	r4, [r10, #MX7_GPC_IMR1]
	str	r5, [r10, #MX7_GPC_IMR2]
	str	r6, [r10, #MX7_GPC_IMR3]
	str	r7, [r10, #MX7_GPC_IMR4]

	/*
	 * Now delay for a short while (30usec). ARM runs at 24MHz at
	 * this point, so a short loop should be enough. This delay is
	 * required to ensure that the RBC counter can start counting
	 * in case an interrupt is already pending or in case an
	 * interrupt arrives just as ARM is about to assert DSM_request.
	 */
	ldr	r4, =5
rbc_loop:
	subs	r4, r4, #0x1
	bne	rbc_loop

lpi_enter_done:
	imx_pen_unlock

	wfi
	isb

	imx_pen_lock

	/* check first to wake */
	ldr	r6, [r0, #PM_INFO_MX7_NUM_ONLINE_CPUS_OFF]
	ldr	r7, [r0, #PM_INFO_MX7_NUM_LPI_CPUS_OFF]
	cmp	r6, r7
	bne	skip_lpi_flow

	ldr	r5, =0x0
	anatop_exit_idle
	ccm_exit_idle
	ddrc_exit_self_refresh

	/* re-enable the GIC distributor (GICD_CTLR) */
	ldr	r10, [r0, #PM_INFO_MX7_GIC_DIST_V_OFF]
	ldr	r7, =0x3
	ldr	r8, =0x1000
	str	r7, [r10, r8]
skip_lpi_flow:

	tlb_back_to_ddr

	/* TODO: handle non-SMP kernel */
	/* Turn on SMP bit. */
	read_actlr r7
	orr	r7, r7, #ACTLR_SMP
	write_actlr r7

	isb

	/* enable d-cache */
	read_sctlr r7
	orr	r7, r7, #SCTLR_C
	write_sctlr r7
	dsb
	isb

	/* Restore registers */
	pop	{r0 - r12}
	bx	lr

wakeup:
	/* invalidate L1 I-cache first */
	write_iciallu
	write_bpiall
	/* set SCTLR to enable only the I-cache and branch prediction */
	mov	r1, #(SCTLR_I | SCTLR_Z)
	write_sctlr r1
	isb

	/* switch to monitor mode */
	cps	#CPSR_MODE_MON

	imx_pen_lock
	/* check first to wake */
	ldr	r6, [r0, #PM_INFO_MX7_NUM_ONLINE_CPUS_OFF]
	ldr	r7, [r0, #PM_INFO_MX7_NUM_LPI_CPUS_OFF]
	cmp	r6, r7
	bne	wakeup_skip_lpi_flow

	ldr	r5, =0x1
	anatop_exit_idle
	ccm_exit_idle
	ddrc_exit_self_refresh
wakeup_skip_lpi_flow:

	/* get physical resume address from pm_info. */
	ldr	lr, [r0, #PM_INFO_MX7_RESUME_ADDR_OFF]

	/* jump to the saved resume address */
	bx	lr
END_FUNC imx7d_low_power_idle

/*
 * Note: OP-TEE VA = PA for TEE_RAM. This may change in the future.
 */
FUNC v7_cpu_resume, :
	mov     r0, #0	@ ; write the cache size selection register to be
	write_csselr r0	@ ; sure we address the data cache
	isb		@ ; isb to sync the change to the cachesizeid reg
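
	/*
	 * The loop below invalidates the L1 data cache by set/way before
	 * it is enabled. It assumes a fixed cache geometry described by
	 * LINE_FIELD_OFFSET/LINE_FIELD_OVERFLOW and WAY_FIELD_OFFSET,
	 * which are expected to come from the included platform headers;
	 * the way counter simply wraps to zero when all ways are done.
	 */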

_inv_dcache_off:
	mov     r0, #0	@ ; set way number to 0
_inv_nextway:
	mov     r1, #0	@ ; set line number (=index) to 0
_inv_nextline:
	orr     r2, r0, r1	@ ; construct way/index value
	write_dcisw r2 @ ; invalidate data or unified cache line by set/way
	add     r1, r1, #1 << LINE_FIELD_OFFSET	@ ; increment the index
	cmp     r1, #1 << LINE_FIELD_OVERFLOW	@ ; overflow out of set field
	bne     _inv_nextline
	add     r0, r0, #1 << WAY_FIELD_OFFSET	@ ; increment the way number
	cmp     r0, #0				@ ; overflow out of way field
	bne     _inv_nextway

	dsb					@ ; synchronise

	/*
	 * There is no stack here; only r0-r3 may be used as scratch
	 * registers.
	 * TODO: use a dedicated early-init hook rather than
	 * plat_cpu_reset_early(), since the plat_xxx interfaces may
	 * change in the future and cannot be relied on. The stack
	 * pointer also needs to be handled carefully.
	 */
	blx plat_cpu_reset_early

	b	sm_pm_cpu_resume
END_FUNC v7_cpu_resume
