1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright 2017 NXP
4 */
5
6 #include <arm.h>
7 #include <arm32.h>
8 #include <console.h>
9 #include <io.h>
10 #include <imx.h>
11 #include <imx_pm.h>
12 #include <kernel/panic.h>
13 #include <kernel/cache_helpers.h>
14 #include <mm/core_mmu.h>
15 #include <mm/core_memprot.h>
16 #include <mmdc.h>
17 #include <platform_config.h>
18 #include <sm/pm.h>
19 #include <sm/psci.h>
20 #include <sm/sm.h>
21 #include <string.h>
22
/* PA of the OCRAM-resident MMU table; -1UL until pm_imx7_iram_tbl_init() runs */
paddr_t iram_tbl_phys_addr = -1UL;
/* VA alias of the same table, from the MEM_AREA_TEE_COHERENT mapping */
void *iram_tbl_virt_addr;

/* Table sentinel: sample the register's live value at suspend-init time */
#define READ_DATA_FROM_HARDWARE 0
27
/*
 * DDRC registers saved for DDR3: { register offset, value }.
 * READ_DATA_FROM_HARDWARE (0) means imx7_suspend_init() snapshots the
 * live register so it can be restored on resume.
 */
static uint32_t imx7d_ddrc_ddr3_setting[][2] = {
	{ 0x0, READ_DATA_FROM_HARDWARE },
	{ 0x1a0, READ_DATA_FROM_HARDWARE },
	{ 0x1a4, READ_DATA_FROM_HARDWARE },
	{ 0x1a8, READ_DATA_FROM_HARDWARE },
	{ 0x64, READ_DATA_FROM_HARDWARE },
	{ 0x490, READ_DATA_FROM_HARDWARE },
	{ 0xd0, READ_DATA_FROM_HARDWARE },
	{ 0xd4, READ_DATA_FROM_HARDWARE },
	{ 0xdc, READ_DATA_FROM_HARDWARE },
	{ 0xe0, READ_DATA_FROM_HARDWARE },
	{ 0xe4, READ_DATA_FROM_HARDWARE },
	{ 0xf4, READ_DATA_FROM_HARDWARE },
	{ 0x100, READ_DATA_FROM_HARDWARE },
	{ 0x104, READ_DATA_FROM_HARDWARE },
	{ 0x108, READ_DATA_FROM_HARDWARE },
	{ 0x10c, READ_DATA_FROM_HARDWARE },
	{ 0x110, READ_DATA_FROM_HARDWARE },
	{ 0x114, READ_DATA_FROM_HARDWARE },
	{ 0x120, READ_DATA_FROM_HARDWARE },
	{ 0x180, READ_DATA_FROM_HARDWARE },
	{ 0x190, READ_DATA_FROM_HARDWARE },
	{ 0x194, READ_DATA_FROM_HARDWARE },
	{ 0x200, READ_DATA_FROM_HARDWARE },
	{ 0x204, READ_DATA_FROM_HARDWARE },
	{ 0x214, READ_DATA_FROM_HARDWARE },
	{ 0x218, READ_DATA_FROM_HARDWARE },
	{ 0x240, READ_DATA_FROM_HARDWARE },
	{ 0x244, READ_DATA_FROM_HARDWARE },
};
58
/*
 * DDRC PHY registers saved for DDR3: { register offset, value }.
 * Most entries are snapshotted from hardware; the trailing 0x50/0xc0
 * entries are explicit literal values written in sequence on resume
 * (presumably a PHY re-training/reset sequence — confirm against the
 * i.MX7D reference manual before changing order or values).
 */
static uint32_t imx7d_ddrc_phy_ddr3_setting[][2] = {
	{ 0x0, READ_DATA_FROM_HARDWARE },
	{ 0x4, READ_DATA_FROM_HARDWARE },
	{ 0x10, READ_DATA_FROM_HARDWARE },
	{ 0xb0, READ_DATA_FROM_HARDWARE },
	{ 0x9c, READ_DATA_FROM_HARDWARE },
	{ 0x7c, READ_DATA_FROM_HARDWARE },
	{ 0x80, READ_DATA_FROM_HARDWARE },
	{ 0x84, READ_DATA_FROM_HARDWARE },
	{ 0x88, READ_DATA_FROM_HARDWARE },
	{ 0x6c, READ_DATA_FROM_HARDWARE },
	{ 0x20, READ_DATA_FROM_HARDWARE },
	{ 0x30, READ_DATA_FROM_HARDWARE },
	{ 0x50, 0x01000010 },
	{ 0x50, 0x00000010 },
	{ 0xc0, 0x0e407304 },
	{ 0xc0, 0x0e447304 },
	{ 0xc0, 0x0e447306 },
	{ 0xc0, 0x0e447304 },
	{ 0xc0, 0x0e407306 },
};
80
/* DDR3 suspend descriptor consumed by imx7_suspend_init(): the two
 * register tables above plus their entry counts. */
static struct imx7_pm_data imx7d_pm_data_ddr3 = {
	.ddrc_num = ARRAY_SIZE(imx7d_ddrc_ddr3_setting),
	.ddrc_offset = imx7d_ddrc_ddr3_setting,
	.ddrc_phy_num = ARRAY_SIZE(imx7d_ddrc_phy_ddr3_setting),
	.ddrc_phy_offset = imx7d_ddrc_phy_ddr3_setting,
};
87
/*
 * Physical bases of the AIPS1/2/3 peripheral buses mapped into the OCRAM
 * MMU table by pm_imx7_iram_tbl_init().
 * NOTE(review): generic name with external linkage at file scope —
 * presumably referenced from other plat-imx files; confirm before making
 * it static/const.
 */
paddr_t phys_addr[] = {
	AIPS1_BASE, AIPS2_BASE, AIPS3_BASE
};
91
pm_imx7_iram_tbl_init(void)92 int pm_imx7_iram_tbl_init(void)
93 {
94 uint32_t i;
95 struct tee_mmap_region map;
96
97 /* iram mmu translation table already initialized */
98 if (iram_tbl_phys_addr != (-1UL))
99 return 0;
100
101 iram_tbl_phys_addr = TRUSTZONE_OCRAM_START + 16 * 1024;
102 iram_tbl_virt_addr = phys_to_virt(iram_tbl_phys_addr,
103 MEM_AREA_TEE_COHERENT,
104 16 * 1024);
105
106 /* 16KB */
107 memset(iram_tbl_virt_addr, 0, 16 * 1024);
108
109 for (i = 0; i < ARRAY_SIZE(phys_addr); i++) {
110 map.pa = phys_addr[i];
111 map.va = (vaddr_t)phys_to_virt(phys_addr[i], MEM_AREA_IO_SEC,
112 AIPS1_SIZE);
113 map.region_size = CORE_MMU_PGDIR_SIZE;
114 map.size = AIPS1_SIZE; /* 4M for AIPS1/2/3 */
115 map.type = MEM_AREA_IO_SEC;
116 map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
117 TEE_MATTR_SECURE |
118 (TEE_MATTR_CACHE_NONCACHE << TEE_MATTR_CACHE_SHIFT);
119 map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr);
120 }
121
122 /* Note IRAM_S_BASE is not 1M aligned, so take care */
123 map.pa = ROUNDDOWN(IRAM_S_BASE, CORE_MMU_PGDIR_SIZE);
124 map.va = (vaddr_t)phys_to_virt(map.pa, MEM_AREA_TEE_COHERENT,
125 CORE_MMU_PGDIR_SIZE);
126 map.region_size = CORE_MMU_PGDIR_SIZE;
127 map.size = CORE_MMU_PGDIR_SIZE;
128 map.type = MEM_AREA_TEE_COHERENT;
129 map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRWX | TEE_MATTR_SECURE;
130 map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr);
131
132 map.pa = GIC_BASE;
133 map.va = (vaddr_t)phys_to_virt((paddr_t)GIC_BASE, MEM_AREA_IO_SEC, 1);
134 map.region_size = CORE_MMU_PGDIR_SIZE;
135 map.size = CORE_MMU_PGDIR_SIZE;
136 map.type = MEM_AREA_TEE_COHERENT;
137 map.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_SECURE;
138 map_memarea_sections(&map, (uint32_t *)iram_tbl_virt_addr);
139
140 return 0;
141 }
142
/*
 * Populate the OCRAM-resident suspend state for i.MX7.
 *
 * Writes a struct imx7_pm_info at TRUSTZONE_OCRAM_START +
 * SUSPEND_OCRAM_OFFSET with the PA/VA of every controller the low-power
 * code touches, snapshots the DDRC and DDRC PHY registers so they can be
 * restored on resume, copies the imx7_suspend() routine immediately after
 * the info block, and cleans/invalidates caches so the OCRAM copy is
 * coherent with what the resume path will execute.
 *
 * Returns 0 unconditionally; panics on an unsupported DDR type.
 */
int imx7_suspend_init(void)
{
	uint32_t i;
	uint32_t (*ddrc_offset_array)[2];
	uint32_t (*ddrc_phy_offset_array)[2];
	/* VA of the pm-info block in secure OCRAM (32-bit ARM: VA fits) */
	uint32_t suspend_ocram_base =
		core_mmu_get_va(TRUSTZONE_OCRAM_START + SUSPEND_OCRAM_OFFSET,
				MEM_AREA_TEE_COHERENT,
				sizeof(struct imx7_pm_info));
	struct imx7_pm_info *p = (struct imx7_pm_info *)suspend_ocram_base;
	struct imx7_pm_data *pm_data;

	/* NOTE(review): return value ignored — failure would leave the
	 * OCRAM MMU table unbuilt; confirm callers tolerate this. */
	pm_imx7_iram_tbl_init();

	dcache_op_level1(DCACHE_OP_CLEAN_INV);

	p->pa_base = TRUSTZONE_OCRAM_START + SUSPEND_OCRAM_OFFSET;
	p->tee_resume = virt_to_phys((void *)(vaddr_t)ca7_cpu_resume);
	p->pm_info_size = sizeof(*p);
	/* PA/VA pairs for every block the OCRAM suspend code accesses */
	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC, 1);
	p->ccm_pa_base = CCM_BASE;
	p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC, 1);
	p->ddrc_pa_base = DDRC_BASE;
	p->ddrc_phy_va_base = core_mmu_get_va(DDRC_PHY_BASE, MEM_AREA_IO_SEC,
					      1);
	p->ddrc_phy_pa_base = DDRC_PHY_BASE;
	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC, 1);
	p->src_pa_base = SRC_BASE;
	p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE,
						MEM_AREA_IO_SEC, 1);
	p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE;
	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC, 1);
	p->gpc_pa_base = GPC_BASE;
	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC, 1);
	p->anatop_pa_base = ANATOP_BASE;
	p->snvs_va_base = core_mmu_get_va(SNVS_BASE, MEM_AREA_IO_SEC, 1);
	p->snvs_pa_base = SNVS_BASE;
	p->lpsr_va_base = core_mmu_get_va(LPSR_BASE, MEM_AREA_IO_SEC, 1);
	p->lpsr_pa_base = LPSR_BASE;
	p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC, 1);
	p->gic_pa_base = GIC_BASE;

	/* TODO:lpsr disabled now */
	io_write32(p->lpsr_va_base, 0);

	/* Pick the register tables matching the fitted DDR technology */
	p->ddr_type = imx_get_ddr_type();
	switch (p->ddr_type) {
	case IMX_DDR_TYPE_DDR3:
		pm_data = &imx7d_pm_data_ddr3;
		break;
	default:
		panic("Not supported ddr type\n");
		break;
	}

	p->ddrc_num = pm_data->ddrc_num;
	p->ddrc_phy_num = pm_data->ddrc_phy_num;
	ddrc_offset_array = pm_data->ddrc_offset;
	ddrc_phy_offset_array = pm_data->ddrc_phy_offset;

	/* Snapshot DDRC registers (or keep the table's literal value) */
	for (i = 0; i < p->ddrc_num; i++) {
		p->ddrc_val[i][0] = ddrc_offset_array[i][0];
		if (ddrc_offset_array[i][1] == READ_DATA_FROM_HARDWARE)
			p->ddrc_val[i][1] = io_read32(p->ddrc_va_base +
						      ddrc_offset_array[i][0]);
		else
			p->ddrc_val[i][1] = ddrc_offset_array[i][1];

		/* NOTE(review): presumably forces INIT0 skip-DRAM-init bits
		 * for resume — confirm against i.MX7D DDRC reference */
		if (p->ddrc_val[i][0] == 0xd0)
			p->ddrc_val[i][1] |= 0xc0000000;
	}

	/* initialize DDRC PHY settings */
	for (i = 0; i < p->ddrc_phy_num; i++) {
		p->ddrc_phy_val[i][0] = ddrc_phy_offset_array[i][0];
		if (ddrc_phy_offset_array[i][1] == READ_DATA_FROM_HARDWARE)
			p->ddrc_phy_val[i][1] =
				io_read32(p->ddrc_phy_va_base +
					  ddrc_phy_offset_array[i][0]);
		else
			p->ddrc_phy_val[i][1] = ddrc_phy_offset_array[i][1];
	}

	/* Copy the suspend routine right behind the pm-info block */
	memcpy((void *)(suspend_ocram_base + sizeof(*p)),
	       (void *)(vaddr_t)imx7_suspend, SUSPEND_OCRAM_SIZE - sizeof(*p));

	/* Make the OCRAM copy visible to the resume path */
	dcache_clean_range((void *)suspend_ocram_base, SUSPEND_OCRAM_SIZE);

	/*
	 * Note that IRAM IOSEC map, if changed to MEM map,
	 * need to flush cache
	 */
	icache_inv_all();

	return 0;
}
239