1 /*
2 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <platform_def.h>
8
9 #include <arch_helpers.h>
10 #include <common/debug.h>
11
12 #include <dram.h>
13 #include <plat_private.h>
14 #include <pmu.h>
15 #include <pmu_bits.h>
16 #include <pmu_regs.h>
17 #include <rk3399_def.h>
18 #include <secure.h>
19 #include <soc.h>
20 #include <suspend.h>
21
22 #define PMUGRF_OS_REG0 0x300
23 #define PMUGRF_OS_REG1 0x304
24 #define PMUGRF_OS_REG2 0x308
25 #define PMUGRF_OS_REG3 0x30c
26
27 #define CRU_SFTRST_DDR_CTRL(ch, n) ((0x1 << (8 + 16 + (ch) * 4)) | \
28 ((n) << (8 + (ch) * 4)))
29 #define CRU_SFTRST_DDR_PHY(ch, n) ((0x1 << (9 + 16 + (ch) * 4)) | \
30 ((n) << (9 + (ch) * 4)))
31
32 #define FBDIV_ENC(n) ((n) << 16)
33 #define FBDIV_DEC(n) (((n) >> 16) & 0xfff)
34 #define POSTDIV2_ENC(n) ((n) << 12)
35 #define POSTDIV2_DEC(n) (((n) >> 12) & 0x7)
36 #define POSTDIV1_ENC(n) ((n) << 8)
37 #define POSTDIV1_DEC(n) (((n) >> 8) & 0x7)
38 #define REFDIV_ENC(n) (n)
39 #define REFDIV_DEC(n) ((n) & 0x3f)
40
41 /* PMU CRU */
42 #define PMUCRU_RSTNHOLD_CON0 0x120
43 #define PMUCRU_RSTNHOLD_CON1 0x124
44
45 #define PRESET_GPIO0_HOLD(n) (((n) << 7) | WMSK_BIT(7))
46 #define PRESET_GPIO1_HOLD(n) (((n) << 8) | WMSK_BIT(8))
47
48 #define SYS_COUNTER_FREQ_IN_MHZ (SYS_COUNTER_FREQ_IN_TICKS / 1000000)
49
/* DPLL CRU_PLL_CON register values saved at suspend, replayed at resume */
__pmusramdata uint32_t dpll_data[PLL_CON_COUNT];
/* saved CRU_CLKSEL_CON6 (DDR clock selection) for restore at resume */
__pmusramdata uint32_t cru_clksel_con6;
/* bit 0 set => watchdog0 was running before suspend, re-enable on resume */
__pmusramdata uint8_t pmu_enable_watchdog0;
53
54 /*
55 * Copy @num registers from @src to @dst
56 */
sram_regcpy(uintptr_t dst,uintptr_t src,uint32_t num)57 static __pmusramfunc void sram_regcpy(uintptr_t dst, uintptr_t src,
58 uint32_t num)
59 {
60 while (num--) {
61 mmio_write_32(dst, mmio_read_32(src));
62 dst += sizeof(uint32_t);
63 src += sizeof(uint32_t);
64 }
65 }
66
67 /*
68 * Copy @num registers from @src to @dst
69 * This is intentionally a copy of the sram_regcpy function. PMUSRAM functions
70 * cannot be called from code running in DRAM.
71 */
dram_regcpy(uintptr_t dst,uintptr_t src,uint32_t num)72 static void dram_regcpy(uintptr_t dst, uintptr_t src, uint32_t num)
73 {
74 while (num--) {
75 mmio_write_32(dst, mmio_read_32(src));
76 dst += sizeof(uint32_t);
77 src += sizeof(uint32_t);
78 }
79 }
80
sram_get_timer_value(void)81 static __pmusramfunc uint32_t sram_get_timer_value(void)
82 {
83 /*
84 * Generic delay timer implementation expects the timer to be a down
85 * counter. We apply bitwise NOT operator to the tick values returned
86 * by read_cntpct_el0() to simulate the down counter.
87 */
88 return (uint32_t)(~read_cntpct_el0());
89 }
90
sram_udelay(uint32_t usec)91 static __pmusramfunc void sram_udelay(uint32_t usec)
92 {
93 uint32_t start, cnt, delta, total_ticks;
94
95 /* counter is decreasing */
96 start = sram_get_timer_value();
97 total_ticks = usec * SYS_COUNTER_FREQ_IN_MHZ;
98 do {
99 cnt = sram_get_timer_value();
100 if (cnt > start) {
101 delta = UINT32_MAX - cnt;
102 delta += start;
103 } else
104 delta = start - cnt;
105 } while (delta <= total_ticks);
106 }
107
/*
 * Re-apply the secure DDR region configuration in the SGRF. Needed on
 * resume because these registers are reset by the PD_CENTER power cycle.
 */
static __pmusramfunc void configure_sgrf(void)
{
	/*
	 * SGRF_DDR_RGN_DPLL_CLK and SGRF_DDR_RGN_RTC_CLK:
	 * IC ECO bug, need to set this register.
	 *
	 * SGRF_DDR_RGN_BYPS:
	 * After the PD_CENTER suspend/resume, the DDR region
	 * related registers in the SGRF will be reset, we
	 * need to re-initialize them.
	 */
	mmio_write_32(SGRF_BASE + SGRF_DDRRGN_CON0_16(16),
		      SGRF_DDR_RGN_DPLL_CLK |
		      SGRF_DDR_RGN_RTC_CLK |
		      SGRF_DDR_RGN_BYPS);
}
124
rkclk_ddr_reset(uint32_t channel,uint32_t ctl,uint32_t phy)125 static __pmusramfunc void rkclk_ddr_reset(uint32_t channel, uint32_t ctl,
126 uint32_t phy)
127 {
128 channel &= 0x1;
129 ctl &= 0x1;
130 phy &= 0x1;
131 mmio_write_32(CRU_BASE + CRU_SOFTRST_CON(4),
132 CRU_SFTRST_DDR_CTRL(channel, ctl) |
133 CRU_SFTRST_DDR_PHY(channel, phy));
134 }
135
/*
 * Reset channel @ch's DDR controller and PHY: assert both resets, then
 * release the PHY reset, then the controller reset, waiting 10us after
 * each step. The release order is deliberate.
 */
static __pmusramfunc void phy_pctrl_reset(uint32_t ch)
{
	rkclk_ddr_reset(ch, 1, 1);	/* hold ctl and phy in reset */
	sram_udelay(10);
	rkclk_ddr_reset(ch, 1, 0);	/* release phy reset */
	sram_udelay(10);
	rkclk_ddr_reset(ch, 0, 0);	/* release ctl reset */
	sram_udelay(10);
}
145
set_cs_training_index(uint32_t ch,uint32_t rank)146 static __pmusramfunc void set_cs_training_index(uint32_t ch, uint32_t rank)
147 {
148 uint32_t byte;
149
150 /* PHY_8/136/264/392 phy_per_cs_training_index_X 1bit offset_24 */
151 for (byte = 0; byte < 4; byte++)
152 mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 24,
153 rank << 24);
154 }
155
select_per_cs_training_index(uint32_t ch,uint32_t rank)156 static __pmusramfunc void select_per_cs_training_index(uint32_t ch,
157 uint32_t rank)
158 {
159 /* PHY_84 PHY_PER_CS_TRAINING_EN_0 1bit offset_16 */
160 if ((mmio_read_32(PHY_REG(ch, 84)) >> 16) & 1)
161 set_cs_training_index(ch, rank);
162 }
163
/*
 * Force a fixed software write-leveling value on all four byte lanes of
 * channel @ch, then request a controller update so the PHY latches it.
 */
static __pmusramfunc void override_write_leveling_value(uint32_t ch)
{
	uint32_t byte;

	for (byte = 0; byte < 4; byte++) {
		/*
		 * PHY_8/136/264/392
		 * phy_per_cs_training_multicast_en_X 1bit offset_16
		 */
		mmio_clrsetbits_32(PHY_REG(ch, 8 + (128 * byte)), 0x1 << 16,
				   1 << 16);
		/*
		 * PHY_63/191/319/447 upper halfword set to 0x200 —
		 * presumably the write-leveling delay override; confirm
		 * against the PHY databook.
		 */
		mmio_clrsetbits_32(PHY_REG(ch, 63 + (128 * byte)),
				   0xffffu << 16,
				   0x200 << 16);
	}

	/* CTL_200 ctrlupd_req 1bit offset_8 */
	mmio_clrsetbits_32(CTL_REG(ch, 200), 0x1 << 8, 0x1 << 8);
}
183
/*
 * Run the PI/PHY hardware training steps selected by @training_flag on
 * channel @ch. PI_FULL_TRAINING is expanded here into the set of steps
 * the DRAM type supports. Each step is requested per rank through the
 * PI registers and polled via PI_174 (interrupt status) and the PHY
 * observation registers; any reported error aborts with -1, success
 * returns 0.
 */
static __pmusramfunc int data_training(uint32_t ch,
		struct rk3399_sdram_params *sdram_params,
		uint32_t training_flag)
{
	uint32_t obs_0, obs_1, obs_2, obs_3, obs_err = 0;
	uint32_t rank = sdram_params->ch[ch].rank;
	uint32_t rank_mask;
	uint32_t i, tmp;

	/* LPDDR4 trains both channels of each rank (bits 0/2 or 0-3) */
	if (sdram_params->dramtype == LPDDR4)
		rank_mask = (rank == 1) ? 0x5 : 0xf;
	else
		rank_mask = (rank == 1) ? 0x1 : 0x3;

	/* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
	mmio_setbits_32(PHY_REG(ch, 927), (1 << 22));

	/* expand "full training" into the steps this DRAM type supports */
	if (training_flag == PI_FULL_TRAINING) {
		if (sdram_params->dramtype == LPDDR4) {
			training_flag = PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING |
					PI_READ_LEVELING |
					PI_WDQ_LEVELING;
		} else if (sdram_params->dramtype == LPDDR3) {
			training_flag = PI_CA_TRAINING | PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING;
		} else if (sdram_params->dramtype == DDR3) {
			training_flag = PI_WRITE_LEVELING |
					PI_READ_GATE_TRAINING |
					PI_READ_LEVELING;
		}
	}

	/* ca training(LPDDR4,LPDDR3 support) */
	if ((training_flag & PI_CA_TRAINING) == PI_CA_TRAINING) {
		for (i = 0; i < 4; i++) {
			if (!(rank_mask & (1 << i)))
				continue;

			select_per_cs_training_index(ch, i);
			/* PI_100 PI_CALVL_EN:RW:8:2 */
			mmio_clrsetbits_32(PI_REG(ch, 100), 0x3 << 8, 0x2 << 8);

			/* PI_92 PI_CALVL_REQ:WR:16:1,PI_CALVL_CS:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 92),
					   (0x1 << 16) | (0x3 << 24),
					   (0x1 << 16) | (i << 24));
			/* poll until the step completes or reports an error */
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check status obs
				 * PHY_532/660/788 phy_adr_calvl_obs1_:0:32
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 532));
				obs_1 = mmio_read_32(PHY_REG(ch, 660));
				obs_2 = mmio_read_32(PHY_REG(ch, 788));
				if (((obs_0 >> 30) & 0x3) ||
				    ((obs_1 >> 30) & 0x3) ||
				    ((obs_2 >> 30) & 0x3))
					obs_err = 1;
				if ((((tmp >> 11) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 5) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 5) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 100), 0x3 << 8);
	}

	/* write leveling(LPDDR4,LPDDR3,DDR3 support) */
	if ((training_flag & PI_WRITE_LEVELING) == PI_WRITE_LEVELING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_60 PI_WRLVL_EN:RW:8:2 */
			mmio_clrsetbits_32(PI_REG(ch, 60), 0x3 << 8, 0x2 << 8);
			/* PI_59 PI_WRLVL_REQ:WR:8:1,PI_WRLVL_CS:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 59),
					   (0x1 << 8) | (0x3 << 16),
					   (0x1 << 8) | (i << 16));

			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check status obs, if error maybe can not
				 * get leveling done PHY_40/168/296/424
				 * phy_wrlvl_status_obs_X:0:13
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 40));
				obs_1 = mmio_read_32(PHY_REG(ch, 168));
				obs_2 = mmio_read_32(PHY_REG(ch, 296));
				obs_3 = mmio_read_32(PHY_REG(ch, 424));
				if (((obs_0 >> 12) & 0x1) ||
				    ((obs_1 >> 12) & 0x1) ||
				    ((obs_2 >> 12) & 0x1) ||
				    ((obs_3 >> 12) & 0x1))
					obs_err = 1;
				if ((((tmp >> 10) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 4) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 4) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}

			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		override_write_leveling_value(ch);
		mmio_clrbits_32(PI_REG(ch, 60), 0x3 << 8);
	}

	/* read gate training(LPDDR4,LPDDR3,DDR3 support) */
	if ((training_flag & PI_READ_GATE_TRAINING) == PI_READ_GATE_TRAINING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_80 PI_RDLVL_GATE_EN:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 24,
					   0x2 << 24);
			/*
			 * PI_74 PI_RDLVL_GATE_REQ:WR:16:1
			 * PI_RDLVL_CS:RW:24:2
			 */
			mmio_clrsetbits_32(PI_REG(ch, 74),
					   (0x1 << 16) | (0x3 << 24),
					   (0x1 << 16) | (i << 24));

			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * check status obs
				 * PHY_43/171/299/427
				 * PHY_GTLVL_STATUS_OBS_x:16:8
				 */
				obs_0 = mmio_read_32(PHY_REG(ch, 43));
				obs_1 = mmio_read_32(PHY_REG(ch, 171));
				obs_2 = mmio_read_32(PHY_REG(ch, 299));
				obs_3 = mmio_read_32(PHY_REG(ch, 427));
				if (((obs_0 >> (16 + 6)) & 0x3) ||
				    ((obs_1 >> (16 + 6)) & 0x3) ||
				    ((obs_2 >> (16 + 6)) & 0x3) ||
				    ((obs_3 >> (16 + 6)) & 0x3))
					obs_err = 1;
				if ((((tmp >> 9) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 3) & 0x1) == 0x0) &&
				    (obs_err == 0))
					break;
				else if ((((tmp >> 3) & 0x1) == 0x1) ||
					 (obs_err == 1))
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 24);
	}

	/* read leveling(LPDDR4,LPDDR3,DDR3 support) */
	if ((training_flag & PI_READ_LEVELING) == PI_READ_LEVELING) {
		for (i = 0; i < rank; i++) {
			select_per_cs_training_index(ch, i);
			/* PI_80 PI_RDLVL_EN:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 80), 0x3 << 16,
					   0x2 << 16);
			/* PI_74 PI_RDLVL_REQ:WR:8:1,PI_RDLVL_CS:RW:24:2 */
			mmio_clrsetbits_32(PI_REG(ch, 74),
					   (0x1 << 8) | (0x3 << 24),
					   (0x1 << 8) | (i << 24));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;

				/*
				 * make sure status obs not report error bit
				 * PHY_46/174/302/430
				 * phy_rdlvl_status_obs_X:16:8
				 */
				if ((((tmp >> 8) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 2) & 0x1) == 0x0))
					break;
				else if (((tmp >> 2) & 0x1) == 0x1)
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 80), 0x3 << 16);
	}

	/* wdq leveling(LPDDR4 support) */
	if ((training_flag & PI_WDQ_LEVELING) == PI_WDQ_LEVELING) {
		for (i = 0; i < 4; i++) {
			if (!(rank_mask & (1 << i)))
				continue;

			select_per_cs_training_index(ch, i);
			/*
			 * disable PI_WDQLVL_VREF_EN before wdq leveling?
			 * PI_181 PI_WDQLVL_VREF_EN:RW:8:1
			 */
			mmio_clrbits_32(PI_REG(ch, 181), 0x1 << 8);
			/* PI_124 PI_WDQLVL_EN:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 124), 0x3 << 16,
					   0x2 << 16);
			/* PI_121 PI_WDQLVL_REQ:WR:8:1,PI_WDQLVL_CS:RW:16:2 */
			mmio_clrsetbits_32(PI_REG(ch, 121),
					   (0x1 << 8) | (0x3 << 16),
					   (0x1 << 8) | (i << 16));
			while (1) {
				/* PI_174 PI_INT_STATUS:RD:8:18 */
				tmp = mmio_read_32(PI_REG(ch, 174)) >> 8;
				if ((((tmp >> 12) & 0x1) == 0x1) &&
				    (((tmp >> 13) & 0x1) == 0x1) &&
				    (((tmp >> 6) & 0x1) == 0x0))
					break;
				else if (((tmp >> 6) & 0x1) == 0x1)
					return -1;
			}
			/* clear interrupt,PI_175 PI_INT_ACK:WR:0:17 */
			mmio_write_32(PI_REG(ch, 175), 0x00003f7c);
		}
		mmio_clrbits_32(PI_REG(ch, 124), 0x3 << 16);
	}

	/* PHY_927 PHY_PAD_DQS_DRIVE RPULL offset_22 */
	mmio_clrbits_32(PHY_REG(ch, 927), (1 << 22));

	return 0;
}
428
set_ddrconfig(struct rk3399_sdram_params * sdram_params,unsigned char channel,uint32_t ddrconfig)429 static __pmusramfunc void set_ddrconfig(
430 struct rk3399_sdram_params *sdram_params,
431 unsigned char channel, uint32_t ddrconfig)
432 {
433 /* only need to set ddrconfig */
434 struct rk3399_sdram_channel *ch = &sdram_params->ch[channel];
435 unsigned int cs0_cap = 0;
436 unsigned int cs1_cap = 0;
437
438 cs0_cap = (1 << (ch->cs0_row + ch->col + ch->bk + ch->bw - 20));
439 if (ch->rank > 1)
440 cs1_cap = cs0_cap >> (ch->cs0_row - ch->cs1_row);
441 if (ch->row_3_4) {
442 cs0_cap = cs0_cap * 3 / 4;
443 cs1_cap = cs1_cap * 3 / 4;
444 }
445
446 mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICECONF,
447 ddrconfig | (ddrconfig << 6));
448 mmio_write_32(MSCH_BASE(channel) + MSCH_DEVICESIZE,
449 ((cs0_cap / 32) & 0xff) | (((cs1_cap / 32) & 0xff) << 8));
450 }
451
/*
 * Restore the NoC/MSCH timing registers for every populated channel
 * (col == 0 marks an absent channel), set the channel stride, and
 * re-arm the reboot-hold and global-reset configuration.
 */
static __pmusramfunc void dram_all_config(
		struct rk3399_sdram_params *sdram_params)
{
	unsigned int i;

	for (i = 0; i < 2; i++) {
		struct rk3399_sdram_channel *info = &sdram_params->ch[i];
		struct rk3399_msch_timings *noc = &info->noc_timings;

		/* col == 0 means this channel is not populated */
		if (sdram_params->ch[i].col == 0)
			continue;

		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGA0,
			      noc->ddrtiminga0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGB0,
			      noc->ddrtimingb0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRTIMINGC0,
			      noc->ddrtimingc0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DEVTODEV0,
			      noc->devtodev0.d32);
		mmio_write_32(MSCH_BASE(i) + MSCH_DDRMODE, noc->ddrmode.d32);

		/* rank 1 memory clock disable (dfi_dram_clk_disable = 1) */
		if (sdram_params->ch[i].rank == 1)
			mmio_setbits_32(CTL_REG(i, 276), 1 << 17);
	}

	DDR_STRIDE(sdram_params->stride);

	/* reboot hold register set */
	mmio_write_32(PMUCRU_BASE + CRU_PMU_RSTHOLD_CON(1),
		      CRU_PMU_SGRF_RST_RLS |
		      PRESET_GPIO0_HOLD(1) |
		      PRESET_GPIO1_HOLD(1));
	mmio_clrsetbits_32(CRU_BASE + CRU_GLB_RST_CON, 0x3, 0x3);
}
488
/*
 * Replay the register images saved by dmc_suspend() into channel @ch:
 * program the CTL, PI and a few PHY registers, start the PI and the
 * controller, wait for the PHY PLL/lock indications (skipped when the
 * PLL is bypassed), then restore the remaining PHY register blocks.
 */
static __pmusramfunc void pctl_cfg(uint32_t ch,
		struct rk3399_sdram_params *sdram_params)
{
	const uint32_t *params_ctl = sdram_params->pctl_regs.denali_ctl;
	const uint32_t *params_pi = sdram_params->pi_regs.denali_pi;
	const struct rk3399_ddr_publ_regs *phy_regs = &sdram_params->phy_regs;
	uint32_t tmp, tmp1, tmp2, i;

	/*
	 * Workaround controller bug:
	 * Do not program DRAM_CLASS until NO_PHY_IND_TRAIN_INT is programmed
	 */
	sram_regcpy(CTL_REG(ch, 1), (uintptr_t)&params_ctl[1],
		    CTL_REG_NUM - 1);
	mmio_write_32(CTL_REG(ch, 0), params_ctl[0]);
	sram_regcpy(PI_REG(ch, 0), (uintptr_t)&params_pi[0],
		    PI_REG_NUM);

	/* PHY_910..912 restored early, before the controllers start */
	sram_regcpy(PHY_REG(ch, 910), (uintptr_t)&phy_regs->phy896[910 - 896],
		    3);

	mmio_clrsetbits_32(CTL_REG(ch, 68), PWRUP_SREFRESH_EXIT,
			   PWRUP_SREFRESH_EXIT);

	/* PHY_DLL_RST_EN */
	mmio_clrsetbits_32(PHY_REG(ch, 957), 0x3 << 24, 1 << 24);
	/* ensure the writes above land before starting the controllers */
	dmbst();

	mmio_setbits_32(PI_REG(ch, 0), START);
	mmio_setbits_32(CTL_REG(ch, 0), START);

	/* wait lock */
	while (1) {
		tmp = mmio_read_32(PHY_REG(ch, 920));
		tmp1 = mmio_read_32(PHY_REG(ch, 921));
		tmp2 = mmio_read_32(PHY_REG(ch, 922));
		if ((((tmp >> 16) & 0x1) == 0x1) &&
		    (((tmp1 >> 16) & 0x1) == 0x1) &&
		    (((tmp1 >> 0) & 0x1) == 0x1) &&
		    (((tmp2 >> 0) & 0x1) == 0x1))
			break;
		/* if PLL bypass,don't need wait lock */
		if (mmio_read_32(PHY_REG(ch, 911)) & 0x1)
			break;
	}

	/* restore the remaining PHY blocks after the lock */
	sram_regcpy(PHY_REG(ch, 896), (uintptr_t)&phy_regs->phy896[0], 63);

	/* the saved phy0 image is replayed into all four byte-lane slices */
	for (i = 0; i < 4; i++)
		sram_regcpy(PHY_REG(ch, 128 * i),
			    (uintptr_t)&phy_regs->phy0[0], 91);

	for (i = 0; i < 3; i++)
		sram_regcpy(PHY_REG(ch, 512 + 128 * i),
			    (uintptr_t)&phy_regs->phy512[i][0], 38);
}
545
/*
 * Ask the CIC to switch the DRAM controllers to the other frequency
 * index (fn is the current CTL_111 index plus one, modulo 2), then
 * retrain each channel. Returns 0 on success, -1 if training fails.
 */
static __pmusramfunc int dram_switch_to_next_index(
		struct rk3399_sdram_params *sdram_params)
{
	uint32_t ch, ch_count;
	uint32_t fn = ((mmio_read_32(CTL_REG(0, 111)) >> 16) + 1) & 0x1;

	/* request the frequency-index change and wait for CIC acceptance */
	mmio_write_32(CIC_BASE + CIC_CTRL0,
		      (((0x3 << 4) | (1 << 2) | 1) << 16) |
		      (fn << 4) | (1 << 2) | 1);
	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 2)))
		;

	/* trigger the switch and wait for completion */
	mmio_write_32(CIC_BASE + CIC_CTRL0, 0x20002);
	while (!(mmio_read_32(CIC_BASE + CIC_STATUS0) & (1 << 0)))
		;

	ch_count = sdram_params->num_channels;

	/* LPDDR4 f2 cannot do training, all training will fail */
	for (ch = 0; ch < ch_count; ch++) {
		/*
		 * Without this disabled for LPDDR4 we end up writing 0's
		 * in place of real data in an interesting pattern.
		 */
		if (sdram_params->dramtype != LPDDR4) {
			mmio_clrsetbits_32(PHY_REG(ch, 896), (0x3 << 8) | 1,
					   fn << 8);
		}

		/* data_training failed */
		if (data_training(ch, sdram_params, PI_FULL_TRAINING))
			return -1;
	}

	return 0;
}
582
583 /*
584 * Needs to be done for both channels at once in case of a shared reset signal
585 * between channels.
586 */
pctl_start(uint32_t channel_mask,struct rk3399_sdram_params * sdram_params)587 static __pmusramfunc int pctl_start(uint32_t channel_mask,
588 struct rk3399_sdram_params *sdram_params)
589 {
590 uint32_t count;
591 uint32_t byte;
592
593 mmio_setbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
594 mmio_setbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);
595
596 /* need de-access IO retention before controller START */
597 if (channel_mask & (1 << 0))
598 mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 19));
599 if (channel_mask & (1 << 1))
600 mmio_setbits_32(PMU_BASE + PMU_PWRMODE_CON, (1 << 23));
601
602 /* PHY_DLL_RST_EN */
603 if (channel_mask & (1 << 0))
604 mmio_clrsetbits_32(PHY_REG(0, 957), 0x3 << 24,
605 0x2 << 24);
606 if (channel_mask & (1 << 1))
607 mmio_clrsetbits_32(PHY_REG(1, 957), 0x3 << 24,
608 0x2 << 24);
609
610 /* check ERROR bit */
611 if (channel_mask & (1 << 0)) {
612 count = 0;
613 while (!(mmio_read_32(CTL_REG(0, 203)) & (1 << 3))) {
614 /* CKE is low, loop 10ms */
615 if (count > 100)
616 return -1;
617
618 sram_udelay(100);
619 count++;
620 }
621
622 mmio_clrbits_32(CTL_REG(0, 68), PWRUP_SREFRESH_EXIT);
623
624 /* Restore the PHY_RX_CAL_DQS value */
625 for (byte = 0; byte < 4; byte++)
626 mmio_clrsetbits_32(PHY_REG(0, 57 + 128 * byte),
627 0xfff << 16,
628 sdram_params->rx_cal_dqs[0][byte]);
629 }
630 if (channel_mask & (1 << 1)) {
631 count = 0;
632 while (!(mmio_read_32(CTL_REG(1, 203)) & (1 << 3))) {
633 /* CKE is low, loop 10ms */
634 if (count > 100)
635 return -1;
636
637 sram_udelay(100);
638 count++;
639 }
640
641 mmio_clrbits_32(CTL_REG(1, 68), PWRUP_SREFRESH_EXIT);
642
643 /* Restore the PHY_RX_CAL_DQS value */
644 for (byte = 0; byte < 4; byte++)
645 mmio_clrsetbits_32(PHY_REG(1, 57 + 128 * byte),
646 0xfff << 16,
647 sdram_params->rx_cal_dqs[1][byte]);
648 }
649
650 return 0;
651 }
652
/*
 * Restore a PLL from the register values saved at suspend. The PLL is
 * first dropped into slow mode, the divider/fraction registers are
 * written, and CON3 is written last; then we spin until the lock bit
 * (CON2[31]) is set. Write-mask registers get REG_SOC_WMSK ORed in;
 * CON2 is written raw (no write-mask halfword).
 */
__pmusramfunc static void pmusram_restore_pll(int pll_id, uint32_t *src)
{
	/* switch to slow mode before reprogramming the PLL */
	mmio_write_32((CRU_BASE + CRU_PLL_CON(pll_id, 3)), PLL_SLOW_MODE);

	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 0), src[0] | REG_SOC_WMSK);
	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 1), src[1] | REG_SOC_WMSK);
	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 2), src[2]);
	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 4), src[4] | REG_SOC_WMSK);
	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 5), src[5] | REG_SOC_WMSK);

	/* CON3 last: restores the saved mode/work settings */
	mmio_write_32(CRU_BASE + CRU_PLL_CON(pll_id, 3), src[3] | REG_SOC_WMSK);

	/* wait for PLL lock (CON2 bit 31) */
	while ((mmio_read_32(CRU_BASE + CRU_PLL_CON(pll_id, 2)) &
		(1U << 31)) == 0x0)
		;
}
669
/*
 * Re-arm watchdog0 after resume so a hang during the rest of resume
 * still resets the board. Offsets: +0x4 is WDT_TORR (timeout range),
 * +0xc is WDT_CRR (counter restart, magic value 0x76).
 */
__pmusramfunc static void pmusram_enable_watchdog(void)
{
	/* Make the watchdog use the first global reset. */
	mmio_write_32(CRU_BASE + CRU_GLB_RST_CON, 1 << 1);

	/*
	 * This gives the system ~8 seconds before reset. The pclk for the
	 * watchdog is 4MHz on reset. The value of 0x9 in WDT_TORR means that
	 * the watchdog will wait for 0x1ffffff cycles before resetting.
	 */
	mmio_write_32(WDT0_BASE + 4, 0x9);

	/* Enable the watchdog */
	mmio_setbits_32(WDT0_BASE, 0x1);

	/* Magic reset the watchdog timer value for WDT_CRR. */
	mmio_write_32(WDT0_BASE + 0xc, 0x76);

	secure_watchdog_ungate();

	/* The watchdog is in PD_ALIVE, so deidle it. */
	mmio_clrbits_32(PMU_BASE + PMU_BUS_CLR, PMU_CLR_ALIVE);
}
693
/*
 * Save everything dmc_resume() needs while the DDR is still usable:
 * the DPLL and DDR clock-mux settings (into PMUSRAM globals), the
 * CTL/PI/PHY register images from channel 0 (replayed into both
 * channels at resume) and the per-channel, per-byte RX DQS calibration
 * values. Runs from DRAM, hence the dram_regcpy() copies.
 */
void dmc_suspend(void)
{
	struct rk3399_sdram_params *sdram_params = &sdram_config;
	struct rk3399_ddr_publ_regs *phy_regs;
	uint32_t *params_ctl;
	uint32_t *params_pi;
	uint32_t refdiv, postdiv2, postdiv1, fbdiv;
	uint32_t ch, byte, i;

	phy_regs = &sdram_params->phy_regs;
	params_ctl = sdram_params->pctl_regs.denali_ctl;
	params_pi = sdram_params->pi_regs.denali_pi;

	/* save dpll register and ddr clock register value to pmusram */
	cru_clksel_con6 = mmio_read_32(CRU_BASE + CRU_CLKSEL_CON6);
	for (i = 0; i < PLL_CON_COUNT; i++)
		dpll_data[i] = mmio_read_32(CRU_BASE + CRU_PLL_CON(DPLL_ID, i));

	fbdiv = dpll_data[0] & 0xfff;
	postdiv2 = POSTDIV2_DEC(dpll_data[1]);
	postdiv1 = POSTDIV1_DEC(dpll_data[1]);
	refdiv = REFDIV_DEC(dpll_data[1]);

	/* freq = 24MHz * fbdiv / (refdiv * postdiv1 * postdiv2) */
	sdram_params->ddr_freq = ((fbdiv * 24) /
				(refdiv * postdiv1 * postdiv2)) * MHz;

	INFO("sdram_params->ddr_freq = %d\n", sdram_params->ddr_freq);
	/* ODT is considered enabled if PHY_5[18:16] is non-zero */
	sdram_params->odt = (((mmio_read_32(PHY_REG(0, 5)) >> 16) &
			      0x7) != 0) ? 1 : 0;

	/* copy the registers CTL PI and PHY */
	dram_regcpy((uintptr_t)&params_ctl[0], CTL_REG(0, 0), CTL_REG_NUM);

	/* mask DENALI_CTL_00_DATA.START, only copy here, will trigger later */
	params_ctl[0] &= ~(0x1 << 0);

	dram_regcpy((uintptr_t)&params_pi[0], PI_REG(0, 0),
		    PI_REG_NUM);

	/* mask DENALI_PI_00_DATA.START, only copy here, will trigger later*/
	params_pi[0] &= ~(0x1 << 0);

	dram_regcpy((uintptr_t)&phy_regs->phy0[0],
		    PHY_REG(0, 0), 91);

	for (i = 0; i < 3; i++)
		dram_regcpy((uintptr_t)&phy_regs->phy512[i][0],
			    PHY_REG(0, 512 + 128 * i), 38);

	dram_regcpy((uintptr_t)&phy_regs->phy896[0], PHY_REG(0, 896), 63);

	/* save RX DQS calibration (PHY_57/185/313/441 bits [27:16]) */
	for (ch = 0; ch < sdram_params->num_channels; ch++) {
		for (byte = 0; byte < 4; byte++)
			sdram_params->rx_cal_dqs[ch][byte] = (0xfff << 16) &
				mmio_read_32(PHY_REG(ch, 57 + byte * 128));
	}

	/* set DENALI_PHY_957_DATA.PHY_DLL_RST_EN = 0x1 */
	phy_regs->phy896[957 - 896] &= ~(0x3 << 24);
	phy_regs->phy896[957 - 896] |= 1 << 24;
	/* pre-bake PHY_896: set bit 0, clear freq-select bits [9:8] */
	phy_regs->phy896[0] |= 1;
	phy_regs->phy896[0] &= ~(0x3 << 8);
}
757
phy_dll_bypass_set(uint32_t ch,uint32_t freq)758 __pmusramfunc void phy_dll_bypass_set(uint32_t ch, uint32_t freq)
759 {
760 if (freq <= (125 * 1000 * 1000)) {
761 /* Set master mode to SW for slices*/
762 mmio_setbits_32(PHY_REG(ch, 86), 3 << 10);
763 mmio_setbits_32(PHY_REG(ch, 214), 3 << 10);
764 mmio_setbits_32(PHY_REG(ch, 342), 3 << 10);
765 mmio_setbits_32(PHY_REG(ch, 470), 3 << 10);
766 /* Set master mode to SW for address slices*/
767 mmio_setbits_32(PHY_REG(ch, 547), 3 << 18);
768 mmio_setbits_32(PHY_REG(ch, 675), 3 << 18);
769 mmio_setbits_32(PHY_REG(ch, 803), 3 << 18);
770 } else {
771 /* Clear SW master mode for slices*/
772 mmio_clrbits_32(PHY_REG(ch, 86), 3 << 10);
773 mmio_clrbits_32(PHY_REG(ch, 214), 3 << 10);
774 mmio_clrbits_32(PHY_REG(ch, 342), 3 << 10);
775 mmio_clrbits_32(PHY_REG(ch, 470), 3 << 10);
776 /* Clear SW master mode for address slices*/
777 mmio_clrbits_32(PHY_REG(ch, 547), 3 << 18);
778 mmio_clrbits_32(PHY_REG(ch, 675), 3 << 18);
779 mmio_clrbits_32(PHY_REG(ch, 803), 3 << 18);
780 }
781 }
782
/*
 * Full DMC resume path, running from PMUSRAM: optionally re-arm the
 * watchdog, restore resets/timers, switch the DDR clock back from
 * abpll to the restored DPLL, then reset + reconfigure each channel's
 * controller/PHY, retrain (except LPDDR4) and reprogram the NoC.
 * Any controller-start or training failure restarts the whole sequence
 * via the retry label.
 */
__pmusramfunc void dmc_resume(void)
{
	struct rk3399_sdram_params *sdram_params = &sdram_config;
	uint32_t channel_mask = 0;
	uint32_t channel;

	/*
	 * We can't turn off the watchdog, so if we have not turned it on before
	 * we should not turn it on here.
	 */
	if ((pmu_enable_watchdog0 & 0x1) == 0x1) {
		pmusram_enable_watchdog();
	}
	pmu_sgrf_rst_hld_release();
	restore_pmu_rsthold();
	sram_secure_timer_init();

	/*
	 * we switch ddr clock to abpll when suspend,
	 * we set back to dpll here
	 */
	mmio_write_32(CRU_BASE + CRU_CLKSEL_CON6,
		      cru_clksel_con6 | REG_SOC_WMSK);
	pmusram_restore_pll(DPLL_ID, dpll_data);

	configure_sgrf();

retry:
	for (channel = 0; channel < sdram_params->num_channels; channel++) {
		phy_pctrl_reset(channel);
		/*
		 * Without this, LPDDR4 will write 0's in place of real data
		 * in a strange pattern.
		 */
		if (sdram_params->dramtype == LPDDR4) {
			phy_dll_bypass_set(channel, sdram_params->ddr_freq);
		}
		pctl_cfg(channel, sdram_params);
	}

	/* build the mask of populated channels (col == 0 means absent) */
	for (channel = 0; channel < 2; channel++) {
		if (sdram_params->ch[channel].col)
			channel_mask |= 1 << channel;
	}

	if (pctl_start(channel_mask, sdram_params) < 0)
		goto retry;

	for (channel = 0; channel < sdram_params->num_channels; channel++) {
		/* LPDDR2/LPDDR3 need to wait DAI complete, max 10us */
		if (sdram_params->dramtype == LPDDR3)
			sram_udelay(10);

		/*
		 * Training here will always fail for LPDDR4, so skip it
		 * If training fails, retry to do it again.
		 */
		if (sdram_params->dramtype != LPDDR4 &&
		    data_training(channel, sdram_params, PI_FULL_TRAINING))
			goto retry;

		set_ddrconfig(sdram_params, channel,
			      sdram_params->ch[channel].ddrconfig);
	}

	dram_all_config(sdram_params);

	/* Switch to index 1 and prepare for DDR frequency switch. */
	dram_switch_to_next_index(sdram_params);
}
853