1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) Marvell International Ltd. and its affiliates
4 */
5
6 #include "ddr3_init.h"
7 #include "mv_ddr_common.h"
8 #include "mv_ddr_training_db.h"
9 #include "mv_ddr_regs.h"
10 #include "mv_ddr_sys_env_lib.h"
11
12 #define DDR_INTERFACES_NUM 1
13 #define DDR_INTERFACE_OCTETS_NUM 5
14
15 /*
16 * 1. L2 filter should be set at binary header to 0xD000000,
17 * to avoid conflict with internal register IO.
18 * 2. U-Boot modifies internal registers base to 0xf100000,
19 * and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
20 */
21 #define L2_FILTER_FOR_MAX_MEMORY_SIZE 0xC0000000 /* temporary limit l2 filter to 3gb (LSP issue) */
22 #define ADDRESS_FILTERING_END_REGISTER 0x8c04
23
24 #define DYNAMIC_CS_SIZE_CONFIG
25 #define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
26
/* Thermal Sensor Registers */
28 #define TSEN_CONTROL_LSB_REG 0xE4070
29 #define TSEN_CONTROL_LSB_TC_TRIM_OFFSET 0
30 #define TSEN_CONTROL_LSB_TC_TRIM_MASK (0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
31 #define TSEN_CONTROL_MSB_REG 0xE4074
32 #define TSEN_CONTROL_MSB_RST_OFFSET 8
33 #define TSEN_CONTROL_MSB_RST_MASK (0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
34 #define TSEN_STATUS_REG 0xe4078
35 #define TSEN_STATUS_READOUT_VALID_OFFSET 10
36 #define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
37 TSEN_STATUS_READOUT_VALID_OFFSET)
38 #define TSEN_STATUS_TEMP_OUT_OFFSET 0
39 #define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
40
/*
 * DLB (Device Load Balancer) register initialization values.
 * Walked by callers of sys_env_dlb_config_ptr_get(); the {0x0, 0x0}
 * entry terminates the table.
 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}	/* terminator */
};
57
sys_env_dlb_config_ptr_get(void)58 static struct dlb_config *sys_env_dlb_config_ptr_get(void)
59 {
60 return &ddr3_dlb_config_table[0];
61 }
62
/* ODPG bandwidth selection per DDR frequency, indexed by enum mv_ddr_freq */
static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
	0x3,			/* MV_DDR_FREQ_100 */
	0x4,			/* MV_DDR_FREQ_400 */
	0x4,			/* MV_DDR_FREQ_533 */
	0x5,			/* MV_DDR_FREQ_667 */
	0x5,			/* MV_DDR_FREQ_800 */
	0x5,			/* MV_DDR_FREQ_933 */
	0x5,			/* MV_DDR_FREQ_1066 */
	0x3,			/* MV_DDR_FREQ_311 */
	0x3,			/* MV_DDR_FREQ_333 */
	0x4,			/* MV_DDR_FREQ_467 */
	0x5,			/* MV_DDR_FREQ_850 */
	0x5,			/* MV_DDR_FREQ_600 */
	0x3,			/* MV_DDR_FREQ_300 */
	0x5,			/* MV_DDR_FREQ_900 */
	0x3,			/* MV_DDR_FREQ_360 */
	0x5			/* MV_DDR_FREQ_1000 */
};
81
/* ODPG rate selection per DDR frequency, indexed by enum mv_ddr_freq */
static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
	0x1,			/* MV_DDR_FREQ_100 */
	0x2,			/* MV_DDR_FREQ_400 */
	0x2,			/* MV_DDR_FREQ_533 */
	0x2,			/* MV_DDR_FREQ_667 */
	0x2,			/* MV_DDR_FREQ_800 */
	0x3,			/* MV_DDR_FREQ_933 */
	0x3,			/* MV_DDR_FREQ_1066 */
	0x1,			/* MV_DDR_FREQ_311 */
	0x1,			/* MV_DDR_FREQ_333 */
	0x2,			/* MV_DDR_FREQ_467 */
	0x2,			/* MV_DDR_FREQ_850 */
	0x2,			/* MV_DDR_FREQ_600 */
	0x1,			/* MV_DDR_FREQ_300 */
	0x2,			/* MV_DDR_FREQ_900 */
	0x1,			/* MV_DDR_FREQ_360 */
	0x2			/* MV_DDR_FREQ_1000 */
};
100
/*
 * VCO frequency (MHz) per sample-at-reset code, 25 MHz reference clock.
 * Indexed by the SAR CPU/DDR clock-select field; used by
 * ddr3_tip_a38x_set_divider() to derive the clock divider.
 */
static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,			/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,			/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};
134
/*
 * VCO frequency (MHz) per sample-at-reset code, 40 MHz reference clock.
 * Indexed by the SAR CPU/DDR clock-select field; used by
 * ddr3_tip_a38x_set_divider() to derive the clock divider.
 */
static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
	666,			/* 0 */
	1332,
	800,
	800,			/* 0x3 */
	1066,
	1066,			/* 0x5 */
	1200,
	2400,
	1332,
	1332,
	1500,			/* 10 */
	1600,			/* 0xB */
	1600,
	1600,
	1700,
	1560,			/* 0xF */
	1866,
	1866,
	1800,
	2000,
	2000,			/* 20 */
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	1800			/* 30 - 0x1E */
};
168
169
/* Set to 1 when the target frequency is not SAR-derived (async clock mode) */
static u32 async_mode_at_tf;

/*
 * DQ-bit to PHY-pin swizzle table registered with the training IP
 * (one row of 8 entries per octet/subphy).
 */
static u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
};
179
/* Scrub DRAM contents (ECC initialization) via the new TIP scrub flow */
void mv_ddr_mem_scrubbing(void)
{
	ddr3_new_tip_ecc_scrub();
}
184
185 static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
186 enum mv_ddr_freq freq);
187
188 /*
189 * Read temperature TJ value
190 */
ddr3_ctrl_get_junc_temp(u8 dev_num)191 static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
192 {
193 int reg = 0;
194
195 /* Initiates TSEN hardware reset once */
196 if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
197 reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
198 /* set Tsen Tc Trim to correct default value (errata #132698) */
199 reg = reg_read(TSEN_CONTROL_LSB_REG);
200 reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
201 reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
202 reg_write(TSEN_CONTROL_LSB_REG, reg);
203 }
204 mdelay(10);
205
206 /* Check if the readout field is valid */
207 if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
208 printf("%s: TSEN not ready\n", __func__);
209 return 0;
210 }
211
212 reg = reg_read(TSEN_STATUS_REG);
213 reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
214
215 return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
216 }
217
218 /*
219 * Name: ddr3_tip_a38x_get_freq_config.
220 * Desc:
221 * Args:
222 * Notes:
223 * Returns: MV_OK if success, other error code if fail.
224 */
ddr3_tip_a38x_get_freq_config(u8 dev_num,enum mv_ddr_freq freq,struct hws_tip_freq_config_info * freq_config_info)225 static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq,
226 struct hws_tip_freq_config_info
227 *freq_config_info)
228 {
229 if (a38x_bw_per_freq[freq] == 0xff)
230 return MV_NOT_SUPPORTED;
231
232 if (freq_config_info == NULL)
233 return MV_BAD_PARAM;
234
235 freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
236 freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
237 freq_config_info->is_supported = 1;
238
239 return MV_OK;
240 }
241
/* Read a d-unit register and return only the bits selected by 'mask' */
static void dunit_read(u32 addr, u32 mask, u32 *data)
{
	u32 val = reg_read(addr);

	*data = val & mask;
}
246
/*
 * Write 'data' to a d-unit register. When 'mask' is not MASK_ALL_BITS,
 * performs a read-modify-write so only the masked bits change.
 * Fix: the read-back call had a mojibake-corrupted argument ("®_val"
 * instead of "&reg_val"), which did not compile.
 */
static void dunit_write(u32 addr, u32 mask, u32 data)
{
	u32 reg_val = data;

	if (mask != MASK_ALL_BITS) {
		/* preserve the bits outside of 'mask' */
		dunit_read(addr, MASK_ALL_BITS, &reg_val);
		reg_val &= (~mask);
		reg_val |= (data & mask);
	}

	reg_write(addr, reg_val);
}
259
260 #define ODPG_ENABLE_REG 0x186d4
261 #define ODPG_EN_OFFS 0
262 #define ODPG_EN_MASK 0x1
263 #define ODPG_EN_ENA 1
264 #define ODPG_EN_DONE 0
265 #define ODPG_DIS_OFFS 8
266 #define ODPG_DIS_MASK 0x1
267 #define ODPG_DIS_DIS 1
mv_ddr_odpg_enable(void)268 void mv_ddr_odpg_enable(void)
269 {
270 dunit_write(ODPG_ENABLE_REG,
271 ODPG_EN_MASK << ODPG_EN_OFFS,
272 ODPG_EN_ENA << ODPG_EN_OFFS);
273 }
274
mv_ddr_odpg_disable(void)275 void mv_ddr_odpg_disable(void)
276 {
277 dunit_write(ODPG_ENABLE_REG,
278 ODPG_DIS_MASK << ODPG_DIS_OFFS,
279 ODPG_DIS_DIS << ODPG_DIS_OFFS);
280 }
281
/* No explicit done-clear is needed on A38x; kept to satisfy the TIP API */
void mv_ddr_odpg_done_clr(void)
{
}
286
/*
 * Poll the ODPG enable bit until it self-clears (operation done) or
 * 'count' polls have elapsed.
 * Returns MV_OK on completion, MV_FAIL on timeout.
 */
int mv_ddr_is_odpg_done(u32 count)
{
	u32 poll, val;

	for (poll = 0; poll < count; poll++) {
		dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &val);
		if (((val >> ODPG_EN_OFFS) & ODPG_EN_MASK) == ODPG_EN_DONE)
			return MV_OK;
	}

	printf("%s: timeout\n", __func__);

	return MV_FAIL;
}
305
/*
 * Kick off the d-unit auto-training state machine.
 * NOTE(review): TRAINING_TRIGGER_* are also #defined just below this
 * function — presumably duplicates of definitions in mv_ddr_regs.h;
 * confirm there is no redefinition conflict.
 */
void mv_ddr_training_enable(void)
{
	dunit_write(GLOB_CTRL_STATUS_REG,
		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
}
312
313 #define DRAM_INIT_CTRL_STATUS_REG 0x18488
314 #define TRAINING_TRIGGER_OFFS 0
315 #define TRAINING_TRIGGER_MASK 0x1
316 #define TRAINING_TRIGGER_ENA 1
317 #define TRAINING_DONE_OFFS 1
318 #define TRAINING_DONE_MASK 0x1
319 #define TRAINING_DONE_DONE 1
320 #define TRAINING_DONE_NOT_DONE 0
321 #define TRAINING_RESULT_OFFS 2
322 #define TRAINING_RESULT_MASK 0x1
323 #define TRAINING_RESULT_PASS 0
324 #define TRAINING_RESULT_FAIL 1
/*
 * Poll the DRAM init control/status register until training completes or
 * 'count' polls have elapsed. On completion, *result holds the pass/fail
 * bit (TRAINING_RESULT_PASS or TRAINING_RESULT_FAIL).
 * Returns MV_OK when done, MV_FAIL on timeout or NULL result pointer.
 */
int mv_ddr_is_training_done(u32 count, u32 *result)
{
	u32 poll, val;

	if (!result) {
		printf("%s: NULL result pointer found\n", __func__);
		return MV_FAIL;
	}

	for (poll = 0; poll < count; poll++) {
		dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &val);
		if (((val >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
		    TRAINING_DONE_DONE) {
			*result = (val >> TRAINING_RESULT_OFFS) &
				  TRAINING_RESULT_MASK;
			return MV_OK;
		}
	}

	printf("%s: timeout\n", __func__);

	return MV_FAIL;
}
350
/* DM (data mask) pad number on the A38x PHY */
#define DM_PAD	10
u32 mv_ddr_dm_pad_get(void)
{
	return DM_PAD;
}
356
357 /*
358 * Name: ddr3_tip_a38x_select_ddr_controller.
359 * Desc: Enable/Disable access to Marvell's server.
360 * Args: dev_num - device number
361 * enable - whether to enable or disable the server
362 * Notes:
363 * Returns: MV_OK if success, other error code if fail.
364 */
ddr3_tip_a38x_select_ddr_controller(u8 dev_num,int enable)365 static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
366 {
367 u32 reg;
368
369 reg = reg_read(DUAL_DUNIT_CFG_REG);
370
371 if (enable)
372 reg |= (1 << 6);
373 else
374 reg &= ~(1 << 6);
375
376 reg_write(DUAL_DUNIT_CFG_REG, reg);
377
378 return MV_OK;
379 }
380
ddr3_tip_clock_mode(u32 frequency)381 static u8 ddr3_tip_clock_mode(u32 frequency)
382 {
383 if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400))
384 return 1;
385
386 return 2;
387 }
388
/*
 * Decode the DDR target frequency from the sample-at-reset (SAR)
 * configuration. The decode table differs for a 25 MHz vs. 40 MHz
 * reference clock. Several SAR codes are officially unsupported and are
 * mapped to the nearest supported frequency with a warning.
 * Returns MV_OK, or MV_NOT_SUPPORTED for unknown codes (*freq set to 0).
 */
static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x1:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x0:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x3:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x2:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0xd:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x4:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_600;
			break;
		case 0x11:
		case 0x14:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x8:
			*freq = MV_DDR_FREQ_667;
			break;
		case 0x15:
		case 0x1b:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0xc:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x10:
			*freq = MV_DDR_FREQ_933;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_900;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_933;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_900;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
482
/*
 * Decode the "medium" training frequency from the sample-at-reset
 * configuration. For low target frequencies, medium equals the target
 * frequency so PBS can run at that speed; for higher targets a lower
 * intermediate frequency is selected.
 * Returns MV_OK, or MV_NOT_SUPPORTED for unknown codes (*freq set to 0).
 */
static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x0:
		case 0x1:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x2:
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x4:
		case 0xd:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x8:
		case 0x10:
		case 0x11:
		case 0x14:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0xc:
		case 0x15:
		case 0x1b:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_300;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_360;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_400;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_360;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
559
/* Report the controller device ID and configured CK delay to the TIP */
static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
	info_ptr->device_id = 0x6800;	/* A38x device ID */
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}
567
568 /* check indirect access to phy register file completed */
is_prfa_done(void)569 static int is_prfa_done(void)
570 {
571 u32 reg_val;
572 u32 iter = 0;
573
574 do {
575 if (iter++ > MAX_POLLING_ITERATIONS) {
576 printf("error: %s: polling timeout\n", __func__);
577 return MV_FAIL;
578 }
579 dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, ®_val);
580 reg_val >>= PRFA_REQ_OFFS;
581 reg_val &= PRFA_REQ_MASK;
582 } while (reg_val == PRFA_REQ_ENA); /* request pending */
583
584 return MV_OK;
585 }
586
587 /* write to phy register thru indirect access */
prfa_write(enum hws_access_type phy_access,u32 phy,enum hws_ddr_phy phy_type,u32 addr,u32 data,enum hws_operation op_type)588 static int prfa_write(enum hws_access_type phy_access, u32 phy,
589 enum hws_ddr_phy phy_type, u32 addr,
590 u32 data, enum hws_operation op_type)
591 {
592 u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
593 ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
594 ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
595 ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
596 ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
597 (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
598 ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
599 dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
600 reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
601 dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
602
603 /* polling for prfa request completion */
604 if (is_prfa_done() != MV_OK)
605 return MV_FAIL;
606
607 return MV_OK;
608 }
609
610 /* read from phy register thru indirect access */
prfa_read(enum hws_access_type phy_access,u32 phy,enum hws_ddr_phy phy_type,u32 addr,u32 * data)611 static int prfa_read(enum hws_access_type phy_access, u32 phy,
612 enum hws_ddr_phy phy_type, u32 addr, u32 *data)
613 {
614 struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
615 u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
616 u32 i, reg_val;
617
618 if (phy_access == ACCESS_TYPE_MULTICAST) {
619 for (i = 0; i < max_phy; i++) {
620 VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
621 if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
622 return MV_FAIL;
623 dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, ®_val);
624 data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
625 }
626 } else {
627 if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
628 return MV_FAIL;
629 dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, ®_val);
630 *data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
631 }
632
633 return MV_OK;
634 }
635
/*
 * Register the A38x-specific callback set and device attributes with the
 * training IP (TIP), and initialize the global tuning parameters.
 */
static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;

	/* new read leveling version */
	config_func.mv_ddr_dunit_read = dunit_read;
	config_func.mv_ddr_dunit_write = dunit_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
	config_func.tip_external_read = ddr3_tip_ext_read;
	config_func.tip_external_write = ddr3_tip_ext_write;
	config_func.mv_ddr_phy_read = prfa_read;
	config_func.mv_ddr_phy_write = prfa_write;

	ddr3_tip_init_config_func(dev_num, &config_func);

	/* register the DQ-bit to PHY-pin swizzle table (used by PBS) */
	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

	/* set device attributes */
	ddr3_tip_dev_attr_init(dev_num);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);

	/* global training tuning parameters */
	ca_delay = 0;
	delay_enable = 1;
	dfs_low_freq = DFS_LOW_FREQ_VALUE;
	calibration_update_control = 1;

	/* NOTE(review): return status of the medium-freq decode is ignored */
	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}
676
/*
 * Select which training stages run, based on the target frequency and
 * ECC mode. A reduced stage set (no frequency switching) is used when
 * the target frequency is already 333/400 MHz; supplementary write
 * leveling and PBS are masked out when ECC is enabled.
 */
static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (mv_ddr_is_ecc_ena()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	return MV_OK;
}
719
/* function: mv_ddr_set_calib_controller
 * this function sets the controller which will control
 * the calibration cycle at the end of the training.
 * 1 - internal controller
 * 2 - external controller
 */
void mv_ddr_set_calib_controller(void)
{
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}
730
/*
 * Program the CPU/DDR clock divider (or async PLL) for the requested
 * frequency. In async mode the PLL is programmed directly with per-
 * frequency magic values; in sync mode the divider is derived from the
 * SAR-selected VCO frequency. The register write ordering below is a
 * hardware-mandated sequence — do not reorder.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;
	u32 freq = mv_ddr_freq_get(frequency);

	/* only interface 0 exists on A38x */
	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/*
	 * get VCO freq index
	 * NOTE(review): sar_val is assumed to index within the VCO tables —
	 * TODO confirm the SAR field width cannot exceed the table sizes.
	 */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq;
	else
		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq;

	if ((async_mode_at_tf == 1) && (freq > 400)) {
		/* Set async mode */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */
		mdelay(5);

		/* Set KNL values (per-frequency PLL configuration words) */
		switch (frequency) {
		case MV_DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case MV_DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case MV_DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
		case MV_DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case MV_DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
		case MV_DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
		case MV_DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case MV_DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case MV_DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case MV_DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* set MV_DDR_FREQ_667 as default */
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x1f);

		/* cpupll_clkdiv_reload_smooth */
		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		/* cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		/* write the divider */
		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		/* set cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), (1 << 8));

		/* unset cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), 0x0);

		/* clear cpupll_clkdiv_reload_force */
		dunit_write(0xe4260, (0xff << 8), 0x0);

		/* clear cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), 0x0);

		/* clear cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* Dunit training clock + 1:1/2:1 mode */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}
842
843 /*
844 * external read from memory
845 */
ddr3_tip_ext_read(u32 dev_num,u32 if_id,u32 reg_addr,u32 num_of_bursts,u32 * data)846 int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
847 u32 num_of_bursts, u32 *data)
848 {
849 u32 burst_num;
850
851 for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
852 data[burst_num] = readl(reg_addr + 4 * burst_num);
853
854 return MV_OK;
855 }
856
857 /*
858 * external write to memory
859 */
ddr3_tip_ext_write(u32 dev_num,u32 if_id,u32 reg_addr,u32 num_of_bursts,u32 * data)860 int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
861 u32 num_of_bursts, u32 *data) {
862 u32 burst_num;
863
864 for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
865 writel(data[burst_num], reg_addr + 4 * burst_num);
866
867 return MV_OK;
868 }
869
/*
 * First-stage DDR init: register the A38x software database with the TIP
 * and decide whether the target frequency requires async clock mode.
 */
int mv_ddr_early_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* FIXME: change this configuration per ddr type
	 * configure a380 and a390 to work with receiver odt timing
	 * the odt_config is defined:
	 * '1' in ddr4
	 * '0' in ddr3
	 * here the parameter is run over in ddr4 and ddr3 to '1' (in ddr4 the default is '1')
	 * to configure the odt to work with timing restrictions
	 */

	mv_ddr_sw_db_init(0, 0);

	/* a non-SAR target frequency needs the async PLL path */
	if (tm->interface_params[0].memory_freq != MV_DDR_FREQ_SAR)
		async_mode_at_tf = 1;

	return MV_OK;
}
890
/* Second-stage DDR init: program the training stage mask */
int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}
897
/* Pre-training fixup hook; nothing to fix up on A38x */
int mv_ddr_pre_training_fixup(void)
{
	return 0;
}
902
/* Post-training fixup hook; nothing to fix up on A38x */
int mv_ddr_post_training_fixup(void)
{
	return 0;
}
907
/* Post-algorithm hook; no extra work required on A38x */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
912
/* Post-init adjustment: clear bit 15 of SDRAM_CFG_REG in 16-bit DRAM mode */
int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}
926
mv_ddr_init_freq_get(void)927 u32 mv_ddr_init_freq_get(void)
928 {
929 enum mv_ddr_freq freq;
930
931 mv_ddr_sar_freq_get(0, &freq);
932
933 return freq;
934 }
935
/*
 * Return the active SDRAM bus width in bits (16 or 32), decoded from
 * bit 15 of SDRAM_CFG_REG.
 * NOTE(review): assumes BUS_IN_USE_OFFS == 15 so the masked bit lands in
 * bit 0 after the shift — confirm against mv_ddr_regs.h.
 */
static u32 ddr3_get_bus_width(void)
{
	u32 bus_width;

	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
		BUS_IN_USE_OFFS;

	return (bus_width == 0) ? 16 : 32;
}
945
/*
 * Return the DRAM device width in bits (8 or 16) for chip-select 'cs',
 * decoded from the CS structure field of SDRAM_ADDR_CTRL_REG.
 */
static u32 ddr3_get_device_width(u32 cs)
{
	u32 device_width;

	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
		CS_STRUCT_OFFS(cs);

	return (device_width == 0) ? 8 : 16;
}
956
/*
 * Translate the per-CS size field of SDRAM_ADDR_CTRL_REG (2 low bits plus
 * 1 high bit) into a DRAM device density in megabits.
 * Returns 0 for a reserved encoding (which zeroes the memory size later
 * in ddr3_calc_mem_cs_size).
 */
static u32 ddr3_get_device_size(u32 cs)
{
	u32 ctrl, lo_bits, hi_bit, size_code;

	ctrl = reg_read(SDRAM_ADDR_CTRL_REG);
	lo_bits = (ctrl >> CS_SIZE_OFFS(cs)) & 0x3;
	hi_bit = (ctrl >> CS_SIZE_HIGH_OFFS(cs)) & 0x1;
	size_code = (hi_bit << 2) | lo_bits;

	switch (size_code) {
	case 2:
		return 512;
	case 3:
		return 1024;
	case 0:
		return 2048;
	case 4:
		return 4096;
	case 5:
		return 8192;
	case 1:
	default:
		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
		/* zeroes mem size in ddr3_calc_mem_cs_size */
		return 0;
	}
}
989
/*
 * Compute the memory size (in MiB) behind chip-select 'cs':
 * (bus width / device width) * device density / 8, scaled by the
 * controller bus-width multiplier. Returns MV_BAD_VALUE when the result
 * falls outside the supported 128 MiB .. 4096 MiB range.
 */
int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
{
	u32 cs_mem_size;

	/* Calculate in MiB */
	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		       ddr3_get_device_size(cs)) / 8;

	/*
	 * Multiply by the controller bus width, 2x for 64 bit
	 * (SoC controller may be 32 or 64 bit,
	 * so bit 15 in 0x1400, that means if whole bus used or only half,
	 * have a different meaning)
	 */
	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	*cs_size = cs_mem_size;

	return MV_OK;
}
1015
/*
 * Open a fast-path (Mbus bypass) window per enabled chip-select, sized
 * from the detected CS memory size, and program the L2 filter to the
 * total memory size. cs_ena is a bitmask of enabled chip-selects.
 */
static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size_mb = 0;
	uint64_t cs_mem_size = 0;
	uint64_t mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* get CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size_mb) != MV_OK)
				return MV_FAIL;
			cs_mem_size = cs_mem_size_mb * _1M;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if number of address pins doesn't allow to use max
			 * mem size that is defined in topology
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16bit mem device can be twice more - no need
				 * in less significant pin
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to %x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/* Open fast path Window */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G the sum may
			 * be more than 32 bit word,
			 * so to estimate the result divide mem_total_size and
			 * cs_mem_size by 0x10000 (it is equal to >> 16)
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;

			/* if the sum is less than 2 G - calculate the value */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else /* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}
1100
/*
 * Restore the XBAR window configuration saved before training (see
 * ddr3_save_and_set_training_windows) and switch to fast-path windows
 * sized from the detected memory. 'win' holds the saved window control
 * registers; 'ddr_type' is used for the log message only.
 */
static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	/* fixed 0.5 GiB window for the first enabled chip-select */
	u32 reg, cs;
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path Window to - 0.5G */
	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
#endif

	return MV_OK;
}
1135
/*
 * Save the current XBAR window configuration and open temporary XBAR
 * windows covering all enabled chip-selects so the training sequence can
 * access the whole DRAM. The saved values are restored afterwards by
 * ddr3_restore_and_set_final_windows().
 *
 * @win: caller-provided array that receives 16 saved control-register values
 *
 * Return: MV_OK always
 */
static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;	/* stride between consecutive window register sets */
	num_of_win_regs = 16;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	/* Chip-select bitmask comes from the board topology map */
	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8} - Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;	/* counts enabled CSs; selects which window set to program */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/*
			 * Per-CS window attribute value — presumably the MBus
			 * target attribute selecting this chip-select; TODO
			 * confirm against the XBAR address-decode spec.
			 */
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			reg |= (1 << 0);	/* window enable bit */
			reg |= (SDRAM_CS_SIZE & 0xffff0000);	/* window size field */

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			/* Base address: consecutive CS windows stacked by CS size */
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			/* Only windows up to 7 have remap registers */
			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}
1206
1207 static u32 win[16];
1208
mv_ddr_pre_training_soc_config(const char * ddr_type)1209 int mv_ddr_pre_training_soc_config(const char *ddr_type)
1210 {
1211 u32 soc_num;
1212 u32 reg_val;
1213
1214 /* Switching CPU to MRVL ID */
1215 soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
1216 SAR1_CPU_CORE_OFFSET;
1217 switch (soc_num) {
1218 case 0x3:
1219 reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
1220 reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
1221 /* fallthrough */
1222 case 0x1:
1223 reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
1224 /* fallthrough */
1225 case 0x0:
1226 reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
1227 /* fallthrough */
1228 default:
1229 break;
1230 }
1231
1232 /*
1233 * Set DRAM Reset Mask in case detected GPIO indication of wakeup from
1234 * suspend i.e the DRAM values will not be overwritten / reset when
1235 * waking from suspend
1236 */
1237 if (mv_ddr_sys_env_suspend_wakeup_check() ==
1238 SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
1239 reg_bit_set(SDRAM_INIT_CTRL_REG,
1240 DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
1241 }
1242
1243 /* Check if DRAM is already initialized */
1244 if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
1245 (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
1246 printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
1247 return MV_OK;
1248 }
1249
1250 /* Fix read ready phases for all SOC in reg 0x15c8 */
1251 reg_val = reg_read(TRAINING_DBG_3_REG);
1252
1253 reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
1254 reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0)); /* phase 0 */
1255
1256 reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
1257 reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1)); /* phase 1 */
1258
1259 reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
1260 reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3)); /* phase 3 */
1261
1262 reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
1263 reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4)); /* phase 4 */
1264
1265 reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
1266 reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5)); /* phase 5 */
1267
1268 reg_write(TRAINING_DBG_3_REG, reg_val);
1269
1270 /*
1271 * Axi_bresp_mode[8] = Compliant,
1272 * Axi_addr_decode_cntrl[11] = Internal,
1273 * Axi_data_bus_width[0] = 128bit
1274 * */
1275 /* 0x14a8 - AXI Control Register */
1276 reg_write(AXI_CTRL_REG, 0);
1277
1278 /*
1279 * Stage 2 - Training Values Setup
1280 */
1281 /* Set X-BAR windows for the training sequence */
1282 ddr3_save_and_set_training_windows(win);
1283
1284 return MV_OK;
1285 }
1286
ddr3_new_tip_dlb_config(void)1287 static int ddr3_new_tip_dlb_config(void)
1288 {
1289 u32 reg, i = 0;
1290 struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
1291
1292 /* Write the configuration */
1293 while (config_table_ptr[i].reg_addr != 0) {
1294 reg_write(config_table_ptr[i].reg_addr,
1295 config_table_ptr[i].reg_data);
1296 i++;
1297 }
1298
1299
1300 /* Enable DLB */
1301 reg = reg_read(DLB_CTRL_REG);
1302 reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
1303 ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
1304 ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
1305 ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
1306 ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1307
1308 reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
1309 (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
1310 (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
1311 (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
1312 (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
1313
1314 reg_write(DLB_CTRL_REG, reg);
1315
1316 return MV_OK;
1317 }
1318
mv_ddr_post_training_soc_config(const char * ddr_type)1319 int mv_ddr_post_training_soc_config(const char *ddr_type)
1320 {
1321 u32 reg_val;
1322
1323 /* Restore and set windows */
1324 ddr3_restore_and_set_final_windows(win, ddr_type);
1325
1326 /* Update DRAM init indication in bootROM register */
1327 reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
1328 reg_write(REG_BOOTROM_ROUTINE_ADDR,
1329 reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
1330
1331 /* DLB config */
1332 ddr3_new_tip_dlb_config();
1333
1334 return MV_OK;
1335 }
1336
mv_ddr_mc_config(void)1337 void mv_ddr_mc_config(void)
1338 {
1339 /* Memory controller initializations */
1340 struct init_cntr_param init_param;
1341 int status;
1342
1343 init_param.do_mrs_phy = 1;
1344 init_param.is_ctrl64_bit = 0;
1345 init_param.init_phy = 1;
1346 init_param.msys_init = 1;
1347 status = hws_ddr3_tip_init_controller(0, &init_param);
1348 if (status != MV_OK)
1349 printf("DDR3 init controller - FAILED 0x%x\n", status);
1350
1351 status = mv_ddr_mc_init();
1352 if (status != MV_OK)
1353 printf("DDR3 init_sequence - FAILED 0x%x\n", status);
1354 }
/* function: mv_ddr_mc_init
 * this function enables the dunit after init controller configuration
 *
 * Return: MV_OK on success; on failure CHECK_STATUS returns the error
 * status from within this function.
 */
int mv_ddr_mc_init(void)
{
	/* Kick off the dunit init sequence on interface 0 */
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}
1364
/* function: ddr3_tip_configure_phy
 * configures phy and electrical parameters
 *
 * Programs, via multicast bus writes to all interfaces/octets:
 *  - ZRI calibration (P/N drive strength) for data and control pads,
 *  - ODT calibration (P/N termination) for data and control pads,
 *  - pre-emphasis disable and CMOS config registers,
 * then sets per-PHY Vref/clamp values and, for positive-edge PHYs,
 * an extra data-PHY register.
 *
 * @dev_num: device number passed through to the TIP bus access functions
 *
 * Return: MV_OK on success; CHECK_STATUS returns the first error status.
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/*
	 * ZRI calibration: pack 7-bit P and N drive-strength values into one
	 * register (P in bits [13:7], N in bits [6:0]) — data pads first,
	 * then control pads.
	 */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	/*
	 * ODT calibration: 6-bit P and N termination values packed the same
	 * way (P in bits [11:6], N in bits [5:0]).
	 */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	/* Clear pre-emphasis disable and CMOS config registers */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < octets_per_if_num;
		     phy_id++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
			/* Vref & clamp: clamp value in bits [6:4], Vref in [2:0] */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CFG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref_init_val),
				      ((0x7 << 4) | 0x7)));
			/* clamp not relevant for control */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	/*
	 * NOTE(review): register 0x90 / value 0x6002 are undocumented here —
	 * presumably a PHY edge-related setting; confirm against the PHY spec.
	 */
	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
	    MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, 0x90, 0x6002));


	return MV_OK;
}
1441
1442
/*
 * Stub: manual calibration is not required on this platform; always
 * reports success (0).
 */
int mv_ddr_manual_cal_do(void)
{
	return 0;
}
1447