// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

/* for now: just dummy functions to satisfy the linker */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/global_data.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Flush range from all levels of d-cache/unified-cache.
 * Affects the range [start, start + size - 1].
 */
__weak void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * Default implementation:
 * do a range flush for the entire range
 */
__weak void flush_dcache_all(void)
{
	flush_cache(0, ~0);
}

/*
 * Default implementation of enable_caches()
 * Real implementation should be in platform code
 */
__weak void enable_caches(void)
{
	puts("WARNING: Caches not enabled\n");
}

__weak void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}

__weak void flush_dcache_range(unsigned long start, unsigned long stop)
{
	/* An empty stub, real implementation should be in platform code */
}

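/*
 * Check that both the start and stop addresses of a cache operation are
 * aligned to CONFIG_SYS_CACHELINE_SIZE. Returns 1 if the range is properly
 * aligned, 0 otherwise; a warning is printed (outside SPL) for misaligned
 * ranges.
 */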
int check_cache_range(unsigned long start, unsigned long stop)
{
	int ok = 1;

	if (start & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (stop & (CONFIG_SYS_CACHELINE_SIZE - 1))
		ok = 0;

	if (!ok) {
		warn_non_spl("CACHE: Misaligned operation at range [%08lx, %08lx]\n",
			     start, stop);
	}

	return ok;
}

#ifdef CONFIG_SYS_NONCACHED_MEMORY
/*
 * Reserve a region of address space below the malloc() area that will be
 * mapped uncached. Its size is CONFIG_SYS_NONCACHED_MEMORY, rounded up to a
 * whole number of MMU sections.
 */
static unsigned long noncached_start;
static unsigned long noncached_end;
static unsigned long noncached_next;

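/*
 * Mark the reserved region as uncached in the page tables. This compiles to
 * a no-op when the D-cache is disabled at build time.
 */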
void noncached_set_region(void)
{
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
	mmu_set_region_dcache_behaviour(noncached_start,
					noncached_end - noncached_start,
					DCACHE_OFF);
#endif
}

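/*
 * Carve the non-cached region out of the address space below the malloc()
 * area, record its bounds and map it uncached.
 */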
int noncached_init(void)
{
	phys_addr_t start, end;
	size_t size;

	/* If this calculation changes, update board_f.c:reserve_noncached() */
	end = ALIGN(mem_malloc_start, MMU_SECTION_SIZE) - MMU_SECTION_SIZE;
	size = ALIGN(CONFIG_SYS_NONCACHED_MEMORY, MMU_SECTION_SIZE);
	start = end - size;

	debug("mapping memory %pa-%pa non-cached\n", &start, &end);

	noncached_start = start;
	noncached_end = end;
	noncached_next = start;

	noncached_set_region();

	return 0;
}

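/*
 * Simple bump allocator for the non-cached region: returns the address of
 * the next free chunk, aligned to "align", or 0 if the request does not fit
 * in what is left of the region. Allocations cannot be freed.
 *
 * Illustrative use (hypothetical caller, not part of this file): a driver
 * needing coherent DMA descriptors might do
 *
 *	descs = (void *)noncached_alloc(DESC_RING_SIZE, ARCH_DMA_MINALIGN);
 *	if (!descs)
 *		return -ENOMEM;
 */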
phys_addr_t noncached_alloc(size_t size, size_t align)
{
	phys_addr_t next = ALIGN(noncached_next, align);

	if (next >= noncached_end || (noncached_end - next) < size)
		return 0;

	debug("allocated %zu bytes of uncached memory @%pa\n", size, &next);
	noncached_next = next + size;

	return next;
}
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

#if CONFIG_IS_ENABLED(SYS_THUMB_BUILD)
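/*
 * Invalidate the entire L2 cache via the implementation-defined CP15
 * operation in the inline assembly below, then synchronize with an ISB.
 * Only compiled for Thumb builds.
 */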
void invalidate_l2_cache(void)
{
	unsigned int val = 0;

	asm volatile("mcr p15, 1, %0, c15, c11, 0 @ invl l2 cache"
		: : "r" (val) : "cc");
	isb();
}
#endif

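/* Generic hook for reserving MMU memory; defers to arm_reserve_mmu(). */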
int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}

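/*
 * Default implementation: reserve room for the page tables directly below
 * the relocation address and round that address down to a 64 KiB boundary.
 * Boards may override this weak function.
 */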
__weak int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
	debug("TLB table from %08lx to %08lx\n", gd->arch.tlb_addr,
	      gd->arch.tlb_addr + gd->arch.tlb_size);

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/*
	 * Record the allocated tlb_addr in case gd->arch.tlb_addr is later
	 * overwritten with a location within secure RAM.
	 */
	gd->arch.tlb_allocated = gd->arch.tlb_addr;
#endif
#endif

	return 0;
}