// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>

unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

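/*
 * Standard ioremap(): map @size bytes at physical @addr as uncached,
 * guarded I/O memory.  Platforms with active I/O workarounds have the
 * mapping routed through iowa_ioremap() so accesses can be intercepted.
 */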
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);

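/*
 * Write-combining variant: uncached but not guarded, so consecutive
 * stores may be gathered by the hardware (typically used for
 * framebuffers).
 */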
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);

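/*
 * Cacheable variant, for memory the platform keeps coherent (e.g. RAM
 * shared with a coherent device).
 */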
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

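/*
 * Map with caller-supplied protection bits, passed as raw pte flags.
 * The flags are laundered through a pte so they can be sanitized below
 * before the mapping is created.
 */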
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	if (iowa_is_active())
		return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);

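/*
 * Early boot path: install the mapping one page at a time with
 * map_kernel_page(), for use before the vmalloc area backing
 * do_ioremap() is available.
 */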
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
			unsigned long size, pgprot_t prot)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);

		if (WARN_ON_ONCE(err))	/* Should clean up */
			return err;
	}

	return 0;
}

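/*
 * Common helper once the VM subsystem is up: reserve a virtual area in
 * the [IOREMAP_START, IOREMAP_END) window and map it onto @pa.
 * @offset restores any sub-page offset the caller stripped when
 * aligning the physical address.  On failure the partial mapping is
 * unmapped and the area freed.
 */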
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
			 pgprot_t prot, void *caller)
{
	struct vm_struct *area;
	int ret;
	unsigned long va;

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
	if (area == NULL)
		return NULL;

	area->phys_addr = pa;
	va = (unsigned long)area->addr;

	ret = ioremap_page_range(va, va + size, pa, prot);
	if (!ret)
		return (void __iomem *)area->addr + offset;

	vunmap_range(va, va + size);
	free_vm_area(area);

	return NULL;
}

#ifdef CONFIG_ZONE_DEVICE
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
	unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

	if (radix_enabled())
		return SUBSECTION_SIZE;
	return max(SUBSECTION_SIZE, 1UL << shift);
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif