/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#define PAGE_SHIFT	(12)
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#ifdef CONFIG_64BIT
#define HUGE_MAX_HSTATE		2
#else
#define HUGE_MAX_HSTATE		1
#endif
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
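
/*
 * Illustrative arithmetic only (PMD_SHIFT comes from asm/pgtable*.h): with
 * 4 KiB base pages on a 64-bit kernel, PMD_SHIFT is 21, so HPAGE_SIZE is
 * 2 MiB and HUGETLB_PAGE_ORDER is 9, i.e. 512 base pages per huge page.
 * The second 64-bit hstate accounts for the PUD-level (1 GiB) huge page.
 */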

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)

#define KERN_VIRT_SIZE (-PAGE_OFFSET)
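
/*
 * Resulting layout, roughly (illustrative only; exact addresses depend on
 * CONFIG_PAGE_OFFSET and the paging mode):
 *
 *	[PAGE_OFFSET, kernel_map.virt_addr)	linear mapping of all RAM
 *	[kernel_map.virt_addr, ...)		kernel image mapping (64-bit)
 *
 * On 32-bit the kernel image lives inside the linear mapping itself, which
 * is why is_linear_mapping() below only applies an upper bound on 64-bit.
 */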

#ifndef __ASSEMBLY__

#define clear_page(pgaddr)			memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from)			memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })
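
/*
 * Illustrative only (not part of this header): because each table entry is
 * wrapped in a struct, the compiler rejects accidental mixing of levels and
 * every conversion has to go through the accessors above, e.g.
 *
 *	pte_t pte = __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(PAGE_KERNEL));
 *	unsigned long raw = pte_val(pte);	// explicit unwrap
 *	pgd_t pgd = pte;			// error: incompatible types
 */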

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif

#ifdef CONFIG_MMU
extern unsigned long riscv_pfn_base;
#define ARCH_PFN_OFFSET		(riscv_pfn_base)
#else
#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */

struct kernel_mapping {
	unsigned long virt_addr;
	uintptr_t phys_addr;
	uintptr_t size;
	/* Offset between linear mapping virtual address and kernel load address */
	unsigned long va_pa_offset;
	/* Offset between kernel mapping virtual address and kernel load address */
	unsigned long va_kernel_pa_offset;
	unsigned long va_kernel_xip_pa_offset;
#ifdef CONFIG_XIP_KERNEL
	uintptr_t xiprom;
	uintptr_t xiprom_sz;
#endif
};

extern struct kernel_mapping kernel_map;
extern phys_addr_t phys_ram_base;
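
/*
 * Rough sketch of what the two offsets mean (illustrative only): for a
 * physical address P of RAM,
 *
 *	linear mapping VA = P + kernel_map.va_pa_offset
 *	kernel mapping VA = P + kernel_map.va_kernel_pa_offset
 *
 * which is exactly what the pa_to_va/va_to_pa helpers below implement for
 * the non-XIP case.
 */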

#define is_kernel_mapping(x)	\
	((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))

#define is_linear_mapping(x)	\
	((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))

#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y)	({						\
	unsigned long _y = y;								\
	(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ?			\
		(void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) :	\
		(void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET);	\
	})
#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - kernel_map.va_pa_offset)
#define kernel_mapping_va_to_pa(y) ({						\
	unsigned long _y = y;							\
	(IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ?	\
		((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) :		\
		((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET);	\
	})

#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	is_linear_mapping(_x) ?							\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})
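
/*
 * Illustrative only: __va_to_pa_nodebug() dispatches on the region the
 * virtual address falls in, so for a non-XIP kernel a text symbol such as
 * _start translates through kernel_mapping_va_to_pa() back to
 * kernel_map.phys_addr, while an address returned by kmalloc() goes through
 * linear_mapping_va_to_pa().
 */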

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))

#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page)	(page_to_phys(page))
#define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))

#define sym_to_pfn(x)           __phys_to_pfn(__pa_symbol(x))
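
/*
 * Example of how the helpers above compose (a sketch, assuming a valid
 * linear-map address):
 *
 *	struct page *pg = virt_to_page(vaddr);
 *	phys_addr_t pa  = page_to_phys(pg);	// __pa(vaddr) & PAGE_MASK
 *	void *va        = page_to_virt(pg);	// vaddr rounded down to a page
 */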

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
	(((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
#endif

#endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)	({						\
	unsigned long _addr = (unsigned long)vaddr;				\
	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr));	\
})

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */