1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_IA64_PAGE_H
3 #define _ASM_IA64_PAGE_H
4 /*
5 * Pagetable related stuff.
6 *
7 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
8 * David Mosberger-Tang <davidm@hpl.hp.com>
9 */
10
11 #include <asm/intrinsics.h>
12 #include <asm/types.h>
13
14 /*
15 * The top three bits of an IA64 address are its Region Number.
16 * Different regions are assigned to different purposes.
17 */
18 #define RGN_SHIFT (61)
19 #define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
20 #define RGN_BITS (RGN_BASE(-1))
21
22 #define RGN_KERNEL 7 /* Identity mapped region */
23 #define RGN_UNCACHED 6 /* Identity mapped I/O region */
24 #define RGN_GATE 5 /* Gate page, Kernel text, etc */
25 #define RGN_HPAGE 4 /* For Huge TLB pages */
26
27 /*
28 * PAGE_SHIFT determines the actual kernel page size.
29 */
30 #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
31 # define PAGE_SHIFT 12
32 #elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
33 # define PAGE_SHIFT 13
34 #elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
35 # define PAGE_SHIFT 14
36 #elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
37 # define PAGE_SHIFT 16
38 #else
39 # error Unsupported page size!
40 #endif
41
42 #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
43 #define PAGE_MASK (~(PAGE_SIZE - 1))
44
45 #define PERCPU_PAGE_SHIFT 18 /* log2() of max. size of per-CPU area */
46 #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
47
48
49 #ifdef CONFIG_HUGETLB_PAGE
50 # define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE)
51 # define HPAGE_SHIFT hpage_shift
52 # define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
53 # define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
54 # define HPAGE_MASK (~(HPAGE_SIZE - 1))
55
56 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
57 #endif /* CONFIG_HUGETLB_PAGE */
58
59 #ifdef __ASSEMBLY__
60 # define __pa(x) ((x) - PAGE_OFFSET)
61 # define __va(x) ((x) + PAGE_OFFSET)
62 #else /* !__ASSEMBLY */
63 # define STRICT_MM_TYPECHECKS
64
65 extern void clear_page (void *page);
66 extern void copy_page (void *to, void *from);
67
68 /*
69 * clear_user_page() and copy_user_page() can't be inline functions because
70 * flush_dcache_page() can't be defined until later...
71 */
72 #define clear_user_page(addr, vaddr, page) \
73 do { \
74 clear_page(addr); \
75 flush_dcache_page(page); \
76 } while (0)
77
78 #define copy_user_page(to, from, vaddr, page) \
79 do { \
80 copy_page((to), (from)); \
81 flush_dcache_page(page); \
82 } while (0)
83
84
85 #define alloc_zeroed_user_highpage_movable(vma, vaddr) \
86 ({ \
87 struct page *page = alloc_page_vma( \
88 GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
89 if (page) \
90 flush_dcache_page(page); \
91 page; \
92 })
93
94 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
95
96 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
97
98 #include <asm-generic/memory_model.h>
99
100 #ifdef CONFIG_FLATMEM
101 # define pfn_valid(pfn) ((pfn) < max_mapnr)
102 #endif
103
104 #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
105 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
106 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
107
108 typedef union ia64_va {
109 struct {
110 unsigned long off : 61; /* intra-region offset */
111 unsigned long reg : 3; /* region number */
112 } f;
113 unsigned long l;
114 void *p;
115 } ia64_va;
116
117 /*
118 * Note: These macros depend on the fact that PAGE_OFFSET has all
119 * region bits set to 1 and all other bits set to zero. They are
120 * expressed in this way to ensure they result in a single "dep"
121 * instruction.
122 */
123 #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
124 #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
125
126 #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
127 #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
128
129 #ifdef CONFIG_HUGETLB_PAGE
130 # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
131 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
132 # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
133 extern unsigned int hpage_shift;
134 #endif
135
136 static __inline__ int
get_order(unsigned long size)137 get_order (unsigned long size)
138 {
139 long double d = size - 1;
140 long order;
141
142 order = ia64_getf_exp(d);
143 order = order - PAGE_SHIFT - 0xffff + 1;
144 if (order < 0)
145 order = 0;
146 return order;
147 }
148
149 #endif /* !__ASSEMBLY__ */
150
151 #ifdef STRICT_MM_TYPECHECKS
152 /*
153 * These are used to make use of C type-checking..
154 */
155 typedef struct { unsigned long pte; } pte_t;
156 typedef struct { unsigned long pmd; } pmd_t;
157 #if CONFIG_PGTABLE_LEVELS == 4
158 typedef struct { unsigned long pud; } pud_t;
159 #endif
160 typedef struct { unsigned long pgd; } pgd_t;
161 typedef struct { unsigned long pgprot; } pgprot_t;
162 typedef struct page *pgtable_t;
163
164 # define pte_val(x) ((x).pte)
165 # define pmd_val(x) ((x).pmd)
166 #if CONFIG_PGTABLE_LEVELS == 4
167 # define pud_val(x) ((x).pud)
168 #endif
169 # define pgd_val(x) ((x).pgd)
170 # define pgprot_val(x) ((x).pgprot)
171
172 # define __pte(x) ((pte_t) { (x) } )
173 # define __pmd(x) ((pmd_t) { (x) } )
174 # define __pgprot(x) ((pgprot_t) { (x) } )
175
176 #else /* !STRICT_MM_TYPECHECKS */
177 /*
178 * .. while these make it easier on the compiler
179 */
180 # ifndef __ASSEMBLY__
181 typedef unsigned long pte_t;
182 typedef unsigned long pmd_t;
183 typedef unsigned long pgd_t;
184 typedef unsigned long pgprot_t;
185 typedef struct page *pgtable_t;
186 # endif
187
188 # define pte_val(x) (x)
189 # define pmd_val(x) (x)
190 # define pgd_val(x) (x)
191 # define pgprot_val(x) (x)
192
193 # define __pte(x) (x)
194 # define __pgd(x) (x)
195 # define __pgprot(x) (x)
196 #endif /* !STRICT_MM_TYPECHECKS */
197
198 #define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
199
200 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
201
202 #define GATE_ADDR RGN_BASE(RGN_GATE)
203
204 /*
205 * 0xa000000000000000+2*PERCPU_PAGE_SIZE
206 * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
207 */
208 #define KERNEL_START (GATE_ADDR+__IA64_UL_CONST(0x100000000))
209 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
210 #define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
211
212 #define __HAVE_ARCH_GATE_AREA 1
213
214 #endif /* _ASM_IA64_PAGE_H */
215