#ifndef __X86_PAGE_H__
#define __X86_PAGE_H__

#include <xen/const.h>

/*
 * It is important that the masks are signed quantities. This ensures that
 * the compiler sign-extends a 32-bit mask to 64 bits if that is required.
 */
#define PAGE_SIZE           (_AC(1,L) << PAGE_SHIFT)
#define PAGE_MASK           (~(PAGE_SIZE-1))
#define PAGE_FLAG_MASK      (~0)
#define PAGE_OFFSET(ptr)    ((unsigned long)(ptr) & ~PAGE_MASK)
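/*
 * Illustrative example (assuming the usual x86 PAGE_SHIFT of 12): PAGE_SIZE
 * is 0x1000, PAGE_MASK is ~0xfff, and PAGE_OFFSET(0xffff830000201234)
 * evaluates to 0x234, the byte offset within the containing 4K page.
 */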

#define PAGE_ORDER_4K       0
#define PAGE_ORDER_2M       9
#define PAGE_ORDER_1G       18
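/*
 * An order-N entry covers 2^N 4K pages, hence order 9 corresponds to a 2M
 * mapping and order 18 to a 1G mapping.
 */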

#ifndef __ASSEMBLY__
# include <asm/types.h>
# include <xen/lib.h>
#endif

#include <asm/x86_64/page.h>

/* Read a pte atomically from memory. */
#define l1e_read_atomic(l1ep) \
    l1e_from_intpte(pte_read_atomic(&l1e_get_intpte(*(l1ep))))
#define l2e_read_atomic(l2ep) \
    l2e_from_intpte(pte_read_atomic(&l2e_get_intpte(*(l2ep))))
#define l3e_read_atomic(l3ep) \
    l3e_from_intpte(pte_read_atomic(&l3e_get_intpte(*(l3ep))))
#define l4e_read_atomic(l4ep) \
    l4e_from_intpte(pte_read_atomic(&l4e_get_intpte(*(l4ep))))

/* Write a pte atomically to memory. */
#define l1e_write_atomic(l1ep, l1e) \
    pte_write_atomic(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write_atomic(l2ep, l2e) \
    pte_write_atomic(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write_atomic(l3ep, l3e) \
    pte_write_atomic(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write_atomic(l4ep, l4e) \
    pte_write_atomic(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/*
 * Write a pte safely but non-atomically to memory.
 * The PTE may become temporarily not-present during the update.
 */
#define l1e_write(l1ep, l1e) \
    pte_write(&l1e_get_intpte(*(l1ep)), l1e_get_intpte(l1e))
#define l2e_write(l2ep, l2e) \
    pte_write(&l2e_get_intpte(*(l2ep)), l2e_get_intpte(l2e))
#define l3e_write(l3ep, l3e) \
    pte_write(&l3e_get_intpte(*(l3ep)), l3e_get_intpte(l3e))
#define l4e_write(l4ep, l4e) \
    pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))

/* Get direct integer representation of a pte's contents (intpte_t). */
#define l1e_get_intpte(x)          ((x).l1)
#define l2e_get_intpte(x)          ((x).l2)
#define l3e_get_intpte(x)          ((x).l3)
#define l4e_get_intpte(x)          ((x).l4)

/* Get pfn mapped by pte (unsigned long). */
#define l1e_get_pfn(x)             \
    ((unsigned long)(((x).l1 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l2e_get_pfn(x)             \
    ((unsigned long)(((x).l2 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l3e_get_pfn(x)             \
    ((unsigned long)(((x).l3 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))
#define l4e_get_pfn(x)             \
    ((unsigned long)(((x).l4 & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT))

/* Get mfn mapped by pte (mfn_t). */
#define l1e_get_mfn(x) _mfn(l1e_get_pfn(x))
#define l2e_get_mfn(x) _mfn(l2e_get_pfn(x))
#define l3e_get_mfn(x) _mfn(l3e_get_pfn(x))
#define l4e_get_mfn(x) _mfn(l4e_get_pfn(x))

/* Get physical address of page mapped by pte (paddr_t). */
#define l1e_get_paddr(x)           \
    ((paddr_t)(((x).l1 & (PADDR_MASK&PAGE_MASK))))
#define l2e_get_paddr(x)           \
    ((paddr_t)(((x).l2 & (PADDR_MASK&PAGE_MASK))))
#define l3e_get_paddr(x)           \
    ((paddr_t)(((x).l3 & (PADDR_MASK&PAGE_MASK))))
#define l4e_get_paddr(x)           \
    ((paddr_t)(((x).l4 & (PADDR_MASK&PAGE_MASK))))

/* Get pointer to info structure of page mapped by pte (struct page_info *). */
#define l1e_get_page(x)           mfn_to_page(l1e_get_mfn(x))
#define l2e_get_page(x)           mfn_to_page(l2e_get_mfn(x))
#define l3e_get_page(x)           mfn_to_page(l3e_get_mfn(x))
#define l4e_get_page(x)           mfn_to_page(l4e_get_mfn(x))

/* Get pte access flags (unsigned int). */
#define l1e_get_flags(x)           (get_pte_flags((x).l1))
#define l2e_get_flags(x)           (get_pte_flags((x).l2))
#define l3e_get_flags(x)           (get_pte_flags((x).l3))
#define l4e_get_flags(x)           (get_pte_flags((x).l4))

/* Get pte pkeys (unsigned int). */
#define l1e_get_pkey(x)           get_pte_pkey((x).l1)
#define l2e_get_pkey(x)           get_pte_pkey((x).l2)
#define l3e_get_pkey(x)           get_pte_pkey((x).l3)

/* Construct an empty pte. */
#define l1e_empty()                ((l1_pgentry_t) { 0 })
#define l2e_empty()                ((l2_pgentry_t) { 0 })
#define l3e_empty()                ((l3_pgentry_t) { 0 })
#define l4e_empty()                ((l4_pgentry_t) { 0 })

/* Construct a pte from a pfn and access flags. */
#define l1e_from_pfn(pfn, flags)   \
    ((l1_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l2e_from_pfn(pfn, flags)   \
    ((l2_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l3e_from_pfn(pfn, flags)   \
    ((l3_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define l4e_from_pfn(pfn, flags)   \
    ((l4_pgentry_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
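/*
 * Illustrative usage (l1ep and mfn are hypothetical here): building a
 * present, writable 4K mapping of a frame and installing it atomically
 * might look like
 *
 *   l1e_write_atomic(l1ep, l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR_RW));
 */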

/* Construct a pte from an mfn and access flags. */
#define l1e_from_mfn(m, f) l1e_from_pfn(mfn_x(m), f)
#define l2e_from_mfn(m, f) l2e_from_pfn(mfn_x(m), f)
#define l3e_from_mfn(m, f) l3e_from_pfn(mfn_x(m), f)
#define l4e_from_mfn(m, f) l4e_from_pfn(mfn_x(m), f)

/* Construct a pte from a physical address and access flags. */
#ifndef __ASSEMBLY__
static inline l1_pgentry_t l1e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l1_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l2_pgentry_t l2e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l2_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l3_pgentry_t l3e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l3_pgentry_t) { pa | put_pte_flags(flags) };
}
static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
{
    ASSERT((pa & ~(PADDR_MASK & PAGE_MASK)) == 0);
    return (l4_pgentry_t) { pa | put_pte_flags(flags) };
}
#endif /* !__ASSEMBLY__ */

/* Construct a pte from its direct integer representation. */
#define l1e_from_intpte(intpte)    ((l1_pgentry_t) { (intpte_t)(intpte) })
#define l2e_from_intpte(intpte)    ((l2_pgentry_t) { (intpte_t)(intpte) })
#define l3e_from_intpte(intpte)    ((l3_pgentry_t) { (intpte_t)(intpte) })
#define l4e_from_intpte(intpte)    ((l4_pgentry_t) { (intpte_t)(intpte) })

/* Construct a pte from a page pointer and access flags. */
#define l1e_from_page(page, flags) l1e_from_mfn(page_to_mfn(page), flags)
#define l2e_from_page(page, flags) l2e_from_mfn(page_to_mfn(page), flags)
#define l3e_from_page(page, flags) l3e_from_mfn(page_to_mfn(page), flags)
#define l4e_from_page(page, flags) l4e_from_mfn(page_to_mfn(page), flags)

/* Add extra flags to an existing pte. */
#define l1e_add_flags(x, flags)    ((x).l1 |= put_pte_flags(flags))
#define l2e_add_flags(x, flags)    ((x).l2 |= put_pte_flags(flags))
#define l3e_add_flags(x, flags)    ((x).l3 |= put_pte_flags(flags))
#define l4e_add_flags(x, flags)    ((x).l4 |= put_pte_flags(flags))

/* Remove flags from an existing pte. */
#define l1e_remove_flags(x, flags) ((x).l1 &= ~put_pte_flags(flags))
#define l2e_remove_flags(x, flags) ((x).l2 &= ~put_pte_flags(flags))
#define l3e_remove_flags(x, flags) ((x).l3 &= ~put_pte_flags(flags))
#define l4e_remove_flags(x, flags) ((x).l4 &= ~put_pte_flags(flags))

/* Flip flags in an existing L1 PTE. */
#define l1e_flip_flags(x, flags)    ((x).l1 ^= put_pte_flags(flags))

/* Check if a pte's page mapping or significant access flags have changed. */
#define l1e_has_changed(x,y,flags) \
    ( !!(((x).l1 ^ (y).l1) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l2e_has_changed(x,y,flags) \
    ( !!(((x).l2 ^ (y).l2) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l3e_has_changed(x,y,flags) \
    ( !!(((x).l3 ^ (y).l3) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
#define l4e_has_changed(x,y,flags) \
    ( !!(((x).l4 ^ (y).l4) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
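/*
 * Illustrative example: l1e_has_changed(x, y, _PAGE_PRESENT|_PAGE_RW) is
 * true iff the mapped frame differs between the two entries or either of
 * the present/writable bits differs; all other flag bits are ignored.
 */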

/* Pagetable walking. */
#define l2e_to_l1e(x)              ((l1_pgentry_t *)__va(l2e_get_paddr(x)))
#define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
#define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))

#define map_l1t_from_l2e(x)        (l1_pgentry_t *)map_domain_page(l2e_get_mfn(x))
#define map_l2t_from_l3e(x)        (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
#define map_l3t_from_l4e(x)        (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))

/* Unlike lYe_to_lXe(), lXe_from_lYe() do not rely on the direct map. */
#define l1e_from_l2e(l2e_, offset_) ({                      \
        const l1_pgentry_t *l1t_ = map_l1t_from_l2e(l2e_);  \
        l1_pgentry_t l1e_ = l1t_[offset_];                  \
        unmap_domain_page(l1t_);                            \
        l1e_; })

#define l2e_from_l3e(l3e_, offset_) ({                      \
        const l2_pgentry_t *l2t_ = map_l2t_from_l3e(l3e_);  \
        l2_pgentry_t l2e_ = l2t_[offset_];                  \
        unmap_domain_page(l2t_);                            \
        l2e_; })

#define l3e_from_l4e(l4e_, offset_) ({                      \
        const l3_pgentry_t *l3t_ = map_l3t_from_l4e(l4e_);  \
        l3_pgentry_t l3e_ = l3t_[offset_];                  \
        unmap_domain_page(l3t_);                            \
        l3e_; })
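/*
 * Note that each lXe_from_lYe() invocation maps and unmaps the referenced
 * table, so callers needing several entries from the same table are better
 * off using map_lXt_from_lYe() once and unmapping when done.
 */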

/* Given a virtual address, get an entry offset into a page table. */
#define l1_table_offset(a)         \
    (((a) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1))
#define l2_table_offset(a)         \
    (((a) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1))
#define l3_table_offset(a)         \
    (((a) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1))
#define l4_table_offset(a)         \
    (((a) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1))
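/*
 * Worked example (standard 4-level layout: shifts 12/21/30/39 and 512
 * entries per level): l4_table_offset(va) is bits 47..39 of va,
 * l3_table_offset(va) bits 38..30, l2_table_offset(va) bits 29..21, and
 * l1_table_offset(va) bits 20..12.
 */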

/* Convert a pointer to a page-table entry into pagetable slot index. */
#define pgentry_ptr_to_slot(_p)    \
    (((unsigned long)(_p) & ~PAGE_MASK) / sizeof(*(_p)))

#ifndef __ASSEMBLY__

/* Page-table type. */
typedef struct { u64 pfn; } pagetable_t;
#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
#define pagetable_get_page(x)   mfn_to_page(pagetable_get_mfn(x))
#define pagetable_get_pfn(x)    ((x).pfn)
#define pagetable_get_mfn(x)    _mfn(((x).pfn))
#define pagetable_is_null(x)    ((x).pfn == 0)
#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
#define pagetable_null()        pagetable_from_pfn(0)
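/*
 * Illustrative example: pagetable_from_mfn(mfn) wraps a root-table frame as
 * an opaque reference, pagetable_get_paddr() recovers the physical address
 * suitable for loading into CR3, and pagetable_null()/pagetable_is_null()
 * denote and test the "no pagetable" case.
 */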

void clear_page_sse2(void *);
void copy_page_sse2(void *, const void *);

#define clear_page(_p)      clear_page_sse2(_p)
#define copy_page(_t, _f)   copy_page_sse2(_t, _f)

/* Convert between Xen-heap virtual addresses and machine addresses. */
#define __pa(x)             (virt_to_maddr(x))
#define __va(x)             (maddr_to_virt(x))

/* Convert between Xen-heap virtual addresses and machine frame numbers. */
#define __virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
#define __mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))

/* Convert between machine frame numbers and page-info structures. */
#define mfn_to_page(mfn)    (frame_table + mfn_to_pdx(mfn))
#define page_to_mfn(pg)     pdx_to_mfn((unsigned long)((pg) - frame_table))

/* Convert between machine addresses and page-info structures. */
#define __maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
#define __page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg))

/* Convert between frame number and address formats. */
#define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
#define __paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
#define gfn_to_gaddr(gfn)   __pfn_to_paddr(gfn_x(gfn))
#define gaddr_to_gfn(ga)    _gfn(__paddr_to_pfn(ga))
#define mfn_to_maddr(mfn)   __pfn_to_paddr(mfn_x(mfn))
#define maddr_to_mfn(ma)    _mfn(__paddr_to_pfn(ma))
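/*
 * Worked example (4K frames): __pfn_to_paddr(0x1234) == 0x1234000 and
 * __paddr_to_pfn(0x1234abc) == 0x1234; the low 12 bits are the offset
 * within the frame and are discarded when converting to a frame number.
 */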

/*
 * We define non-underscored wrappers for the above conversion functions.
 * They are overridden in various source files, while the underscored
 * versions remain intact.
 */
#define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
#define virt_to_mfn(va)     __virt_to_mfn(va)
#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
#define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
#define maddr_to_virt(ma)   __maddr_to_virt((unsigned long)(ma))
#define maddr_to_page(ma)   __maddr_to_page(ma)
#define page_to_maddr(pg)   __page_to_maddr(pg)
#define virt_to_page(va)    __virt_to_page(va)
#define page_to_virt(pg)    __page_to_virt(pg)
#define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
#define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
#define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
#define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))

#endif /* !defined(__ASSEMBLY__) */

/* Where to find each level of the linear mapping */
#define __linear_l1_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START))
#define __linear_l2_table \
 ((l2_pgentry_t *)(__linear_l1_table + l1_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l3_table \
 ((l3_pgentry_t *)(__linear_l2_table + l2_linear_offset(LINEAR_PT_VIRT_START)))
#define __linear_l4_table \
 ((l4_pgentry_t *)(__linear_l3_table + l3_linear_offset(LINEAR_PT_VIRT_START)))


#ifndef __ASSEMBLY__
extern root_pgentry_t idle_pg_table[ROOT_PAGETABLE_ENTRIES];
extern l2_pgentry_t  *compat_idle_pg_table_l2;
extern unsigned int   m2p_compat_vstart;
extern l2_pgentry_t l2_xenmap[L2_PAGETABLE_ENTRIES],
    l2_bootmap[4*L2_PAGETABLE_ENTRIES];
extern l3_pgentry_t l3_bootmap[L3_PAGETABLE_ENTRIES];
extern l2_pgentry_t l2_directmap[4*L2_PAGETABLE_ENTRIES];
extern l1_pgentry_t l1_fixmap[L1_PAGETABLE_ENTRIES];
void paging_init(void);
void efi_update_l4_pgtable(unsigned int l4idx, l4_pgentry_t);
#endif /* !defined(__ASSEMBLY__) */

#define _PAGE_NONE     _AC(0x000,U)
#define _PAGE_PRESENT  _AC(0x001,U)
#define _PAGE_RW       _AC(0x002,U)
#define _PAGE_USER     _AC(0x004,U)
#define _PAGE_PWT      _AC(0x008,U)
#define _PAGE_PCD      _AC(0x010,U)
#define _PAGE_ACCESSED _AC(0x020,U)
#define _PAGE_DIRTY    _AC(0x040,U)
#define _PAGE_PAT      _AC(0x080,U)
#define _PAGE_PSE      _AC(0x080,U)
#define _PAGE_GLOBAL   _AC(0x100,U)
#define _PAGE_AVAIL0   _AC(0x200,U)
#define _PAGE_AVAIL1   _AC(0x400,U)
#define _PAGE_AVAIL2   _AC(0x800,U)
#define _PAGE_AVAIL    _AC(0xE00,U)
#define _PAGE_PSE_PAT  _AC(0x1000,U)
#define _PAGE_AVAIL_HIGH (_AC(0x7ff, U) << 12)
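/*
 * Note: bit 7 doubles as PAT in 4K L1 entries and PSE (page size) in L2/L3
 * entries; for superpage mappings the PAT bit moves to bit 12
 * (_PAGE_PSE_PAT).
 */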

#ifndef __ASSEMBLY__
/* Dependency on NX being available can't be expressed. */
#define _PAGE_NX       (cpu_has_nx ? _PAGE_NX_BIT : 0)
#endif

#define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)

/*
 * Debug option: Ensure that granted mappings are not implicitly unmapped.
 * WARNING: This will need to be disabled to run OSes that use the spare PTE
 * bits themselves (e.g., *BSD).
 */
#ifdef NDEBUG
#undef _PAGE_GNTTAB
#endif
#ifndef _PAGE_GNTTAB
#define _PAGE_GNTTAB   0
#endif

#define __PAGE_HYPERVISOR_RO      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RO | \
                                   _PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_RX      (_PAGE_PRESENT | _PAGE_ACCESSED)
#define __PAGE_HYPERVISOR         (__PAGE_HYPERVISOR_RX | \
                                   _PAGE_DIRTY | _PAGE_RW)
#define __PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR | _PAGE_PCD)
#define __PAGE_HYPERVISOR_UC      (__PAGE_HYPERVISOR | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_HYPERVISOR_SHSTK   (__PAGE_HYPERVISOR_RO | _PAGE_DIRTY)
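/*
 * Note: a CET shadow-stack page is architecturally one whose leaf entry is
 * read-only (R/W clear) yet dirty (D set), which is what
 * __PAGE_HYPERVISOR_SHSTK encodes.
 */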

#define MAP_SMALL_PAGES _PAGE_AVAIL0 /* don't use superpage mappings */

#ifndef __ASSEMBLY__

/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
static inline unsigned int pte_flags_to_cacheattr(unsigned int flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static inline unsigned int cacheattr_to_pte_flags(unsigned int cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}
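/*
 * Worked example: the 3-bit cacheattr is PAT:PCD:PWT.  With only PCD
 * (bit 4) set, pte_flags_to_cacheattr(_PAGE_PCD) returns 2 (binary 010),
 * and cacheattr_to_pte_flags(2) reconstructs _PAGE_PCD.
 */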

/* return true if permission increased */
static inline bool_t
perms_strictly_increased(uint32_t old_flags, uint32_t new_flags)
/* Given the flags of two entries, are the new flags a strict
 * increase in rights over the old ones? */
{
    uint32_t of = old_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT);
    uint32_t nf = new_flags & (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_NX_BIT);
    /* Flip the NX bit, since it's the only one that decreases rights;
     * we calculate as if it were an "X" bit. */
    of ^= _PAGE_NX_BIT;
    nf ^= _PAGE_NX_BIT;
    /* If the changed bits are all set in the new flags, then rights strictly
     * increased between old and new. */
    return ((of | (of ^ nf)) == nf);
}
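/*
 * Worked example: moving from old = _PAGE_PRESENT|_PAGE_NX_BIT (read-only,
 * no-exec) to new = _PAGE_PRESENT|_PAGE_RW|_PAGE_NX_BIT only adds the write
 * right, so this returns true; going back the other way removes a right and
 * returns false.
 */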

static inline void invalidate_icache(void)
{
/*
 * There is nothing to be done here as icaches are sufficiently
 * coherent on x86.
 */
}

#endif /* !__ASSEMBLY__ */

#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

#endif /* __X86_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */