#ifndef __X86_64_PAGE_H__
#define __X86_64_PAGE_H__

#define L1_PAGETABLE_SHIFT      12
#define L2_PAGETABLE_SHIFT      21
#define L3_PAGETABLE_SHIFT      30
#define L4_PAGETABLE_SHIFT      39
#define PAGE_SHIFT              L1_PAGETABLE_SHIFT
#define SUPERPAGE_SHIFT         L2_PAGETABLE_SHIFT
#define ROOT_PAGETABLE_SHIFT    L4_PAGETABLE_SHIFT

#define PAGETABLE_ORDER         9
#define L1_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L2_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L3_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L4_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define ROOT_PAGETABLE_ENTRIES  L4_PAGETABLE_ENTRIES
#define SUPERPAGE_ORDER         PAGETABLE_ORDER
#define SUPERPAGE_PAGES         (1<<SUPERPAGE_ORDER)
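
/*
 * Illustrative arithmetic (not used by the code): each level resolves
 * PAGETABLE_ORDER (9) bits of the virtual address, so one entry at each
 * level maps
 *   L1: 1 << 12 = 4 KiB       L2: 1 << 21 = 2 MiB
 *   L3: 1 << 30 = 1 GiB       L4: 1 << 39 = 512 GiB
 * and a superpage (an L2 leaf) covers SUPERPAGE_PAGES (512) 4 KiB pages.
 */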

#define __XEN_VIRT_START        XEN_VIRT_START

/* These are architectural limits. Current CPUs support only 40-bit phys. */
#define PADDR_BITS              52
#define VADDR_BITS              48
#define PADDR_MASK              ((_AC(1,UL) << PADDR_BITS) - 1)
#define VADDR_MASK              ((_AC(1,UL) << VADDR_BITS) - 1)

#define VADDR_TOP_BIT           (1UL << (VADDR_BITS - 1))
#define CANONICAL_MASK          (~0UL & ~VADDR_MASK)

#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))

#ifndef __ASSEMBLY__

static inline unsigned long canonicalise_addr(unsigned long addr)
{
    if ( addr & VADDR_TOP_BIT )
        return addr | CANONICAL_MASK;
    else
        return addr & ~CANONICAL_MASK;
}
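
/*
 * Worked example (illustrative only): with VADDR_BITS == 48,
 * CANONICAL_MASK is 0xffff000000000000.  0x0000800000000000 is
 * non-canonical (bit 47 set, bits 63:48 clear), so
 * is_canonical_address() rejects it, and
 * canonicalise_addr(0x0000800000000000) sign-extends bit 47 to yield
 * the canonical 0xffff800000000000.
 */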

#include <asm/types.h>

#include <xen/pdx.h>

extern unsigned long xen_virt_end;

/*
 * Note: These are solely for use by page_{get,set}_owner(), and
 *       therefore don't need to handle the XEN_VIRT_{START,END} range.
 */
#define virt_to_pdx(va)  (((unsigned long)(va) - DIRECTMAP_VIRT_START) >> \
                          PAGE_SHIFT)
#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
                                   ((unsigned long)(pdx) << PAGE_SHIFT)))

static inline unsigned long __virt_to_maddr(unsigned long va)
{
    ASSERT(va < DIRECTMAP_VIRT_END);
    if ( va >= DIRECTMAP_VIRT_START )
        va -= DIRECTMAP_VIRT_START;
    else
    {
        BUILD_BUG_ON(XEN_VIRT_END - XEN_VIRT_START != GB(1));
        /* Signed, so ((long)XEN_VIRT_START >> 30) fits in an imm32. */
        ASSERT(((long)va >> (PAGE_ORDER_1G + PAGE_SHIFT)) ==
               ((long)XEN_VIRT_START >> (PAGE_ORDER_1G + PAGE_SHIFT)));

        va += xen_phys_start - XEN_VIRT_START;
    }
    return (va & ma_va_bottom_mask) |
           ((va << pfn_pdx_hole_shift) & ma_top_mask);
}

static inline void *__maddr_to_virt(unsigned long ma)
{
    ASSERT(pfn_to_pdx(ma >> PAGE_SHIFT) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
    return (void *)(DIRECTMAP_VIRT_START +
                    ((ma & ma_va_bottom_mask) |
                     ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
}
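
/*
 * Illustrative note: ma_va_bottom_mask, ma_top_mask and
 * pfn_pdx_hole_shift come from the PDX machinery in <xen/pdx.h>.  In
 * the common case of no PDX compression (pfn_pdx_hole_shift == 0,
 * ma_va_bottom_mask == ~0UL, ma_top_mask == 0), the pair above
 * degenerates to plain offset arithmetic:
 *
 *   maddr = va - DIRECTMAP_VIRT_START;
 *   va    = DIRECTMAP_VIRT_START + maddr;
 *
 * The masks and shift only matter when a hole in the PFN space is
 * being squeezed out of the direct map.
 */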

/* Raw page-table entry type (PRIpte should only be used for debug printk's). */
typedef u64 intpte_t;
#define PRIpte "016lx"

typedef struct { intpte_t l1; } l1_pgentry_t;
typedef struct { intpte_t l2; } l2_pgentry_t;
typedef struct { intpte_t l3; } l3_pgentry_t;
typedef struct { intpte_t l4; } l4_pgentry_t;
typedef l4_pgentry_t root_pgentry_t;
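
/*
 * Example (illustrative, debug only), assuming an l1_pgentry_t l1e is
 * in scope and using the l1e_get_intpte() accessor from <asm/page.h>:
 *   printk("l1e = %" PRIpte "\n", l1e_get_intpte(l1e));
 */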

#endif /* !__ASSEMBLY__ */

#define pte_read_atomic(ptep)       read_atomic(ptep)
#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
#define pte_write(ptep, pte)        write_atomic(ptep, pte)

/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
#define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT)
#define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT)
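
/*
 * Illustrative: l1_linear_offset() folds the four 9-bit per-level table
 * indices into a single 36-bit index, i.e. for a VA whose L4..L1 indices
 * are i4..i1:
 *   l1_linear_offset(va) == ((i4 * 512 + i3) * 512 + i2) * 512 + i1
 */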
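/*
 * Slots a PV guest may validly install in its own L2/L4 tables.  For a
 * 32-bit PV guest, the tail of a PGT_pae_xen_l2 table (from
 * COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT onwards) and all but slot 0 of the
 * root table are reserved for Xen; for 64-bit guests the reserved range
 * is [ROOT_PAGETABLE_FIRST_XEN_SLOT, ROOT_PAGETABLE_LAST_XEN_SLOT].
 */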
#define is_guest_l2_slot(_d, _t, _s)                   \
    ( !is_pv_32bit_domain(_d) ||                       \
      !((_t) & PGT_pae_xen_l2) ||                      \
      ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
#define is_guest_l4_slot(_d, _s)                    \
    ( is_pv_32bit_domain(_d)                        \
      ? ((_s) == 0)                                 \
      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
         ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))

#define root_table_offset         l4_table_offset
#define root_get_pfn              l4e_get_pfn
#define root_get_flags            l4e_get_flags
#define root_get_intpte           l4e_get_intpte
#define root_empty                l4e_empty
#define root_from_paddr           l4e_from_paddr
#define PGT_root_page_table       PGT_l4_page_table

/*
 * PTE pfn and flags:
 *  40-bit pfn   = (pte[51:12])
 *  24-bit flags = (pte[63:52],pte[11:0])
 */

/* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */
#ifndef __ASSEMBLY__
static inline unsigned int get_pte_flags(intpte_t x)
{
    return ((x >> 40) & ~0xfff) | (x & 0xfff);
}

static inline intpte_t put_pte_flags(unsigned int x)
{
    return (((intpte_t)x & ~0xfff) << 40) | (x & 0xfff);
}
#endif
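
/*
 * Worked example (illustrative only): for pte == 0x8000000000000067
 * (NX plus Present/RW/User/Accessed/Dirty),
 *   get_pte_flags(pte)     == 0x800067, i.e. _PAGE_NX_BIT | 0x067;
 *   put_pte_flags(0x800067) == 0x8000000000000067.
 * The pfn field (pte[51:12]) passes through neither helper.
 */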

/*
 * Protection keys define a new 4-bit protection key field
 * (PKEY) in bits 62:59 of leaf entries of the page tables.
 * This corresponds to bits 22:19 of the 24-bit flags.
 *
 * Note: Bit 22 is used by _PAGE_GNTTAB, which is visible to PV guests,
 * so protection keys must be disabled for PV guests.
 */
#define _PAGE_PKEY_BITS  (0x780000) /* Protection Keys, 22:19 */

#define get_pte_pkey(x) (MASK_EXTR(get_pte_flags(x), _PAGE_PKEY_BITS))
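
/*
 * Example (illustrative only): a leaf pte with PKEY 9 has pte[62:59]
 * == 0b1001, so get_pte_flags() yields flag bits 22:19 == 0b1001
 * (flags & _PAGE_PKEY_BITS == 0x480000) and get_pte_pkey() returns 9.
 */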

/* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte. */
#define _PAGE_NX_BIT (1U<<23)

/* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte. */
#define _PAGE_GNTTAB (1U<<22)

/*
 * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
 * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
 * is asserted for both.
 */
#define _PAGE_GUEST_KERNEL (1U<<12)

#define PAGE_HYPERVISOR_RO      (__PAGE_HYPERVISOR_RO      | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RW      | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_RX      (__PAGE_HYPERVISOR_RX      | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_RWX     (__PAGE_HYPERVISOR         | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_SHSTK   (__PAGE_HYPERVISOR_SHSTK   | _PAGE_GLOBAL)

#define PAGE_HYPERVISOR         PAGE_HYPERVISOR_RW
#define PAGE_HYPERVISOR_UCMINUS (__PAGE_HYPERVISOR_UCMINUS | \
                                 _PAGE_GLOBAL | _PAGE_NX)
#define PAGE_HYPERVISOR_UC      (__PAGE_HYPERVISOR_UC | \
                                 _PAGE_GLOBAL | _PAGE_NX)

#endif /* __X86_64_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */