/******************************************************************************
 * xen/asm-x86/guest_pt.h
 *
 * Types and accessors for guest pagetable entries, as distinct from
 * Xen's pagetable types.
 *
 * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including
 * this file.
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _XEN_ASM_GUEST_PT_H
#define _XEN_ASM_GUEST_PT_H

#if !defined(GUEST_PAGING_LEVELS)
#error GUEST_PAGING_LEVELS not defined
#endif

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}

/* Override get_gfn to work with gfn_t */
#undef get_gfn
#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)

/* Mask covering the reserved bits from superpage alignment. */
#define SUPERPAGE_RSVD(bit)                                             \
    (((1ul << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1ul)))
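
/*
 * Illustrative values (assuming _PAGE_PSE_PAT is bit 12, as in Xen's
 * page.h): SUPERPAGE_RSVD(21) == 0x1fe000, i.e. bits 13-20 reserved in a
 * 2M entry; SUPERPAGE_RSVD(30) == 0x3fffe000, i.e. bits 13-29 reserved in
 * a 1G entry.  Bit 12 is excluded because it carries PAT for superpages.
 */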

static inline uint32_t fold_pse36(uint64_t val)
{
    return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 32)) >> (32 - 13));
}
static inline uint64_t unfold_pse36(uint32_t val)
{
    return (val & ~(0x1fful << 13)) | ((val & (0x1fful << 13)) << (32 - 13));
}
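
/*
 * NB: the PSE36 helpers above pack/unpack address bits 32-40, which a
 * PSE36 (or PSE-40) superpage PDE stores in entry bits 13-21.  For
 * example, unfold_pse36(0x2000) == 0x100000000ul (entry bit 13 becomes
 * address bit 32); conversely fold_pse36(0x100000000ul) == 0x2000, the
 * uint32_t return type truncating whatever is left above bit 31.
 */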

/* Types of the guest's page tables and access functions for them */

#if GUEST_PAGING_LEVELS == 2

#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024

#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

#define GUEST_L1_PAGETABLE_RSVD           0
#define GUEST_L2_PAGETABLE_RSVD           0

typedef uint32_t guest_intpte_t;
typedef struct { guest_intpte_t l1; } guest_l1e_t;
typedef struct { guest_intpte_t l2; } guest_l2e_t;

#define PRI_gpte "08x"

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(gl1e.l1 >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(gl2e.l2 >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return gl1e.l1 & 0xfff; }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return gl2e.l2 & 0xfff; }

static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e)
{ return 0; }
static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e)
{ return 0; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }

#define guest_l1_table_offset(_va)                                           \
    (((_va) >> GUEST_L1_PAGETABLE_SHIFT) & (GUEST_L1_PAGETABLE_ENTRIES - 1))
#define guest_l2_table_offset(_va)                                           \
    (((_va) >> GUEST_L2_PAGETABLE_SHIFT) & (GUEST_L2_PAGETABLE_ENTRIES - 1))
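
/*
 * Worked example: for va 0x00403000, guest_l2_table_offset() is
 * 0x00403000 >> 22 == 1, and guest_l1_table_offset() is
 * (0x00403000 >> 12) & 0x3ff == 3, i.e. L2 slot 1, L1 slot 3.
 */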

#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3

#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4

#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30

#define GUEST_L1_PAGETABLE_RSVD            0x7ff0000000000000ul
#define GUEST_L2_PAGETABLE_RSVD            0x7ff0000000000000ul
#define GUEST_L3_PAGETABLE_RSVD                                      \
    (0xfff0000000000000ul | _PAGE_GLOBAL | _PAGE_PSE | _PAGE_DIRTY | \
     _PAGE_ACCESSED | _PAGE_USER | _PAGE_RW)
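
/*
 * NB: PAE L3 entries are PDPTEs, loaded as a block when CR3 is written
 * rather than walked on demand, and the hardware only honours Present,
 * PWT and PCD among their flag bits.  The other flag bits, and bit 63
 * (there is no NX in a PDPTE), are reserved, hence the wider L3 mask.
 */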

#else /* GUEST_PAGING_LEVELS == 4 */

#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512

#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39

#define GUEST_L1_PAGETABLE_RSVD            0
#define GUEST_L2_PAGETABLE_RSVD            0
#define GUEST_L3_PAGETABLE_RSVD            0
/* NB L4e._PAGE_GLOBAL is reserved for AMD, but ignored for Intel. */
#define GUEST_L4_PAGETABLE_RSVD            _PAGE_PSE

#endif

typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif
typedef intpte_t guest_intpte_t;

#define PRI_gpte "016"PRIx64

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e)
{ return l1e_get_pkey(gl1e); }
static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e)
{ return l2e_get_pkey(gl2e); }
static inline u32 guest_l3e_get_pkey(guest_l3e_t gl3e)
{ return l3e_get_pkey(gl3e); }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)

#endif /* GUEST_PAGING_LEVELS != 2 */

/* Mask of the GFNs covered by an L2 or L3 superpage */
#define GUEST_L2_GFN_MASK (GUEST_L1_PAGETABLE_ENTRIES - 1)
#define GUEST_L3_GFN_MASK \
    ((GUEST_L2_PAGETABLE_ENTRIES * GUEST_L1_PAGETABLE_ENTRIES) - 1)
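
/*
 * Illustrative values: with 512-entry tables (3- and 4-level guests),
 * GUEST_L2_GFN_MASK == 0x1ff (a 2M superpage spans 512 GFNs) and
 * GUEST_L3_GFN_MASK == 0x3ffff (a 1G superpage spans 512 * 512 GFNs);
 * for 2-level guests GUEST_L2_GFN_MASK == 0x3ff (a 4M superpage spans
 * 1024 GFNs).
 */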


/* Which pagetable features are supported on this vcpu? */

static always_inline bool guest_can_use_l2_superpages(const struct vcpu *v)
{
    /*
     * PV guests use Xen's paging settings.  Being 4-level, 2M
     * superpages are unconditionally supported.
     *
     * The L2 _PAGE_PSE bit must be honoured in HVM guests, whenever
     * CR4.PSE is set or the guest is in PAE or long mode.
     * It's also used in the dummy PT for vcpus with CR0.PG cleared.
     */
    return (is_pv_vcpu(v) ||
            GUEST_PAGING_LEVELS != 2 ||
            !hvm_paging_enabled(v) ||
            (v->arch.hvm.guest_cr[4] & X86_CR4_PSE));
}

static always_inline bool guest_can_use_l3_superpages(const struct domain *d)
{
    /*
     * There are no control register settings for the hardware pagewalk on the
     * subject of 1G superpages.
     *
     * Shadow pagetables don't support 1GB superpages at all, and will always
     * treat L3 _PAGE_PSE as reserved.
     *
     * With HAP however, if the guest constructs a 1GB superpage on capable
     * hardware, it will function irrespective of whether the feature is
     * advertised.  Xen's model of performing a pagewalk should match.
     */
    return GUEST_PAGING_LEVELS >= 4 && paging_mode_hap(d) && cpu_has_page1gb;
}

static inline bool guest_can_use_pse36(const struct domain *d)
{
    /*
     * Only called in the context of 2-level guests, after
     * guest_can_use_l2_superpages() has indicated true.
     *
     * Shadow pagetables don't support PSE36 superpages at all, and will
     * always treat them as reserved.
     *
     * With HAP however, once L2 superpages are active, there are no control
     * register settings for the hardware pagewalk on the subject of PSE36.
     * If the guest constructs a PSE36 superpage on capable hardware, it will
     * function irrespective of whether the feature is advertised.  Xen's
     * model of performing a pagewalk should match.
     */
    return paging_mode_hap(d) && cpu_has_pse36;
}

static always_inline bool guest_nx_enabled(const struct vcpu *v)
{
    if ( GUEST_PAGING_LEVELS == 2 ) /* NX has no effect without CR4.PAE. */
        return false;

    /* PV guests can't control EFER.NX, and inherit Xen's choice. */
    return is_pv_vcpu(v) ? cpu_has_nx : hvm_nx_enabled(v);
}

static always_inline bool guest_wp_enabled(const struct vcpu *v)
{
    /* PV guests can't control CR0.WP, and it is unconditionally set by Xen. */
    return is_pv_vcpu(v) || hvm_wp_enabled(v);
}

static always_inline bool guest_smep_enabled(const struct vcpu *v)
{
    return !is_pv_vcpu(v) && hvm_smep_enabled(v);
}

static always_inline bool guest_smap_enabled(const struct vcpu *v)
{
    return !is_pv_vcpu(v) && hvm_smap_enabled(v);
}

static always_inline bool guest_pku_enabled(const struct vcpu *v)
{
    return !is_pv_vcpu(v) && hvm_pku_enabled(v);
}

/* Helpers for identifying whether guest entries have reserved bits set. */

/* Bits reserved because of maxphysaddr, and (lack of) EFER.NX */
static always_inline uint64_t guest_rsvd_bits(const struct vcpu *v)
{
    return ((PADDR_MASK &
             ~((1ul << v->domain->arch.cpuid->extd.maxphysaddr) - 1)) |
            (guest_nx_enabled(v) ? 0 : put_pte_flags(_PAGE_NX_BIT)));
}
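
/*
 * Worked example (assuming PADDR_BITS == 52): with maxphysaddr == 36 and
 * NX disabled, the result is 0xffff000000000 (address bits 36-51) with
 * bit 63 (_PAGE_NX) also set; with NX enabled, bit 63 is not reserved.
 */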

static always_inline bool guest_l1e_rsvd_bits(const struct vcpu *v,
                                              guest_l1e_t l1e)
{
    return l1e.l1 & (guest_rsvd_bits(v) | GUEST_L1_PAGETABLE_RSVD);
}

static always_inline bool guest_l2e_rsvd_bits(const struct vcpu *v,
                                              guest_l2e_t l2e)
{
    uint64_t rsvd_bits = guest_rsvd_bits(v);

    return ((l2e.l2 & (rsvd_bits | GUEST_L2_PAGETABLE_RSVD |
                       (guest_can_use_l2_superpages(v) ? 0 : _PAGE_PSE))) ||
            ((l2e.l2 & _PAGE_PSE) &&
             (l2e.l2 & ((GUEST_PAGING_LEVELS == 2 && guest_can_use_pse36(v->domain))
                          /* PSE36 tops out at 40 bits of address width. */
                        ? (fold_pse36(rsvd_bits | (1ul << 40)))
                        : SUPERPAGE_RSVD(GUEST_L2_PAGETABLE_SHIFT)))));
}

#if GUEST_PAGING_LEVELS >= 3
static always_inline bool guest_l3e_rsvd_bits(const struct vcpu *v,
                                              guest_l3e_t l3e)
{
    return ((l3e.l3 & (guest_rsvd_bits(v) | GUEST_L3_PAGETABLE_RSVD |
                       (guest_can_use_l3_superpages(v->domain) ? 0 : _PAGE_PSE))) ||
            ((l3e.l3 & _PAGE_PSE) &&
             (l3e.l3 & SUPERPAGE_RSVD(GUEST_L3_PAGETABLE_SHIFT))));
}

#if GUEST_PAGING_LEVELS >= 4
static always_inline bool guest_l4e_rsvd_bits(const struct vcpu *v,
                                              guest_l4e_t l4e)
{
    return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD |
                     ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD)
                      ? _PAGE_GLOBAL : 0));
}
#endif /* GUEST_PAGING_LEVELS >= 4 */
#endif /* GUEST_PAGING_LEVELS >= 3 */

/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.  When we encounter a superpage l2e, we fabricate an
 * l1e for propagation to the shadow (for splintering guest superpages
 * into many shadow l1 entries).  */
typedef struct guest_pagetable_walk walk_t;
struct guest_pagetable_walk
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t l4e;            /* Guest's level 4 entry */
#endif
    guest_l3e_t l3e;            /* Guest's level 3 entry */
#endif
    guest_l2e_t l2e;            /* Guest's level 2 entry */
    union
    {
        guest_l1e_t l1e;        /* Guest's level 1 entry (or fabrication). */
        uint64_t   el1e;        /* L2 PSE36 superpages wider than 32 bits. */
    };
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry was in */
    mfn_t l3mfn;                /* MFN that the level 3 entry was in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry was in */
    mfn_t l1mfn;                /* MFN that the level 1 entry was in */

    uint32_t pfec;              /* Accumulated PFEC_* error code from walk. */
};

/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding frame number. */
static inline gfn_t guest_walk_to_gfn(const walk_t *gw)
{
    if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
        return INVALID_GFN;
    return (GUEST_PAGING_LEVELS == 2
            ? _gfn(gw->el1e >> PAGE_SHIFT)
            : guest_l1e_get_gfn(gw->l1e));
}

/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding physical address. */
static inline paddr_t guest_walk_to_gpa(const walk_t *gw)
{
    gfn_t gfn = guest_walk_to_gfn(gw);

    if ( gfn_eq(gfn, INVALID_GFN) )
        return INVALID_PADDR;

    return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK);
}

/* Given a walk_t from a successful walk, return the page-order of the
 * page or superpage that the virtual address is in. */
static inline unsigned int guest_walk_to_page_order(const walk_t *gw)
{
    /* This is only valid for successful walks - otherwise the
     * PSE bits might be invalid. */
    ASSERT(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT);
#if GUEST_PAGING_LEVELS >= 3
    if ( guest_l3e_get_flags(gw->l3e) & _PAGE_PSE )
        return GUEST_L3_PAGETABLE_SHIFT - PAGE_SHIFT;
#endif
    if ( guest_l2e_get_flags(gw->l2e) & _PAGE_PSE )
        return GUEST_L2_PAGETABLE_SHIFT - PAGE_SHIFT;
    return GUEST_L1_PAGETABLE_SHIFT - PAGE_SHIFT;
}
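
/*
 * For reference, given the SHIFT definitions above: order 0 for a 4k
 * page, 9 for a 2M superpage (3- and 4-level guests), 10 for a 4M
 * superpage (2-level guests), and 18 for a 1G superpage.
 */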


/*
 * Walk the guest pagetables, after the manner of a hardware walker.
 *
 * Inputs: a vcpu, a p2m, a virtual address, a walk_t to fill, a
 *         pagefault code, and the GFN, MFN and a mapping of the
 *         guest's top-level pagetable.
 *
 * We walk the vcpu's guest pagetables, filling the walk_t with what we
 * see and adding any Accessed and Dirty bits that are needed in the
 * guest entries.  Using the pagefault code, we check the permissions as
 * we go.  For the purposes of reading pagetables we treat all non-RAM
 * memory as containing zeroes.
 *
 * Returns a boolean indicating success or failure.  walk_t.pfec contains
 * the accumulated error code on failure.
 */

/* Macro-fu so you can call guest_walk_tables() and get the right one. */
#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)

bool
guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
                  unsigned long va, walk_t *gw, uint32_t pfec,
                  gfn_t top_gfn, mfn_t top_mfn, void *top_map);
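
/*
 * Illustrative call sequence (a sketch only - the top_* arguments come
 * from the caller's knowledge of the guest's current CR3):
 *
 *     walk_t gw;
 *
 *     if ( guest_walk_tables(v, p2m, va, &gw, pfec,
 *                            top_gfn, top_mfn, top_map) )
 *         gfn = guest_walk_to_gfn(&gw);   // Success.
 *     else
 *         pfec = gw.pfec;                 // Failure: accumulated PFEC_*.
 */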

/* Pretty-print the contents of a guest-walk */
static inline void print_gw(const walk_t *gw)
{
    gprintk(XENLOG_INFO, "GUEST WALK TO %p\n", _p(gw->va));
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
    gprintk(XENLOG_INFO, "   l4e=%" PRI_gpte " l4mfn=%" PRI_mfn "\n",
            gw->l4e.l4, mfn_x(gw->l4mfn));
    gprintk(XENLOG_INFO, "   l3e=%" PRI_gpte " l3mfn=%" PRI_mfn "\n",
            gw->l3e.l3, mfn_x(gw->l3mfn));
#else  /* PAE only... */
    gprintk(XENLOG_INFO, "   l3e=%" PRI_gpte "\n", gw->l3e.l3);
#endif /* PAE or 64... */
#endif /* All levels... */
    gprintk(XENLOG_INFO, "   l2e=%" PRI_gpte " l2mfn=%" PRI_mfn "\n",
            gw->l2e.l2, mfn_x(gw->l2mfn));
#if GUEST_PAGING_LEVELS == 2
    gprintk(XENLOG_INFO, "  el1e=%08" PRIx64 " l1mfn=%" PRI_mfn "\n",
            gw->el1e, mfn_x(gw->l1mfn));
#else
    gprintk(XENLOG_INFO, "   l1e=%" PRI_gpte " l1mfn=%" PRI_mfn "\n",
            gw->l1e.l1, mfn_x(gw->l1mfn));
#endif
    gprintk(XENLOG_INFO, "   pfec=%02x[%c%c%c%c%c%c]\n", gw->pfec,
            gw->pfec & PFEC_prot_key     ? 'K' : '-',
            gw->pfec & PFEC_insn_fetch   ? 'I' : 'd',
            gw->pfec & PFEC_reserved_bit ? 'R' : '-',
            gw->pfec & PFEC_user_mode    ? 'U' : 's',
            gw->pfec & PFEC_write_access ? 'W' : 'r',
            gw->pfec & PFEC_page_present ? 'P' : '-'
        );
}

#endif /* _XEN_ASM_GUEST_PT_H */