/*
 * Copyright (C) 2009, Mukesh Rathor, Oracle Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/sched.h>
#include <xen/compile.h>
#include <xen/mm.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <asm/debugger.h>
#include <asm/p2m.h>

typedef unsigned long dbgva_t;
typedef unsigned char dbgbyte_t;

/* Returns: mfn for the given (hvm guest) vaddr */
static mfn_t
dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)
{
    mfn_t mfn;
    uint32_t pfec = PFEC_page_present;
    p2m_type_t gfntype;

    *gfn = _gfn(paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec));
    if ( gfn_eq(*gfn, INVALID_GFN) )
        return INVALID_MFN;

    mfn = get_gfn(dp, gfn_x(*gfn), &gfntype);
    if ( p2m_is_readonly(gfntype) && toaddr )
        mfn = INVALID_MFN;

    if ( mfn_eq(mfn, INVALID_MFN) )
    {
        put_gfn(dp, gfn_x(*gfn));
        *gfn = INVALID_GFN;
    }

    return mfn;
}

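/*
 * Note for callers of dbg_hvm_va2mfn(): on success the gfn reference
 * taken by get_gfn() above is intentionally left held, so the mapping
 * stays valid while the caller accesses the page; the caller must
 * balance it with put_gfn() once done, as dbg_rw_guest_mem() below does.
 */
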
/*
 * pgd3val: the value of init_mm.pgd[3] in a PV guest. It is optional and
 *          assists debugging of modules in the guest. The kernel address
 *          space seems to always be mapped, but modules are not necessarily
 *          mapped in an arbitrary guest cr3 that we pick when pgd3val is 0.
 *          Modules are always addressable if we use the cr3 from init_mm.
 *          Since pgd3val is already a pgd value (cr3->pgd[3]), only a
 *          2-level lookup is needed.
 *
 * NOTE: the 4-level walk also works for 32-bit PAE guests because the cpu
 *       runs in IA-32e mode.
 *
 * Returns: mfn for the given (pv guest) vaddr
 */
static mfn_t
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    mfn_t mfn = maddr_to_mfn(cr3_pa(cr3));

    if ( pgd3val == 0 )
    {
        /* Full 4-level walk: start from the L4 table that cr3 points at. */
        l4t = map_domain_page(mfn);
        l4e = l4t[l4_table_offset(vaddr)];
        unmap_domain_page(l4t);
        mfn = l4e_get_mfn(l4e);
        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
            return INVALID_MFN;

        l3t = map_domain_page(mfn);
        l3e = l3t[l3_table_offset(vaddr)];
        unmap_domain_page(l3t);
        mfn = l3e_get_mfn(l3e);
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
             (l3e_get_flags(l3e) & _PAGE_PSE) )
            return INVALID_MFN;
    }

    /* mfn now names the L2 table, either from the walk above or from
     * pgd3val; continue with the remaining 2-level lookup. */
    l2t = map_domain_page(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    unmap_domain_page(l2t);
    mfn = l2e_get_mfn(l2e);
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
        return INVALID_MFN;

    l1t = map_domain_page(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    unmap_domain_page(l1t);
    mfn = l1e_get_mfn(l1e);

    return mfn_valid(mfn) ? mfn : INVALID_MFN;
}

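/*
 * Illustration (hypothetical value): if the guest-side debugger supplies
 * pgd3val = 0xabcde067, i.e. an L3 entry with its flag bits set in the low
 * bits, cr3_pa() masks the flags off to give 0xabcde000, and the walk
 * above starts directly at that L2 table instead of at vcpu[0]'s cr3.
 */
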
/* Returns: number of bytes remaining to be copied */
static unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
                                     void * __user buf, unsigned int len,
                                     bool toaddr, uint64_t pgd3)
{
    unsigned long addr = (unsigned long)gaddr;

    while ( len > 0 )
    {
        char *va;
        mfn_t mfn;
        gfn_t gfn = INVALID_GFN;
        unsigned long pagecnt;

        /* Copy at most up to the end of the current page. */
        pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);

        mfn = (is_hvm_domain(dp)
               ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
               : dbg_pv_va2mfn(addr, dp, pgd3));
        if ( mfn_eq(mfn, INVALID_MFN) )
            break;

        va = map_domain_page(mfn);
        va = va + (addr & (PAGE_SIZE - 1));

        if ( toaddr )
        {
            copy_from_user(va, buf, pagecnt); /* va = buf */
            paging_mark_dirty(dp, mfn);
        }
        else
        {
            copy_to_user(buf, va, pagecnt); /* buf = va */
        }

        unmap_domain_page(va);
        if ( !gfn_eq(gfn, INVALID_GFN) )
            put_gfn(dp, gfn_x(gfn));

        addr += pagecnt;
        buf += pagecnt;
        len -= pagecnt;
    }

    return len;
}

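/*
 * Chunking example (PAGE_SIZE = 4096): a 20-byte access starting at page
 * offset 0xff8 is split by the loop above into one 8-byte chunk reaching
 * the page boundary and one 12-byte chunk at the start of the next page,
 * each translated and mapped independently.
 */
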
/*
 * addr is the guest address.
 * buf is the debugger buffer.
 * If toaddr, then addr = buf (write to guest), else buf = addr (read from
 * guest).
 * pgd3: value of init_mm.pgd[3] in the guest. See above.
 * Returns: number of bytes remaining to be copied.
 */
unsigned int dbg_rw_mem(void * __user addr, void * __user buf,
                        unsigned int len, domid_t domid, bool toaddr,
                        uint64_t pgd3)
{
    struct domain *d = get_domain_by_id(domid);

    if ( d )
    {
        if ( !d->is_dying )
            len = dbg_rw_guest_mem(d, addr, buf, len, toaddr, pgd3);
        put_domain(d);
    }

    return len;
}

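/*
 * Usage sketch (illustrative only; the field names follow the gdbsx
 * memio domctl, but check the actual struct xen_domctl_gdbsx_memio
 * definition before relying on them):
 *
 *     iop->remain = dbg_rw_mem((void * __user)(unsigned long)iop->gva,
 *                              (void * __user)(unsigned long)iop->uva,
 *                              iop->len, domid, !!iop->gwr, iop->pgd3val);
 *
 * A nonzero result reports how many trailing bytes could not be copied,
 * e.g. because a page translation failed part-way through.
 */
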
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */