/*
 * arch/arm/mem_access.c
 *
 * Architecture-specific mem_access handling routines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/mem_access.h>
#include <xen/monitor.h>
#include <xen/sched.h>
#include <xen/vm_event.h>
#include <public/vm_event.h>
#include <asm/event.h>
#include <asm/guest_walk.h>

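/*
 * Look up the mem_access setting for gfn in d's host p2m. The caller must
 * hold the p2m lock. If gfn is INVALID_GFN, the domain's default access is
 * returned instead.
 */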
static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
                                xenmem_access_t *access)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    void *i;
    unsigned int index;

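    /* Map each p2m_access_t value to the corresponding XENMEM_access_t. */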
    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
#undef ACCESS
    };

    ASSERT(p2m_is_locked(p2m));
    *access = memaccess[p2m->default_access];

    /* If no setting was ever set, just return rwx. */
    if ( !p2m->mem_access_enabled )
    {
        *access = XENMEM_access_rwx;
        return 0;
    }

    /* If the request is for the default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    i = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));

    if ( !i )
    {
        /*
         * No setting was found in the Radix tree. Check if the
         * entry exists in the page-tables.
         */
        mfn_t mfn = p2m_get_entry(p2m, gfn, NULL, NULL, NULL, NULL);

        if ( mfn_eq(mfn, INVALID_MFN) )
            return -ESRCH;

        /* If the entry exists then it's rwx. */
        *access = XENMEM_access_rwx;
    }
    else
    {
        /* Setting was found in the Radix tree. */
        index = radix_tree_ptr_to_int(i);
        if ( index >= ARRAY_SIZE(memaccess) )
            return -ERANGE;

        *access = memaccess[index];
    }

    return 0;
}

/*
 * If mem_access is in use it might have been the reason why get_page_from_gva
 * failed to fetch the page, as it uses the MMU for the permission checking.
 * Only in these cases do we perform a software-based type check and fetch
 * the page if we indeed found a conflicting mem_access setting.
 */
struct page_info*
p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
                                  const struct vcpu *v)
{
    long rc;
    unsigned int perms;
    paddr_t ipa;
    gfn_t gfn;
    mfn_t mfn;
    xenmem_access_t xma;
    p2m_type_t t;
    struct page_info *page = NULL;
    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

    rc = gva_to_ipa(gva, &ipa, flag);

    /*
     * In case mem_access is active, hardware-based gva_to_ipa translation
     * might fail. Since gva_to_ipa uses the guest's translation tables, access
     * to which might be restricted by the active VTTBR, we perform a gva to
     * ipa translation in software.
     */
    if ( rc < 0 )
    {
        /*
         * The software gva to ipa translation can still fail, e.g., if the gva
         * is not mapped.
         */
        if ( !guest_walk_tables(v, gva, &ipa, &perms) )
            return NULL;

        /*
         * Check permissions that are assumed by the caller. For instance in
         * case of guestcopy, the caller assumes that the translated page can
         * be accessed with the requested permissions. If this is not the
         * case, we should fail.
         *
         * Note that we do not check for the GV2M_EXEC permission. Since the
         * hardware-based translation through gva_to_ipa does not test for
         * execute permissions either, this check can be left out.
         */
        if ( (flag & GV2M_WRITE) && !(perms & GV2M_WRITE) )
            return NULL;
    }

    gfn = gaddr_to_gfn(ipa);

    p2m_read_lock(p2m);

    /*
     * We do this first as this is faster in the default case when no
     * permission is set on the page.
     */
    rc = __p2m_get_mem_access(v->domain, gfn, &xma);
    if ( rc < 0 )
        goto err;

    /* Let's check if mem_access limited the access. */
    switch ( xma )
    {
    default:
    case XENMEM_access_rwx:
    case XENMEM_access_rw:
        /*
         * If mem_access contains no r/w permission restrictions at all, then
         * the original fault was correct.
         */
        goto err;
    case XENMEM_access_n2rwx:
    case XENMEM_access_n:
    case XENMEM_access_x:
        /*
         * If no r/w is permitted by mem_access, this was a fault caused by
         * mem_access.
         */
        break;
    case XENMEM_access_wx:
    case XENMEM_access_w:
        /*
         * If this was a read then it was because of mem_access, but if it was
         * a write then the original get_page_from_gva fault was correct.
         */
        if ( flag == GV2M_READ )
            break;
        else
            goto err;
    case XENMEM_access_rx2rw:
    case XENMEM_access_rx:
    case XENMEM_access_r:
        /*
         * If this was a write then it was because of mem_access, but if it was
         * a read then the original get_page_from_gva fault was correct.
         */
        if ( flag == GV2M_WRITE )
            break;
        else
            goto err;
    }

    /*
     * We had a mem_access permission limiting the access, but the page type
     * could also be limiting, so we need to check that as well.
     */
    mfn = p2m_get_entry(p2m, gfn, &t, NULL, NULL, NULL);
    if ( mfn_eq(mfn, INVALID_MFN) )
        goto err;

    if ( !mfn_valid(mfn) )
        goto err;

    /* Base type doesn't allow r/w. */
    if ( t != p2m_ram_rw )
        goto err;

    page = mfn_to_page(mfn);

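    /* The reference is only taken if the page belongs to v's domain. */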
    if ( unlikely(!get_page(page, v->domain)) )
        page = NULL;

err:
    p2m_read_unlock(p2m);

    return page;
}

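/*
 * Check whether the nested page fault at gpa was caused by a mem_access
 * restriction. Returns true if the fault was not mem_access related and
 * still needs to be handled by the caller; returns false once the fault
 * has been dealt with here (permissions relaxed and/or a vm_event sent).
 */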
bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
{
    int rc;
    bool violation;
    xenmem_access_t xma;
    vm_event_request_t *req;
    struct vcpu *v = current;
    struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);

    /* Mem_access is not in use. */
    if ( !p2m->mem_access_enabled )
        return true;

    rc = p2m_get_mem_access(v->domain, gaddr_to_gfn(gpa), &xma, 0);
    if ( rc )
        return true;

    /* Now check for mem_access violation. */
    switch ( xma )
    {
    case XENMEM_access_rwx:
        violation = false;
        break;
    case XENMEM_access_rw:
        violation = npfec.insn_fetch;
        break;
    case XENMEM_access_wx:
        violation = npfec.read_access;
        break;
    case XENMEM_access_rx:
    case XENMEM_access_rx2rw:
        violation = npfec.write_access;
        break;
    case XENMEM_access_x:
        violation = npfec.read_access || npfec.write_access;
        break;
    case XENMEM_access_w:
        violation = npfec.read_access || npfec.insn_fetch;
        break;
    case XENMEM_access_r:
        violation = npfec.write_access || npfec.insn_fetch;
        break;
    default:
    case XENMEM_access_n:
    case XENMEM_access_n2rwx:
        violation = true;
        break;
    }

    if ( !violation )
        return true;

    /* First, handle rx2rw and n2rwx conversion automatically. */
    if ( npfec.write_access && xma == XENMEM_access_rx2rw )
    {
        rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
                                0, ~0, XENMEM_access_rw, 0);
        return false;
    }
    else if ( xma == XENMEM_access_n2rwx )
    {
        rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
                                0, ~0, XENMEM_access_rwx, 0);
    }

    /* Otherwise, check if there is a vm_event monitor subscriber. */
    if ( !vm_event_check_ring(v->domain->vm_event_monitor) )
    {
        /* No listener */
        if ( p2m->access_required )
        {
            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
                                  "no vm_event listener VCPU %d, dom %d\n",
                                  v->vcpu_id, v->domain->domain_id);
            domain_crash(v->domain);
        }
        else
        {
            /* n2rwx was already handled. */
            if ( xma != XENMEM_access_n2rwx )
            {
                /*
                 * A listener is not required, so clear the access
                 * restrictions.
                 */
                rc = p2m_set_mem_access(v->domain, gaddr_to_gfn(gpa), 1,
                                        0, ~0, XENMEM_access_rwx, 0);
            }
        }

        /* No need to reinject */
        return false;
    }

    req = xzalloc(vm_event_request_t);
    if ( req )
    {
        req->reason = VM_EVENT_REASON_MEM_ACCESS;

        /* Send request to mem access subscriber */
        req->u.mem_access.gfn = gpa >> PAGE_SHIFT;
        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);
        if ( npfec.gla_valid )
        {
            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
            req->u.mem_access.gla = gla;

            if ( npfec.kind == npfec_kind_with_gla )
                req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
            else if ( npfec.kind == npfec_kind_in_gpt )
                req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
        }
        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;

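        /*
         * For n2rwx the permissions were already relaxed above, so the event
         * is delivered asynchronously and the vCPU is not paused.
         */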
        if ( monitor_traps(v, (xma != XENMEM_access_n2rwx), req) < 0 )
            domain_crash(v->domain);

        xfree(req);
    }

    return false;
}

/*
 * Set access type for a region of pfns.
 * If gfn == INVALID_GFN, sets the default access type.
 * Returns 0 on success, the continuation point (> 0) if preempted, or a
 * negative errno value on failure.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t a;
    unsigned int order;
    long rc = 0;

    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

    switch ( access )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        a = memaccess[access];
        break;
    case XENMEM_access_default:
        a = p2m->default_access;
        break;
    default:
        return -EINVAL;
    }

    /*
     * Flip mem_access_enabled to true when a permission is set, so as to
     * prevent allocating or inserting super-pages.
     */
    p2m->mem_access_enabled = true;

    /* If the request is to set the default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_write_lock(p2m);

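    /*
     * Walk the range in page-table-boundary steps. start counts the pfns
     * already processed, so a preempted hypercall can be continued from the
     * same point.
     */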
    for ( gfn = gfn_add(gfn, start); nr > start;
          gfn = gfn_next_boundary(gfn, order) )
    {
        p2m_type_t t;
        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order, NULL);

        if ( !mfn_eq(mfn, INVALID_MFN) )
        {
            order = 0;
            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
            if ( rc )
                break;
        }

        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);
        /* Check for continuation if it is not the last iteration. */
        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    p2m_write_unlock(p2m);

    return rc;
}

long p2m_set_mem_access_multi(struct domain *d,
                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
                              const XEN_GUEST_HANDLE(const_uint8) access_list,
                              uint32_t nr, uint32_t start, uint32_t mask,
                              unsigned int altp2m_idx)
{
    /* Not yet implemented on ARM. */
    return -EOPNOTSUPP;
}

int p2m_get_mem_access(struct domain *d, gfn_t gfn,
                       xenmem_access_t *access, unsigned int altp2m_idx)
{
    int ret;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    /* altp2m is not yet implemented on Arm. The altp2m_idx should be 0. */
    ASSERT(altp2m_idx == 0);

    p2m_read_lock(p2m);
    ret = __p2m_get_mem_access(d, gfn, access);
    p2m_read_unlock(p2m);

    return ret;
}

void arch_p2m_set_access_required(struct domain *d, bool access_required)
{
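    /* The caller is expected to have paused the domain. */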
    ASSERT(atomic_read(&d->pause_count));
    p2m_get_hostp2m(d)->access_required = access_required;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */