/******************************************************************************
 * arch/x86/mm/mem_access.c
 *
 * Parts of this code are Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/guest_access.h> /* copy_from_guest() */
#include <xen/mem_access.h>
#include <xen/nospec.h>
#include <xen/vm_event.h>
#include <xen/event.h>
#include <public/vm_event.h>
#include <asm/p2m.h>
#include <asm/altp2m.h>
#include <asm/hvm/emulate.h>
#include <asm/vm_event.h>

#include "mm-locks.h"

/*
 * Get access type for a gfn.
 * If gfn == INVALID_GFN, gets the default access type.
 */
static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
                               xenmem_access_t *access)
{
    p2m_type_t t;
    p2m_access_t a;
    mfn_t mfn;

    static const xenmem_access_t memaccess[] = {
#define ACCESS(ac) [p2m_access_##ac] = XENMEM_access_##ac
            ACCESS(n),
            ACCESS(r),
            ACCESS(w),
            ACCESS(rw),
            ACCESS(x),
            ACCESS(rx),
            ACCESS(wx),
            ACCESS(rwx),
            ACCESS(rx2rw),
            ACCESS(n2rwx),
#undef ACCESS
    };

    /* If request to get default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        *access = memaccess[p2m->default_access];
        return 0;
    }

    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);
    gfn_unlock(p2m, gfn, 0);

    if ( mfn_eq(mfn, INVALID_MFN) )
        return -ESRCH;

    if ( (unsigned int)a >= ARRAY_SIZE(memaccess) )
        return -ERANGE;

    *access = memaccess[a];
    return 0;
}

bool p2m_mem_access_emulate_check(struct vcpu *v,
                                  const vm_event_response_t *rsp)
{
    xenmem_access_t access;
    bool violation = true;
    const struct vm_event_mem_access *data = &rsp->u.mem_access;
    struct domain *d = v->domain;
    struct p2m_domain *p2m = NULL;

    if ( altp2m_active(d) )
        p2m = p2m_get_altp2m(v);
    if ( !p2m )
        p2m = p2m_get_hostp2m(d);

    if ( _p2m_get_mem_access(p2m, _gfn(data->gfn), &access) == 0 )
    {
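        /*
         * A fault only counts as a violation if the recorded access types
         * are still disallowed by the current access rights on the gfn.
         */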
        switch ( access )
        {
        case XENMEM_access_n:
        case XENMEM_access_n2rwx:
        default:
            violation = data->flags & MEM_ACCESS_RWX;
            break;

        case XENMEM_access_r:
            violation = data->flags & MEM_ACCESS_WX;
            break;

        case XENMEM_access_w:
            violation = data->flags & MEM_ACCESS_RX;
            break;

        case XENMEM_access_x:
            violation = data->flags & MEM_ACCESS_RW;
            break;

        case XENMEM_access_rx:
        case XENMEM_access_rx2rw:
            violation = data->flags & MEM_ACCESS_W;
            break;

        case XENMEM_access_wx:
            violation = data->flags & MEM_ACCESS_R;
            break;

        case XENMEM_access_rw:
            violation = data->flags & MEM_ACCESS_X;
            break;

        case XENMEM_access_rwx:
            violation = false;
            break;
        }
    }

    return violation;
}

#ifdef CONFIG_HVM
bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
                          struct npfec npfec,
                          vm_event_request_t **req_ptr)
{
    struct vcpu *v = current;
    gfn_t gfn = gaddr_to_gfn(gpa);
    struct domain *d = v->domain;
    struct p2m_domain *p2m = NULL;
    mfn_t mfn;
    p2m_type_t p2mt;
    p2m_access_t p2ma;
    vm_event_request_t *req;
    int rc;

    if ( altp2m_active(d) )
        p2m = p2m_get_altp2m(v);
    if ( !p2m )
        p2m = p2m_get_hostp2m(d);

    /* First, handle rx2rw conversion automatically.
     * These calls to p2m->set_entry() must succeed: we have the gfn
     * locked and just did a successful get_entry(). */
    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);

    if ( npfec.write_access && p2ma == p2m_access_rx2rw )
    {
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, -1);
        ASSERT(rc == 0);
        gfn_unlock(p2m, gfn, 0);
        return true;
    }
    else if ( p2ma == p2m_access_n2rwx )
    {
        ASSERT(npfec.write_access || npfec.read_access || npfec.insn_fetch);
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                            p2mt, p2m_access_rwx, -1);
        ASSERT(rc == 0);
    }
    gfn_unlock(p2m, gfn, 0);

    /* Otherwise, check if there is a memory event listener, and send the message along */
    if ( !vm_event_check_ring(d->vm_event_monitor) || !req_ptr )
    {
        /* No listener */
        if ( p2m->access_required )
        {
            gdprintk(XENLOG_INFO, "Memory access permissions failure, "
                                  "no vm_event listener VCPU %d, dom %d\n",
                                  v->vcpu_id, d->domain_id);
            domain_crash(v->domain);
            return false;
        }
        else
        {
            gfn_lock(p2m, gfn, 0);
            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);
            if ( p2ma != p2m_access_n2rwx )
            {
                /* A listener is not required, so clear the access
                 * restrictions.  This set must succeed: we have the
                 * gfn locked and just did a successful get_entry(). */
                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                                    p2mt, p2m_access_rwx, -1);
                ASSERT(rc == 0);
            }
            gfn_unlock(p2m, gfn, 0);
            return true;
        }
    }

    /*
     * Try to avoid sending a mem event. Suppress events caused by page-walks
     * by emulating but still checking mem_access violations.
     */
    if ( vm_event_check_ring(d->vm_event_monitor) &&
         d->arch.monitor.inguest_pagefault_disabled &&
         npfec.kind == npfec_kind_in_gpt )
    {
        v->arch.vm_event->send_event = true;
        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
        v->arch.vm_event->send_event = false;

        return true;
    }

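    /*
     * A listener is present: build a monitor request describing the faulting
     * access for it to consume.
     */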
    *req_ptr = NULL;
    req = xzalloc(vm_event_request_t);
    if ( req )
    {
        *req_ptr = req;

        req->reason = VM_EVENT_REASON_MEM_ACCESS;
        req->u.mem_access.gfn = gfn_x(gfn);
        req->u.mem_access.offset = gpa & ((1 << PAGE_SHIFT) - 1);

        if ( npfec.gla_valid )
        {
            req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
            req->u.mem_access.gla = gla;
        }

        switch ( npfec.kind )
        {
        case npfec_kind_with_gla:
            req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
            break;

        case npfec_kind_in_gpt:
            req->u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT;
            break;
        }

        req->u.mem_access.flags |= npfec.read_access    ? MEM_ACCESS_R : 0;
        req->u.mem_access.flags |= npfec.write_access   ? MEM_ACCESS_W : 0;
        req->u.mem_access.flags |= npfec.insn_fetch     ? MEM_ACCESS_X : 0;
    }

    /* Return whether vCPU pause is required (aka. sync event) */
    return (p2ma != p2m_access_n2rwx);
}

int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                              struct p2m_domain *ap2m, p2m_access_t a,
                              gfn_t gfn)
{
    mfn_t mfn;
    p2m_type_t t;
    p2m_access_t old_a;
    int rc;

    rc = altp2m_get_effective_entry(ap2m, gfn, &mfn, &t, &old_a,
                                    AP2MGET_prepopulate);
    if ( rc )
        return rc;

    /*
     * Inherit the old suppress #VE bit value if it is already set, or set it
     * to 1 otherwise
     */
    return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
}
#endif

static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                          struct p2m_domain *ap2m, p2m_access_t a,
                          gfn_t gfn)
{
    int rc = 0;

#ifdef CONFIG_HVM
    if ( ap2m )
    {
        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
        /* If the corresponding mfn is invalid we will want to just skip it */
        if ( rc == -ESRCH )
            rc = 0;
    }
    else
#else
    ASSERT(!ap2m);
#endif
    {
        p2m_access_t _a;
        p2m_type_t t;
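        /*
         * Host p2m: look up the current type and mfn so that only the
         * access rights are changed by the subsequent set_entry().
         */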
        mfn_t mfn = __get_gfn_type_access(p2m, gfn_x(gfn), &t, &_a,
                                          P2M_ALLOC, NULL, false);
        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
    }

    return rc;
}

bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
                                 xenmem_access_t xaccess,
                                 p2m_access_t *paccess)
{
    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

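    /*
     * Translate the XENMEM_access_* value, clamping the table index to
     * avoid speculative out-of-bounds reads.
     */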
    switch ( xaccess )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        xaccess = array_index_nospec(xaccess, ARRAY_SIZE(memaccess));
        *paccess = memaccess[xaccess];
        break;
    case XENMEM_access_default:
        *paccess = p2m->default_access;
        break;
    default:
        return false;
    }

    return true;
}

/*
 * Set access type for a region of gfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    p2m_access_t a;
    unsigned long gfn_l;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
#ifdef CONFIG_HVM
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }
#else
    ASSERT(!altp2m_idx);
#endif

    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        return -EINVAL;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

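    /*
     * Walk the gfn range, resuming at 'start' when this call continues a
     * previously preempted hypercall.
     */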
    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
    {
        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}

long p2m_set_mem_access_multi(struct domain *d,
                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
                              const XEN_GUEST_HANDLE(const_uint8) access_list,
                              uint32_t nr, uint32_t start, uint32_t mask,
                              unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
#ifdef CONFIG_HVM
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }
#else
    ASSERT(!altp2m_idx);
#endif

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

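    /*
     * Process one (gfn, access) pair per iteration, copied from the
     * guest-provided arrays.
     */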
    while ( start < nr )
    {
        p2m_access_t a;
        uint8_t access;
        uint64_t gfn_l;

        if ( copy_from_guest_offset(&gfn_l, pfn_list, start, 1) ||
             copy_from_guest_offset(&access, access_list, start, 1) )
        {
            rc = -EFAULT;
            break;
        }

        if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        {
            rc = -EINVAL;
            break;
        }

        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}

int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
                       unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

#ifdef CONFIG_HVM
    if ( !altp2m_active(d) )
    {
        if ( altp2m_idx )
            return -EINVAL;
    }
    else if ( altp2m_idx ) /* altp2m view 0 is treated as the hostp2m */
    {
        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
             mfn_x(INVALID_MFN) )
            return -EINVAL;

        p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
    }
#else
    ASSERT(!altp2m_idx);
#endif

    return _p2m_get_mem_access(p2m, gfn, access);
}

void arch_p2m_set_access_required(struct domain *d, bool access_required)
{
    ASSERT(atomic_read(&d->pause_count));

    p2m_get_hostp2m(d)->access_required = access_required;

#ifdef CONFIG_HVM
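    /* Propagate the setting to all active altp2m views as well. */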
    if ( altp2m_active(d) )
    {
        unsigned int i;

        for ( i = 0; i < MAX_ALTP2M; i++ )
        {
            struct p2m_domain *p2m = d->arch.altp2m_p2m[i];

            if ( p2m )
                p2m->access_required = access_required;
        }
    }
#endif
}

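/*
 * mem_access is only supported for HVM domains using VMX with HAP
 * (i.e. EPT) enabled.
 */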
bool p2m_mem_access_sanity_check(const struct domain *d)
{
    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */