/******************************************************************************
 * vm_event.h
 *
 * Common interface for memory event support.
 *
 * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __VM_EVENT_H__
#define __VM_EVENT_H__

#include <xen/sched.h>
#include <public/vm_event.h>

struct vm_event_domain
{
    spinlock_t lock;
    /* The ring has 64 entries, so 8-bit producer counts are sufficient */
    unsigned char foreign_producers;
    unsigned char target_producers;
    /* shared ring page */
    void *ring_page;
    struct page_info *ring_pg_struct;
    /* front-end ring */
    vm_event_front_ring_t front_ring;
    /* event channel port (vcpu0 only) */
    int xen_port;
    /* vm_event bit for vcpu->pause_flags */
    int pause_flag;
    /* list of vcpus waiting for room in the ring */
    struct waitqueue_head wq;
    /* the number of vCPUs blocked */
    unsigned int blocked;
    /* the last vcpu woken up */
    unsigned int last_vcpu_wake_up;
};

/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);

/* Returns whether a ring has been set up */
bool vm_event_check_ring(struct vm_event_domain *ved);

/* Returns 0 on success, -ENOSYS if there is no ring, or -EBUSY if there is
 * no available space and the caller is a foreign domain. If the guest itself
 * is the caller, -EBUSY is avoided by sleeping on a wait queue so that the
 * ring does not lose future events.
 *
 * However, the allow_sleep flag can be set to false in cases where it is ok
 * to lose future events, in which case -EBUSY can also be returned to guest
 * vcpus (handle with care!).
 *
 * In general, a claim_slot() call must be followed by either put_request()
 * or cancel_slot(), both of which are guaranteed to succeed (see the usage
 * sketch further below).
 */
int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
                          bool allow_sleep);

static inline int vm_event_claim_slot(struct domain *d,
                                      struct vm_event_domain *ved)
{
    return __vm_event_claim_slot(d, ved, true);
}

static inline int vm_event_claim_slot_nosleep(struct domain *d,
                                              struct vm_event_domain *ved)
{
    return __vm_event_claim_slot(d, ved, false);
}

void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);

void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
                          vm_event_request_t *req);
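
/*
 * Usage sketch (illustrative only, not part of this interface): callers
 * typically claim a slot, fill in a request, and then either post it with
 * vm_event_put_request() or release the claim with vm_event_cancel_slot().
 * The wrapper below is a hypothetical example of that pattern; its name and
 * the assumption that req has already been populated are not taken from
 * this header.
 *
 *   static int example_send_request(struct domain *d,
 *                                   struct vm_event_domain *ved,
 *                                   vm_event_request_t *req)
 *   {
 *       int rc = vm_event_claim_slot(d, ved); // may sleep for guest vcpus
 *
 *       if ( rc )
 *           return rc;                        // e.g. -ENOSYS or -EBUSY
 *
 *       vm_event_put_request(d, ved, req);    // guaranteed to succeed
 *       return 0;
 *   }
 */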

int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);

void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);

void vm_event_fill_regs(vm_event_request_t *req);
void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);

void vm_event_monitor_next_interrupt(struct vcpu *v);

#endif /* __VM_EVENT_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */