/*
 * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
 */

#include <xen/init.h>
#include <xen/kernel.h>
#include <xen/mm.h>
#include <xen/rcupdate.h>
#include <xen/spinlock.h>
#include <xen/virtual_region.h>

static struct virtual_region core = {
    .list = LIST_HEAD_INIT(core.list),
    .start = _stext,
    .end = _etext,
};

/* Becomes irrelevant when __init sections are cleared. */
static struct virtual_region core_init __initdata = {
    .list = LIST_HEAD_INIT(core_init.list),
    .start = _sinittext,
    .end = _einittext,
};
/*
 * RCU locking. Additions are done either at startup (when there is only
 * one CPU) or when all CPUs are running without IRQs.
 *
 * Deletions are a bit tricky. They happen either during a Live Patch
 * operation (all CPUs running without IRQs) or during bootup (when the
 * init sections are cleared).
 *
 * Hence we use list_del_rcu (which includes a memory fence) and a
 * spinlock on deletion.
 *
 * All readers of virtual_region_list MUST use list_for_each_entry_rcu.
 */
static LIST_HEAD(virtual_region_list);
static DEFINE_SPINLOCK(virtual_region_lock);
static DEFINE_RCU_READ_LOCK(rcu_virtual_region_lock);

const struct virtual_region *find_text_region(unsigned long addr)
{
    const struct virtual_region *region;

    rcu_read_lock(&rcu_virtual_region_lock);
    list_for_each_entry_rcu( region, &virtual_region_list, list )
    {
        if ( (void *)addr >= region->start && (void *)addr < region->end )
        {
            rcu_read_unlock(&rcu_virtual_region_lock);
            return region;
        }
    }
    rcu_read_unlock(&rcu_virtual_region_lock);

    return NULL;
}
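
/*
 * Example (hypothetical, illustrative only): an exception handler could
 * map a faulting instruction pointer back to the region containing it:
 *
 *     const struct virtual_region *region = find_text_region(regs->rip);
 *
 *     if ( region )
 *         ... consult region->frame[] / region->ex ...
 *
 * 'regs->rip' is an assumed x86 register-frame field, used here purely
 * for illustration.
 */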

void register_virtual_region(struct virtual_region *r)
{
    ASSERT(!local_irq_is_enabled());

    list_add_tail_rcu(&r->list, &virtual_region_list);
}

static void remove_virtual_region(struct virtual_region *r)
{
    unsigned long flags;

    spin_lock_irqsave(&virtual_region_lock, flags);
    list_del_rcu(&r->list);
    spin_unlock_irqrestore(&virtual_region_lock, flags);
    /*
     * We do not need to invoke call_rcu.
     *
     * On deletion we hold the spinlock (to guard against somebody else
     * calling unregister_virtual_region) and use list_del_rcu, which
     * includes a memory barrier.
     *
     * That protects us from corrupting the list, as all readers use
     * list_for_each_entry_rcu, which is safe against concurrent
     * deletions.
     */
}

void unregister_virtual_region(struct virtual_region *r)
{
    /* Expected to be called from Live Patch, which runs with IRQs disabled. */
    ASSERT(!local_irq_is_enabled());

    remove_virtual_region(r);
}
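
/*
 * Example (hypothetical sketch): a Live Patch payload carrying its own
 * .text might pair these calls as follows, with IRQs disabled:
 *
 *     static struct virtual_region payload_region = {
 *         .list  = LIST_HEAD_INIT(payload_region.list),
 *         .start = payload_text_start,
 *         .end   = payload_text_end,
 *     };
 *
 *     register_virtual_region(&payload_region);
 *     ...
 *     unregister_virtual_region(&payload_region);
 *
 * payload_text_start/payload_text_end are placeholder names for the
 * payload's text bounds, not real identifiers in this tree.
 */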

#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_XEN_SHSTK)
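/*
 * Restore RX permissions on every registered text region. The working
 * assumption here is that applying a Live Patch payload can transiently
 * relax these mappings (see the Live Patch code for the exact sequence),
 * so they are all reset in one pass.
 */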
void reset_virtual_region_perms(void)
{
    const struct virtual_region *region;

    rcu_read_lock(&rcu_virtual_region_lock);
    list_for_each_entry_rcu( region, &virtual_region_list, list )
        modify_xen_mappings((unsigned long)region->start,
                            ROUNDUP((unsigned long)region->end, PAGE_SIZE),
                            PAGE_HYPERVISOR_RX);
    rcu_read_unlock(&rcu_virtual_region_lock);
}
#endif

void __init unregister_init_virtual_region(void)
{
    BUG_ON(system_state != SYS_STATE_active);

    remove_virtual_region(&core_init);
}

void __init setup_virtual_regions(const struct exception_table_entry *start,
                                  const struct exception_table_entry *end)
{
    size_t sz;
    unsigned int i;
    static const struct bug_frame *const __initconstrel bug_frames[] = {
        __start_bug_frames,
        __stop_bug_frames_0,
        __stop_bug_frames_1,
        __stop_bug_frames_2,
#ifdef CONFIG_X86
        __stop_bug_frames_3,
#endif
        NULL
    };

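    /*
     * Consecutive entries in bug_frames[] delimit the per-type bug frame
     * arrays: entry i - 1 is the start of type i - 1 and entry i is its
     * end, so the pointer difference is the number of frames of that type.
     */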
    for ( i = 1; bug_frames[i]; i++ )
    {
        const struct bug_frame *s;

        s = bug_frames[i - 1];
        sz = bug_frames[i] - s;

        core.frame[i - 1].n_bugs = sz;
        core.frame[i - 1].bugs = s;

        core_init.frame[i - 1].n_bugs = sz;
        core_init.frame[i - 1].bugs = s;
    }

    core_init.ex = core.ex = start;
    core_init.ex_end = core.ex_end = end;

    register_virtual_region(&core_init);
    register_virtual_region(&core);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */