/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014-2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/types.h>
#include <xen/kernel.h>
#include <xen/mm.h>
#include <xen/vmap.h>
#include <xen/smp.h>
#include <xen/stop_machine.h>
#include <xen/virtual_region.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/page.h>

/* Override macros from asm/page.h to make them work with mfn_t */
#undef virt_to_mfn
#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))

extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];

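/*
 * Range of alternative entries, [begin, end), to process in a single
 * patching pass.
 */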
struct alt_region {
    const struct alt_instr *begin;
    const struct alt_instr *end;
};

/*
 * Check if the target PC is within an alternative block.
 */
static bool branch_insn_requires_update(const struct alt_instr *alt,
                                        unsigned long pc)
{
    unsigned long replptr;

    if ( is_active_kernel_text(pc) )
        return true;

    replptr = (unsigned long)ALT_REPL_PTR(alt);
    if ( pc >= replptr && pc <= (replptr + alt->alt_len) )
        return false;

    /*
     * Branching into *another* alternate sequence is doomed, and
     * we're not even trying to fix it up.
     */
    BUG();
}

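/*
 * Fetch one instruction from the replacement sequence. If it is an
 * immediate branch pointing outside the replacement, rewrite its offset
 * so that it stays correct when run from the original location.
 */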
static u32 get_alt_insn(const struct alt_instr *alt,
                        const u32 *insnptr, const u32 *altinsnptr)
{
    u32 insn;

    insn = le32_to_cpu(*altinsnptr);

    if ( insn_is_branch_imm(insn) )
    {
        s32 offset = insn_get_branch_offset(insn);
        unsigned long target;

        target = (unsigned long)altinsnptr + offset;

        /*
         * If we're branching inside the alternate sequence,
         * do not rewrite the instruction, as it is already
         * correct. Otherwise, generate the new instruction.
         */
        if ( branch_insn_requires_update(alt, target) )
        {
            offset = target - (unsigned long)insnptr;
            insn = insn_set_branch_offset(insn, offset);
        }
    }

    return insn;
}

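/*
 * Default patching callback: copy nr_inst instructions from the
 * replacement sequence to the (possibly aliased) original location,
 * fixing up branch offsets along the way.
 */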
static void patch_alternative(const struct alt_instr *alt,
                              const uint32_t *origptr,
                              uint32_t *updptr, int nr_inst)
{
    const uint32_t *replptr;
    unsigned int i;

    replptr = ALT_REPL_PTR(alt);
    for ( i = 0; i < nr_inst; i++ )
    {
        uint32_t insn;

        insn = get_alt_insn(alt, origptr + i, replptr + i);
        updptr[i] = cpu_to_le32(insn);
    }
}

/*
 * The patched region should be read-write to allow __apply_alternatives
 * to replace the instructions when necessary.
 *
 * @update_offset: Offset between the patched region and the writable
 * region used for the update. 0 if the patched region is writable.
 */
static int __apply_alternatives(const struct alt_region *region,
                                paddr_t update_offset)
{
    const struct alt_instr *alt;
    const u32 *origptr;
    u32 *updptr;
    alternative_cb_t alt_cb;

    printk(XENLOG_INFO "alternatives: Patching with alt table %p -> %p\n",
           region->begin, region->end);

    for ( alt = region->begin; alt < region->end; alt++ )
    {
        int nr_inst;

        /* Use ARM_CB_PATCH as an unconditional patch */
        if ( alt->cpufeature < ARM_CB_PATCH &&
             !cpus_have_cap(alt->cpufeature) )
            continue;

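        /*
         * Feature-based entries carry a replacement sequence of the same
         * length as the original; ARM_CB_PATCH entries carry a callback
         * instead, so their replacement length must be zero.
         */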
        if ( alt->cpufeature == ARM_CB_PATCH )
            BUG_ON(alt->alt_len != 0);
        else
            BUG_ON(alt->alt_len != alt->orig_len);

        origptr = ALT_ORIG_PTR(alt);
        updptr = (void *)origptr + update_offset;

        nr_inst = alt->orig_len / ARCH_PATCH_INSN_SIZE;

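        /*
         * Feature-based entries use the default patcher; for ARM_CB_PATCH
         * entries the replacement pointer is really the callback to invoke.
         */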
        if ( alt->cpufeature < ARM_CB_PATCH )
            alt_cb = patch_alternative;
        else
            alt_cb = ALT_REPL_PTR(alt);

        alt_cb(alt, origptr, updptr, nr_inst);

        /* Ensure the new instructions have reached memory and nuke stale
         * data cache lines. */
        clean_and_invalidate_dcache_va_range(origptr,
                                             (sizeof (*origptr) * nr_inst));
    }

    /* Nuke the instruction cache */
    invalidate_icache();

    return 0;
}


/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __apply_alternatives_multi_stop(void *unused)
{
    static int patched = 0;

    /* We always have a CPU 0 at this point (__init) */
    if ( smp_processor_id() )
    {
        while ( !read_atomic(&patched) )
            cpu_relax();
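        /* Ensure this CPU fetches the newly patched instructions. */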
        isb();
    }
    else
    {
        int ret;
        struct alt_region region;
        mfn_t xen_mfn = virt_to_mfn(_start);
        paddr_t xen_size = _end - _start;
        unsigned int xen_order = get_order_from_bytes(xen_size);
        void *xenmap;

        BUG_ON(patched);

        /*
         * The text and inittext sections are read-only. So re-map Xen to
         * be able to patch the code.
         */
        xenmap = __vmap(&xen_mfn, 1U << xen_order, 1, 1, PAGE_HYPERVISOR,
                        VMAP_DEFAULT);
        /* Re-mapping Xen is not expected to fail during boot. */
        BUG_ON(!xenmap);

        region.begin = __alt_instructions;
        region.end = __alt_instructions_end;

        ret = __apply_alternatives(&region, xenmap - (void *)_start);
        /* The patching is not expected to fail during boot. */
        BUG_ON(ret != 0);

        vunmap(xenmap);

        /* Barriers provided by the cache flushing */
        write_atomic(&patched, 1);
    }

    return 0;
}

/*
 * This function should only be called during boot, before CPU0 jumps
 * into the idle_loop.
 */
void __init apply_alternatives_all(void)
{
    int ret;

    ASSERT(system_state != SYS_STATE_active);

    /* Better not try code patching on a live SMP system */
    ret = stop_machine_run(__apply_alternatives_multi_stop, NULL, NR_CPUS);

    /* stop_machine_run should never fail at this stage of the boot */
    BUG_ON(ret);
}

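/*
 * Apply a caller-provided alternatives table to a region that is still
 * writable (hence the zero update offset), e.g. code that is not live yet.
 */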
int apply_alternatives(const struct alt_instr *start, const struct alt_instr *end)
{
    const struct alt_region region = {
        .begin = start,
        .end = end,
    };

    return __apply_alternatives(&region, 0);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */