#include <asm/asm_defns.h>
#include <asm/current.h>
#include <asm/macros.h>
#include <asm/regs.h>
#include <asm/alternative.h>
#include <asm/smccc.h>
#include <public/xen.h>

#define IFLAGS_D_BIT    8
#define IFLAGS_A_BIT    4
#define IFLAGS_I_BIT    2
#define IFLAGS_F_BIT    1

/*
 * Short-hands to define the interrupt mask bits (D, A, I, F)
 *
 * _ means the state of that interrupt will not change
 * X means the state of interrupt X will change
 *
 * To be used with msr daif{set, clr} only.
 */
#define IFLAGS__AI_     IFLAGS_A_BIT | IFLAGS_I_BIT
#define IFLAGS__A__     IFLAGS_A_BIT
#define IFLAGS___I_     IFLAGS_I_BIT
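
/*
 * Note: the 4-bit immediate taken by msr daif{set, clr} maps bit 3 to D,
 * bit 2 to A, bit 1 to I and bit 0 to F, hence the 8/4/2/1 values above.
 * For example, "msr daifclr, #IFLAGS__AI_" (value 6) unmasks SError (A)
 * and IRQ (I) while leaving D and F untouched.
 */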

/*
 * Stack pushing/popping (register pairs only). Equivalent to store decrement
 * before, load increment after.
 */
        .macro  push, xreg1, xreg2
        stp     \xreg1, \xreg2, [sp, #-16]!
        .endm

        .macro  pop, xreg1, xreg2
        ldp     \xreg1, \xreg2, [sp], #16
        .endm
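
/*
 * For example, "push x0, x1" expands to "stp x0, x1, [sp, #-16]!" and so
 * lowers sp by 16 bytes; "pop x0, x1" undoes it with a post-indexed ldp.
 */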

/*
 * Save/restore guest mode specific state, outer stack frame
 */
        .macro  entry_guest, compat

        add     x21, sp, #UREGS_SPSR_el1
        mrs     x23, SPSR_el1
        str     x23, [x21]

        .if \compat == 0 /* AArch64 mode */

        add     x21, sp, #UREGS_SP_el0
        mrs     x22, SP_el0
        str     x22, [x21]

        add     x21, sp, #UREGS_SP_el1
        mrs     x22, SP_el1
        mrs     x23, ELR_el1
        stp     x22, x23, [x21]

        .else            /* AArch32 mode */

        add     x21, sp, #UREGS_SPSR_fiq
        mrs     x22, SPSR_fiq
        mrs     x23, SPSR_irq
        stp     w22, w23, [x21]

        add     x21, sp, #UREGS_SPSR_und
        mrs     x22, SPSR_und
        mrs     x23, SPSR_abt
        stp     w22, w23, [x21]

        .endif

        .endm
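
/*
 * Note: SPSR_fiq/irq/und/abt are the banked SPSRs of the AArch32 exception
 * modes and are only meaningful for an AArch32 (compat) guest, which is why
 * entry_guest/exit_guest only touch them in that case; an AArch64 guest
 * needs SP_el0, SP_el1 and ELR_el1 instead.
 */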

        .macro  exit_guest, compat

        add     x21, sp, #UREGS_SPSR_el1
        ldr     x23, [x21]
        msr     SPSR_el1, x23

        .if \compat == 0 /* AArch64 mode */

        add     x21, sp, #UREGS_SP_el0
        ldr     x22, [x21]
        msr     SP_el0, x22

        add     x21, sp, #UREGS_SP_el1
        ldp     x22, x23, [x21]
        msr     SP_el1, x22
        msr     ELR_el1, x23

        .else            /* AArch32 mode */

        add     x21, sp, #UREGS_SPSR_fiq
        ldp     w22, w23, [x21]
        msr     SPSR_fiq, x22
        msr     SPSR_irq, x23

        add     x21, sp, #UREGS_SPSR_und
        ldp     w22, w23, [x21]
        msr     SPSR_und, x22
        msr     SPSR_abt, x23

        .endif

        .endm

/*
 * Save state on entry to hypervisor, restore on exit
 *
 * save_x0_x1: Does the macro need to save x0/x1? Defaults to 1.
 * If 0, we rely on x0/x1 having been saved at the correct position
 * on the stack beforehand.
 */
        .macro  entry, hyp, compat, save_x0_x1=1
        sub     sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
        push    x28, x29
        push    x26, x27
        push    x24, x25
        push    x22, x23
        push    x20, x21
        push    x18, x19
        push    x16, x17
        push    x14, x15
        push    x12, x13
        push    x10, x11
        push    x8, x9
        push    x6, x7
        push    x4, x5
        push    x2, x3
        /*
         * The caller may already have saved x0/x1 on the stack at the
         * correct address and then clobbered them with other values.
         * Only save them if save_x0_x1 == 1.
         */
        .if \save_x0_x1 == 1
        push    x0, x1
        .else
        sub     sp, sp, #16
        .endif

        .if \hyp == 1        /* Hypervisor mode */

        add     x21, sp, #UREGS_kernel_sizeof

        .else                /* Guest mode */

        entry_guest \compat
        mov     x21, ~0 /* sp only valid for hyp frame XXX */

        .endif

        stp     lr, x21, [sp, #UREGS_LR]

        mrs     x21, elr_el2
        str     x21, [sp, #UREGS_PC]

        add     x21, sp, #UREGS_CPSR
        mrs     x22, spsr_el2
        mrs     x23, esr_el2
        stp     w22, w23, [x21]

        .endm
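
/*
 * Informally, once 'entry' has run, sp points at the base of a
 * cpu_user_regs frame: x0 lives at [sp, #UREGS_X0], the other general
 * purpose registers follow, then LR, the saved SP, the PC (taken from
 * elr_el2), the CPSR/ESR pair and, for guest frames, the EL1/AArch32
 * banked state filled in by entry_guest. The UREGS_* offsets are
 * generated at build time from struct cpu_user_regs.
 */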

        .macro  exit, hyp, compat

        .if \hyp == 0         /* Guest mode */

        bl      leave_hypervisor_to_guest /* Mask IRQ on return */

        exit_guest \compat

        .endif

        b       return_from_trap

        .endm

        /*
         * Generate a guest vector.
         *
         * iflags: Corresponds to the list of interrupts to unmask
         * save_x0_x1: See the description on top of the macro 'entry'
         */
        .macro  guest_vector compat, iflags, trap, save_x0_x1=1
        entry   hyp=0, compat=\compat, save_x0_x1=\save_x0_x1
        /*
         * We may have entered the hypervisor with pending SErrors
         * generated by the guest. If we are going to categorize them,
         * we need to make sure any outstanding SError is consumed.
         *
         * The function check_pending_guest_serror() will unmask the
         * SError exception temporarily. This is fine to do before the
         * enter_* helpers are called because we fully control the state
         * of the processor and only limited code will be executed (see
         * do_trap_hyp_serror()).
         *
         * When an SError has been consumed (x19 != 0), we may have
         * injected a virtual SError to the guest.
         *
         * In this case, the initial exception will be discarded (PC has
         * been adjusted by inject_vabt_exception()). However, we still
         * want to give an opportunity to reschedule the vCPU. So we
         * only want to skip the handling of the initial exception (i.e.
         * do_trap_*()).
         *
         * TODO: The SErrors path should be reworked to inject the vSError in
         * enter_hypervisor_* rather than do_trap_hyp_serror. This should make
         * the path easier to understand.
         */
        alternative_if_not SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
        bl      check_pending_guest_serror
        alternative_else_nop_endif

        bl      enter_hypervisor_from_guest_preirq
        msr     daifclr, \iflags
        bl      enter_hypervisor_from_guest

        alternative_if SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
        cbnz    x19, 1f
        alternative_else_nop_endif

        mov     x0, sp
        bl      do_trap_\trap
1:
        exit    hyp=0, compat=\compat
        .endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3

        .macro  invalid, reason
        mov     x0, sp
        mov     x1, #\reason
        b       do_bad_mode
        .endm

hyp_sync_invalid:
        entry   hyp=1
        invalid BAD_SYNC

hyp_irq_invalid:
        entry   hyp=1
        invalid BAD_IRQ

hyp_fiq_invalid:
        entry   hyp=1
        invalid BAD_FIQ

hyp_error_invalid:
        entry   hyp=1
        invalid BAD_ERROR

/*
 * SError received while running in the hypervisor mode.
 *
 * Technically, we could unmask the IRQ if it were unmasked in the
 * interrupted context. However, this requires checking the PSTATE. For
 * simplicity, as SErrors should be rare and potentially fatal,
 * all interrupts are kept masked.
 */
hyp_error:
        entry   hyp=1
        mov     x0, sp
        bl      do_trap_hyp_serror
        exit    hyp=1

/*
 * Synchronous exception received while running in the hypervisor mode.
 *
 * While the exception could be executed with all the interrupts (e.g.
 * IRQ) unmasked, the interrupted context may have purposefully masked
 * some of them. So we want to inherit the state from the interrupted
 * context.
 */
hyp_sync:
        entry   hyp=1

        /* Inherit interrupts */
        mrs     x0, SPSR_el2
        and     x0, x0, #(PSR_DBG_MASK | PSR_ABT_MASK | PSR_IRQ_MASK | PSR_FIQ_MASK)
        msr     daif, x0
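        /*
         * Note: this works because the D, A, I and F bits sit at the same
         * bit positions ([9:6]) in SPSR_el2 and in the DAIF register, so
         * the masked SPSR value can be written to daif unchanged to
         * reproduce the interrupted context's masks.
         */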

        mov     x0, sp
        bl      do_trap_hyp_sync
        exit    hyp=1

/*
 * IRQ received while running in the hypervisor mode.
 *
 * While the exception could be executed with all the interrupts but IRQ
 * unmasked, the interrupted context may have purposefully masked some
 * of them. So we want to inherit the state from the interrupted context
 * and keep IRQ masked.
 *
 * XXX: We may want to consider an ordering between interrupts (e.g. if
 * SErrors are masked, then IRQ should be masked too). However, this
 * would require some rework in some paths (e.g. panic, livepatch) to
 * ensure the ordering is enforced everywhere.
 */
hyp_irq:
        entry   hyp=1

        /* Inherit D, A, F interrupts and keep I masked */
        mrs     x0, SPSR_el2
        mov     x1, #(PSR_DBG_MASK | PSR_ABT_MASK | PSR_FIQ_MASK)
        and     x0, x0, x1
        orr     x0, x0, #PSR_IRQ_MASK
        msr     daif, x0

        mov     x0, sp
        bl      do_trap_irq
        exit    hyp=1

guest_sync:
        /*
         * Save x0, x1 in advance
         */
        stp     x0, x1, [sp, #-(UREGS_kernel_sizeof - UREGS_X0)]
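        /*
         * The negative offset places x0/x1 exactly where [sp, #UREGS_X0]
         * will be once the 'entry' macro (used by the slow path) has grown
         * the frame to UREGS_kernel_sizeof bytes, which is why the slow
         * path is entered with save_x0_x1=0.
         */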

        /*
         * x1 is used because x0 may contain the function identifier.
         * This avoids having to restore x0 from the stack.
         */
        mrs     x1, esr_el2
        lsr     x1, x1, #HSR_EC_SHIFT           /* x1 = ESR_EL2.EC */
        cmp     x1, #HSR_EC_HVC64
        b.ne    guest_sync_slowpath             /* Not an HVC, skip the fastpath. */

        mrs     x1, esr_el2
        and     x1, x1, #0xffff                 /* Check the immediate [15:0] */
        cbnz    x1, guest_sync_slowpath         /* should be 0 for HVC #0 */

        /*
         * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1.
         * The workaround has already been applied on the exception
         * entry from the guest, so let's quickly get back to the guest.
         *
         * Note that eor is used because the function identifier cannot
         * be encoded as an immediate for cmp.
         */
        eor     w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID
        cbnz    w0, check_wa2

        /*
         * Clobber both x0 and x1 to prevent leakage. Note that thanks
         * to the eor, x0 = 0.
         */
        mov     x1, xzr
        eret
        sb
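        /*
         * Note: the sb following eret is a speculation barrier (a macro
         * presumably provided by the included asm headers); it stops the
         * CPU from speculatively executing the code placed after the eret.
         */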

check_wa2:
        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
        eor     w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID)
        cbnz    w0, guest_sync_slowpath
#ifdef CONFIG_ARM_SSBD
alternative_cb arm_enable_wa2_handling
        b       wa2_end
alternative_cb_end
        /* Sanitize the argument */
        mov     x0, #-(UREGS_kernel_sizeof - UREGS_X1)  /* x0 := offset of guest's x1 on the stack */
        ldr     x1, [sp, x0]                            /* Load guest's x1 */
        cmp     w1, wzr
        cset    x1, ne

        /*
         * Update the guest flag. At this stage sp points just past the
         * guest_cpu_user_regs field in cpu_info.
         */
        adr_cpu_info x2
        ldr     x0, [x2, #CPUINFO_flags]
        bfi     x0, x1, #CPUINFO_WORKAROUND_2_FLAG_SHIFT, #1
        str     x0, [x2, #CPUINFO_flags]
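        /*
         * The flag records whether this vCPU asked for the SSBD
         * mitigation to be enabled (the guest's x1 collapsed to 0/1 by
         * the cset above), so the rest of Xen can honour the request
         * later; the SMC below only reaches the firmware when this
         * physical CPU actually requires the workaround.
         */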

        /* Check that we actually need to perform the call */
        ldr_this_cpu x0, ssbd_callback_required, x2
        cbz     x0, wa2_end

        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2_FID
        smc     #0

wa2_end:
        /* Don't leak data from the SMC call */
        mov     x1, xzr
        mov     x2, xzr
        mov     x3, xzr
#endif /* CONFIG_ARM_SSBD */
        mov     x0, xzr
        eret
        sb
guest_sync_slowpath:
        /*
         * x0/x1 may have been clobbered by the fast path above. They were
         * already saved on entry, so don't save them again.
         */
        guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_sync, save_x0_x1=0

guest_irq:
        guest_vector compat=0, iflags=IFLAGS__A__, trap=irq

guest_fiq_invalid:
        entry   hyp=0, compat=0
        invalid BAD_FIQ

guest_error:
        guest_vector compat=0, iflags=IFLAGS__AI_, trap=guest_serror

guest_sync_compat:
        guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_sync

guest_irq_compat:
        guest_vector compat=1, iflags=IFLAGS__A__, trap=irq

guest_fiq_invalid_compat:
        entry   hyp=0, compat=1
        invalid BAD_FIQ

guest_error_compat:
        guest_vector compat=1, iflags=IFLAGS__AI_, trap=guest_serror

ENTRY(return_to_new_vcpu32)
        exit    hyp=0, compat=1
ENTRY(return_to_new_vcpu64)
        exit    hyp=0, compat=0

return_from_trap:
        msr     daifset, #IFLAGS___I_ /* Mask interrupts */

        ldr     x21, [sp, #UREGS_PC]            /* load ELR */
        ldr     w22, [sp, #UREGS_CPSR]          /* load SPSR */

        pop     x0, x1
        pop     x2, x3
        pop     x4, x5
        pop     x6, x7
        pop     x8, x9

        msr     elr_el2, x21                    /* set up the return data */
        msr     spsr_el2, x22

        pop     x10, x11
        pop     x12, x13
        pop     x14, x15
        pop     x16, x17
        pop     x18, x19
        pop     x20, x21
        pop     x22, x23
        pop     x24, x25
        pop     x26, x27
        pop     x28, x29

        ldr     lr, [sp], #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */

        eret
        sb

/*
 * Consume any pending SError generated by the guest.
 *
 * @return:
 *  x19: Set to a non-zero value if a pending SError exception took place.
 *       Otherwise, it will be set to zero.
 *
 * Without the RAS extension, the only way to consume an SError is to
 * unmask it. So the function will unmask the SError exception for a small
 * window and then mask it again.
 */
check_pending_guest_serror:
        /*
         * Save elr_el2 so we can tell whether a pending SError exception
         * was taken while executing this code.
         */
        mrs     x0, elr_el2

        /* Synchronize against in-flight ld/st */
        dsb     sy

        /*
         * Unmask the PSTATE asynchronous abort bit. If there is a pending
         * SError, the EL2 error exception will be taken after PSTATE.A
         * is cleared.
         */
        msr     daifclr, #IFLAGS__A__

        /*
         * This is our single instruction exception window. A pending
         * SError is guaranteed to occur at the earliest when we unmask
         * it, and at the latest just after the ISB.
         *
         * If a pending SError occurs, the program will jump to the EL2
         * error exception handler, and elr_el2 will be set to
         * abort_guest_exit_start or abort_guest_exit_end.
         */
        .global abort_guest_exit_start
abort_guest_exit_start:

        isb

        .global abort_guest_exit_end
abort_guest_exit_end:
        /* Mask the PSTATE asynchronous abort bit, closing the checking window. */
        msr     daifset, #IFLAGS__A__

        /*
         * Compare elr_el2 with the saved value to check whether we are
         * returning from a valid exception caused by a pending SError.
         */
        mrs     x1, elr_el2
        cmp     x0, x1

        /*
         * If they are not equal, a pending SError exception was taken;
         * set x19 to a non-zero value.
         */
        cset    x19, ne

        ret
ENDPROC(check_pending_guest_serror)

/*
 * Exception vectors.
 */
        .macro  ventry  label
        .align  7
        b       \label
        .endm
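
/*
 * Each vector entry is padded to 0x80 bytes by the .align 7 above, matching
 * the architected vector spacing, and the table itself is 2KB aligned
 * (.align 11 below) so that its address is suitable for VBAR_EL2.
 */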

        .align  11
ENTRY(hyp_traps_vector)
        ventry  hyp_sync_invalid            /* Synchronous EL2t */
        ventry  hyp_irq_invalid             /* IRQ EL2t */
        ventry  hyp_fiq_invalid             /* FIQ EL2t */
        ventry  hyp_error_invalid           /* Error EL2t */

        ventry  hyp_sync                    /* Synchronous EL2h */
        ventry  hyp_irq                     /* IRQ EL2h */
        ventry  hyp_fiq_invalid             /* FIQ EL2h */
        ventry  hyp_error                   /* Error EL2h */

        ventry  guest_sync                  /* Synchronous 64-bit EL0/EL1 */
        ventry  guest_irq                   /* IRQ 64-bit EL0/EL1 */
        ventry  guest_fiq_invalid           /* FIQ 64-bit EL0/EL1 */
        ventry  guest_error                 /* Error 64-bit EL0/EL1 */

        ventry  guest_sync_compat           /* Synchronous 32-bit EL0/EL1 */
        ventry  guest_irq_compat            /* IRQ 32-bit EL0/EL1 */
        ventry  guest_fiq_invalid_compat    /* FIQ 32-bit EL0/EL1 */
        ventry  guest_error_compat          /* Error 32-bit EL0/EL1 */

/*
 * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
 *
 * x0 - prev
 * x1 - next
 *
 * Returns prev in x0
 */
ENTRY(__context_switch)
        add     x8, x0, #VCPU_arch_saved_context
        mov     x9, sp
        stp     x19, x20, [x8], #16         /* store callee-saved registers */
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]

        add     x8, x1, #VCPU_arch_saved_context
        ldp     x19, x20, [x8], #16         /* restore callee-saved registers */
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
        ret
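
/*
 * Only the callee-saved registers x19-x28, the frame pointer (x29), lr and
 * sp are switched above: __context_switch is reached through an ordinary
 * function call, so under the AAPCS64 calling convention the caller cannot
 * expect any other general purpose register to be preserved.
 */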

/*
 * Local variables:
 * mode: ASM
 * indent-tabs-mode: nil
 * End:
 */
