/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__

#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>

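/*
 * Classic ticket lock: 'next' is the ticket handed out to the next
 * acquirer, 'owner' is the ticket currently being served, and the lock
 * is free when the two halves are equal. The field order depends on
 * endianness so that 'next' always maps onto the upper 16 bits of
 * '__val', which is what the lock path increments with (1 << 16).
 */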
typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;

#define hyp_spin_lock_init(l)						\
do {									\
	*(l) = (hyp_spinlock_t){ .__val = 0 };				\
} while (0)

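/*
 * Acquire: atomically take a ticket by incrementing 'next' and, if the
 * lock wasn't free, spin in WFE until 'owner' reaches our ticket.
 * Sketched in C purely for illustration (the real sequence below is
 * hand-written asm so that the alternatives framework can patch
 * between the LL/SC and LSE forms at boot):
 *
 *	hyp_spinlock_t old;
 *
 *	old.__val = __atomic_fetch_add(&lock->__val, 1U << 16,
 *				       __ATOMIC_ACQUIRE);
 *	while (READ_ONCE(lock->owner) != old.next)
 *		wfe();
 */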
static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, #(1 << 16)\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, #(1 << 16)\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}

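/*
 * Release: increment 'owner' with release semantics (STLRH, or STADDLH
 * with LSE). The plain load/add/store in the LL/SC variant is fine
 * because only the lock holder ever writes 'owner'. The store also
 * clears the exclusive monitor armed by a waiter's LDAXRH, generating
 * the event that wakes it from WFE.
 */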
static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

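/*
 * A single READ_ONCE() snapshot of the lock word: any outstanding
 * ticket (owner != next) means the lock is held. This is only a
 * point-in-time check, which is all the debug assertion below needs.
 */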
static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
	hyp_spinlock_t lockval = READ_ONCE(*lock);

	return lockval.owner != lockval.next;
}

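/*
 * Illustrative usage of the API above and of hyp_assert_lock_held()
 * below ('example_lock' is a made-up name, not part of this header):
 *
 *	static hyp_spinlock_t example_lock;
 *
 *	hyp_spin_lock_init(&example_lock);
 *	...
 *	hyp_spin_lock(&example_lock);
 *	... critical section; callees may hyp_assert_lock_held(&example_lock) ...
 *	hyp_spin_unlock(&example_lock);
 */
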
#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
	/*
	 * The __pkvm_init() path accesses protected data structures without
	 * holding locks, as the other CPUs are guaranteed not to enter EL2
	 * concurrently at this point in time. Completion of EL2
	 * initialization on all CPUs is reflected in the pkvm static key,
	 * so wait until it is set before checking the lock state.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif

#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */