/*
 * arch/arm/guest_atomics.c
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */
#include <xen/cpu.h>

#include <asm/guest_atomics.h>

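/*
 * Per-CPU upper bound on exclusive-access retries against guest
 * memory: written once per CPU at calibration time and read on the
 * hot path afterwards, hence READ_MOSTLY.
 */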
DEFINE_PER_CPU_READ_MOSTLY(unsigned int, guest_safe_atomic_max);

/*
 * Heuristic to find a safe upper limit for load-store exclusive
 * operations on memory shared with a guest.
 *
 * At the moment, we count how many iterations of a simple load-store
 * atomic loop complete in 1us.
 */
static void calibrate_safe_atomic(void)
{
    s_time_t deadline = NOW() + MICROSECS(1);
    unsigned int counter = 0;
    unsigned long mem = 0;

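    /*
     * Each loop iteration performs one exclusive load/increment/store
     * sequence on mem, so counter approximates how many such sequences
     * this CPU can complete in 1us.
     */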
    do
    {
        unsigned long res, tmp;

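        /*
         * %0 (res) receives the store-exclusive status flag, %1 (mem)
         * is the memory operand and %2 (tmp) holds the loaded value.
         */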
#ifdef CONFIG_ARM_32
        asm volatile (" ldrex   %2, %1\n"
                      " add     %2, %2, #1\n"
                      " strex   %0, %2, %1\n"
                      : "=&r" (res), "+Q" (mem), "=&r" (tmp));
#else
        asm volatile (" ldxr    %w2, %1\n"
                      " add     %w2, %w2, #1\n"
                      " stxr    %w0, %w2, %1\n"
                      : "=&r" (res), "+Q" (mem), "=&r" (tmp));
#endif
        counter++;
    } while (NOW() < deadline);

    this_cpu(guest_safe_atomic_max) = counter;

    printk(XENLOG_DEBUG
           "CPU%u: Guest atomics will try %u times before pausing the domain\n",
           smp_processor_id(), counter);
}
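
/*
 * How the limit is consumed downstream (a minimal sketch, assuming a
 * hypothetical try_exclusive_op() helper; the real helpers are
 * declared in asm/guest_atomics.h):
 *
 *     unsigned int i = this_cpu(guest_safe_atomic_max);
 *
 *     do {
 *         if ( try_exclusive_op(ptr) )
 *             return true;
 *     } while ( --i > 0 );
 *
 *     return false;      - caller pauses the domain and retries
 */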
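/*
 * Secondary CPUs are brought up after boot: recalibrate on each
 * CPU_STARTING event so that guest_safe_atomic_max reflects the speed
 * of the CPU coming online.
 */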
static int cpu_guest_safe_atomic_callback(struct notifier_block *nfb,
                                          unsigned long action,
                                          void *hcpu)
{
    if ( action == CPU_STARTING )
        calibrate_safe_atomic();

    return NOTIFY_DONE;
}

static struct notifier_block cpu_guest_safe_atomic_nfb = {
    .notifier_call = cpu_guest_safe_atomic_callback,
};

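/*
 * presmp_initcall: this runs on the boot CPU before the secondary CPUs
 * are started, so calibrate directly here for CPU0 and rely on the
 * notifier for everyone else.
 */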
static int __init guest_safe_atomic_init(void)
{
    register_cpu_notifier(&cpu_guest_safe_atomic_nfb);

    calibrate_safe_atomic();

    return 0;
}
presmp_initcall(guest_safe_atomic_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */