1 /******************************************************************************
2 * common/stop_machine.c
3 *
4 * Facilities to put whole machine in a safe 'stop' state
5 *
6 * Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation
7 * Copyright 2008 Kevin Tian <kevin.tian@intel.com>, Intel Corporation.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include <xen/init.h>
23 #include <xen/sched.h>
24 #include <xen/spinlock.h>
25 #include <xen/tasklet.h>
26 #include <xen/stop_machine.h>
27 #include <xen/errno.h>
28 #include <xen/smp.h>
29 #include <xen/cpu.h>
30 #include <asm/current.h>
31 #include <asm/processor.h>
32
/*
 * Lockstep phases of a stop-machine rendezvous, driven in order by the
 * initiating CPU in stop_machine_run().  Every other online CPU tracks
 * these transitions in stopmachine_action().
 */
enum stopmachine_state {
    /* Initial state: tasklets are being scheduled on the other CPUs. */
    STOPMACHINE_START,
    /* All rendezvoused CPUs have entered their wait loop. */
    STOPMACHINE_PREPARE,
    /* Each rendezvoused CPU disables local interrupts. */
    STOPMACHINE_DISABLE_IRQ,
    /* The requested function is invoked on the selected CPU(s). */
    STOPMACHINE_INVOKE,
    /* Rendezvous is over: CPUs leave the loop and re-enable interrupts. */
    STOPMACHINE_EXIT
};
40
/*
 * Shared rendezvous state.  Written by the initiator in stop_machine_run()
 * (under stopmachine_lock) and read by all participating CPUs.
 */
struct stopmachine_data {
    unsigned int nr_cpus;        /* Number of CPUs participating, excluding
                                    the initiator. */

    enum stopmachine_state state;/* Current phase, published by initiator. */
    atomic_t done;               /* CPUs that have acknowledged 'state'. */

    unsigned int fn_cpu;         /* CPU to run fn on, or NR_CPUS for all. */
    int fn_result;               /* First/last non-zero result of fn. */
    int (*fn)(void *);           /* Function to invoke at rendezvous. */
    void *fn_data;               /* Opaque argument passed to fn. */
};
52
/* Per-CPU tasklet used to pull each CPU into the rendezvous loop. */
static DEFINE_PER_CPU(struct tasklet, stopmachine_tasklet);
/* Shared rendezvous state; only valid while stopmachine_lock is held. */
static struct stopmachine_data stopmachine_data;
/* Serializes concurrent stop_machine_run() invocations. */
static DEFINE_SPINLOCK(stopmachine_lock);
56
/*
 * Advance the rendezvous to a new state.  The acknowledgement counter is
 * reset first; the write barrier ensures the reset is visible to other
 * CPUs before the new state is published, so no CPU can acknowledge the
 * new state against the old counter value.
 */
static void stopmachine_set_state(enum stopmachine_state state)
{
    atomic_set(&stopmachine_data.done, 0);
    smp_wmb();
    stopmachine_data.state = state;
}
63
/* Spin until every participating CPU has acknowledged the current state. */
static void stopmachine_wait_state(void)
{
    while ( atomic_read(&stopmachine_data.done) != stopmachine_data.nr_cpus )
        cpu_relax();
}
69
70 /*
71 * Sync all processors and call a function on one or all of them.
72 * As stop_machine_run() is using a tasklet for syncing the processors it is
73 * mandatory to be called only on an idle vcpu, as otherwise active core
74 * scheduling might hang.
75 */
stop_machine_run(int (* fn)(void *),void * data,unsigned int cpu)76 int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
77 {
78 unsigned int i, nr_cpus;
79 unsigned int this = smp_processor_id();
80 int ret;
81
82 BUG_ON(!local_irq_is_enabled());
83 BUG_ON(!is_idle_vcpu(current));
84
85 /* cpu_online_map must not change. */
86 if ( !get_cpu_maps() )
87 return -EBUSY;
88
89 nr_cpus = num_online_cpus();
90 if ( cpu_online(this) )
91 nr_cpus--;
92
93 /* Must not spin here as the holder will expect us to be descheduled. */
94 if ( !spin_trylock(&stopmachine_lock) )
95 {
96 put_cpu_maps();
97 return -EBUSY;
98 }
99
100 stopmachine_data.fn = fn;
101 stopmachine_data.fn_data = data;
102 stopmachine_data.nr_cpus = nr_cpus;
103 stopmachine_data.fn_cpu = cpu;
104 stopmachine_data.fn_result = 0;
105 atomic_set(&stopmachine_data.done, 0);
106 stopmachine_data.state = STOPMACHINE_START;
107
108 smp_wmb();
109
110 for_each_online_cpu ( i )
111 if ( i != this )
112 tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
113
114 stopmachine_set_state(STOPMACHINE_PREPARE);
115 stopmachine_wait_state();
116
117 local_irq_disable();
118 stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
119 stopmachine_wait_state();
120 spin_debug_disable();
121
122 stopmachine_set_state(STOPMACHINE_INVOKE);
123 if ( (cpu == this) || (cpu == NR_CPUS) )
124 {
125 ret = (*fn)(data);
126 if ( ret )
127 write_atomic(&stopmachine_data.fn_result, ret);
128 }
129 stopmachine_wait_state();
130 ret = stopmachine_data.fn_result;
131
132 spin_debug_enable();
133 stopmachine_set_state(STOPMACHINE_EXIT);
134 stopmachine_wait_state();
135 local_irq_enable();
136
137 spin_unlock(&stopmachine_lock);
138
139 put_cpu_maps();
140
141 return ret;
142 }
143
/*
 * Rendezvous loop run (as a tasklet) on every CPU other than the initiator.
 * Tracks the state transitions published by stop_machine_run(), performs
 * the per-state action, and acknowledges each transition by incrementing
 * the shared done counter.
 */
static void stopmachine_action(void *data)
{
    unsigned int cpu = (unsigned long)data;
    enum stopmachine_state state = STOPMACHINE_START;

    /* The tasklet must run on the CPU it was scheduled for. */
    BUG_ON(cpu != smp_processor_id());

    smp_mb();

    while ( state != STOPMACHINE_EXIT )
    {
        /* Wait for the initiator to publish the next state. */
        while ( stopmachine_data.state == state )
            cpu_relax();

        state = stopmachine_data.state;
        switch ( state )
        {
        case STOPMACHINE_DISABLE_IRQ:
            local_irq_disable();
            break;
        case STOPMACHINE_INVOKE:
            /* Invoke fn here if this CPU is the target, or all CPUs are. */
            if ( (stopmachine_data.fn_cpu == smp_processor_id()) ||
                 (stopmachine_data.fn_cpu == NR_CPUS) )
            {
                int ret = stopmachine_data.fn(stopmachine_data.fn_data);

                /* Only failures are recorded; success leaves fn_result. */
                if ( ret )
                    write_atomic(&stopmachine_data.fn_result, ret);
            }
            break;
        default:
            break;
        }

        /* Order this state's side effects before acknowledging it. */
        smp_mb();
        atomic_inc(&stopmachine_data.done);
    }

    /* IRQs were disabled in the DISABLE_IRQ phase; restore them. */
    local_irq_enable();
}
184
cpu_callback(struct notifier_block * nfb,unsigned long action,void * hcpu)185 static int cpu_callback(
186 struct notifier_block *nfb, unsigned long action, void *hcpu)
187 {
188 unsigned int cpu = (unsigned long)hcpu;
189
190 if ( action == CPU_UP_PREPARE )
191 tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
192 stopmachine_action, hcpu);
193
194 return NOTIFY_DONE;
195 }
196
/* Registered so hot-plugged CPUs get their tasklet initialized. */
static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};
200
cpu_stopmachine_init(void)201 static int __init cpu_stopmachine_init(void)
202 {
203 unsigned int cpu;
204 for_each_online_cpu ( cpu )
205 {
206 void *hcpu = (void *)(long)cpu;
207 cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
208 }
209 register_cpu_notifier(&cpu_nfb);
210 return 0;
211 }
212 __initcall(cpu_stopmachine_init);
213