1 #include <xen/cpumask.h>
2 #include <xen/cpu.h>
3 #include <xen/event.h>
4 #include <xen/init.h>
5 #include <xen/sched.h>
6 #include <xen/stop_machine.h>
7 #include <xen/rcupdate.h>
8
/* Number of usable CPU ids; defaults to the build-time maximum NR_CPUS. */
unsigned int __read_mostly nr_cpu_ids = NR_CPUS;
#ifndef nr_cpumask_bits
/*
 * Number of valid bits in a cpumask, rounded up to a whole number of
 * unsigned longs (the underlying bitmap granularity).  Architectures may
 * instead supply nr_cpumask_bits as a macro, in which case this variable
 * is not defined.
 */
unsigned int __read_mostly nr_cpumask_bits
    = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
#endif
14
/* Constant mask with every possible CPU bit set (all NR_CPUS bits). */
const cpumask_t cpumask_all = {
    .bits[0 ... (BITS_TO_LONGS(NR_CPUS) - 1)] = ~0UL
};
18
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/*
 * cpu_bit_bitmap[0] is empty - so we can back into it: cpumask_of(cpu)
 * can point at &cpu_bit_bitmap[1 + cpu % BITS_PER_LONG] offset backwards
 * by cpu / BITS_PER_LONG words, landing the single set bit on 'cpu'.
 */
#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

/* Rows 1..BITS_PER_LONG hold masks with bit (row-1) of word 0 set. */
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0), MASK_DECLARE_8(8),
    MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    MASK_DECLARE_8(32), MASK_DECLARE_8(40),
    MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
42
43 static DEFINE_RWLOCK(cpu_add_remove_lock);
44
get_cpu_maps(void)45 bool get_cpu_maps(void)
46 {
47 return read_trylock(&cpu_add_remove_lock);
48 }
49
/* Release the reference taken by a successful get_cpu_maps(). */
void put_cpu_maps(void)
{
    read_unlock(&cpu_add_remove_lock);
}
54
/*
 * Enter a CPU hotplug operation, excluding get_cpu_maps() readers and
 * other hotplug operations until cpu_hotplug_done().
 */
void cpu_hotplug_begin(void)
{
    /*
     * Flush outstanding RCU callbacks first.  NOTE(review): doing this
     * before taking the write lock looks deliberate — rcu_barrier() may
     * need the cooperation of other CPUs, which could deadlock if they
     * were already blocked on this lock.  Confirm against rcupdate docs.
     */
    rcu_barrier();
    write_lock(&cpu_add_remove_lock);
}
60
/* Leave a CPU hotplug operation started by cpu_hotplug_begin(). */
void cpu_hotplug_done(void)
{
    write_unlock(&cpu_add_remove_lock);
}
65
/* Chain of callbacks run at each CPU hotplug state transition. */
static NOTIFIER_HEAD(cpu_chain);

/*
 * Register a CPU hotplug notifier.  The write lock excludes concurrent
 * hotplug operations and notifier invocations.  __init: registration is
 * only expected during boot.
 */
void __init register_cpu_notifier(struct notifier_block *nb)
{
    write_lock(&cpu_add_remove_lock);
    notifier_chain_register(&cpu_chain, nb);
    write_unlock(&cpu_add_remove_lock);
}
74
/*
 * Run the CPU notifier chain for @action on behalf of @cpu.
 *
 * @nb, if non-NULL, tracks the position reached in the chain so a later
 * rollback call can resume from the same point.  Returns 0 on success or
 * the -errno derived from the first failing notifier.  With @nofail set,
 * any failure is fatal (BUG).
 */
static int cpu_notifier_call_chain(unsigned int cpu, unsigned long action,
                                   struct notifier_block **nb, bool nofail)
{
    void *hcpu = (void *)(long)cpu;
    int notifier_rc, ret;

    notifier_rc = notifier_call_chain(&cpu_chain, action, hcpu, nb);
    if ( notifier_rc == NOTIFY_DONE )
        ret = 0;
    else
        ret = notifier_to_errno(notifier_rc);

    BUG_ON(ret && nofail);

    return ret;
}
86
/*
 * Executed on the CPU being offlined: run the (must-succeed) CPU_DYING
 * notifiers, then disable the CPU itself.  @unused is ignored.
 */
static void _take_cpu_down(void *unused)
{
    cpu_notifier_call_chain(smp_processor_id(), CPU_DYING, NULL, true);
    __cpu_disable();
}
92
/* stop_machine_run() adapter for _take_cpu_down(); always returns 0. */
static int take_cpu_down(void *arg)
{
    _take_cpu_down(arg);
    return 0;
}
98
/*
 * Take @cpu offline.  Returns 0 on success or a negative errno:
 * -EINVAL for an out-of-range CPU or the boot CPU (0), -EEXIST if the
 * CPU is already offline, or the error from a vetoing notifier /
 * stop_machine_run().
 */
int cpu_down(unsigned int cpu)
{
    int err;
    struct notifier_block *nb = NULL;

    /* Exclude concurrent hotplug operations and get_cpu_maps() users. */
    cpu_hotplug_begin();

    err = -EINVAL;
    if ( (cpu >= nr_cpu_ids) || (cpu == 0) )
        goto out;

    err = -EEXIST;
    if ( !cpu_online(cpu) )
        goto out;

    /* Notifiers may veto; 'nb' records how far along the chain we got. */
    err = cpu_notifier_call_chain(cpu, CPU_DOWN_PREPARE, &nb, false);
    if ( err )
        goto fail;

    /*
     * Before the system is fully up, or during resume, run the teardown
     * directly on the target CPU; otherwise use stop_machine to keep the
     * rest of the system quiescent while the CPU dies.
     */
    if ( system_state < SYS_STATE_active || system_state == SYS_STATE_resume )
        on_selected_cpus(cpumask_of(cpu), _take_cpu_down, NULL, true);
    else if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
        goto fail;

    __cpu_die(cpu);
    /* By now the CPU must no longer be marked online. */
    err = cpu_online(cpu);
    BUG_ON(err);

    cpu_notifier_call_chain(cpu, CPU_DEAD, NULL, true);

    send_global_virq(VIRQ_PCPU_STATE);
    cpu_hotplug_done();
    return 0;

 fail:
    /* Roll back: resume notifiers from where CPU_DOWN_PREPARE stopped. */
    cpu_notifier_call_chain(cpu, CPU_DOWN_FAILED, &nb, true);
 out:
    cpu_hotplug_done();
    return err;
}
139
/*
 * Bring @cpu online.  Returns 0 on success or a negative errno:
 * -EINVAL for an out-of-range or not-present CPU, -EEXIST if the CPU is
 * already online, or the error from a vetoing notifier / __cpu_up().
 */
int cpu_up(unsigned int cpu)
{
    int err;
    struct notifier_block *nb = NULL;

    /* Exclude concurrent hotplug operations and get_cpu_maps() users. */
    cpu_hotplug_begin();

    err = -EINVAL;
    if ( (cpu >= nr_cpu_ids) || !cpu_present(cpu) )
        goto out;

    err = -EEXIST;
    if ( cpu_online(cpu) )
        goto out;

    /* Notifiers may veto; 'nb' records how far along the chain we got. */
    err = cpu_notifier_call_chain(cpu, CPU_UP_PREPARE, &nb, false);
    if ( err )
        goto fail;

    /* Arch-specific bring-up of the CPU. */
    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    cpu_notifier_call_chain(cpu, CPU_ONLINE, NULL, true);

    send_global_virq(VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    /* Roll back: resume notifiers from where CPU_UP_PREPARE stopped. */
    cpu_notifier_call_chain(cpu, CPU_UP_CANCELED, &nb, true);
 out:
    cpu_hotplug_done();
    return err;
}
176
/*
 * Run the (must-succeed) CPU_STARTING notifiers for @cpu.  Presumably
 * called on the CPU itself early in bring-up — confirm against the
 * arch-specific startup code.
 */
void notify_cpu_starting(unsigned int cpu)
{
    cpu_notifier_call_chain(cpu, CPU_STARTING, NULL, true);
}
181
182 static cpumask_t frozen_cpus;
183
disable_nonboot_cpus(void)184 int disable_nonboot_cpus(void)
185 {
186 int cpu, error = 0;
187
188 BUG_ON(smp_processor_id() != 0);
189
190 cpumask_clear(&frozen_cpus);
191
192 printk("Disabling non-boot CPUs ...\n");
193
194 for_each_online_cpu ( cpu )
195 {
196 if ( cpu == 0 )
197 continue;
198
199 if ( (error = cpu_down(cpu)) )
200 {
201 printk("Error taking CPU%d down: %d\n", cpu, error);
202 BUG_ON(error == -EBUSY);
203 break;
204 }
205
206 __cpumask_set_cpu(cpu, &frozen_cpus);
207 }
208
209 BUG_ON(!error && (num_online_cpus() != 1));
210 return error;
211 }
212
enable_nonboot_cpus(void)213 void enable_nonboot_cpus(void)
214 {
215 int cpu, error;
216
217 printk("Enabling non-boot CPUs ...\n");
218
219 for_each_present_cpu ( cpu )
220 {
221 if ( park_offline_cpus ? cpu == smp_processor_id()
222 : !cpumask_test_cpu(cpu, &frozen_cpus) )
223 continue;
224 if ( (error = cpu_up(cpu)) )
225 {
226 printk("Error bringing CPU%d up: %d\n", cpu, error);
227 BUG_ON(error == -EBUSY);
228 }
229 else if ( !__cpumask_test_and_clear_cpu(cpu, &frozen_cpus) &&
230 (error = cpu_down(cpu)) )
231 printk("Error re-offlining CPU%d: %d\n", cpu, error);
232 }
233
234 for_each_cpu ( cpu, &frozen_cpus )
235 cpu_notifier_call_chain(cpu, CPU_RESUME_FAILED, NULL, true);
236
237 cpumask_clear(&frozen_cpus);
238 }
239