/*
 * x2APIC driver.
 *
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/init.h>
#include <xen/cpu.h>
#include <xen/cpumask.h>
#include <xen/param.h>
#include <asm/apicdef.h>
#include <asm/genapic.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <xen/smp.h>
#include <asm/mach-default/mach_mpparse.h>

static DEFINE_PER_CPU_READ_MOSTLY(u32, cpu_2_logical_apicid);
static DEFINE_PER_CPU_READ_MOSTLY(cpumask_t *, cluster_cpus);
static cpumask_t *cluster_cpus_spare;
static DEFINE_PER_CPU(cpumask_var_t, scratch_mask);

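/* Extract the cluster ID from a CPU's 32-bit logical APIC ID (high 16 bits). */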
static inline u32 x2apic_cluster(unsigned int cpu)
{
    return per_cpu(cpu_2_logical_apicid, cpu) >> 16;
}

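/*
 * Cache this CPU's logical APIC ID (from APIC_LDR) and link the CPU into
 * the cpumask shared by all CPUs of the same cluster, taking ownership of
 * the pre-allocated spare mask if this CPU is the cluster's first member.
 */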
static void init_apic_ldr_x2apic_cluster(void)
{
    unsigned int cpu, this_cpu = smp_processor_id();

    per_cpu(cpu_2_logical_apicid, this_cpu) = apic_read(APIC_LDR);

    if ( per_cpu(cluster_cpus, this_cpu) )
    {
        ASSERT(cpumask_test_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu)));
        return;
    }

    per_cpu(cluster_cpus, this_cpu) = cluster_cpus_spare;
    for_each_online_cpu ( cpu )
    {
        if ( this_cpu == cpu ||
             x2apic_cluster(this_cpu) != x2apic_cluster(cpu) )
            continue;
        per_cpu(cluster_cpus, this_cpu) = per_cpu(cluster_cpus, cpu);
        break;
    }
    if ( per_cpu(cluster_cpus, this_cpu) == cluster_cpus_spare )
        cluster_cpus_spare = NULL;

    cpumask_set_cpu(this_cpu, per_cpu(cluster_cpus, this_cpu));
}

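/* Nothing to do: cluster assignment is fixed by the hardware in x2APIC mode. */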
static void __init clustered_apic_check_x2apic(void)
{
}

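/*
 * Allocate vectors cluster-wide, so that a single logical-destination IPI
 * can reach every CPU sharing the vector.
 */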
static const cpumask_t *vector_allocation_cpumask_x2apic_cluster(int cpu)
{
    return per_cpu(cluster_cpus, cpu);
}

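/*
 * Build a logical destination covering every CPU in @cpumask that shares a
 * cluster with the first CPU found in it.  CPUs belonging to other clusters
 * cannot be encoded in the same destination and are left out.
 */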
static unsigned int cpu_mask_to_apicid_x2apic_cluster(const cpumask_t *cpumask)
{
    unsigned int cpu = cpumask_any(cpumask);
    unsigned int dest = per_cpu(cpu_2_logical_apicid, cpu);
    const cpumask_t *cluster_cpus = per_cpu(cluster_cpus, cpu);

    for_each_cpu ( cpu, cluster_cpus )
        if ( cpumask_test_cpu(cpu, cpumask) )
            dest |= per_cpu(cpu_2_logical_apicid, cpu);

    return dest;
}

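/* Send an IPI to ourselves via the dedicated x2APIC SELF_IPI register. */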
static void send_IPI_self_x2apic(uint8_t vector)
{
    apic_wrmsr(APIC_SELF_IPI, vector);
}

static void send_IPI_mask_x2apic_phys(const cpumask_t *cpumask, int vector)
{
    unsigned int cpu;
    unsigned long flags;
    uint64_t msr_content;

    /*
     * Ensure that any synchronisation data written in program order by this
     * CPU is seen by notified remote CPUs. The WRMSR contained within
     * apic_wrmsr() can otherwise be executed early.
     *
     * The reason smp_mb() is sufficient here is subtle: the register arguments
     * to WRMSR must depend on a memory read executed after the barrier. This
     * is guaranteed by cpu_physical_id(), which reads from a global array (and
     * so cannot be hoisted above the barrier even by a clever compiler).
     */
    smp_mb();

    local_irq_save(flags);

    for_each_cpu ( cpu, cpumask )
    {
        if ( !cpu_online(cpu) || (cpu == smp_processor_id()) )
            continue;
        msr_content = cpu_physical_id(cpu);
        msr_content = (msr_content << 32) | APIC_DM_FIXED |
                      APIC_DEST_PHYSICAL | vector;
        apic_wrmsr(APIC_ICR, msr_content);
    }

    local_irq_restore(flags);
}

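/*
 * Send an IPI to every online CPU in @cpumask other than the sender, using
 * one ICR write per cluster: the logical APIC IDs of all targeted CPUs in a
 * cluster are OR'ed together, so the whole cluster is notified with a
 * single WRMSR.
 */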
static void send_IPI_mask_x2apic_cluster(const cpumask_t *cpumask, int vector)
{
    unsigned int cpu = smp_processor_id();
    cpumask_t *ipimask = per_cpu(scratch_mask, cpu);
    const cpumask_t *cluster_cpus;
    unsigned long flags;

    smp_mb(); /* See above for an explanation. */

    local_irq_save(flags);

    cpumask_andnot(ipimask, &cpu_online_map, cpumask_of(cpu));

    for ( cpumask_and(ipimask, cpumask, ipimask); !cpumask_empty(ipimask);
          cpumask_andnot(ipimask, ipimask, cluster_cpus) )
    {
        uint64_t msr_content = 0;

        cluster_cpus = per_cpu(cluster_cpus, cpumask_first(ipimask));
        for_each_cpu ( cpu, cluster_cpus )
        {
            if ( !cpumask_test_cpu(cpu, ipimask) )
                continue;
            msr_content |= per_cpu(cpu_2_logical_apicid, cpu);
        }

        BUG_ON(!(msr_content & 0xffff));
        msr_content = (msr_content << 32) | APIC_DM_FIXED |
                      APIC_DEST_LOGICAL | vector;
        apic_wrmsr(APIC_ICR, msr_content);
    }

    local_irq_restore(flags);
}

static const struct genapic __initconstrel apic_x2apic_phys = {
    APIC_INIT("x2apic_phys", NULL),
    .int_delivery_mode = dest_Fixed,
    .int_dest_mode = 0 /* physical delivery */,
    .init_apic_ldr = init_apic_ldr_phys,
    .clustered_apic_check = clustered_apic_check_x2apic,
    .vector_allocation_cpumask = vector_allocation_cpumask_phys,
    .cpu_mask_to_apicid = cpu_mask_to_apicid_phys,
    .send_IPI_mask = send_IPI_mask_x2apic_phys,
    .send_IPI_self = send_IPI_self_x2apic
};

static const struct genapic __initconstrel apic_x2apic_cluster = {
    APIC_INIT("x2apic_cluster", NULL),
    .int_delivery_mode = dest_LowestPrio,
    .int_dest_mode = 1 /* logical delivery */,
    .init_apic_ldr = init_apic_ldr_x2apic_cluster,
    .clustered_apic_check = clustered_apic_check_x2apic,
    .vector_allocation_cpumask = vector_allocation_cpumask_x2apic_cluster,
    .cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic_cluster,
    .send_IPI_mask = send_IPI_mask_x2apic_cluster,
    .send_IPI_self = send_IPI_self_x2apic
};

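/*
 * CPU hotplug notifier: pre-allocate the per-CPU cluster bookkeeping on
 * CPU_UP_PREPARE, and release it again once the CPU is truly gone (which
 * of CPU_UP_CANCELED / CPU_DEAD / CPU_REMOVE that is depends on whether
 * offline CPUs are being parked).
 */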
static int update_clusterinfo(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int err = 0;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        per_cpu(cpu_2_logical_apicid, cpu) = BAD_APICID;
        if ( !cluster_cpus_spare )
            cluster_cpus_spare = xzalloc(cpumask_t);
        if ( !cluster_cpus_spare ||
             !cond_alloc_cpumask_var(&per_cpu(scratch_mask, cpu)) )
            err = -ENOMEM;
        break;
    case CPU_UP_CANCELED:
    case CPU_DEAD:
    case CPU_REMOVE:
        if ( park_offline_cpus == (action != CPU_REMOVE) )
            break;
        if ( per_cpu(cluster_cpus, cpu) )
        {
            cpumask_clear_cpu(cpu, per_cpu(cluster_cpus, cpu));
            if ( cpumask_empty(per_cpu(cluster_cpus, cpu)) )
                XFREE(per_cpu(cluster_cpus, cpu));
        }
        FREE_CPUMASK_VAR(per_cpu(scratch_mask, cpu));
        break;
    }

    return !err ? NOTIFY_DONE : notifier_from_errno(err);
}

static struct notifier_block x2apic_cpu_nfb = {
    .notifier_call = update_clusterinfo
};

static s8 __initdata x2apic_phys = -1; /* By default we use logical cluster mode. */
boolean_param("x2apic_phys", x2apic_phys);

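/*
 * Select the x2APIC driver to use: physical mode if requested on the
 * command line or required by the platform (no full interrupt remapping,
 * or the FADT demanding physical mode), else cluster mode.
 */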
const struct genapic *__init apic_x2apic_probe(void)
{
    if ( x2apic_phys < 0 )
    {
        /*
         * Force physical mode if there's no interrupt remapping support: The
         * ID in clustered mode requires a 32 bit destination field due to
         * the usage of the high 16 bits to hold the cluster ID.
         */
        x2apic_phys = !iommu_intremap ||
                      (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL);
    }
    else if ( !x2apic_phys )
        switch ( iommu_intremap )
        {
        case iommu_intremap_off:
        case iommu_intremap_restricted:
            printk("WARNING: x2APIC cluster mode is not supported %s interrupt remapping -"
                   " forcing phys mode\n",
                   iommu_intremap == iommu_intremap_off ? "without"
                                                        : "with restricted");
            x2apic_phys = true;
            break;

        case iommu_intremap_full:
            break;
        }

    if ( x2apic_phys )
        return &apic_x2apic_phys;

    if ( !this_cpu(cluster_cpus) )
    {
        update_clusterinfo(NULL, CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        init_apic_ldr_x2apic_cluster();
        register_cpu_notifier(&x2apic_cpu_nfb);
    }

    return &apic_x2apic_cluster;
}

void __init check_x2apic_preenabled(void)
{
    u32 lo, hi;

    if ( !cpu_has_x2apic )
        return;

    /* Check whether x2apic mode was already enabled by the BIOS. */
    rdmsr(MSR_APIC_BASE, lo, hi);
    if ( lo & APIC_BASE_EXTD )
    {
        printk("x2APIC mode is already enabled by BIOS.\n");
        x2apic_enabled = 1;
        genapic = *apic_x2apic_probe();
    }
}